Merge tag 'mmc-v5.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
author  Linus Torvalds <torvalds@linux-foundation.org>
Tue, 7 Jun 2022 21:24:30 +0000 (14:24 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
Tue, 7 Jun 2022 21:24:30 +0000 (14:24 -0700)
Pull MMC fixes from Ulf Hansson:
 "MMC core:

   - Fix CQE recovery reset success for block I/O

  MMC host:

   - sdhci-pci-gli: Fix support for runtime resume

   - Fix unevaluatedProperties warnings in DT examples"

* tag 'mmc-v5.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc:
  dt-bindings: mmc: Fix unevaluatedProperties warnings in examples
  mmc: block: Fix CQE recovery reset success
  mmc: sdhci-pci-gli: Fix GL9763E runtime PM when the system resumes from suspend
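
A note on the dt-bindings entry above: unevaluatedProperties warnings are emitted by the dt-schema checker (make dt_binding_check) when a node in a binding's example uses properties that neither the binding itself nor any schema it references declares. The fragment below is only an illustrative sketch of that pattern, not the actual patch; the "vendor,foo" compatible and its properties are hypothetical.

      # Binding fragment: any property used in the example but not declared
      # here (or in a schema pulled in via allOf/$ref) is reported as an
      # unevaluatedProperties warning by make dt_binding_check.
      properties:
        compatible:
          const: vendor,foo
        reg:
          maxItems: 1

      unevaluatedProperties: false

      examples:
        - |
          mmc@1000 {
              compatible = "vendor,foo";
              reg = <0x1000 0x100>;
              /* an undocumented property such as "vendor,bar" here would
                 be flagged as unevaluated by the checker */
          };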

2780 files changed:
.mailmap
Documentation/ABI/stable/sysfs-bus-mhi
Documentation/ABI/testing/configfs-usb-gadget-uvc
Documentation/ABI/testing/debugfs-driver-habanalabs
Documentation/ABI/testing/sysfs-bus-thunderbolt
Documentation/ABI/testing/sysfs-class-firmware [new file with mode: 0644]
Documentation/ABI/testing/sysfs-devices-physical_location [new file with mode: 0644]
Documentation/accounting/delay-accounting.rst
Documentation/admin-guide/blockdev/index.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/nfs/nfs-client.rst
Documentation/admin-guide/pm/intel-speed-select.rst
Documentation/arch.rst
Documentation/arm/marvell.rst
Documentation/conf.py
Documentation/devicetree/bindings/arm/hpe,gxp.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/arm/intel,socfpga.yaml
Documentation/devicetree/bindings/arm/tegra/nvidia,tegra-ccplex-cluster.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt
Documentation/devicetree/bindings/extcon/siliconmitus,sm5502-muic.yaml
Documentation/devicetree/bindings/gpio/gpio-altera.txt
Documentation/devicetree/bindings/i2c/renesas,rcar-i2c.yaml
Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt [deleted file]
Documentation/devicetree/bindings/i3c/cdns,i3c-master.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt [deleted file]
Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml
Documentation/devicetree/bindings/iio/adc/sprd,sc2720-adc.yaml
Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml
Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
Documentation/devicetree/bindings/iio/dac/lltc,ltc2632.yaml
Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml
Documentation/devicetree/bindings/iio/light/stk33xx.yaml
Documentation/devicetree/bindings/iio/potentiometer/microchip,mcp4131.yaml
Documentation/devicetree/bindings/iio/st,st-sensors.yaml
Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
Documentation/devicetree/bindings/iommu/arm,smmu.yaml
Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml
Documentation/devicetree/bindings/leds/kinetic,ktd2692.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
Documentation/devicetree/bindings/leds/leds-ktd2692.txt [deleted file]
Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/leds/regulator-led.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/memory-controllers/ingenic,nemc-peripherals.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml
Documentation/devicetree/bindings/mfd/da9063.txt
Documentation/devicetree/bindings/mtd/ingenic,nand.yaml
Documentation/devicetree/bindings/mtd/spi-nand.yaml
Documentation/devicetree/bindings/net/adi,adin.yaml
Documentation/devicetree/bindings/net/cdns,macb.yaml
Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
Documentation/devicetree/bindings/net/dsa/realtek.yaml
Documentation/devicetree/bindings/net/mediatek,net.yaml
Documentation/devicetree/bindings/net/mediatek-dwmac.yaml
Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
Documentation/devicetree/bindings/nvmem/apple,efuses.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml
Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
Documentation/devicetree/bindings/pci/apple,pcie.yaml
Documentation/devicetree/bindings/pci/socionext,uniphier-pcie.yaml
Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
Documentation/devicetree/bindings/phy/marvell,armada-3700-utmi-phy.yaml
Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.txt [deleted file]
Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
Documentation/devicetree/bindings/phy/socionext,uniphier-usb2-phy.yaml
Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/pwm/atmel-pwm.txt [deleted file]
Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.yaml
Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt [deleted file]
Documentation/devicetree/bindings/pwm/sunplus,sp7021-pwm.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml
Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml
Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
Documentation/devicetree/bindings/reset/qcom,aoss-reset.yaml
Documentation/devicetree/bindings/reset/qcom,pdc-global.yaml
Documentation/devicetree/bindings/riscv/microchip.yaml
Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt
Documentation/devicetree/bindings/rtc/renesas,rzn1-rtc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
Documentation/devicetree/bindings/serial/qcom,serial-geni-qcom.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/serial/renesas,em-uart.yaml
Documentation/devicetree/bindings/serial/renesas,hscif.yaml
Documentation/devicetree/bindings/serial/renesas,scif.yaml
Documentation/devicetree/bindings/serial/rs485.yaml
Documentation/devicetree/bindings/serial/socionext,uniphier-uart.yaml
Documentation/devicetree/bindings/soc/imx/fsl,imx8mp-media-blk-ctrl.yaml
Documentation/devicetree/bindings/soc/intel/intel,hps-copy-engine.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml
Documentation/devicetree/bindings/soundwire/qcom,sdw.txt
Documentation/devicetree/bindings/timer/hpe,gxp-timer.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
Documentation/devicetree/bindings/timer/xlnx,xps-timer.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/timestamp/hardware-timestamps-common.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/timestamp/hte-consumer.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/timestamp/nvidia,tegra194-hte.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/usb/am33xx-usb.txt
Documentation/devicetree/bindings/usb/da8xx-usb.txt
Documentation/devicetree/bindings/usb/dwc2.yaml
Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/usb/generic-ehci.yaml
Documentation/devicetree/bindings/usb/generic-ohci.yaml
Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml
Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
Documentation/devicetree/bindings/usb/samsung,exynos-usb2.yaml
Documentation/devicetree/bindings/usb/snps,dwc3.yaml
Documentation/devicetree/bindings/usb/ti,am62-usb.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/vendor-prefixes.yaml
Documentation/devicetree/bindings/watchdog/da9062-wdt.txt
Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.txt [deleted file]
Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
Documentation/devicetree/bindings/watchdog/socionext,uniphier-wdt.yaml
Documentation/devicetree/bindings/watchdog/sunplus,sp7021-wdt.yaml [new file with mode: 0644]
Documentation/driver-api/firmware/fw_upload.rst [new file with mode: 0644]
Documentation/driver-api/firmware/index.rst
Documentation/driver-api/index.rst
Documentation/driver-api/pwm.rst
Documentation/driver-api/serial/driver.rst
Documentation/driver-api/serial/index.rst
Documentation/driver-api/serial/n_gsm.rst [deleted file]
Documentation/driver-api/tty/index.rst [moved from Documentation/tty/index.rst with 81% similarity]
Documentation/driver-api/tty/moxa-smartio.rst [moved from Documentation/driver-api/serial/moxa-smartio.rst with 100% similarity]
Documentation/driver-api/tty/n_gsm.rst [new file with mode: 0644]
Documentation/driver-api/tty/n_tty.rst [moved from Documentation/tty/n_tty.rst with 100% similarity]
Documentation/driver-api/tty/tty_buffer.rst [moved from Documentation/tty/tty_buffer.rst with 100% similarity]
Documentation/driver-api/tty/tty_driver.rst [moved from Documentation/tty/tty_driver.rst with 100% similarity]
Documentation/driver-api/tty/tty_internals.rst [moved from Documentation/tty/tty_internals.rst with 100% similarity]
Documentation/driver-api/tty/tty_ldisc.rst [moved from Documentation/tty/tty_ldisc.rst with 100% similarity]
Documentation/driver-api/tty/tty_port.rst [moved from Documentation/tty/tty_port.rst with 100% similarity]
Documentation/driver-api/tty/tty_struct.rst [moved from Documentation/tty/tty_struct.rst with 100% similarity]
Documentation/driver-api/vfio-mediated-device.rst
Documentation/filesystems/erofs.rst
Documentation/filesystems/nfs/client-identifier.rst [new file with mode: 0644]
Documentation/filesystems/nfs/index.rst
Documentation/firmware-guide/acpi/enumeration.rst
Documentation/fpga/dfl.rst
Documentation/hte/hte.rst [new file with mode: 0644]
Documentation/hte/index.rst [new file with mode: 0644]
Documentation/hte/tegra194-hte.rst [new file with mode: 0644]
Documentation/i2c/writing-clients.rst
Documentation/images/COPYING-logo [moved from Documentation/COPYING-logo with 64% similarity]
Documentation/images/logo.gif [moved from Documentation/logo.gif with 100% similarity]
Documentation/images/logo.svg [new file with mode: 0644]
Documentation/index.rst
Documentation/input/input-programming.rst
Documentation/leds/leds-qcom-lpg.rst [new file with mode: 0644]
Documentation/loongarch/features.rst [new file with mode: 0644]
Documentation/loongarch/index.rst [new file with mode: 0644]
Documentation/loongarch/introduction.rst [new file with mode: 0644]
Documentation/loongarch/irq-chip-model.rst [new file with mode: 0644]
Documentation/misc-devices/index.rst
Documentation/misc-devices/oxsemi-tornado.rst [new file with mode: 0644]
Documentation/networking/ip-sysctl.rst
Documentation/riscv/vm-layout.rst
Documentation/translations/zh_CN/index.rst
Documentation/translations/zh_CN/loongarch/features.rst [new file with mode: 0644]
Documentation/translations/zh_CN/loongarch/index.rst [new file with mode: 0644]
Documentation/translations/zh_CN/loongarch/introduction.rst [new file with mode: 0644]
Documentation/translations/zh_CN/loongarch/irq-chip-model.rst [new file with mode: 0644]
Documentation/usb/gadget-testing.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/uapi/asm/termbits.h
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/process.c
arch/arc/kernel/process.c
arch/arm/Kconfig
arch/arm/Makefile
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/aspeed-ast2600-evb.dts
arch/arm/boot/dts/aspeed-bmc-facebook-bletchley.dts
arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
arch/arm/boot/dts/aspeed-bmc-nuvia-dc-scm.dts [new file with mode: 0644]
arch/arm/boot/dts/aspeed-g4.dtsi
arch/arm/boot/dts/aspeed-g5.dtsi
arch/arm/boot/dts/aspeed-g6.dtsi
arch/arm/boot/dts/at91-sama7g5ek.dts
arch/arm/boot/dts/at91sam9261ek.dts
arch/arm/boot/dts/at91sam9263ek.dts
arch/arm/boot/dts/at91sam9rlek.dts
arch/arm/boot/dts/da850.dtsi
arch/arm/boot/dts/hpe-bmc-dl360gen10.dts [new file with mode: 0644]
arch/arm/boot/dts/hpe-gxp.dtsi [new file with mode: 0644]
arch/arm/boot/dts/mmp2.dtsi
arch/arm/boot/dts/pxa25x.dtsi
arch/arm/boot/dts/pxa27x.dtsi
arch/arm/boot/dts/pxa3xx.dtsi
arch/arm/boot/dts/qcom-ipq4019.dtsi
arch/arm/boot/dts/qcom-sdx55.dtsi
arch/arm/boot/dts/rk3036.dtsi
arch/arm/boot/dts/rk3066a.dtsi
arch/arm/boot/dts/rk3188.dtsi
arch/arm/boot/dts/rk322x.dtsi
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/rv1108.dtsi
arch/arm/boot/dts/sam9x60.dtsi
arch/arm/boot/dts/sama7g5.dtsi
arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
arch/arm/common/locomo.c
arch/arm/common/sa1111.c
arch/arm/configs/am200epdkit_defconfig
arch/arm/configs/cm_x300_defconfig
arch/arm/configs/colibri_pxa270_defconfig
arch/arm/configs/colibri_pxa300_defconfig
arch/arm/configs/corgi_defconfig
arch/arm/configs/eseries_pxa_defconfig
arch/arm/configs/ezx_defconfig
arch/arm/configs/h5000_defconfig
arch/arm/configs/lpd270_defconfig
arch/arm/configs/lubbock_defconfig
arch/arm/configs/magician_defconfig
arch/arm/configs/mainstone_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/omap1_defconfig
arch/arm/configs/palmz72_defconfig
arch/arm/configs/pcm027_defconfig
arch/arm/configs/pxa255-idp_defconfig
arch/arm/configs/pxa3xx_defconfig
arch/arm/configs/pxa_defconfig
arch/arm/configs/spitz_defconfig
arch/arm/configs/trizeps4_defconfig
arch/arm/configs/viper_defconfig
arch/arm/configs/xcep_defconfig
arch/arm/configs/zeus_defconfig
arch/arm/include/asm/hardware/sa1111.h
arch/arm/include/asm/io.h
arch/arm/kernel/process.c
arch/arm/kernel/reboot.c
arch/arm/mach-at91/Kconfig
arch/arm/mach-ep93xx/clock.c
arch/arm/mach-ep93xx/ts72xx.c
arch/arm/mach-hpe/Kconfig [new file with mode: 0644]
arch/arm/mach-hpe/Makefile [new file with mode: 0644]
arch/arm/mach-hpe/gxp.c [new file with mode: 0644]
arch/arm/mach-mmp/Kconfig
arch/arm/mach-mmp/Makefile
arch/arm/mach-mmp/devices.c
arch/arm/mach-mmp/devices.h
arch/arm/mach-mmp/mfp.h
arch/arm/mach-mmp/mmp2.h
arch/arm/mach-mmp/pxa168.h
arch/arm/mach-mmp/pxa910.h
arch/arm/mach-mmp/tavorevb.c [deleted file]
arch/arm/mach-mmp/ttc_dkb.c
arch/arm/mach-omap1/Kconfig
arch/arm/mach-omap1/clock.c
arch/arm/mach-omap1/clock.h
arch/arm/mach-omap1/clock_data.c
arch/arm/mach-omap1/hardware.h
arch/arm/mach-omap1/include/mach/uncompress.h [deleted file]
arch/arm/mach-omap1/io.c
arch/arm/mach-omap1/serial.c
arch/arm/mach-omap1/serial.h [moved from arch/arm/mach-omap1/include/mach/serial.h with 100% similarity]
arch/arm/mach-omap1/time.c
arch/arm/mach-pxa/Kconfig
arch/arm/mach-pxa/Makefile
arch/arm/mach-pxa/addr-map.h [moved from arch/arm/mach-pxa/include/mach/addr-map.h with 100% similarity]
arch/arm/mach-pxa/am300epd.c
arch/arm/mach-pxa/balloon3-pcmcia.c [moved from drivers/pcmcia/pxa2xx_balloon3.c with 98% similarity]
arch/arm/mach-pxa/balloon3.c
arch/arm/mach-pxa/balloon3.h [moved from arch/arm/mach-pxa/include/mach/balloon3.h with 100% similarity]
arch/arm/mach-pxa/cm-x300.c
arch/arm/mach-pxa/colibri-evalboard.c
arch/arm/mach-pxa/colibri-pcmcia.c [moved from drivers/pcmcia/pxa2xx_colibri.c with 99% similarity]
arch/arm/mach-pxa/colibri-pxa270-income.c
arch/arm/mach-pxa/colibri-pxa270.c
arch/arm/mach-pxa/colibri-pxa300.c
arch/arm/mach-pxa/colibri-pxa320.c
arch/arm/mach-pxa/colibri-pxa3xx.c
arch/arm/mach-pxa/colibri.h
arch/arm/mach-pxa/corgi.c
arch/arm/mach-pxa/corgi.h [moved from arch/arm/mach-pxa/include/mach/corgi.h with 100% similarity]
arch/arm/mach-pxa/corgi_pm.c
arch/arm/mach-pxa/csb726.c
arch/arm/mach-pxa/csb726.h
arch/arm/mach-pxa/devices.c
arch/arm/mach-pxa/e740-pcmcia.c [moved from drivers/pcmcia/pxa2xx_e740.c with 98% similarity]
arch/arm/mach-pxa/eseries-gpio.h [moved from arch/arm/mach-pxa/include/mach/eseries-gpio.h with 100% similarity]
arch/arm/mach-pxa/eseries.c
arch/arm/mach-pxa/ezx.c
arch/arm/mach-pxa/generic.c
arch/arm/mach-pxa/generic.h
arch/arm/mach-pxa/gumstix.c
arch/arm/mach-pxa/gumstix.h
arch/arm/mach-pxa/h5000.c
arch/arm/mach-pxa/hx4700-pcmcia.c [moved from drivers/pcmcia/pxa2xx_hx4700.c with 98% similarity]
arch/arm/mach-pxa/hx4700.c
arch/arm/mach-pxa/hx4700.h [moved from arch/arm/mach-pxa/include/mach/hx4700.h with 100% similarity]
arch/arm/mach-pxa/idp.c
arch/arm/mach-pxa/idp.h
arch/arm/mach-pxa/include/mach/bitfield.h [deleted file]
arch/arm/mach-pxa/include/mach/dma.h [deleted file]
arch/arm/mach-pxa/include/mach/generic.h [deleted file]
arch/arm/mach-pxa/include/mach/mtd-xip.h [deleted file]
arch/arm/mach-pxa/include/mach/uncompress.h [deleted file]
arch/arm/mach-pxa/irq.c
arch/arm/mach-pxa/irqs.h [moved from arch/arm/mach-pxa/include/mach/irqs.h with 100% similarity]
arch/arm/mach-pxa/littleton.c
arch/arm/mach-pxa/lpd270.c
arch/arm/mach-pxa/lubbock.c
arch/arm/mach-pxa/lubbock.h [moved from arch/arm/mach-pxa/include/mach/lubbock.h with 95% similarity]
arch/arm/mach-pxa/magician.c
arch/arm/mach-pxa/magician.h [moved from arch/arm/mach-pxa/include/mach/magician.h with 99% similarity]
arch/arm/mach-pxa/mainstone.c
arch/arm/mach-pxa/mainstone.h [moved from arch/arm/mach-pxa/include/mach/mainstone.h with 98% similarity]
arch/arm/mach-pxa/mfp-pxa2xx.c
arch/arm/mach-pxa/mfp-pxa2xx.h
arch/arm/mach-pxa/mfp-pxa3xx.c
arch/arm/mach-pxa/mfp-pxa3xx.h
arch/arm/mach-pxa/mfp.h [moved from arch/arm/mach-pxa/include/mach/mfp.h with 91% similarity]
arch/arm/mach-pxa/mioa701.c
arch/arm/mach-pxa/mxm8x10.c
arch/arm/mach-pxa/palm27x.c
arch/arm/mach-pxa/palmld-pcmcia.c [moved from drivers/pcmcia/pxa2xx_palmld.c with 98% similarity]
arch/arm/mach-pxa/palmld.c
arch/arm/mach-pxa/palmld.h [moved from arch/arm/mach-pxa/include/mach/palmld.h with 100% similarity]
arch/arm/mach-pxa/palmt5.c
arch/arm/mach-pxa/palmt5.h
arch/arm/mach-pxa/palmtc-pcmcia.c [moved from drivers/pcmcia/pxa2xx_palmtc.c with 98% similarity]
arch/arm/mach-pxa/palmtc.c
arch/arm/mach-pxa/palmtc.h [moved from arch/arm/mach-pxa/include/mach/palmtc.h with 100% similarity]
arch/arm/mach-pxa/palmte2.c
arch/arm/mach-pxa/palmtreo.c
arch/arm/mach-pxa/palmtx-pcmcia.c [moved from drivers/pcmcia/pxa2xx_palmtx.c with 98% similarity]
arch/arm/mach-pxa/palmtx.c
arch/arm/mach-pxa/palmtx.h [moved from arch/arm/mach-pxa/include/mach/palmtx.h with 100% similarity]
arch/arm/mach-pxa/palmz72.c
arch/arm/mach-pxa/pcm027.h
arch/arm/mach-pxa/pcm990-baseboard.c
arch/arm/mach-pxa/pcm990_baseboard.h
arch/arm/mach-pxa/poodle.c
arch/arm/mach-pxa/poodle.h [moved from arch/arm/mach-pxa/include/mach/poodle.h with 98% similarity]
arch/arm/mach-pxa/pxa-dt.c
arch/arm/mach-pxa/pxa-regs.h [new file with mode: 0644]
arch/arm/mach-pxa/pxa25x.c
arch/arm/mach-pxa/pxa25x.h
arch/arm/mach-pxa/pxa27x-udc.h
arch/arm/mach-pxa/pxa27x.c
arch/arm/mach-pxa/pxa27x.h
arch/arm/mach-pxa/pxa2xx-regs.h [moved from arch/arm/mach-pxa/include/mach/pxa2xx-regs.h with 76% similarity]
arch/arm/mach-pxa/pxa2xx.c
arch/arm/mach-pxa/pxa300.c
arch/arm/mach-pxa/pxa320.c
arch/arm/mach-pxa/pxa3xx-regs.h [moved from arch/arm/mach-pxa/include/mach/pxa3xx-regs.h with 61% similarity]
arch/arm/mach-pxa/pxa3xx-ulpi.c
arch/arm/mach-pxa/pxa3xx.c
arch/arm/mach-pxa/pxa3xx.h
arch/arm/mach-pxa/pxa930.c
arch/arm/mach-pxa/regs-ost.h [moved from arch/arm/mach-pxa/include/mach/regs-ost.h with 94% similarity]
arch/arm/mach-pxa/regs-rtc.h
arch/arm/mach-pxa/regs-u2d.h
arch/arm/mach-pxa/regs-uart.h [moved from arch/arm/mach-pxa/include/mach/regs-uart.h with 99% similarity]
arch/arm/mach-pxa/reset.c
arch/arm/mach-pxa/reset.h [moved from arch/arm/mach-pxa/include/mach/reset.h with 92% similarity]
arch/arm/mach-pxa/sharpsl_pm.c
arch/arm/mach-pxa/sleep.S
arch/arm/mach-pxa/smemc.c
arch/arm/mach-pxa/smemc.h [moved from arch/arm/mach-pxa/include/mach/smemc.h with 100% similarity]
arch/arm/mach-pxa/spitz.c
arch/arm/mach-pxa/spitz.h [moved from arch/arm/mach-pxa/include/mach/spitz.h with 100% similarity]
arch/arm/mach-pxa/spitz_pm.c
arch/arm/mach-pxa/standby.S
arch/arm/mach-pxa/tosa.c
arch/arm/mach-pxa/tosa.h [moved from arch/arm/mach-pxa/include/mach/tosa.h with 88% similarity]
arch/arm/mach-pxa/trizeps4-pcmcia.c [moved from drivers/pcmcia/pxa2xx_trizeps4.c with 98% similarity]
arch/arm/mach-pxa/trizeps4.c
arch/arm/mach-pxa/trizeps4.h [moved from arch/arm/mach-pxa/include/mach/trizeps4.h with 99% similarity]
arch/arm/mach-pxa/viper-pcmcia.c [moved from drivers/pcmcia/pxa2xx_viper.c with 97% similarity]
arch/arm/mach-pxa/viper-pcmcia.h [moved from include/linux/platform_data/pcmcia-pxa2xx_viper.h with 100% similarity]
arch/arm/mach-pxa/viper.c
arch/arm/mach-pxa/vpac270-pcmcia.c [moved from drivers/pcmcia/pxa2xx_vpac270.c with 98% similarity]
arch/arm/mach-pxa/vpac270.c
arch/arm/mach-pxa/vpac270.h [moved from arch/arm/mach-pxa/include/mach/vpac270.h with 100% similarity]
arch/arm/mach-pxa/xcep.c
arch/arm/mach-pxa/z2.c
arch/arm/mach-pxa/z2.h [moved from arch/arm/mach-pxa/include/mach/z2.h with 100% similarity]
arch/arm/mach-pxa/zeus.c
arch/arm/mach-pxa/zylonite.c
arch/arm/mach-pxa/zylonite.h
arch/arm/mach-pxa/zylonite_pxa300.c
arch/arm/mach-pxa/zylonite_pxa320.c
arch/arm/mach-sa1100/generic.c
arch/arm/mach-sa1100/include/mach/reset.h
arch/arm/mm/copypage-xsc3.c
arch/arm/mm/ioremap.c
arch/arm64/Kconfig
arch/arm64/boot/dts/intel/Makefile
arch/arm64/boot/dts/intel/socfpga_agilex_n6000.dts [new file with mode: 0644]
arch/arm64/boot/dts/qcom/apq8096-db820c.dts
arch/arm64/boot/dts/qcom/ipq6018.dtsi
arch/arm64/boot/dts/qcom/ipq8074.dtsi
arch/arm64/boot/dts/qcom/msm8953.dtsi
arch/arm64/boot/dts/qcom/msm8994.dtsi
arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/boot/dts/qcom/msm8998.dtsi
arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
arch/arm64/boot/dts/qcom/qcs404.dtsi
arch/arm64/boot/dts/qcom/sc7180.dtsi
arch/arm64/boot/dts/qcom/sc7280.dtsi
arch/arm64/boot/dts/qcom/sdm630.dtsi
arch/arm64/boot/dts/qcom/sdm845.dtsi
arch/arm64/boot/dts/qcom/sm6125.dtsi
arch/arm64/boot/dts/qcom/sm6350.dtsi
arch/arm64/boot/dts/qcom/sm8150.dtsi
arch/arm64/boot/dts/qcom/sm8250.dtsi
arch/arm64/boot/dts/qcom/sm8350.dtsi
arch/arm64/boot/dts/qcom/sm8450.dtsi
arch/arm64/boot/dts/rockchip/rk3308.dtsi
arch/arm64/boot/dts/rockchip/rk3368.dtsi
arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
arch/arm64/boot/dts/rockchip/rk356x.dtsi
arch/arm64/boot/dts/sprd/whale2.dtsi
arch/arm64/include/asm/compat.h
arch/arm64/include/asm/unistd.h
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/process.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/signal.c
arch/csky/kernel/power.c
arch/csky/kernel/process.c
arch/hexagon/kernel/process.c
arch/ia64/include/asm/ptrace.h
arch/ia64/kernel/process.c
arch/ia64/kernel/ptrace.c
arch/ia64/kernel/setup.c
arch/ia64/kernel/smpboot.c
arch/loongarch/Kbuild [new file with mode: 0644]
arch/loongarch/Kconfig [new file with mode: 0644]
arch/loongarch/Kconfig.debug [new file with mode: 0644]
arch/loongarch/Makefile [new file with mode: 0644]
arch/loongarch/boot/.gitignore [moved from arch/arm/mach-pxa/Makefile.boot with 57% similarity]
arch/loongarch/boot/Makefile [new file with mode: 0644]
arch/loongarch/boot/dts/Makefile [new file with mode: 0644]
arch/loongarch/configs/loongson3_defconfig [new file with mode: 0644]
arch/loongarch/include/asm/Kbuild [new file with mode: 0644]
arch/loongarch/include/asm/acenv.h [new file with mode: 0644]
arch/loongarch/include/asm/acpi.h [new file with mode: 0644]
arch/loongarch/include/asm/addrspace.h [new file with mode: 0644]
arch/loongarch/include/asm/asm-offsets.h [new file with mode: 0644]
arch/loongarch/include/asm/asm-prototypes.h [new file with mode: 0644]
arch/loongarch/include/asm/asm.h [new file with mode: 0644]
arch/loongarch/include/asm/asmmacro.h [new file with mode: 0644]
arch/loongarch/include/asm/atomic.h [new file with mode: 0644]
arch/loongarch/include/asm/barrier.h [new file with mode: 0644]
arch/loongarch/include/asm/bitops.h [new file with mode: 0644]
arch/loongarch/include/asm/bitrev.h [new file with mode: 0644]
arch/loongarch/include/asm/bootinfo.h [new file with mode: 0644]
arch/loongarch/include/asm/branch.h [new file with mode: 0644]
arch/loongarch/include/asm/bug.h [new file with mode: 0644]
arch/loongarch/include/asm/cache.h [new file with mode: 0644]
arch/loongarch/include/asm/cacheflush.h [new file with mode: 0644]
arch/loongarch/include/asm/cacheops.h [new file with mode: 0644]
arch/loongarch/include/asm/clocksource.h [new file with mode: 0644]
arch/loongarch/include/asm/cmpxchg.h [new file with mode: 0644]
arch/loongarch/include/asm/compiler.h [new file with mode: 0644]
arch/loongarch/include/asm/cpu-features.h [new file with mode: 0644]
arch/loongarch/include/asm/cpu-info.h [new file with mode: 0644]
arch/loongarch/include/asm/cpu.h [new file with mode: 0644]
arch/loongarch/include/asm/cpufeature.h [new file with mode: 0644]
arch/loongarch/include/asm/delay.h [new file with mode: 0644]
arch/loongarch/include/asm/dma-direct.h [new file with mode: 0644]
arch/loongarch/include/asm/dmi.h [new file with mode: 0644]
arch/loongarch/include/asm/efi.h [new file with mode: 0644]
arch/loongarch/include/asm/elf.h [new file with mode: 0644]
arch/loongarch/include/asm/entry-common.h [new file with mode: 0644]
arch/loongarch/include/asm/exec.h [new file with mode: 0644]
arch/loongarch/include/asm/fb.h [new file with mode: 0644]
arch/loongarch/include/asm/fixmap.h [new file with mode: 0644]
arch/loongarch/include/asm/fpregdef.h [new file with mode: 0644]
arch/loongarch/include/asm/fpu.h [new file with mode: 0644]
arch/loongarch/include/asm/futex.h [new file with mode: 0644]
arch/loongarch/include/asm/hardirq.h [new file with mode: 0644]
arch/loongarch/include/asm/hugetlb.h [new file with mode: 0644]
arch/loongarch/include/asm/hw_irq.h [new file with mode: 0644]
arch/loongarch/include/asm/idle.h [new file with mode: 0644]
arch/loongarch/include/asm/inst.h [new file with mode: 0644]
arch/loongarch/include/asm/io.h [new file with mode: 0644]
arch/loongarch/include/asm/irq.h [new file with mode: 0644]
arch/loongarch/include/asm/irq_regs.h [new file with mode: 0644]
arch/loongarch/include/asm/irqflags.h [new file with mode: 0644]
arch/loongarch/include/asm/kdebug.h [new file with mode: 0644]
arch/loongarch/include/asm/linkage.h [new file with mode: 0644]
arch/loongarch/include/asm/local.h [new file with mode: 0644]
arch/loongarch/include/asm/loongarch.h [new file with mode: 0644]
arch/loongarch/include/asm/loongson.h [new file with mode: 0644]
arch/loongarch/include/asm/mmu.h [new file with mode: 0644]
arch/loongarch/include/asm/mmu_context.h [new file with mode: 0644]
arch/loongarch/include/asm/mmzone.h [new file with mode: 0644]
arch/loongarch/include/asm/module.h [new file with mode: 0644]
arch/loongarch/include/asm/module.lds.h [new file with mode: 0644]
arch/loongarch/include/asm/numa.h [new file with mode: 0644]
arch/loongarch/include/asm/page.h [new file with mode: 0644]
arch/loongarch/include/asm/percpu.h [new file with mode: 0644]
arch/loongarch/include/asm/perf_event.h [new file with mode: 0644]
arch/loongarch/include/asm/pgalloc.h [new file with mode: 0644]
arch/loongarch/include/asm/pgtable-bits.h [new file with mode: 0644]
arch/loongarch/include/asm/pgtable.h [new file with mode: 0644]
arch/loongarch/include/asm/prefetch.h [new file with mode: 0644]
arch/loongarch/include/asm/processor.h [new file with mode: 0644]
arch/loongarch/include/asm/ptrace.h [new file with mode: 0644]
arch/loongarch/include/asm/reboot.h [new file with mode: 0644]
arch/loongarch/include/asm/regdef.h [new file with mode: 0644]
arch/loongarch/include/asm/seccomp.h [new file with mode: 0644]
arch/loongarch/include/asm/serial.h [new file with mode: 0644]
arch/loongarch/include/asm/setup.h [new file with mode: 0644]
arch/loongarch/include/asm/shmparam.h [new file with mode: 0644]
arch/loongarch/include/asm/smp.h [new file with mode: 0644]
arch/loongarch/include/asm/sparsemem.h [new file with mode: 0644]
arch/loongarch/include/asm/stackframe.h [new file with mode: 0644]
arch/loongarch/include/asm/stacktrace.h [new file with mode: 0644]
arch/loongarch/include/asm/string.h [new file with mode: 0644]
arch/loongarch/include/asm/switch_to.h [new file with mode: 0644]
arch/loongarch/include/asm/syscall.h [new file with mode: 0644]
arch/loongarch/include/asm/thread_info.h [new file with mode: 0644]
arch/loongarch/include/asm/time.h [new file with mode: 0644]
arch/loongarch/include/asm/timex.h [new file with mode: 0644]
arch/loongarch/include/asm/tlb.h [new file with mode: 0644]
arch/loongarch/include/asm/tlbflush.h [new file with mode: 0644]
arch/loongarch/include/asm/topology.h [new file with mode: 0644]
arch/loongarch/include/asm/types.h [new file with mode: 0644]
arch/loongarch/include/asm/uaccess.h [new file with mode: 0644]
arch/loongarch/include/asm/unistd.h [new file with mode: 0644]
arch/loongarch/include/asm/vdso.h [new file with mode: 0644]
arch/loongarch/include/asm/vdso/clocksource.h [new file with mode: 0644]
arch/loongarch/include/asm/vdso/gettimeofday.h [new file with mode: 0644]
arch/loongarch/include/asm/vdso/processor.h [new file with mode: 0644]
arch/loongarch/include/asm/vdso/vdso.h [new file with mode: 0644]
arch/loongarch/include/asm/vdso/vsyscall.h [new file with mode: 0644]
arch/loongarch/include/asm/vermagic.h [new file with mode: 0644]
arch/loongarch/include/asm/vmalloc.h [new file with mode: 0644]
arch/loongarch/include/uapi/asm/Kbuild [moved from drivers/staging/vme/Makefile with 59% similarity]
arch/loongarch/include/uapi/asm/auxvec.h [new file with mode: 0644]
arch/loongarch/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
arch/loongarch/include/uapi/asm/break.h [new file with mode: 0644]
arch/loongarch/include/uapi/asm/byteorder.h [new file with mode: 0644]
arch/loongarch/include/uapi/asm/hwcap.h [new file with mode: 0644]
arch/loongarch/include/uapi/asm/ptrace.h [new file with mode: 0644]
arch/loongarch/include/uapi/asm/reg.h [new file with mode: 0644]
arch/loongarch/include/uapi/asm/sigcontext.h [new file with mode: 0644]
arch/loongarch/include/uapi/asm/signal.h [new file with mode: 0644]
arch/loongarch/include/uapi/asm/ucontext.h [new file with mode: 0644]
arch/loongarch/include/uapi/asm/unistd.h [new file with mode: 0644]
arch/loongarch/kernel/.gitignore [new file with mode: 0644]
arch/loongarch/kernel/Makefile [new file with mode: 0644]
arch/loongarch/kernel/access-helper.h [new file with mode: 0644]
arch/loongarch/kernel/acpi.c [new file with mode: 0644]
arch/loongarch/kernel/asm-offsets.c [new file with mode: 0644]
arch/loongarch/kernel/cacheinfo.c [new file with mode: 0644]
arch/loongarch/kernel/cpu-probe.c [new file with mode: 0644]
arch/loongarch/kernel/dma.c [new file with mode: 0644]
arch/loongarch/kernel/efi.c [new file with mode: 0644]
arch/loongarch/kernel/elf.c [new file with mode: 0644]
arch/loongarch/kernel/entry.S [new file with mode: 0644]
arch/loongarch/kernel/env.c [new file with mode: 0644]
arch/loongarch/kernel/fpu.S [new file with mode: 0644]
arch/loongarch/kernel/genex.S [new file with mode: 0644]
arch/loongarch/kernel/head.S [new file with mode: 0644]
arch/loongarch/kernel/idle.c [new file with mode: 0644]
arch/loongarch/kernel/inst.c [new file with mode: 0644]
arch/loongarch/kernel/io.c [new file with mode: 0644]
arch/loongarch/kernel/irq.c [new file with mode: 0644]
arch/loongarch/kernel/mem.c [new file with mode: 0644]
arch/loongarch/kernel/module-sections.c [new file with mode: 0644]
arch/loongarch/kernel/module.c [new file with mode: 0644]
arch/loongarch/kernel/numa.c [new file with mode: 0644]
arch/loongarch/kernel/proc.c [new file with mode: 0644]
arch/loongarch/kernel/process.c [new file with mode: 0644]
arch/loongarch/kernel/ptrace.c [new file with mode: 0644]
arch/loongarch/kernel/reset.c [new file with mode: 0644]
arch/loongarch/kernel/setup.c [new file with mode: 0644]
arch/loongarch/kernel/signal.c [new file with mode: 0644]
arch/loongarch/kernel/smp.c [new file with mode: 0644]
arch/loongarch/kernel/switch.S [new file with mode: 0644]
arch/loongarch/kernel/syscall.c [new file with mode: 0644]
arch/loongarch/kernel/time.c [new file with mode: 0644]
arch/loongarch/kernel/topology.c [new file with mode: 0644]
arch/loongarch/kernel/traps.c [new file with mode: 0644]
arch/loongarch/kernel/vdso.c [new file with mode: 0644]
arch/loongarch/kernel/vmlinux.lds.S [new file with mode: 0644]
arch/loongarch/lib/Makefile [new file with mode: 0644]
arch/loongarch/lib/clear_user.S [new file with mode: 0644]
arch/loongarch/lib/copy_user.S [new file with mode: 0644]
arch/loongarch/lib/delay.c [new file with mode: 0644]
arch/loongarch/lib/dump_tlb.c [new file with mode: 0644]
arch/loongarch/mm/Makefile [new file with mode: 0644]
arch/loongarch/mm/cache.c [new file with mode: 0644]
arch/loongarch/mm/extable.c [new file with mode: 0644]
arch/loongarch/mm/fault.c [new file with mode: 0644]
arch/loongarch/mm/hugetlbpage.c [new file with mode: 0644]
arch/loongarch/mm/init.c [new file with mode: 0644]
arch/loongarch/mm/ioremap.c [new file with mode: 0644]
arch/loongarch/mm/maccess.c [new file with mode: 0644]
arch/loongarch/mm/mmap.c [new file with mode: 0644]
arch/loongarch/mm/page.S [new file with mode: 0644]
arch/loongarch/mm/pgtable.c [new file with mode: 0644]
arch/loongarch/mm/tlb.c [new file with mode: 0644]
arch/loongarch/mm/tlbex.S [new file with mode: 0644]
arch/loongarch/pci/Makefile [new file with mode: 0644]
arch/loongarch/vdso/.gitignore [new file with mode: 0644]
arch/loongarch/vdso/Makefile [new file with mode: 0644]
arch/loongarch/vdso/elf.S [new file with mode: 0644]
arch/loongarch/vdso/gen_vdso_offsets.sh [new file with mode: 0755]
arch/loongarch/vdso/sigreturn.S [new file with mode: 0644]
arch/loongarch/vdso/vdso.S [new file with mode: 0644]
arch/loongarch/vdso/vdso.lds.S [new file with mode: 0644]
arch/loongarch/vdso/vgettimeofday.c [new file with mode: 0644]
arch/m68k/Kconfig.bus
arch/m68k/Kconfig.cpu
arch/m68k/Kconfig.machine
arch/m68k/coldfire/Makefile
arch/m68k/coldfire/dma.c [deleted file]
arch/m68k/coldfire/intc.c
arch/m68k/coldfire/m53xx.c
arch/m68k/coldfire/pci.c
arch/m68k/emu/natfeat.c
arch/m68k/hp300/config.c
arch/m68k/include/asm/dma.h
arch/m68k/include/asm/elf.h
arch/m68k/include/asm/machdep.h
arch/m68k/include/asm/mmu.h
arch/m68k/include/asm/pgtable_no.h
arch/m68k/include/uapi/asm/ptrace.h
arch/m68k/kernel/process.c
arch/m68k/kernel/ptrace.c
arch/m68k/kernel/setup_mm.c
arch/m68k/kernel/setup_no.c
arch/m68k/kernel/time.c
arch/m68k/mac/config.c
arch/m68k/mm/motorola.c
arch/m68k/q40/config.c
arch/m68k/virt/config.c
arch/microblaze/include/asm/string.h
arch/microblaze/kernel/kgdb.c
arch/microblaze/kernel/process.c
arch/microblaze/kernel/timer.c
arch/microblaze/lib/memcpy.c
arch/microblaze/lib/memmove.c
arch/microblaze/lib/memset.c
arch/microblaze/mm/init.c
arch/mips/Kconfig
arch/mips/alchemy/common/dbdma.c
arch/mips/alchemy/devboards/db1300.c
arch/mips/bmips/dma.c
arch/mips/boot/dts/brcm/bcm97358svmb.dts
arch/mips/boot/dts/brcm/bcm97360svmb.dts
arch/mips/boot/dts/brcm/bcm97425svmb.dts
arch/mips/boot/dts/ingenic/cu1000-neo.dts
arch/mips/boot/dts/ingenic/cu1830-neo.dts
arch/mips/boot/dts/ingenic/jz4780.dtsi
arch/mips/boot/dts/ingenic/x1000.dtsi
arch/mips/boot/dts/ingenic/x1830.dtsi
arch/mips/boot/dts/mscc/jaguar2_pcb110.dts
arch/mips/boot/dts/mscc/jaguar2_pcb111.dts
arch/mips/boot/dts/mscc/jaguar2_pcb118.dts
arch/mips/boot/dts/mscc/ocelot.dtsi
arch/mips/boot/dts/mscc/ocelot_pcb120.dts
arch/mips/boot/dts/mscc/serval_common.dtsi
arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts
arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts
arch/mips/boot/dts/ralink/mt7621.dtsi
arch/mips/boot/tools/relocs.c
arch/mips/cavium-octeon/executive/cvmx-bootmem.c
arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
arch/mips/cavium-octeon/executive/cvmx-helper.c
arch/mips/cavium-octeon/executive/cvmx-pko.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/cavium-octeon/octeon-usb.c
arch/mips/configs/cu1000-neo_defconfig
arch/mips/configs/cu1830-neo_defconfig
arch/mips/dec/ioasic-irq.c
arch/mips/dec/setup.c
arch/mips/fw/arc/memory.c
arch/mips/include/asm/checksum.h
arch/mips/include/asm/compat.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h
arch/mips/include/asm/mach-ralink/spaces.h
arch/mips/include/asm/octeon/cvmx-bootinfo.h
arch/mips/include/asm/unistd.h
arch/mips/include/uapi/asm/fcntl.h
arch/mips/include/uapi/asm/stat.h
arch/mips/include/uapi/asm/termbits.h
arch/mips/jazz/irq.c
arch/mips/kernel/cmpxchg.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/idle.c
arch/mips/kernel/kprobes.c
arch/mips/kernel/mips-cpc.c
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/kernel/process.c
arch/mips/kernel/reset.c
arch/mips/kernel/setup.c
arch/mips/kernel/smp.c
arch/mips/kvm/tlb.c
arch/mips/loongson32/Kconfig
arch/mips/mm/fault.c
arch/mips/net/bpf_jit_comp32.c
arch/mips/pci/pcie-octeon.c
arch/mips/pic32/pic32mzda/config.c
arch/mips/sgi-ip22/ip22-reset.c
arch/mips/sgi-ip27/ip27-xtalk.c
arch/mips/sgi-ip30/ip30-xtalk.c
arch/mips/sibyte/bcm1480/setup.c
arch/mips/tools/loongson3-llsc-check.c
arch/mips/txx9/generic/pci.c
arch/mips/vr41xx/common/cmu.c
arch/nios2/kernel/process.c
arch/openrisc/kernel/process.c
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/include/asm/assembly.h
arch/parisc/include/asm/cache.h
arch/parisc/include/asm/compat.h
arch/parisc/include/asm/fb.h
arch/parisc/include/asm/fixmap.h
arch/parisc/include/asm/unistd.h
arch/parisc/include/uapi/asm/termbits.h
arch/parisc/kernel/cache.c
arch/parisc/kernel/entry.S
arch/parisc/kernel/pacache.S
arch/parisc/kernel/process.c
arch/parisc/kernel/processor.c
arch/parisc/kernel/topology.c
arch/parisc/mm/init.c
arch/parisc/nm [deleted file]
arch/powerpc/Kconfig
arch/powerpc/include/asm/compat.h
arch/powerpc/include/asm/livepatch.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/stat.h
arch/powerpc/include/uapi/asm/termbits.h
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/xmon/xmon.c
arch/riscv/Kbuild
arch/riscv/Kconfig
arch/riscv/Kconfig.erratas
arch/riscv/Kconfig.socs
arch/riscv/Makefile
arch/riscv/boot/.gitignore
arch/riscv/boot/dts/microchip/Makefile
arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi [moved from arch/riscv/boot/dts/microchip/microchip-mpfs-fabric.dtsi with 91% similarity]
arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts [moved from arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts with 95% similarity]
arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi [new file with mode: 0644]
arch/riscv/boot/dts/microchip/mpfs-polarberry.dts [new file with mode: 0644]
arch/riscv/boot/dts/microchip/mpfs.dtsi [moved from arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi with 98% similarity]
arch/riscv/boot/dts/sifive/fu540-c000.dtsi
arch/riscv/errata/Makefile
arch/riscv/errata/alternative.c [deleted file]
arch/riscv/errata/sifive/errata.c
arch/riscv/errata/thead/Makefile [new file with mode: 0644]
arch/riscv/errata/thead/errata.c [new file with mode: 0644]
arch/riscv/include/asm/alternative-macros.h
arch/riscv/include/asm/alternative.h
arch/riscv/include/asm/asm.h
arch/riscv/include/asm/atomic.h
arch/riscv/include/asm/cmpxchg.h
arch/riscv/include/asm/compat.h [new file with mode: 0644]
arch/riscv/include/asm/csr.h
arch/riscv/include/asm/elf.h
arch/riscv/include/asm/errata_list.h
arch/riscv/include/asm/fixmap.h
arch/riscv/include/asm/hwcap.h
arch/riscv/include/asm/irq_work.h
arch/riscv/include/asm/kexec.h
arch/riscv/include/asm/mmu.h
arch/riscv/include/asm/pgtable-32.h
arch/riscv/include/asm/pgtable-64.h
arch/riscv/include/asm/pgtable-bits.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/processor.h
arch/riscv/include/asm/signal32.h [new file with mode: 0644]
arch/riscv/include/asm/syscall.h
arch/riscv/include/asm/thread_info.h
arch/riscv/include/asm/unistd.h
arch/riscv/include/asm/vdso.h
arch/riscv/include/asm/vendorid_list.h
arch/riscv/include/asm/xip_fixup.h [new file with mode: 0644]
arch/riscv/include/uapi/asm/unistd.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/alternative.c [new file with mode: 0644]
arch/riscv/kernel/compat_signal.c [new file with mode: 0644]
arch/riscv/kernel/compat_syscall_table.c [new file with mode: 0644]
arch/riscv/kernel/compat_vdso/.gitignore [new file with mode: 0644]
arch/riscv/kernel/compat_vdso/Makefile [new file with mode: 0644]
arch/riscv/kernel/compat_vdso/compat_vdso.S [new file with mode: 0644]
arch/riscv/kernel/compat_vdso/compat_vdso.lds.S [new file with mode: 0644]
arch/riscv/kernel/compat_vdso/flush_icache.S [new file with mode: 0644]
arch/riscv/kernel/compat_vdso/gen_compat_vdso_offsets.sh [new file with mode: 0755]
arch/riscv/kernel/compat_vdso/getcpu.S [new file with mode: 0644]
arch/riscv/kernel/compat_vdso/note.S [new file with mode: 0644]
arch/riscv/kernel/compat_vdso/rt_sigreturn.S [new file with mode: 0644]
arch/riscv/kernel/cpu.c
arch/riscv/kernel/cpufeature.c
arch/riscv/kernel/efi.c
arch/riscv/kernel/elf_kexec.c [new file with mode: 0644]
arch/riscv/kernel/entry.S
arch/riscv/kernel/head.S
arch/riscv/kernel/machine_kexec.c
arch/riscv/kernel/machine_kexec_file.c [new file with mode: 0644]
arch/riscv/kernel/module.c
arch/riscv/kernel/process.c
arch/riscv/kernel/ptrace.c
arch/riscv/kernel/reset.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/signal.c
arch/riscv/kernel/smpboot.c
arch/riscv/kernel/suspend_entry.S
arch/riscv/kernel/sys_riscv.c
arch/riscv/kernel/traps.c
arch/riscv/kernel/vdso.c
arch/riscv/kernel/vdso/vdso.S
arch/riscv/mm/fault.c
arch/riscv/mm/init.c
arch/riscv/purgatory/.gitignore [new file with mode: 0644]
arch/riscv/purgatory/Makefile [new file with mode: 0644]
arch/riscv/purgatory/entry.S [new file with mode: 0644]
arch/riscv/purgatory/purgatory.c [new file with mode: 0644]
arch/s390/Kconfig
arch/s390/Kconfig.debug
arch/s390/crypto/aes_s390.c
arch/s390/include/asm/asm-extable.h
arch/s390/include/asm/compat.h
arch/s390/include/asm/kexec.h
arch/s390/include/asm/livepatch.h [deleted file]
arch/s390/include/asm/processor.h
arch/s390/include/asm/stacktrace.h
arch/s390/include/asm/uaccess.h
arch/s390/include/asm/unistd.h
arch/s390/kernel/Makefile
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/early.c
arch/s390/kernel/earlypgm.S [moved from arch/s390/kernel/base.S with 52% similarity]
arch/s390/kernel/entry.S
arch/s390/kernel/entry.h
arch/s390/kernel/perf_event.c
arch/s390/kernel/process.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/extable.c
arch/s390/mm/gmap.c
arch/s390/mm/pgtable.c
arch/sh/kernel/process_32.c
arch/sh/kernel/reboot.c
arch/sparc/Kconfig
arch/sparc/include/asm/compat.h
arch/sparc/include/asm/unistd.h
arch/sparc/include/uapi/asm/stat.h
arch/sparc/include/uapi/asm/termbits.h
arch/sparc/kernel/process_32.c
arch/sparc/kernel/process_64.c
arch/um/Kconfig
arch/um/drivers/Kconfig
arch/um/drivers/Makefile
arch/um/drivers/chan_kern.c
arch/um/drivers/chan_user.c
arch/um/drivers/daemon_kern.c
arch/um/drivers/line.c
arch/um/drivers/line.h
arch/um/drivers/ssl.c
arch/um/drivers/stdio_console.c
arch/um/drivers/virtio_uml.c
arch/um/drivers/xterm.c
arch/um/include/asm/Kbuild
arch/um/include/asm/irq.h
arch/um/include/asm/thread_info.h
arch/um/kernel/exec.c
arch/um/kernel/process.c
arch/um/kernel/ptrace.c
arch/um/kernel/signal.c
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/boot/header.S
arch/x86/events/Kconfig
arch/x86/events/intel/core.c
arch/x86/include/asm/compat.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/efi.h
arch/x86/include/asm/extable.h
arch/x86/include/asm/fpu/sched.h
arch/x86/include/asm/livepatch.h [deleted file]
arch/x86/include/asm/processor.h
arch/x86/include/asm/switch_to.h
arch/x86/include/asm/unistd.h
arch/x86/include/asm/xen/page.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/sgx/encl.c
arch/x86/kernel/cpu/sgx/encl.h
arch/x86/kernel/cpu/sgx/main.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/machine_kexec_64.c
arch/x86/kernel/process.c
arch/x86/kernel/reboot.c
arch/x86/kernel/setup.c
arch/x86/kernel/step.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/init_64.c
arch/x86/platform/efi/efi_thunk_64.S
arch/x86/um/ldt.c
arch/x86/xen/enlighten_pv.c
arch/xtensa/kernel/process.c
arch/xtensa/kernel/ptrace.c
arch/xtensa/kernel/signal.c
block/bio.c
block/blk-cgroup.c
block/blk-core.c
block/blk-ia-ranges.c
block/blk-iolatency.c
block/blk-mq-tag.c
block/blk-mq.c
block/genhd.c
drivers/Kconfig
drivers/Makefile
drivers/accessibility/speakup/fakekey.c
drivers/accessibility/speakup/serialio.c
drivers/accessibility/speakup/speakup_acntpc.c
drivers/accessibility/speakup/speakup_acntsa.c
drivers/accessibility/speakup/speakup_apollo.c
drivers/accessibility/speakup/speakup_audptr.c
drivers/accessibility/speakup/speakup_bns.c
drivers/accessibility/speakup/speakup_decext.c
drivers/accessibility/speakup/speakup_dectlk.c
drivers/accessibility/speakup/speakup_dtlk.c
drivers/accessibility/speakup/speakup_dummy.c
drivers/accessibility/speakup/speakup_keypc.c
drivers/accessibility/speakup/speakup_ltlk.c
drivers/accessibility/speakup/speakup_soft.c
drivers/accessibility/speakup/speakup_spkout.c
drivers/accessibility/speakup/speakup_txprt.c
drivers/acpi/ac.c
drivers/acpi/acpi_video.c
drivers/acpi/battery.c
drivers/acpi/cppc_acpi.c
drivers/acpi/dptf/dptf_pch_fivr.c
drivers/acpi/dptf/dptf_power.c
drivers/acpi/dptf/int340x_thermal.c
drivers/acpi/fan.h
drivers/acpi/glue.c
drivers/acpi/osl.c
drivers/acpi/processor_idle.c
drivers/acpi/sleep.c
drivers/amba/bus.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/android/binder_internal.h
drivers/android/binderfs.c
drivers/ata/pata_palmld.c
drivers/base/Makefile
drivers/base/arch_topology.c
drivers/base/base.h
drivers/base/bus.c
drivers/base/core.c
drivers/base/dd.c
drivers/base/driver.c
drivers/base/firmware_loader/Kconfig
drivers/base/firmware_loader/Makefile
drivers/base/firmware_loader/fallback.c
drivers/base/firmware_loader/fallback.h
drivers/base/firmware_loader/firmware.h
drivers/base/firmware_loader/main.c
drivers/base/firmware_loader/sysfs.c [new file with mode: 0644]
drivers/base/firmware_loader/sysfs.h [new file with mode: 0644]
drivers/base/firmware_loader/sysfs_upload.c [new file with mode: 0644]
drivers/base/firmware_loader/sysfs_upload.h [new file with mode: 0644]
drivers/base/physical_location.c [new file with mode: 0644]
drivers/base/physical_location.h [new file with mode: 0644]
drivers/base/platform.c
drivers/base/property.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/null_blk/main.c
drivers/block/null_blk/null_blk.h
drivers/block/null_blk/zoned.c
drivers/block/rbd.c
drivers/block/sx8.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/bus/fsl-mc/fsl-mc-bus.c
drivers/bus/mhi/Kconfig
drivers/bus/mhi/Makefile
drivers/bus/mhi/common.h
drivers/bus/mhi/ep/Kconfig [new file with mode: 0644]
drivers/bus/mhi/ep/Makefile [new file with mode: 0644]
drivers/bus/mhi/ep/internal.h [new file with mode: 0644]
drivers/bus/mhi/ep/main.c [new file with mode: 0644]
drivers/bus/mhi/ep/mmio.c [new file with mode: 0644]
drivers/bus/mhi/ep/ring.c [new file with mode: 0644]
drivers/bus/mhi/ep/sm.c [new file with mode: 0644]
drivers/bus/mhi/host/boot.c
drivers/bus/mhi/host/init.c
drivers/bus/mhi/host/internal.h
drivers/bus/mhi/host/main.c
drivers/bus/mhi/host/pci_generic.c
drivers/bus/mhi/host/pm.c
drivers/bus/ti-sysc.c
drivers/char/Kconfig
drivers/char/mem.c
drivers/char/misc.c
drivers/char/pcmcia/synclink_cs.c
drivers/char/ttyprintk.c
drivers/char/xillybus/xillybus_class.c
drivers/char/xillybus/xillyusb.c
drivers/clk/imx/clk-scu.c
drivers/clk/pxa/clk-pxa.c
drivers/clk/pxa/clk-pxa.h
drivers/clk/pxa/clk-pxa25x.c
drivers/clk/pxa/clk-pxa27x.c
drivers/clk/pxa/clk-pxa2xx.h [new file with mode: 0644]
drivers/clk/pxa/clk-pxa3xx.c
drivers/clocksource/Kconfig
drivers/clocksource/Makefile
drivers/clocksource/bcm_kona_timer.c
drivers/clocksource/jcore-pit.c
drivers/clocksource/mips-gic-timer.c
drivers/clocksource/timer-armada-370-xp.c
drivers/clocksource/timer-digicolor.c
drivers/clocksource/timer-gxp.c [new file with mode: 0644]
drivers/clocksource/timer-ixp4xx.c
drivers/clocksource/timer-lpc32xx.c
drivers/clocksource/timer-orion.c
drivers/clocksource/timer-oxnas-rps.c
drivers/clocksource/timer-pistachio.c
drivers/clocksource/timer-riscv.c
drivers/clocksource/timer-sp804.c
drivers/clocksource/timer-sun4i.c
drivers/clocksource/timer-sun5i.c
drivers/clocksource/timer-ti-dm.c
drivers/comedi/drivers.c
drivers/cpufreq/cppc_cpufreq.c
drivers/cpufreq/mediatek-cpufreq.c
drivers/cpufreq/pxa2xx-cpufreq.c
drivers/cpufreq/pxa3xx-cpufreq.c
drivers/cpufreq/tegra194-cpufreq.c
drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
drivers/crypto/virtio/virtio_crypto_common.h
drivers/crypto/virtio/virtio_crypto_core.c
drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
drivers/dio/dio.c
drivers/extcon/Kconfig
drivers/extcon/extcon-axp288.c
drivers/extcon/extcon-intel-int3496.c
drivers/extcon/extcon-ptn5150.c
drivers/extcon/extcon-sm5502.c
drivers/extcon/extcon-usb-gpio.c
drivers/extcon/extcon-usbc-cros-ec.c
drivers/extcon/extcon.c
drivers/firmware/Makefile
drivers/firmware/dmi-sysfs.c
drivers/firmware/edd.c
drivers/firmware/efi/Kconfig
drivers/firmware/efi/libstub/x86-stub.c
drivers/firmware/stratix10-svc.c
drivers/firmware/xilinx/zynqmp.c
drivers/fpga/Makefile
drivers/fpga/dfl-pci.c
drivers/fpga/dfl.c
drivers/fpga/dfl.h
drivers/fpga/fpga-mgr.c
drivers/fpga/fpga-region.c
drivers/fpga/of-fpga-region.c
drivers/gpio/gpio-adp5588.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-tegra186.c
drivers/gpio/gpiolib-cdev.c
drivers/gpio/gpiolib.c
drivers/gpio/gpiolib.h
drivers/gpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
drivers/gpu/drm/amd/amdgpu/soc21.c
drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
drivers/gpu/drm/amd/display/dc/core/dc_surface.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
drivers/gpu/drm/amd/display/include/link_service_types.h
drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h
drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h
drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h
drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
drivers/gpu/drm/msm/dp/dp_ctrl.c
drivers/gpu/drm/msm/msm_mdss.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/ttm/ttm_module.c
drivers/gpu/host1x/Kconfig
drivers/gpu/host1x/Makefile
drivers/gpu/host1x/context_bus.c [new file with mode: 0644]
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/usbkbd.c
drivers/hid/usbhid/usbmouse.c
drivers/hte/Kconfig [new file with mode: 0644]
drivers/hte/Makefile [new file with mode: 0644]
drivers/hte/hte-tegra194-test.c [new file with mode: 0644]
drivers/hte/hte-tegra194.c [new file with mode: 0644]
drivers/hte/hte.c [new file with mode: 0644]
drivers/hv/vmbus_drv.c
drivers/hwtracing/coresight/coresight-core.c
drivers/hwtracing/coresight/coresight-cpu-debug.c
drivers/hwtracing/coresight/coresight-etm3x-core.c
drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
drivers/hwtracing/coresight/coresight-etm4x-core.c
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
drivers/hwtracing/coresight/coresight-etm4x.h
drivers/i2c/busses/i2c-at91-master.c
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-designware-amdpsp.c
drivers/i2c/busses/i2c-designware-common.c
drivers/i2c/busses/i2c-ismt.c
drivers/i2c/busses/i2c-meson.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-mt7621.c
drivers/i2c/busses/i2c-npcm7xx.c
drivers/i2c/busses/i2c-powermac.c
drivers/i2c/busses/i2c-qcom-geni.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-xiic.c
drivers/i3c/master/mipi-i3c-hci/core.c
drivers/i3c/master/svc-i3c-master.c
drivers/iio/accel/Kconfig
drivers/iio/accel/adxl355_core.c
drivers/iio/accel/adxl367.c
drivers/iio/accel/bmc150-accel-core.c
drivers/iio/accel/dmard09.c
drivers/iio/accel/fxls8962af-core.c
drivers/iio/accel/kxsd9-spi.c
drivers/iio/accel/mma8452.c
drivers/iio/accel/sca3000.c
drivers/iio/accel/ssp_accel_sensor.c
drivers/iio/accel/st_accel.h
drivers/iio/accel/st_accel_core.c
drivers/iio/accel/st_accel_i2c.c
drivers/iio/accel/st_accel_spi.c
drivers/iio/adc/Kconfig
drivers/iio/adc/ad7124.c
drivers/iio/adc/ad7192.c
drivers/iio/adc/ad7266.c
drivers/iio/adc/ad7280a.c
drivers/iio/adc/ad_sigma_delta.c
drivers/iio/adc/at91-sama5d2_adc.c
drivers/iio/adc/ina2xx-adc.c
drivers/iio/adc/palmas_gpadc.c
drivers/iio/adc/sc27xx_adc.c
drivers/iio/adc/stm32-dfsdm-adc.c
drivers/iio/adc/stmpe-adc.c
drivers/iio/adc/ti-ads1015.c
drivers/iio/adc/ti-ads8688.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/afe/Kconfig
drivers/iio/afe/iio-rescale.c
drivers/iio/buffer/kfifo_buf.c
drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
drivers/iio/common/scmi_sensors/scmi_iio.c
drivers/iio/common/ssp_sensors/ssp_spi.c
drivers/iio/common/st_sensors/st_sensors_core.c
drivers/iio/dac/Kconfig
drivers/iio/dac/ad5064.c
drivers/iio/dac/ad5360.c
drivers/iio/dac/ad5380.c
drivers/iio/dac/ad5446.c
drivers/iio/dac/ad5504.c
drivers/iio/dac/ad5624r_spi.c
drivers/iio/dac/ad5686.c
drivers/iio/dac/ad5755.c
drivers/iio/dac/ad5791.c
drivers/iio/dac/ad7303.c
drivers/iio/dac/ltc2632.c
drivers/iio/dac/ltc2688.c
drivers/iio/dac/max5821.c
drivers/iio/dac/mcp4725.c
drivers/iio/dac/stm32-dac.c
drivers/iio/dac/ti-dac082s085.c
drivers/iio/dac/ti-dac5571.c
drivers/iio/dac/ti-dac7311.c
drivers/iio/dummy/iio_simple_dummy.c
drivers/iio/dummy/iio_simple_dummy_buffer.c
drivers/iio/frequency/ad9523.c
drivers/iio/gyro/fxas21002c_core.c
drivers/iio/gyro/mpu3050-core.c
drivers/iio/gyro/mpu3050-i2c.c
drivers/iio/gyro/mpu3050.h
drivers/iio/gyro/ssp_gyro_sensor.c
drivers/iio/gyro/st_gyro_core.c
drivers/iio/health/max30100.c
drivers/iio/health/max30102.c
drivers/iio/imu/adis16480.c
drivers/iio/imu/bmi160/bmi160_core.c
drivers/iio/imu/bmi160/bmi160_i2c.c
drivers/iio/imu/bmi160/bmi160_spi.c
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
drivers/iio/imu/inv_mpu6050/Kconfig
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
drivers/iio/imu/st_lsm6dsx/Kconfig
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
drivers/iio/industrialio-buffer.c
drivers/iio/industrialio-core.c
drivers/iio/industrialio-event.c
drivers/iio/industrialio-trigger.c
drivers/iio/light/Kconfig
drivers/iio/light/apds9960.c
drivers/iio/light/stk3310.c
drivers/iio/light/tsl2772.c
drivers/iio/magnetometer/Kconfig
drivers/iio/magnetometer/rm3100-core.c
drivers/iio/magnetometer/st_magn_core.c
drivers/iio/multiplexer/Kconfig
drivers/iio/multiplexer/iio-mux.c
drivers/iio/pressure/st_pressure_core.c
drivers/iio/proximity/mb1232.c
drivers/iio/proximity/ping.c
drivers/iio/proximity/vl53l0x-i2c.c
drivers/iio/temperature/ltc2983.c
drivers/iio/temperature/max31856.c
drivers/iio/temperature/max31865.c
drivers/iio/trigger/iio-trig-sysfs.c
drivers/input/misc/ati_remote2.c
drivers/input/misc/cm109.c
drivers/input/misc/powermate.c
drivers/input/misc/xen-kbdfront.c
drivers/input/misc/yealink.c
drivers/input/mouse/pxa930_trkball.c
drivers/input/tablet/acecad.c
drivers/input/tablet/pegasus_notetaker.c
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/mainstone-wm97xx.c
drivers/input/touchscreen/wm97xx-core.c
drivers/input/touchscreen/zylonite-wm97xx.c
drivers/interconnect/qcom/Kconfig
drivers/interconnect/qcom/Makefile
drivers/interconnect/qcom/icc-rpm.c
drivers/interconnect/qcom/icc-rpm.h
drivers/interconnect/qcom/icc-rpmh.c
drivers/interconnect/qcom/icc-rpmh.h
drivers/interconnect/qcom/msm8916.c
drivers/interconnect/qcom/msm8939.c
drivers/interconnect/qcom/msm8974.c
drivers/interconnect/qcom/msm8996.c
drivers/interconnect/qcom/osm-l3.c
drivers/interconnect/qcom/qcm2290.c
drivers/interconnect/qcom/qcs404.c
drivers/interconnect/qcom/sc7180.c
drivers/interconnect/qcom/sc7280.c
drivers/interconnect/qcom/sc8180x.c
drivers/interconnect/qcom/sc8180x.h
drivers/interconnect/qcom/sc8280xp.c [new file with mode: 0644]
drivers/interconnect/qcom/sc8280xp.h [new file with mode: 0644]
drivers/interconnect/qcom/sdm660.c
drivers/interconnect/qcom/sdm845.c
drivers/interconnect/qcom/sdx55.c
drivers/interconnect/qcom/sdx65.c [new file with mode: 0644]
drivers/interconnect/qcom/sdx65.h [new file with mode: 0644]
drivers/interconnect/qcom/sm8150.c
drivers/interconnect/qcom/sm8250.c
drivers/interconnect/qcom/sm8350.c
drivers/interconnect/qcom/sm8450.c
drivers/iommu/amd/amd_iommu_types.h
drivers/iommu/amd/init.c
drivers/iommu/amd/iommu.c
drivers/iommu/amd/iommu_v2.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
drivers/iommu/arm/arm-smmu/arm-smmu.c
drivers/iommu/dma-iommu.c
drivers/iommu/fsl_pamu.c
drivers/iommu/fsl_pamu_domain.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/pasid.c
drivers/iommu/intel/pasid.h
drivers/iommu/iommu.c
drivers/iommu/msm_iommu.c
drivers/iommu/mtk_iommu.c
drivers/iommu/mtk_iommu.h [deleted file]
drivers/iommu/mtk_iommu_v1.c
drivers/iommu/s390-iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-loongson-liointc.c
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/flash/leds-ktd2692.c
drivers/leds/leds-is31fl32xx.c
drivers/leds/leds-locomo.c
drivers/leds/leds-lp50xx.c
drivers/leds/leds-pca9532.c
drivers/leds/leds-regulator.c
drivers/leds/rgb/Kconfig [new file with mode: 0644]
drivers/leds/rgb/Makefile [new file with mode: 0644]
drivers/leds/rgb/leds-pwm-multicolor.c [new file with mode: 0644]
drivers/leds/rgb/leds-qcom-lpg.c [new file with mode: 0644]
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h
drivers/md/bcache/journal.c
drivers/md/bcache/journal.h
drivers/md/bcache/request.c
drivers/md/bcache/super.c
drivers/md/bcache/writeback.c
drivers/md/bcache/writeback.h
drivers/md/dm-raid.c
drivers/md/dm-table.c
drivers/md/dm-verity-target.c
drivers/md/md-linear.c
drivers/md/md-multipath.c
drivers/md/md.c
drivers/md/md.h
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5-cache.c
drivers/md/raid5-ppl.c
drivers/md/raid5.c
drivers/media/rc/ati_remote.c
drivers/media/rc/mceusb.c
drivers/media/rc/streamzap.c
drivers/media/rc/xbox_remote.c
drivers/media/usb/tm6000/tm6000-dvb.c
drivers/media/usb/tm6000/tm6000-input.c
drivers/media/usb/tm6000/tm6000-video.c
drivers/memory/emif.c
drivers/mfd/tc6393xb.c
drivers/misc/altera-stapl/altera.c
drivers/misc/bcm-vk/bcm_vk_msg.c
drivers/misc/cardreader/alcor_pci.c
drivers/misc/cardreader/rts5261.c
drivers/misc/cardreader/rtsx_usb.c
drivers/misc/fastrpc.c
drivers/misc/habanalabs/common/Makefile
drivers/misc/habanalabs/common/command_buffer.c
drivers/misc/habanalabs/common/command_submission.c
drivers/misc/habanalabs/common/context.c
drivers/misc/habanalabs/common/debugfs.c
drivers/misc/habanalabs/common/device.c
drivers/misc/habanalabs/common/firmware_if.c
drivers/misc/habanalabs/common/habanalabs.h
drivers/misc/habanalabs/common/habanalabs_drv.c
drivers/misc/habanalabs/common/habanalabs_ioctl.c
drivers/misc/habanalabs/common/irq.c
drivers/misc/habanalabs/common/memory.c
drivers/misc/habanalabs/common/memory_mgr.c [new file with mode: 0644]
drivers/misc/habanalabs/common/mmu/mmu.c
drivers/misc/habanalabs/common/mmu/mmu_v1.c
drivers/misc/habanalabs/common/pci/pci.c
drivers/misc/habanalabs/gaudi/gaudi.c
drivers/misc/habanalabs/gaudi/gaudiP.h
drivers/misc/habanalabs/goya/goya.c
drivers/misc/habanalabs/include/common/cpucp_if.h
drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
drivers/misc/lkdtm/bugs.c
drivers/misc/lkdtm/cfi.c
drivers/misc/lkdtm/core.c
drivers/misc/lkdtm/fortify.c
drivers/misc/lkdtm/heap.c
drivers/misc/lkdtm/lkdtm.h
drivers/misc/lkdtm/perms.c
drivers/misc/lkdtm/powerpc.c
drivers/misc/lkdtm/refcount.c
drivers/misc/lkdtm/stackleak.c
drivers/misc/lkdtm/usercopy.c
drivers/misc/mei/hdcp/mei_hdcp.c
drivers/misc/mei/pxp/mei_pxp.c
drivers/misc/pvpanic/pvpanic.c
drivers/misc/vmw_balloon.c
drivers/misc/vmw_vmci/Kconfig
drivers/misc/vmw_vmci/vmci_context.c
drivers/misc/vmw_vmci/vmci_guest.c
drivers/misc/vmw_vmci/vmci_queue_pair.c
drivers/mmc/host/Kconfig
drivers/mmc/host/pxamci.c
drivers/mtd/maps/pxa2xx-flash.c
drivers/mtd/ubi/fastmap-wl.c
drivers/mtd/ubi/fastmap.c
drivers/mtd/ubi/ubi.h
drivers/mtd/ubi/vmt.c
drivers/mtd/ubi/wl.c
drivers/mtd/ubi/wl.h
drivers/net/amt.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c
drivers/net/bonding/bond_options.c
drivers/net/bonding/bond_procfs.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
drivers/net/ethernet/intel/ice/Makefile
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_switch.h
drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/sriov.c
drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
drivers/net/ethernet/microchip/lan743x_main.c
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
drivers/net/ethernet/netronome/nfp/nfdk/dp.c
drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
drivers/net/ethernet/netronome/nfp/nfp_net.h
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/sfc/efx_channels.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/siena/efx_channels.c
drivers/net/ethernet/sfc/siena/net_driver.h
drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/ipa/ipa_endpoint.c
drivers/net/macsec.c
drivers/net/phy/at803x.c
drivers/net/phy/fixed_phy.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/rndis_host.c
drivers/net/usb/usbnet.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/marvell/libertas/cfg.c
drivers/net/wireless/marvell/libertas/host.h
drivers/net/wireless/mediatek/mt76/usb.c
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
drivers/net/wireless/realtek/rtw88/fw.c
drivers/net/wireless/realtek/rtw88/fw.h
drivers/net/wireless/realtek/rtw88/mac80211.c
drivers/net/wireless/realtek/rtw88/main.c
drivers/net/wireless/realtek/rtw88/main.h
drivers/net/wireless/silabs/wfx/hif_tx.c
drivers/net/wireless/silabs/wfx/main.c
drivers/net/wireless/silabs/wfx/sta.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/pci.c
drivers/nvme/target/passthru.c
drivers/nvmem/Kconfig
drivers/nvmem/Makefile
drivers/nvmem/apple-efuses.c [new file with mode: 0644]
drivers/nvmem/bcm-ocotp.c
drivers/nvmem/brcm_nvram.c
drivers/nvmem/core.c
drivers/nvmem/layerscape-sfp.c
drivers/nvmem/qfprom.c
drivers/nvmem/sunplus-ocotp.c
drivers/opp/core.c
drivers/opp/debugfs.c
drivers/opp/of.c
drivers/pci/controller/pcie-brcmstb.c
drivers/pci/of.c
drivers/pci/pci-driver.c
drivers/pci/pci-stub.c
drivers/pci/pci-sysfs.c
drivers/pci/pci.c
drivers/pci/pcie/portdrv_pci.c
drivers/pcmcia/Kconfig
drivers/pcmcia/Makefile
drivers/pcmcia/bcm63xx_pcmcia.c
drivers/pcmcia/pxa2xx_base.c
drivers/pcmcia/pxa2xx_sharpsl.c
drivers/pcmcia/rsrc_nonstatic.c
drivers/pcmcia/sa1111_generic.c
drivers/pcmcia/sa1111_lubbock.c
drivers/pcmcia/soc_common.c
drivers/pcmcia/soc_common.h
drivers/phy/Kconfig
drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
drivers/phy/cadence/phy-cadence-sierra.c
drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
drivers/phy/freescale/phy-fsl-imx8m-pcie.c
drivers/phy/mediatek/phy-mtk-hdmi.c
drivers/phy/mediatek/phy-mtk-mipi-dsi.c
drivers/phy/phy-can-transceiver.c
drivers/phy/phy-core.c
drivers/phy/qualcomm/phy-qcom-qmp.c
drivers/phy/rockchip/phy-rockchip-dphy-rx0.c
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
drivers/phy/rockchip/phy-rockchip-typec.c
drivers/platform/mips/cpu_hwmon.c
drivers/power/supply/ab8500_fg.c
drivers/power/supply/axp288_charger.c
drivers/power/supply/axp288_fuel_gauge.c
drivers/power/supply/bq24190_charger.c
drivers/power/supply/bq27xxx_battery.c
drivers/power/supply/charger-manager.c
drivers/power/supply/max8997_charger.c
drivers/power/supply/power_supply_core.c
drivers/power/supply/tosa_battery.c
drivers/ptp/ptp_clockmatrix.c
drivers/pwm/Kconfig
drivers/pwm/Makefile
drivers/pwm/pwm-atmel-tcb.c
drivers/pwm/pwm-clps711x.c
drivers/pwm/pwm-cros-ec.c
drivers/pwm/pwm-lp3943.c
drivers/pwm/pwm-lpc18xx-sct.c
drivers/pwm/pwm-lpc32xx.c
drivers/pwm/pwm-mediatek.c
drivers/pwm/pwm-raspberrypi-poe.c
drivers/pwm/pwm-renesas-tpu.c
drivers/pwm/pwm-samsung.c
drivers/pwm/pwm-sifive.c
drivers/pwm/pwm-sti.c
drivers/pwm/pwm-stmpe.c
drivers/pwm/pwm-sun4i.c
drivers/pwm/pwm-sunplus.c [new file with mode: 0644]
drivers/pwm/pwm-tegra.c
drivers/pwm/pwm-twl-led.c
drivers/pwm/pwm-xilinx.c [new file with mode: 0644]
drivers/regulator/pfuze100-regulator.c
drivers/remoteproc/imx_dsp_rproc.c
drivers/remoteproc/imx_rproc.c
drivers/remoteproc/mtk_common.h
drivers/remoteproc/mtk_scp.c
drivers/remoteproc/qcom_q6v5_pas.c
drivers/remoteproc/remoteproc_cdev.c
drivers/remoteproc/remoteproc_core.c
drivers/remoteproc/remoteproc_debugfs.c
drivers/remoteproc/remoteproc_elf_loader.c
drivers/remoteproc/remoteproc_sysfs.c
drivers/rpmsg/qcom_smd.c
drivers/rpmsg/rpmsg_core.c
drivers/rpmsg/rpmsg_internal.h
drivers/rpmsg/rpmsg_ns.c
drivers/rpmsg/virtio_rpmsg_bus.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/rtc-ftrtc010.c
drivers/rtc/rtc-gamecube.c
drivers/rtc/rtc-meson.c
drivers/rtc/rtc-mt6397.c
drivers/rtc/rtc-mxc.c
drivers/rtc/rtc-pcf85063.c
drivers/rtc/rtc-pxa.c
drivers/rtc/rtc-rx8025.c
drivers/rtc/rtc-rzn1.c [new file with mode: 0644]
drivers/rtc/rtc-sun6i.c
drivers/s390/cio/cio.h
drivers/s390/cio/css.c
drivers/s390/cio/vfio_ccw_cp.c
drivers/s390/cio/vfio_ccw_cp.h
drivers/s390/cio/vfio_ccw_fsm.c
drivers/s390/cio/vfio_ccw_ops.c
drivers/s390/crypto/vfio_ap_ops.c
drivers/s390/crypto/vfio_ap_private.h
drivers/s390/virtio/virtio_ccw.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/esas2r/esas2r_flash.c
drivers/scsi/isci/request.c
drivers/scsi/lpfc/Makefile
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_ids.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_vmid.c [new file with mode: 0644]
drivers/scsi/mpi3mr/mpi3mr.h
drivers/scsi/mpi3mr/mpi3mr_app.c
drivers/scsi/myrb.c
drivers/scsi/pmcraid.c
drivers/scsi/qedf/qedf_io.c
drivers/scsi/qla1280.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/scsi/sd_zbc.c
drivers/scsi/sg.c
drivers/scsi/smartpqi/smartpqi.h
drivers/scsi/st.c
drivers/scsi/storvsc_drv.c
drivers/slimbus/qcom-ctrl.c
drivers/slimbus/qcom-ngd-ctrl.c
drivers/soc/Kconfig
drivers/soc/Makefile
drivers/soc/ixp4xx/ixp4xx-qmgr.c
drivers/soc/pxa/Kconfig [moved from arch/arm/plat-pxa/Kconfig with 83% similarity]
drivers/soc/pxa/Makefile [moved from arch/arm/plat-pxa/Makefile with 51% similarity]
drivers/soc/pxa/mfp.c [moved from arch/arm/plat-pxa/mfp.c with 99% similarity]
drivers/soc/pxa/ssp.c [moved from arch/arm/plat-pxa/ssp.c with 100% similarity]
drivers/soc/rockchip/grf.c
drivers/soc/tegra/pmc.c
drivers/soc/xilinx/xlnx_event_manager.c
drivers/soc/xilinx/zynqmp_power.c
drivers/soundwire/bus.c
drivers/soundwire/cadence_master.c
drivers/soundwire/intel.c
drivers/soundwire/qcom.c
drivers/soundwire/stream.c
drivers/spi/spi-fsi.c
drivers/spi/spi.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/fieldbus/anybuss/host.c
drivers/staging/greybus/arche-apb-ctrl.c
drivers/staging/greybus/arche-platform.c
drivers/staging/greybus/audio_codec.c
drivers/staging/greybus/pwm.c
drivers/staging/greybus/tools/loopback_test.c
drivers/staging/iio/cdc/ad7746.c
drivers/staging/iio/impedance-analyzer/ad5933.c
drivers/staging/iio/resolver/ad2s1210.c
drivers/staging/ks7010/ks_hostif.c
drivers/staging/ks7010/ks_wlan.h
drivers/staging/most/dim2/dim2.c
drivers/staging/qlge/qlge.h
drivers/staging/r8188eu/core/rtw_ap.c
drivers/staging/r8188eu/core/rtw_br_ext.c
drivers/staging/r8188eu/core/rtw_cmd.c
drivers/staging/r8188eu/core/rtw_fw.c
drivers/staging/r8188eu/core/rtw_ieee80211.c
drivers/staging/r8188eu/core/rtw_ioctl_set.c
drivers/staging/r8188eu/core/rtw_iol.c
drivers/staging/r8188eu/core/rtw_led.c
drivers/staging/r8188eu/core/rtw_mlme.c
drivers/staging/r8188eu/core/rtw_mlme_ext.c
drivers/staging/r8188eu/core/rtw_p2p.c
drivers/staging/r8188eu/core/rtw_pwrctrl.c
drivers/staging/r8188eu/core/rtw_recv.c
drivers/staging/r8188eu/core/rtw_security.c
drivers/staging/r8188eu/core/rtw_sta_mgt.c
drivers/staging/r8188eu/core/rtw_wlan_util.c
drivers/staging/r8188eu/core/rtw_xmit.c
drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
drivers/staging/r8188eu/hal/hal_com.c
drivers/staging/r8188eu/hal/odm_HWConfig.c
drivers/staging/r8188eu/hal/rtl8188e_cmd.c
drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
drivers/staging/r8188eu/hal/usb_halinit.c
drivers/staging/r8188eu/hal/usb_ops_linux.c
drivers/staging/r8188eu/include/HalVerDef.h
drivers/staging/r8188eu/include/basic_types.h
drivers/staging/r8188eu/include/drv_types.h
drivers/staging/r8188eu/include/hal_intf.h
drivers/staging/r8188eu/include/ieee80211.h
drivers/staging/r8188eu/include/odm.h
drivers/staging/r8188eu/include/osdep_service.h
drivers/staging/r8188eu/include/rtl8188e_hal.h
drivers/staging/r8188eu/include/rtl8188e_spec.h
drivers/staging/r8188eu/include/rtw_debug.h [deleted file]
drivers/staging/r8188eu/include/rtw_eeprom.h
drivers/staging/r8188eu/include/rtw_fw.h
drivers/staging/r8188eu/include/rtw_ioctl.h
drivers/staging/r8188eu/include/rtw_mlme.h
drivers/staging/r8188eu/include/rtw_mlme_ext.h
drivers/staging/r8188eu/include/rtw_pwrctrl.h
drivers/staging/r8188eu/include/rtw_recv.h
drivers/staging/r8188eu/include/rtw_xmit.h
drivers/staging/r8188eu/include/sta_info.h
drivers/staging/r8188eu/include/usb_ops.h
drivers/staging/r8188eu/include/usb_osintf.h
drivers/staging/r8188eu/include/usb_vendor_req.h [deleted file]
drivers/staging/r8188eu/include/wifi.h
drivers/staging/r8188eu/os_dep/ioctl_linux.c
drivers/staging/r8188eu/os_dep/mlme_linux.c
drivers/staging/r8188eu/os_dep/os_intfs.c
drivers/staging/r8188eu/os_dep/osdep_service.c
drivers/staging/r8188eu/os_dep/usb_intf.c
drivers/staging/r8188eu/os_dep/usb_ops_linux.c
drivers/staging/r8188eu/os_dep/xmit_linux.c
drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
drivers/staging/rtl8192e/rtl819x_BAProc.c
drivers/staging/rtl8192e/rtllib.h
drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
drivers/staging/rtl8192e/rtllib_crypt_tkip.c
drivers/staging/rtl8192e/rtllib_rx.c
drivers/staging/rtl8192e/rtllib_softmac.c
drivers/staging/rtl8192e/rtllib_softmac_wx.c
drivers/staging/rtl8192e/rtllib_wx.c
drivers/staging/rtl8192u/ieee80211/ieee80211.h
drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
drivers/staging/rtl8192u/r8192U_core.c
drivers/staging/rtl8712/drv_types.h
drivers/staging/rtl8712/ieee80211.c
drivers/staging/rtl8712/os_intfs.c
drivers/staging/rtl8712/rtl8712_cmdctrl_bitdef.h
drivers/staging/rtl8712/rtl8712_efuse.h
drivers/staging/rtl8712/rtl8712_macsetting_bitdef.h
drivers/staging/rtl8712/rtl8712_macsetting_regdef.h
drivers/staging/rtl8712/rtl8712_ratectrl_regdef.h
drivers/staging/rtl8712/rtl8712_recv.c
drivers/staging/rtl8712/rtl8712_security_bitdef.h
drivers/staging/rtl8712/rtl8712_spec.h
drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h
drivers/staging/rtl8712/rtl8712_syscfg_regdef.h
drivers/staging/rtl8712/rtl8712_timectrl_bitdef.h
drivers/staging/rtl8712/rtl8712_wmac_bitdef.h
drivers/staging/rtl8712/rtl871x_cmd.c
drivers/staging/rtl8712/rtl871x_cmd.h
drivers/staging/rtl8712/rtl871x_ioctl.h
drivers/staging/rtl8712/rtl871x_ioctl_linux.c
drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
drivers/staging/rtl8712/rtl871x_ioctl_set.c
drivers/staging/rtl8712/rtl871x_mlme.c
drivers/staging/rtl8712/rtl871x_mp_ioctl.h
drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
drivers/staging/rtl8712/rtl871x_recv.c
drivers/staging/rtl8712/rtl871x_security.c
drivers/staging/rtl8712/sta_info.h
drivers/staging/rtl8712/usb_intf.c
drivers/staging/rtl8712/usb_ops.c
drivers/staging/rtl8712/usb_ops_linux.c
drivers/staging/rtl8712/wifi.h
drivers/staging/rtl8712/xmit_linux.c
drivers/staging/rtl8723bs/core/rtw_ap.c
drivers/staging/rtl8723bs/core/rtw_cmd.c
drivers/staging/rtl8723bs/core/rtw_efuse.c
drivers/staging/rtl8723bs/core/rtw_ieee80211.c
drivers/staging/rtl8723bs/core/rtw_mlme.c
drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
drivers/staging/rtl8723bs/core/rtw_rf.c
drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
drivers/staging/rtl8723bs/hal/hal_btcoex.c
drivers/staging/rtl8723bs/hal/sdio_ops.c
drivers/staging/rtl8723bs/include/HalVerDef.h
drivers/staging/rtl8723bs/include/drv_types.h
drivers/staging/rtl8723bs/include/hal_com_reg.h
drivers/staging/rtl8723bs/include/rtw_ioctl.h [deleted file]
drivers/staging/rtl8723bs/os_dep/os_intfs.c
drivers/staging/rts5208/rtsx_transport.c
drivers/staging/sm750fb/sm750_hw.c
drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset [deleted file]
drivers/staging/unisys/Documentation/overview.txt [deleted file]
drivers/staging/unisys/Kconfig [deleted file]
drivers/staging/unisys/MAINTAINERS [deleted file]
drivers/staging/unisys/Makefile [deleted file]
drivers/staging/unisys/TODO [deleted file]
drivers/staging/unisys/include/iochannel.h [deleted file]
drivers/staging/unisys/visorhba/Kconfig [deleted file]
drivers/staging/unisys/visorhba/Makefile [deleted file]
drivers/staging/unisys/visorhba/visorhba_main.c [deleted file]
drivers/staging/unisys/visorinput/Kconfig [deleted file]
drivers/staging/unisys/visorinput/Makefile [deleted file]
drivers/staging/unisys/visorinput/visorinput.c [deleted file]
drivers/staging/unisys/visornic/Kconfig [deleted file]
drivers/staging/unisys/visornic/Makefile [deleted file]
drivers/staging/unisys/visornic/visornic_main.c [deleted file]
drivers/staging/vc04_services/Kconfig
drivers/staging/vc04_services/bcm2835-audio/Kconfig
drivers/staging/vc04_services/bcm2835-audio/TODO [deleted file]
drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
drivers/staging/vc04_services/bcm2835-camera/Kconfig
drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
drivers/staging/vc04_services/bcm2835-camera/controls.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h
drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h
drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h
drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
drivers/staging/vme_user/Kconfig [moved from drivers/staging/vme/devices/Kconfig with 93% similarity]
drivers/staging/vme_user/Makefile [moved from drivers/staging/vme/devices/Makefile with 100% similarity]
drivers/staging/vme_user/vme_user.c [moved from drivers/staging/vme/devices/vme_user.c with 99% similarity]
drivers/staging/vme_user/vme_user.h [moved from drivers/staging/vme/devices/vme_user.h with 100% similarity]
drivers/staging/vt6655/baseband.c
drivers/staging/vt6655/card.c
drivers/staging/vt6655/card.h
drivers/staging/vt6655/channel.c
drivers/staging/vt6655/device_main.c
drivers/staging/vt6655/key.c
drivers/staging/vt6655/mac.c
drivers/staging/vt6655/mac.h
drivers/staging/vt6655/rf.c
drivers/staging/vt6655/rxtx.c
drivers/staging/vt6655/srom.c
drivers/staging/vt6655/tmacro.h [deleted file]
drivers/staging/vt6655/upc.h
drivers/staging/vt6656/channel.c
drivers/staging/vt6656/rf.c
drivers/staging/wlan-ng/cfg80211.c
drivers/staging/wlan-ng/hfa384x.h
drivers/staging/wlan-ng/hfa384x_usb.c
drivers/staging/wlan-ng/prism2usb.c
drivers/target/target_core_pscsi.c
drivers/tee/optee/call.c
drivers/thermal/intel/int340x_thermal/int3400_thermal.c
drivers/thermal/intel/int340x_thermal/int3403_thermal.c
drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
drivers/thunderbolt/ctl.c
drivers/thunderbolt/domain.c
drivers/thunderbolt/nhi.c
drivers/thunderbolt/path.c
drivers/thunderbolt/switch.c
drivers/thunderbolt/tb.c
drivers/thunderbolt/tb.h
drivers/thunderbolt/tb_msgs.h
drivers/thunderbolt/tb_regs.h
drivers/thunderbolt/test.c
drivers/thunderbolt/tunnel.c
drivers/thunderbolt/tunnel.h
drivers/thunderbolt/usb4_port.c
drivers/thunderbolt/xdomain.c
drivers/tty/amiserial.c
drivers/tty/goldfish.c
drivers/tty/hvc/Kconfig
drivers/tty/hvc/hvc_dcc.c
drivers/tty/hvc/hvc_opal.c
drivers/tty/hvc/hvc_vio.c
drivers/tty/hvc/hvc_xen.c
drivers/tty/hvc/hvcs.c
drivers/tty/hvc/hvsi.c
drivers/tty/mxser.c
drivers/tty/n_gsm.c
drivers/tty/n_tty.c
drivers/tty/serial/8250/8250.h
drivers/tty/serial/8250/8250_aspeed_vuart.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_dma.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/8250/8250_dwlib.c
drivers/tty/serial/8250/8250_dwlib.h
drivers/tty/serial/8250/8250_fintek.c
drivers/tty/serial/8250/8250_mtk.c
drivers/tty/serial/8250/8250_of.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/8250/8250_pxa.c
drivers/tty/serial/8250/Kconfig
drivers/tty/serial/Kconfig
drivers/tty/serial/altera_jtaguart.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/amba-pl011.h [deleted file]
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/cpm_uart/cpm_uart.h
drivers/tty/serial/cpm_uart/cpm_uart_core.c
drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
drivers/tty/serial/digicolor-usart.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/icom.c
drivers/tty/serial/icom.h [deleted file]
drivers/tty/serial/imx.c
drivers/tty/serial/jsm/jsm_cls.c
drivers/tty/serial/jsm/jsm_neo.c
drivers/tty/serial/max310x.c
drivers/tty/serial/men_z135_uart.c
drivers/tty/serial/meson_uart.c
drivers/tty/serial/mpc52xx_uart.c
drivers/tty/serial/msm_serial.c
drivers/tty/serial/omap-serial.c
drivers/tty/serial/owl-uart.c
drivers/tty/serial/pch_uart.c
drivers/tty/serial/pic32_uart.c
drivers/tty/serial/pic32_uart.h [deleted file]
drivers/tty/serial/pmac_zilog.c
drivers/tty/serial/pmac_zilog.h
drivers/tty/serial/qcom_geni_serial.c
drivers/tty/serial/rda-uart.c
drivers/tty/serial/sa1100.c
drivers/tty/serial/samsung_tty.c
drivers/tty/serial/sc16is7xx.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/serial_txx9.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/sifive.c
drivers/tty/serial/st-asc.c
drivers/tty/serial/stm32-usart.c
drivers/tty/serial/stm32-usart.h
drivers/tty/serial/sunplus-uart.c
drivers/tty/serial/sunsu.c
drivers/tty/serial/uartlite.c
drivers/tty/serial/xilinx_uartps.c
drivers/tty/serial/zs.c
drivers/tty/synclink_gt.c
drivers/tty/sysrq.c
drivers/tty/tty_baudrate.c
drivers/tty/tty_ioctl.c
drivers/tty/tty_jobctrl.c
drivers/ufs/Kconfig [new file with mode: 0644]
drivers/ufs/Makefile [new file with mode: 0644]
drivers/ufs/core/Kconfig [new file with mode: 0644]
drivers/ufs/core/Makefile [new file with mode: 0644]
drivers/ufs/core/ufs-debugfs.c [moved from drivers/scsi/ufs/ufs-debugfs.c with 99% similarity]
drivers/ufs/core/ufs-debugfs.h [moved from drivers/scsi/ufs/ufs-debugfs.h with 100% similarity]
drivers/ufs/core/ufs-fault-injection.c [moved from drivers/scsi/ufs/ufs-fault-injection.c with 100% similarity]
drivers/ufs/core/ufs-fault-injection.h [moved from drivers/scsi/ufs/ufs-fault-injection.h with 100% similarity]
drivers/ufs/core/ufs-hwmon.c [moved from drivers/scsi/ufs/ufs-hwmon.c with 99% similarity]
drivers/ufs/core/ufs-sysfs.c [moved from drivers/scsi/ufs/ufs-sysfs.c with 99% similarity]
drivers/ufs/core/ufs-sysfs.h [moved from drivers/scsi/ufs/ufs-sysfs.h with 100% similarity]
drivers/ufs/core/ufs_bsg.c [moved from drivers/scsi/ufs/ufs_bsg.c with 99% similarity]
drivers/ufs/core/ufs_bsg.h [moved from drivers/scsi/ufs/ufs_bsg.h with 100% similarity]
drivers/ufs/core/ufshcd-crypto.c [moved from drivers/scsi/ufs/ufshcd-crypto.c with 99% similarity]
drivers/ufs/core/ufshcd-crypto.h [moved from drivers/scsi/ufs/ufshcd-crypto.h with 97% similarity]
drivers/ufs/core/ufshcd-priv.h [moved from drivers/scsi/ufs/ufshcd-priv.h with 99% similarity]
drivers/ufs/core/ufshcd.c [moved from drivers/scsi/ufs/ufshcd.c with 99% similarity]
drivers/ufs/core/ufshpb.c [moved from drivers/scsi/ufs/ufshpb.c with 99% similarity]
drivers/ufs/core/ufshpb.h [moved from drivers/scsi/ufs/ufshpb.h with 100% similarity]
drivers/ufs/host/Kconfig [moved from drivers/scsi/ufs/Kconfig with 56% similarity]
drivers/ufs/host/Makefile [moved from drivers/scsi/ufs/Makefile with 56% similarity]
drivers/ufs/host/cdns-pltfrm.c [moved from drivers/scsi/ufs/cdns-pltfrm.c with 100% similarity]
drivers/ufs/host/tc-dwc-g210-pci.c [moved from drivers/scsi/ufs/tc-dwc-g210-pci.c with 99% similarity]
drivers/ufs/host/tc-dwc-g210-pltfrm.c [moved from drivers/scsi/ufs/tc-dwc-g210-pltfrm.c with 100% similarity]
drivers/ufs/host/tc-dwc-g210.c [moved from drivers/scsi/ufs/tc-dwc-g210.c with 99% similarity]
drivers/ufs/host/tc-dwc-g210.h [moved from drivers/scsi/ufs/tc-dwc-g210.h with 100% similarity]
drivers/ufs/host/ti-j721e-ufs.c [moved from drivers/scsi/ufs/ti-j721e-ufs.c with 100% similarity]
drivers/ufs/host/ufs-exynos.c [moved from drivers/scsi/ufs/ufs-exynos.c with 99% similarity]
drivers/ufs/host/ufs-exynos.h [moved from drivers/scsi/ufs/ufs-exynos.h with 100% similarity]
drivers/ufs/host/ufs-hisi.c [moved from drivers/scsi/ufs/ufs-hisi.c with 99% similarity]
drivers/ufs/host/ufs-hisi.h [moved from drivers/scsi/ufs/ufs-hisi.h with 100% similarity]
drivers/ufs/host/ufs-mediatek-trace.h [moved from drivers/scsi/ufs/ufs-mediatek-trace.h with 93% similarity]
drivers/ufs/host/ufs-mediatek.c [moved from drivers/scsi/ufs/ufs-mediatek.c with 99% similarity]
drivers/ufs/host/ufs-mediatek.h [moved from drivers/scsi/ufs/ufs-mediatek.h with 100% similarity]
drivers/ufs/host/ufs-qcom-ice.c [moved from drivers/scsi/ufs/ufs-qcom-ice.c with 100% similarity]
drivers/ufs/host/ufs-qcom.c [moved from drivers/scsi/ufs/ufs-qcom.c with 99% similarity]
drivers/ufs/host/ufs-qcom.h [moved from drivers/scsi/ufs/ufs-qcom.h with 99% similarity]
drivers/ufs/host/ufshcd-dwc.c [moved from drivers/scsi/ufs/ufshcd-dwc.c with 98% similarity]
drivers/ufs/host/ufshcd-dwc.h [moved from drivers/scsi/ufs/ufshcd-dwc.h with 95% similarity]
drivers/ufs/host/ufshcd-pci.c [moved from drivers/scsi/ufs/ufshcd-pci.c with 99% similarity]
drivers/ufs/host/ufshcd-pltfrm.c [moved from drivers/scsi/ufs/ufshcd-pltfrm.c with 99% similarity]
drivers/ufs/host/ufshcd-pltfrm.h [moved from drivers/scsi/ufs/ufshcd-pltfrm.h with 98% similarity]
drivers/ufs/host/ufshci-dwc.h [moved from drivers/scsi/ufs/ufshci-dwc.h with 100% similarity]
drivers/uio/uio_dfl.c
drivers/usb/atm/usbatm.c
drivers/usb/c67x00/c67x00-drv.c
drivers/usb/c67x00/c67x00-sched.c
drivers/usb/cdns3/cdns3-gadget.c
drivers/usb/cdns3/cdns3-gadget.h
drivers/usb/class/cdc-acm.h
drivers/usb/core/devices.c
drivers/usb/core/driver.c
drivers/usb/core/hcd-pci.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/core/usb-acpi.c
drivers/usb/dwc2/core.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/params.c
drivers/usb/dwc3/Kconfig
drivers/usb/dwc3/Makefile
drivers/usb/dwc3/core.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/drd.c
drivers/usb/dwc3/dwc3-am62.c [new file with mode: 0644]
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/dwc3-xilinx.c
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/dwc3/gadget.h
drivers/usb/dwc3/host.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_acm.c
drivers/usb/gadget/function/f_uvc.c
drivers/usb/gadget/function/u_audio.c
drivers/usb/gadget/function/u_uvc.h
drivers/usb/gadget/function/uvc.h
drivers/usb/gadget/function/uvc_configfs.c
drivers/usb/gadget/function/uvc_configfs.h
drivers/usb/gadget/function/uvc_queue.c
drivers/usb/gadget/function/uvc_queue.h
drivers/usb/gadget/function/uvc_video.c
drivers/usb/gadget/legacy/dbgp.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/legacy/raw_gadget.c
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/net2272.c
drivers/usb/gadget/udc/net2280.c
drivers/usb/gadget/udc/omap_udc.c
drivers/usb/gadget/udc/pxa25x_udc.c
drivers/usb/gadget/udc/pxa25x_udc.h
drivers/usb/gadget/udc/pxa27x_udc.h
drivers/usb/gadget/udc/s3c-hsudc.c
drivers/usb/gadget/udc/tegra-xudc.c
drivers/usb/gadget/udc/udc-xilinx.c
drivers/usb/host/ehci-omap.c
drivers/usb/host/ehci-platform.c
drivers/usb/host/ehci-q.c
drivers/usb/host/ehci-xilinx-of.c
drivers/usb/host/fhci-hcd.c
drivers/usb/host/fotg210-hcd.c
drivers/usb/host/isp116x-hcd.c
drivers/usb/host/isp1362-hcd.c
drivers/usb/host/max3421-hcd.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/ohci-omap.c
drivers/usb/host/ohci-platform.c
drivers/usb/host/ohci-ppc-of.c
drivers/usb/host/ohci-pxa27x.c
drivers/usb/host/oxu210hp-hcd.c
drivers/usb/host/r8a66597-hcd.c
drivers/usb/host/sl811-hcd.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/isp1760/isp1760-core.c
drivers/usb/isp1760/isp1760-hcd.c
drivers/usb/misc/ftdi-elan.c
drivers/usb/misc/lvstest.c
drivers/usb/musb/mediatek.c
drivers/usb/musb/omap2430.c
drivers/usb/phy/phy-omap-otg.c
drivers/usb/serial/ark3116.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/whiteheat.c
drivers/usb/storage/alauda.c
drivers/usb/storage/isd200.c
drivers/usb/storage/karma.c
drivers/usb/storage/onetouch.c
drivers/usb/storage/shuttle_usbat.c
drivers/usb/storage/transport.c
drivers/usb/typec/bus.c
drivers/usb/typec/mux.c
drivers/usb/typec/mux.h
drivers/usb/typec/mux/Kconfig
drivers/usb/typec/mux/Makefile
drivers/usb/typec/mux/fsa4480.c [new file with mode: 0644]
drivers/usb/typec/mux/intel_pmc_mux.c
drivers/usb/typec/mux/pi3usb30532.c
drivers/usb/typec/tcpm/fusb302.c
drivers/usb/typec/tipd/core.c
drivers/usb/typec/ucsi/ucsi.c
drivers/usb/typec/ucsi/ucsi.h
drivers/usb/typec/ucsi/ucsi_acpi.c
drivers/usb/usbip/stub_dev.c
drivers/usb/usbip/stub_rx.c
drivers/vdpa/alibaba/eni_vdpa.c
drivers/vdpa/ifcvf/ifcvf_main.c
drivers/vdpa/mlx5/core/mlx5_vdpa.h
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa.c
drivers/vdpa/vdpa_sim/vdpa_sim.c
drivers/vdpa/vdpa_sim/vdpa_sim.h
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
drivers/vdpa/vdpa_user/vduse_dev.c
drivers/vdpa/virtio_pci/vp_vdpa.c
drivers/vfio/fsl-mc/vfio_fsl_mc.c
drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
drivers/vfio/pci/mlx5/cmd.c
drivers/vfio/pci/mlx5/cmd.h
drivers/vfio/pci/mlx5/main.c
drivers/vfio/pci/vfio_pci.c
drivers/vfio/pci/vfio_pci_config.c
drivers/vfio/pci/vfio_pci_core.c
drivers/vfio/platform/vfio_amba.c
drivers/vfio/platform/vfio_platform.c
drivers/vfio/vfio.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/iotlb.c
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/vhost/test.c
drivers/vhost/vdpa.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
drivers/vhost/vsock.c
drivers/video/console/sticon.c
drivers/video/console/sticore.c
drivers/video/fbdev/amba-clcd.c
drivers/video/fbdev/hyperv_fb.c
drivers/video/fbdev/omap/omapfb.h
drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
drivers/video/fbdev/pxa3xx-gcu.c
drivers/video/fbdev/pxa3xx-regs.h [moved from arch/arm/mach-pxa/include/mach/regs-lcd.h with 90% similarity]
drivers/video/fbdev/pxafb.c
drivers/video/fbdev/sticore.h
drivers/video/fbdev/stifb.c
drivers/video/fbdev/vesafb.c
drivers/video/fbdev/xen-fbfront.c
drivers/virt/fsl_hypervisor.c
drivers/virtio/virtio.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_pci_common.h
drivers/virtio/virtio_pci_legacy.c
drivers/virtio/virtio_pci_modern.c
drivers/virtio/virtio_pci_modern_dev.c
drivers/virtio/virtio_ring.c
drivers/virtio/virtio_vdpa.c
drivers/visorbus/Kconfig [deleted file]
drivers/visorbus/Makefile [deleted file]
drivers/visorbus/controlvmchannel.h [deleted file]
drivers/visorbus/vbuschannel.h [deleted file]
drivers/visorbus/visorbus_main.c [deleted file]
drivers/visorbus/visorbus_private.h [deleted file]
drivers/visorbus/visorchannel.c [deleted file]
drivers/visorbus/visorchipset.c [deleted file]
drivers/vme/Kconfig
drivers/w1/masters/ds2490.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/bcm7038_wdt.c
drivers/watchdog/da9063_wdt.c
drivers/watchdog/gxp-wdt.c [new file with mode: 0644]
drivers/watchdog/iTCO_wdt.c
drivers/watchdog/mtk_wdt.c
drivers/watchdog/rti_wdt.c
drivers/watchdog/rzg2l_wdt.c
drivers/watchdog/rzn1_wdt.c [new file with mode: 0644]
drivers/watchdog/sa1100_wdt.c
drivers/watchdog/sp805_wdt.c
drivers/watchdog/sunplus_wdt.c [new file with mode: 0644]
drivers/watchdog/ts4800_wdt.c
drivers/watchdog/wdat_wdt.c
drivers/xen/gntalloc.c
drivers/xen/gntdev-dmabuf.c
drivers/xen/grant-table.c
drivers/xen/pvcalls-front.c
drivers/xen/xen-front-pgdir-shbuf.c
drivers/xen/xenbus/xenbus_client.c
drivers/xen/xenbus/xenbus_probe.c
fs/Kconfig.binfmt
fs/afs/dir.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/quota.c
fs/ceph/super.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/cifs/Makefile
fs/cifs/cifs_swn.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/dfs_cache.c
fs/cifs/sess.c
fs/cifs/smb1ops.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smbdirect.c
fs/cifs/transport.c
fs/erofs/fscache.c
fs/erofs/inode.c
fs/erofs/zdata.c
fs/erofs/zdata.h
fs/exec.c
fs/exportfs/expfs.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/hash.c
fs/f2fs/inline.c
fs/f2fs/inode.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/node.h
fs/f2fs/segment.c
fs/f2fs/segment.h
fs/f2fs/super.c
fs/f2fs/verity.c
fs/file.c
fs/file_table.c
fs/freevxfs/vxfs.h
fs/freevxfs/vxfs_bmap.c
fs/freevxfs/vxfs_dir.h
fs/freevxfs/vxfs_extern.h
fs/freevxfs/vxfs_fshead.c
fs/freevxfs/vxfs_fshead.h
fs/freevxfs/vxfs_immed.c
fs/freevxfs/vxfs_inode.c
fs/freevxfs/vxfs_inode.h
fs/freevxfs/vxfs_lookup.c
fs/freevxfs/vxfs_olt.c
fs/freevxfs/vxfs_olt.h
fs/freevxfs/vxfs_subr.c
fs/freevxfs/vxfs_super.c
fs/fsopen.c
fs/internal.h
fs/io_uring.c
fs/jffs2/erase.c
fs/jffs2/fs.c
fs/kernfs/dir.c
fs/kernfs/file.c
fs/ksmbd/connection.c
fs/ksmbd/connection.h
fs/ksmbd/ksmbd_netlink.h
fs/ksmbd/misc.c
fs/ksmbd/smb2misc.c
fs/ksmbd/smb2pdu.c
fs/ksmbd/smb_common.c
fs/ksmbd/smbacl.c
fs/ksmbd/transport_ipc.c
fs/ksmbd/transport_rdma.c
fs/ksmbd/transport_rdma.h
fs/namei.c
fs/namespace.c
fs/nfs/file.c
fs/nfs/filelayout/filelayout.c
fs/nfs/fscache.c
fs/nfs/internal.h
fs/nfs/nfs4file.c
fs/nfs/nfs4namespace.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4xdr.c
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/unlink.c
fs/nfs/write.c
fs/ntfs3/file.c
fs/ntfs3/frecord.c
fs/ntfs3/fslog.c
fs/ntfs3/inode.c
fs/ntfs3/xattr.c
fs/open.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/export.c
fs/overlayfs/file.c
fs/overlayfs/inode.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/ovl_entry.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/overlayfs/util.c
fs/read_write.c
fs/stat.c
fs/sync.c
fs/ubifs/budget.c
fs/ubifs/xattr.c
fs/xfs/libxfs/xfs_ag.c
fs/xfs/libxfs/xfs_attr.c
fs/xfs/libxfs/xfs_attr.h
fs/xfs/libxfs/xfs_attr_remote.c
fs/xfs/libxfs/xfs_attr_remote.h
fs/xfs/libxfs/xfs_btree.c
fs/xfs/libxfs/xfs_da_btree.c
fs/xfs/libxfs/xfs_da_btree.h
fs/xfs/libxfs/xfs_defer.c
fs/xfs/libxfs/xfs_log_format.h
fs/xfs/libxfs/xfs_log_recover.h
fs/xfs/libxfs/xfs_symlink_remote.c
fs/xfs/scrub/scrub.c
fs/xfs/xfs_acl.c
fs/xfs/xfs_attr_item.c
fs/xfs/xfs_attr_item.h
fs/xfs/xfs_buf_item_recover.c
fs/xfs/xfs_file.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log.h
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_message.h
fs/xfs/xfs_mount.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_qm.c
fs/xfs/xfs_super.c
fs/xfs/xfs_super.h
fs/xfs/xfs_xattr.c
fs/xfs/xfs_xattr.h [new file with mode: 0644]
include/asm-generic/compat.h
include/clocksource/timer-xilinx.h [new file with mode: 0644]
include/drm/drm_cache.h
include/dt-bindings/interconnect/qcom,sc8180x.h
include/dt-bindings/interconnect/qcom,sc8280xp.h [new file with mode: 0644]
include/dt-bindings/interconnect/qcom,sdx65.h [new file with mode: 0644]
include/dt-bindings/memory/mt8186-memory-port.h [new file with mode: 0644]
include/dt-bindings/memory/mt8195-memory-port.h [new file with mode: 0644]
include/dt-bindings/memory/mtk-memory-port.h
include/dt-bindings/mfd/cros_ec.h [new file with mode: 0644]
include/dt-bindings/reset/mt7986-resets.h [new file with mode: 0644]
include/dt-bindings/reset/mt8186-resets.h [new file with mode: 0644]
include/linux/acpi.h
include/linux/amba/bus.h
include/linux/bitmap.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/clk/pxa.h [new file with mode: 0644]
include/linux/compat.h
include/linux/context_tracking_state.h
include/linux/cpuhotplug.h
include/linux/delayacct.h
include/linux/device.h
include/linux/device/bus.h
include/linux/device/driver.h
include/linux/export.h
include/linux/extcon.h
include/linux/fdtable.h
include/linux/file.h
include/linux/find.h
include/linux/firmware.h
include/linux/firmware/xlnx-event-manager.h
include/linux/firmware/xlnx-zynqmp.h
include/linux/fpga/fpga-region.h
include/linux/fs.h
include/linux/fsl/mc.h
include/linux/gpio/consumer.h
include/linux/gpio/driver.h
include/linux/gpio/machine.h
include/linux/host1x_context_bus.h [new file with mode: 0644]
include/linux/hte.h [new file with mode: 0644]
include/linux/hyperv.h
include/linux/iio/adc/ad_sigma_delta.h
include/linux/iio/common/st_sensors.h
include/linux/iio/iio-opaque.h
include/linux/iio/iio.h
include/linux/iio/kfifo_buf.h
include/linux/intel-iommu.h
include/linux/intel-svm.h
include/linux/iommu.h
include/linux/ipc_namespace.h
include/linux/ipv6.h
include/linux/jump_label.h
include/linux/kexec.h
include/linux/livepatch.h
include/linux/mfd/tc6393xb.h
include/linux/mhi_ep.h [new file with mode: 0644]
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/mlx5_ifc_vdpa.h
include/linux/mod_devicetable.h
include/linux/mount.h
include/linux/namei.h
include/linux/nfs4.h
include/linux/nfs_fs_sb.h
include/linux/nfs_xdr.h
include/linux/nodemask.h
include/linux/notifier.h
include/linux/nvme-fc-driver.h
include/linux/nvmem-consumer.h
include/linux/pci.h
include/linux/phy/phy-lvds.h [new file with mode: 0644]
include/linux/phy/phy.h
include/linux/platform_data/asoc-poodle.h [new file with mode: 0644]
include/linux/platform_data/asoc-pxa.h [moved from arch/arm/mach-pxa/include/mach/audio.h with 93% similarity]
include/linux/platform_data/timer-ixp4xx.h [deleted file]
include/linux/platform_data/video-pxafb.h
include/linux/platform_device.h
include/linux/pm.h
include/linux/pm_opp.h
include/linux/property.h
include/linux/ptrace.h
include/linux/reboot.h
include/linux/rpmsg.h
include/linux/rtsx_pci.h
include/linux/sched.h
include/linux/sched/jobctl.h
include/linux/sched/signal.h
include/linux/sched/task.h
include/linux/serial_core.h
include/linux/serial_s3c.h
include/linux/signal.h
include/linux/siphash.h
include/linux/skbuff.h
include/linux/soc/pxa/cpu.h [moved from arch/arm/mach-pxa/include/mach/hardware.h with 75% similarity]
include/linux/soc/pxa/mfp.h [moved from arch/arm/plat-pxa/include/plat/mfp.h with 98% similarity]
include/linux/soc/pxa/smemc.h [new file with mode: 0644]
include/linux/spi/spi.h
include/linux/thunderbolt.h
include/linux/usb.h
include/linux/usb/gadget.h
include/linux/usb/hcd.h
include/linux/usb/typec_mux.h
include/linux/vdpa.h
include/linux/vfio.h
include/linux/vfio_pci_core.h
include/linux/vhost_iotlb.h
include/linux/virtio.h
include/linux/virtio_config.h
include/linux/wm97xx.h
include/net/amt.h
include/net/ax25.h
include/net/bluetooth/hci_core.h
include/net/bonding.h
include/net/netfilter/nf_conntrack_core.h
include/net/sch_generic.h
include/pcmcia/soc_common.h [new file with mode: 0644]
include/sound/pxa2xx-lib.h
include/trace/events/f2fs.h
include/trace/events/thermal_pressure.h [new file with mode: 0644]
include/uapi/asm-generic/fcntl.h
include/uapi/asm-generic/termbits-common.h [new file with mode: 0644]
include/uapi/asm-generic/termbits.h
include/uapi/asm-generic/unistd.h
include/uapi/drm/amdgpu_drm.h
include/uapi/linux/android/binder.h
include/uapi/linux/audit.h
include/uapi/linux/elf-em.h
include/uapi/linux/elf.h
include/uapi/linux/gpio.h
include/uapi/linux/io_uring.h
include/uapi/linux/ipv6.h
include/uapi/linux/kexec.h
include/uapi/linux/socket.h
include/uapi/linux/taskstats.h
include/uapi/linux/vdpa.h
include/uapi/linux/vfio.h
include/uapi/linux/vhost.h
include/uapi/linux/vhost_types.h
include/uapi/misc/habanalabs.h
include/ufs/ufs.h [moved from drivers/scsi/ufs/ufs.h with 100% similarity]
include/ufs/ufs_quirks.h [moved from drivers/scsi/ufs/ufs_quirks.h with 100% similarity]
include/ufs/ufshcd.h [moved from drivers/scsi/ufs/ufshcd.h with 99% similarity]
include/ufs/ufshci.h [moved from drivers/scsi/ufs/ufshci.h with 100% similarity]
include/ufs/unipro.h [moved from drivers/scsi/ufs/unipro.h with 100% similarity]
include/video/radeon.h
include/xen/arm/page.h
include/xen/grant_table.h
init/Kconfig
init/initramfs.c
init/main.c
ipc/ipc_sysctl.c
ipc/mq_sysctl.c
ipc/mqueue.c
ipc/namespace.c
kernel/bpf/core.c
kernel/delayacct.c
kernel/dma/debug.c
kernel/dma/swiotlb.c
kernel/events/core.c
kernel/fork.c
kernel/kexec_file.c
kernel/livepatch/patch.c
kernel/module/signing.c
kernel/notifier.c
kernel/printk/printk.c
kernel/ptrace.c
kernel/reboot.c
kernel/sched/autogroup.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/signal.c
kernel/time/posix-cpu-timers.c
kernel/trace/fgraph.c
kernel/umh.c
kernel/usermode_driver.c
lib/assoc_array.c
lib/bitmap.c
lib/nodemask.c
lib/siphash.c
lib/test_bitmap.c
lib/test_firmware.c
lib/test_siphash.c
mm/fadvise.c
mm/hugetlb.c
mm/hugetlb_vmemmap.c
mm/memory.c
mm/memremap.c
mm/oom_kill.c
mm/page_isolation.c
mm/readahead.c
mm/vmstat.c
net/9p/trans_xen.c
net/Kconfig.debug
net/ax25/af_ax25.c
net/ax25/ax25_dev.c
net/ax25/ax25_subr.c
net/bluetooth/hci_core.c
net/bluetooth/hci_request.c
net/bluetooth/hci_sync.c
net/bluetooth/mgmt.c
net/ceph/crush/mapper.c
net/core/neighbour.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/ipv6/ndisc.c
net/ipv6/ping.c
net/key/af_key.c
net/mac80211/chan.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nft_flow_offload.c
net/netfilter/nft_limit.c
net/nfc/core.c
net/packet/af_packet.c
net/sched/act_ct.c
net/smc/af_smc.c
net/smc/smc_cdc.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/tipc/bearer.c
net/xfrm/xfrm_output.c
scripts/Kbuild.include
scripts/Makefile.build
scripts/Makefile.lib
scripts/Makefile.modfinal
scripts/Makefile.modpost
scripts/Makefile.vmlinux_o [new file with mode: 0644]
scripts/check-local-export [new file with mode: 0755]
scripts/get_abi.pl
scripts/kallsyms.c
scripts/kconfig/nconf.c
scripts/link-vmlinux.sh
scripts/mod/file2alias.c
scripts/mod/modpost.c
scripts/mod/modpost.h
scripts/objdiff
scripts/sorttable.c
scripts/spdxcheck-test.sh
scripts/spdxcheck.py
scripts/spdxexclude [new file with mode: 0644]
scripts/subarch.include
scripts/tags.sh
security/smack/smackfs.c
sound/arm/pxa2xx-ac97-lib.c
sound/arm/pxa2xx-ac97-regs.h [moved from arch/arm/mach-pxa/include/mach/regs-ac97.h with 71% similarity]
sound/arm/pxa2xx-ac97.c
sound/core/Makefile
sound/isa/Kconfig
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/soc/amd/acp/acp-pci.c
sound/soc/codecs/da7219-aad.c
sound/soc/codecs/rt5640.c
sound/soc/codecs/rt5640.h
sound/soc/fsl/fsl_sai.h
sound/soc/intel/avs/board_selection.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/intel/common/soc-acpi-intel-adl-match.c
sound/soc/pxa/corgi.c
sound/soc/pxa/e740_wm9705.c
sound/soc/pxa/e750_wm9705.c
sound/soc/pxa/e800_wm9712.c
sound/soc/pxa/em-x270.c
sound/soc/pxa/hx4700.c
sound/soc/pxa/magician.c
sound/soc/pxa/mioa701_wm9713.c
sound/soc/pxa/palm27x.c
sound/soc/pxa/poodle.c
sound/soc/pxa/pxa2xx-ac97.c
sound/soc/pxa/pxa2xx-i2s.c
sound/soc/pxa/spitz.c
sound/soc/pxa/tosa.c
sound/soc/pxa/z2.c
sound/soc/soc-pcm.c
sound/usb/clock.c
sound/usb/line6/pcm.c
sound/usb/midi.c
sound/usb/mixer_maps.c
sound/usb/usx2y/usb_stream.c
sound/usb/usx2y/usbusx2yaudio.c
sound/usb/usx2y/usx2yhwdeppcm.c
tools/accounting/getdelays.c
tools/gpio/gpio-event-mon.c
tools/include/linux/bitmap.h
tools/include/uapi/asm-generic/fcntl.h
tools/include/uapi/asm-generic/unistd.h
tools/include/uapi/asm/bitsperlong.h
tools/lib/bitmap.c
tools/objtool/check.c
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/perf-top.txt
tools/perf/arch/arm64/util/mem-events.c
tools/perf/arch/x86/util/evsel.c
tools/perf/arch/x86/util/evsel.h [new file with mode: 0644]
tools/perf/arch/x86/util/topdown.c
tools/perf/builtin-c2c.c
tools/perf/builtin-lock.c
tools/perf/pmu-events/arch/s390/cf_z10/basic.json
tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
tools/perf/pmu-events/arch/s390/cf_z10/extended.json
tools/perf/pmu-events/arch/s390/cf_z13/basic.json
tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
tools/perf/pmu-events/arch/s390/cf_z13/extended.json
tools/perf/pmu-events/arch/s390/cf_z14/basic.json
tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
tools/perf/pmu-events/arch/s390/cf_z14/extended.json
tools/perf/pmu-events/arch/s390/cf_z15/basic.json
tools/perf/pmu-events/arch/s390/cf_z15/crypto.json [deleted file]
tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
tools/perf/pmu-events/arch/s390/cf_z15/extended.json
tools/perf/pmu-events/arch/s390/cf_z16/basic.json [new file with mode: 0644]
tools/perf/pmu-events/arch/s390/cf_z16/crypto6.json [new file with mode: 0644]
tools/perf/pmu-events/arch/s390/cf_z16/extended.json [new file with mode: 0644]
tools/perf/pmu-events/arch/s390/cf_z16/transaction.json [new file with mode: 0644]
tools/perf/pmu-events/arch/s390/cf_z196/basic.json
tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
tools/perf/pmu-events/arch/s390/cf_z196/extended.json
tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
tools/perf/pmu-events/arch/s390/mapfile.csv
tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json [new file with mode: 0644]
tools/perf/tests/shell/test_arm_spe_fork.sh [new file with mode: 0755]
tools/perf/util/dso.h
tools/perf/util/unwind-libunwind-local.c
tools/testing/memblock/TODO
tools/testing/memblock/tests/basic_api.c
tools/testing/selftests/alsa/Makefile
tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c
tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
tools/testing/selftests/filesystems/binderfs/binderfs_test.c
tools/testing/selftests/firmware/Makefile
tools/testing/selftests/firmware/config
tools/testing/selftests/firmware/fw_filesystem.sh
tools/testing/selftests/firmware/fw_lib.sh
tools/testing/selftests/firmware/fw_run_tests.sh
tools/testing/selftests/firmware/fw_upload.sh [new file with mode: 0755]
tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
tools/testing/selftests/lkdtm/config
tools/testing/selftests/lkdtm/tests.txt
tools/testing/selftests/net/ndisc_unsolicited_na_test.sh
tools/testing/selftests/net/psock_snd.c
tools/usb/testusb.c
virt/kvm/vfio.c

index 6d48493..825fae8 100644
--- a/.mailmap
+++ b/.mailmap
@@ -201,6 +201,8 @@ Jordan Crouse <jordan@cosmicpenguin.net> <jcrouse@codeaurora.org>
 <josh@joshtriplett.org> <josht@linux.vnet.ibm.com>
 <josh@joshtriplett.org> <josht@us.ibm.com>
 <josh@joshtriplett.org> <josht@vnet.ibm.com>
+Josh Poimboeuf <jpoimboe@kernel.org> <jpoimboe@redhat.com>
+Josh Poimboeuf <jpoimboe@kernel.org> <jpoimboe@us.ibm.com>
 Juha Yrjola <at solidboot.com>
 Juha Yrjola <juha.yrjola@nokia.com>
 Juha Yrjola <juha.yrjola@solidboot.com>
@@ -236,6 +238,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 <linux-hardening@vger.kernel.org> <kernel-hardening@lists.openwall.com>
 Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
 Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
+Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
 Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
 Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
 Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>
index ecfe766..96ccc33 100644
--- a/Documentation/ABI/stable/sysfs-bus-mhi
+++ b/Documentation/ABI/stable/sysfs-bus-mhi
@@ -19,3 +19,13 @@ Description: The file holds the OEM PK Hash value of the endpoint device
                read without having the device power on at least once, the file
                will read all 0's.
 Users:         Any userspace application or clients interested in device info.
+
+What:           /sys/bus/mhi/devices/.../soc_reset
+Date:           April 2022
+KernelVersion:  5.19
+Contact:        mhi@lists.linux.dev
+Description:   Initiates a SoC reset on the MHI controller.  A SoC reset is
+                a reset of last resort, and will require a complete re-init.
+                This can be useful as a method of recovery if the device is
+                non-responsive, or as a means of loading new firmware as a
+                system administration task.
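
For illustration only, a minimal shell sketch of the new soc_reset attribute; the controller name mhi0 is hypothetical, and it is assumed that the written value is ignored and any write initiates the reset:

    # 'mhi0' is a hypothetical controller name; the written value is assumed to be ignored
    echo 1 > /sys/bus/mhi/devices/mhi0/soc_reset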
index 889ed45..611b23e 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uvc
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc
@@ -7,6 +7,7 @@ Description:    UVC function directory
                streaming_maxburst      0..15 (ss only)
                streaming_maxpacket     1..1023 (fs), 1..3072 (hs/ss)
                streaming_interval      1..16
+               function_name           string [32]
                ===================     =============================
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control
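
For illustration only, a hedged sketch of setting the new function_name attribute, reusing the placeholder configfs path from the entry above; per the table, the attribute takes a string of at most 32 characters:

    # placeholder path as written in the ABI entry; string of up to 32 characters
    echo "UVC Webcam" > /config/usb-gadget/gadget/functions/uvc.name/function_name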
index bcf6915..0f8d20f 100644
--- a/Documentation/ABI/testing/debugfs-driver-habanalabs
+++ b/Documentation/ABI/testing/debugfs-driver-habanalabs
@@ -170,6 +170,20 @@ KernelVersion:  5.1
 Contact:        ogabbay@kernel.org
 Description:    Sets the state of the third S/W led on the device
 
+What:           /sys/kernel/debug/habanalabs/hl<n>/memory_scrub
+Date:           May 2022
+KernelVersion:  5.19
+Contact:        dhirschfeld@habana.ai
+Description:    Allows the root user to scrub the dram memory. The scrubbing
+                value can be set using the debugfs file memory_scrub_val.
+
+What:           /sys/kernel/debug/habanalabs/hl<n>/memory_scrub_val
+Date:           May 2022
+KernelVersion:  5.19
+Contact:        dhirschfeld@habana.ai
+Description:    The value to which the dram will be set when the user
+                scrubs the dram using the 'memory_scrub' debugfs file.
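+                A possible sequence (assuming a write to 'memory_scrub'
+                triggers the scrub; the value below is illustrative):
+                 echo "0x0" > /sys/kernel/debug/habanalabs/hl0/memory_scrub_val
+                 echo 1 > /sys/kernel/debug/habanalabs/hl0/memory_scrub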
+
 What:           /sys/kernel/debug/habanalabs/hl<n>/mmu
 Date:           Jan 2019
 KernelVersion:  5.1
@@ -190,6 +204,30 @@ Description:    Check and display page fault or access violation mmu errors for
                 echo "0x200" > /sys/kernel/debug/habanalabs/hl0/mmu_error
                 cat /sys/kernel/debug/habanalabs/hl0/mmu_error
 
+What:           /sys/kernel/debug/habanalabs/hl<n>/monitor_dump
+Date:           Mar 2022
+KernelVersion:  5.19
+Contact:        osharabi@habana.ai
+Description:    Allows the root user to dump monitors status from the device's
+                protected config space.
+                This property is a binary blob that contains the result of the
+                monitors registers dump.
+                This custom interface is needed (instead of using the generic
+                Linux user-space PCI mapping) because this space is protected
+                and cannot be accessed using PCI read.
+                This interface doesn't support concurrency in the same device.
+                Only supported on GAUDI.
+
+What:           /sys/kernel/debug/habanalabs/hl<n>/monitor_dump_trig
+Date:           Mar 2022
+KernelVersion:  5.19
+Contact:        osharabi@habana.ai
+Description:    Triggers a dump of monitor data. The value to trigger the operation
+                must be 1. Triggering the monitor dump operation initiates a dump of
+                the current register values of all monitors.
+                When the write is finished, the user can read the "monitor_dump"
+                blob.
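+                For example (the output file name is illustrative):
+                 echo 1 > /sys/kernel/debug/habanalabs/hl0/monitor_dump_trig
+                 cat /sys/kernel/debug/habanalabs/hl0/monitor_dump > monitors_dump.bin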
+
 What:           /sys/kernel/debug/habanalabs/hl<n>/set_power_state
 Date:           Jan 2019
 KernelVersion:  5.1
index b7e87f6..f7570c2 100644 (file)
@@ -293,6 +293,16 @@ Contact:   thunderbolt-software@lists.01.org
 Description:   This contains XDomain service specific settings as
                bitmask. Format: %x
 
+What:          /sys/bus/thunderbolt/devices/usb4_portX/connector
+Date:          April 2022
+Contact:       Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+               Symlink to the USB Type-C connector. This link is only
+               created when USB Type-C Connector Class is enabled,
+               and only if the system firmware is capable of
+               describing the connection between a port and its
+               connector.
+
 What:          /sys/bus/thunderbolt/devices/usb4_portX/link
 Date:          Sep 2021
 KernelVersion: v5.14
diff --git a/Documentation/ABI/testing/sysfs-class-firmware b/Documentation/ABI/testing/sysfs-class-firmware
new file mode 100644 (file)
index 0000000..978d3d5
--- /dev/null
@@ -0,0 +1,77 @@
+What:          /sys/class/firmware/.../data
+Date:          July 2022
+KernelVersion: 5.19
+Contact:       Russ Weight <russell.h.weight@intel.com>
+Description:   The data sysfs file is used for firmware-fallback and for
+               firmware uploads. Cat a firmware image to this sysfs file
+               after you echo 1 to the loading sysfs file. When the firmware
+               image write is complete, echo 0 to the loading sysfs file. This
+               sequence will signal the completion of the firmware write and
+               signal the lower-level driver that the firmware data is
+               available.
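+               An illustrative upload sequence (the device directory name
+               "fw-example" is hypothetical):
+                 echo 1 > /sys/class/firmware/fw-example/loading
+                 cat firmware.bin > /sys/class/firmware/fw-example/data
+                 echo 0 > /sys/class/firmware/fw-example/loading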
+
+What:          /sys/class/firmware/.../cancel
+Date:          July 2022
+KernelVersion: 5.19
+Contact:       Russ Weight <russell.h.weight@intel.com>
+Description:   Write-only. For firmware uploads, write a "1" to this file to
+               request that the transfer of firmware data to the lower-level
+               device be canceled. This request will be rejected (EBUSY) if
+               the update cannot be canceled (e.g. a FLASH write is in
+               progress) or (ENODEV) if there is no firmware update in progress.
+
+What:          /sys/class/firmware/.../error
+Date:          July 2022
+KernelVersion: 5.19
+Contact:       Russ Weight <russell.h.weight@intel.com>
+Description:   Read-only. Returns a string describing a failed firmware
+               upload. This string will be in the form of <STATUS>:<ERROR>,
+               where <STATUS> will be one of the status strings described
+               for the status sysfs file and <ERROR> will be one of the
+               following: "hw-error", "timeout", "user-abort", "device-busy",
+               "invalid-file-size", "read-write-error", "flash-wearout". The
+               error sysfs file is only meaningful when the current firmware
+               upload status is "idle". If this file is read while a firmware
+               transfer is in progress, then the read will fail with EBUSY.
+
+What:          /sys/class/firmware/.../loading
+Date:          July 2022
+KernelVersion: 5.19
+Contact:       Russ Weight <russell.h.weight@intel.com>
+Description:   The loading sysfs file is used for both firmware-fallback and
+               for firmware uploads. Echo 1 onto the loading file to indicate
+               you are writing a firmware file to the data sysfs node. Echo
+               -1 onto this file to abort the data write or echo 0 onto this
+               file to indicate that the write is complete. For firmware
+               uploads, the zero value also triggers the transfer of the
+               firmware data to the lower-level device driver.
+
+What:          /sys/class/firmware/.../remaining_size
+Date:          July 2022
+KernelVersion: 5.19
+Contact:       Russ Weight <russell.h.weight@intel.com>
+Description:   Read-only. For firmware upload, this file contains the size
+               of the firmware data that remains to be transferred to the
+               lower-level device driver. The size value is initialized to
+               the full size of the firmware image that was previously
+               written to the data sysfs file. This value is periodically
+               updated during the "transferring" phase of the firmware
+               upload.
+               Format: "%u".
+
+What:          /sys/class/firmware/.../status
+Date:          July 2022
+KernelVersion: 5.19
+Contact:       Russ Weight <russell.h.weight@intel.com>
+Description:   Read-only. Returns a string describing the current status of
+               a firmware upload. The string will be one of the following:
+               "idle", "receiving", "preparing", "transferring", "programming".
+
+What:          /sys/class/firmware/.../timeout
+Date:          July 2022
+KernelVersion: 5.19
+Contact:       Russ Weight <russell.h.weight@intel.com>
+Description:   This file supports the timeout mechanism for firmware
+               fallback.  This file has no effect on firmware uploads. For
+               more information on timeouts please see the documentation
+               for firmware fallback.
diff --git a/Documentation/ABI/testing/sysfs-devices-physical_location b/Documentation/ABI/testing/sysfs-devices-physical_location
new file mode 100644 (file)
index 0000000..202324b
--- /dev/null
@@ -0,0 +1,42 @@
+What:          /sys/devices/.../physical_location
+Date:          March 2022
+Contact:       Won Chung <wonchung@google.com>
+Description:
+               This directory contains information on the physical location
+               of the device connection point with respect to the system's
+               housing.
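+               For example, to query where a device's connector is located
+               (the path is illustrative):
+                 cat /sys/devices/.../physical_location/panel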
+
+What:          /sys/devices/.../physical_location/panel
+Date:          March 2022
+Contact:       Won Chung <wonchung@google.com>
+Description:
+               Describes which panel surface of the system’s housing the
+               device connection point resides on.
+
+What:          /sys/devices/.../physical_location/vertical_position
+Date:          March 2022
+Contact:       Won Chung <wonchung@google.com>
+Description:
+               Describes vertical position of the device connection point on
+               the panel surface.
+
+What:          /sys/devices/.../physical_location/horizontal_position
+Date:          March 2022
+Contact:       Won Chung <wonchung@google.com>
+Description:
+               Describes horizontal position of the device connection point on
+               the panel surface.
+
+What:          /sys/devices/.../physical_location/dock
+Date:          March 2022
+Contact:       Won Chung <wonchung@google.com>
+Description:
+               "Yes" if the device connection point resides in a docking
+               station or a port replicator. "No" otherwise.
+
+What:          /sys/devices/.../physical_location/lid
+Date:          March 2022
+Contact:       Won Chung <wonchung@google.com>
+Description:
+               "Yes" if the device connection point resides on the lid of
+               a laptop system. "No" otherwise.
index 197fe31..241d1a8 100644 (file)
@@ -15,6 +15,7 @@ c) swapping in pages
 d) memory reclaim
 e) thrashing page cache
 f) direct compact
+g) write-protect copy
 
 and makes these statistics available to userspace through
 the taskstats interface.
@@ -48,7 +49,7 @@ this structure. See
 for a description of the fields pertaining to delay accounting.
 It will generally be in the form of counters returning the cumulative
 delay seen for cpu, sync block I/O, swapin, memory reclaim, thrash page
-cache, direct compact etc.
+cache, direct compact, write-protect copy etc.
 
 Taking the difference of two successive readings of a given
 counter (say cpu_delay_total) for a task will give the delay
@@ -117,6 +118,8 @@ Get sum of delays, since system boot, for all pids with tgid 5::
                            0              0              0ms
        COMPACT         count    delay total  delay average
                            0              0              0ms
+        WPCOPY          count    delay total  delay average
+                            0              0              0ms
 
 Get IO accounting for pid 1, it works only with -p::
 
index b903cf1..957ccf6 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-===========================
-The Linux RapidIO Subsystem
-===========================
+=============
+Block Devices
+=============
 
 .. toctree::
    :maxdepth: 1
index 710b52d..8090130 100644 (file)
                        [KNL] Debugging option to set a timeout in seconds for
                        deferred probe to give up waiting on dependencies to
                        probe. Only specific dependencies (subsystems or
-                       drivers) that have opted in will be ignored. A timeout of 0
-                       will timeout at the end of initcalls. This option will also
+                       drivers) that have opted in will be ignored. A timeout
+                       of 0 will time out at the end of initcalls. If the
+                       timeout hasn't expired, it'll be restarted by each
+                       successful driver registration. This option will also
                        dump out devices still on the deferred probe list after
                        retrying.
 
                        driver later using sysfs.
 
        driver_async_probe=  [KNL]
-                       List of driver names to be probed asynchronously.
+                       List of driver names to be probed asynchronously. A
+                       single * matches all driver names. If * is specified,
+                       the remaining listed driver names are excluded from
+                       the match, i.e. they will NOT be probed asynchronously.
                        Format: <driver_name1>,<driver_name2>...
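+                       For example, "driver_async_probe=*,mydrv" (the driver
+                       name is illustrative) probes all drivers asynchronously
+                       except mydrv.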
 
        drm.edid_firmware=[<connector>:]<file>[,[<connector>:]<file>]
index 6adb645..3676068 100644 (file)
@@ -36,10 +36,9 @@ administrative requirements that require particular behavior that does not
 work well as part of an nfs_client_id4 string.
 
 The nfs.nfs4_unique_id boot parameter specifies a unique string that can be
-used instead of a system's node name when an NFS client identifies itself to
-a server.  Thus, if the system's node name is not unique, or it changes, its
-nfs.nfs4_unique_id stays the same, preventing collision with other clients
-or loss of state during NFS reboot recovery or transparent state migration.
+used together with a system's node name when an NFS client identifies itself to
+a server.  Thus, if the system's node name is not unique, its
+nfs.nfs4_unique_id can help prevent collisions with other clients.
 
 The nfs.nfs4_unique_id string is typically a UUID, though it can contain
 anything that is believed to be unique across all NFS clients.  An
@@ -53,8 +52,12 @@ outstanding NFSv4 state has expired, to prevent loss of NFSv4 state.
 
 This string can be stored in an NFS client's grub.conf, or it can be provided
 via a net boot facility such as PXE.  It may also be specified as an nfs.ko
-module parameter.  Specifying a uniquifier string is not support for NFS
-clients running in containers.
+module parameter.
+
+This uniquifier string will be the same for all NFS clients running in
+containers unless it is overridden by a value written to
+/sys/fs/nfs/net/nfs_client/identifier, which will be local to the network
+namespace of the process that writes it.
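+
+For example, a container startup script might install a per-namespace
+uniquifier with something like the following (the command is illustrative)::
+
+ # uuidgen > /sys/fs/nfs/net/nfs_client/identifier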
 
 
 The DNS resolver
index 0a1fbdb..a2bfb97 100644 (file)
@@ -262,6 +262,28 @@ Which shows that the base frequency now increased from 2600 MHz at performance
 level 0 to 2800 MHz at performance level 4. As a result, any workload, which can
 use fewer CPUs, can see a boost of 200 MHz compared to performance level 0.
 
+Changing performance level via BMC Interface
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is possible to change the SST-PP level using an out-of-band (OOB) agent
+(via some remote management console, through the BMC "Baseboard Management
+Controller" interface). This mode is supported from the Sapphire Rapids
+processor generation onwards. The kernel and tool changes to support this mode
+were added in Linux kernel version 5.18. To enable this feature, the kernel
+config "CONFIG_INTEL_HFI_THERMAL" is required. The minimum tool version that
+supports this feature is "v1.12", which is part of Linux kernel version 5.18.
+
+To support such configuration, this tool can be used as a daemon. Add
+a command line option --oob::
+
+ # intel-speed-select --oob
+ Intel(R) Speed Select Technology
+ Executing on CPU model:143[0x8f]
+ OOB mode is enabled and will run as daemon
+
+In this mode the tool will online/offline CPUs based on the new performance
+level.
+
 Check presence of other Intel(R) SST features
 ---------------------------------------------
 
index 14bcd82..41a66a8 100644 (file)
@@ -13,6 +13,7 @@ implementation.
    arm/index
    arm64/index
    ia64/index
+   loongarch/index
    m68k/index
    mips/index
    nios2/index
index 2f41caa..3707215 100644 (file)
@@ -374,8 +374,6 @@ PXA 2xx/3xx/93x/95x family
 
    Linux kernel mach directory:
        arch/arm/mach-pxa
-   Linux kernel plat directory:
-       arch/arm/plat-pxa
 
 MMP/MMP2/MMP3 family (communication processor)
 ----------------------------------------------
@@ -429,8 +427,6 @@ MMP/MMP2/MMP3 family (communication processor)
 
    Linux kernel mach directory:
        arch/arm/mach-mmp
-   Linux kernel plat directory:
-       arch/arm/plat-pxa
 
 Berlin family (Multimedia Solutions)
 -------------------------------------
@@ -518,9 +514,6 @@ Long-term plans
    Business Unit) in a single mach-<foo> directory. The plat-orion/
    would therefore disappear.
 
- * Unify the mach-mmp/ and mach-pxa/ into the same mach-pxa
-   directory. The plat-pxa/ would therefore disappear.
-
 Credits
 -------
 
index 072ee31..934727e 100644 (file)
@@ -161,7 +161,7 @@ finally:
 #
 # This is also used if you do content translation via gettext catalogs.
 # Usually you set "language" from the command line for these cases.
-language = None
+language = 'en'
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
diff --git a/Documentation/devicetree/bindings/arm/hpe,gxp.yaml b/Documentation/devicetree/bindings/arm/hpe,gxp.yaml
new file mode 100644 (file)
index 0000000..224bbcb
--- /dev/null
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/hpe,gxp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HPE BMC GXP platforms
+
+maintainers:
+  - Nick Hawkins <nick.hawkins@hpe.com>
+  - Jean-Marie Verdun <verdun@hpe.com>
+
+properties:
+  compatible:
+    oneOf:
+      - description: GXP Based Boards
+        items:
+          - enum:
+              - hpe,gxp-dl360gen10
+          - const: hpe,gxp
+
+required:
+  - compatible
+
+additionalProperties: true
+
+...
index 6e04345..61a454a 100644 (file)
@@ -18,6 +18,7 @@ properties:
         items:
           - enum:
               - intel,n5x-socdk
+              - intel,socfpga-agilex-n6000
               - intel,socfpga-agilex-socdk
           - const: intel,socfpga-agilex
 
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra-ccplex-cluster.yaml b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra-ccplex-cluster.yaml
new file mode 100644 (file)
index 0000000..8c6543b
--- /dev/null
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/tegra/nvidia,tegra-ccplex-cluster.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: NVIDIA Tegra CPU COMPLEX CLUSTER area device tree bindings
+
+maintainers:
+  - Sumit Gupta <sumitg@nvidia.com>
+  - Mikko Perttunen <mperttunen@nvidia.com>
+  - Jon Hunter <jonathanh@nvidia.com>
+  - Thierry Reding <thierry.reding@gmail.com>
+
+description: |+
+  The Tegra CPU COMPLEX CLUSTER area contains memory-mapped
+  registers that initiate CPU frequency/voltage transitions.
+
+properties:
+  $nodename:
+    pattern: "ccplex@([0-9a-f]+)$"
+
+  compatible:
+    enum:
+      - nvidia,tegra186-ccplex-cluster
+      - nvidia,tegra234-ccplex-cluster
+
+  reg:
+    maxItems: 1
+
+  nvidia,bpmp:
+    $ref: '/schemas/types.yaml#/definitions/phandle'
+    description: |
+      Specifies the BPMP node that needs to be queried to get
+      operating point data for all CPUs.
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - nvidia,bpmp
+  - status
+
+examples:
+  - |
+    ccplex@e000000 {
+      compatible = "nvidia,tegra234-ccplex-cluster";
+      reg = <0x0e000000 0x5ffff>;
+      nvidia,bpmp = <&bpmp>;
+      status = "okay";
+    };
index 4b79e89..32e8701 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Multimedia Clock & Reset Controller Binding
 
 maintainers:
-  - Jeffrey Hugo <jhugo@codeaurora.org>
+  - Jeffrey Hugo <quic_jhugo@quicinc.com>
   - Taniya Das <tdas@codeaurora.org>
 
 description: |
index b8233ec..e0a4ba5 100644 (file)
@@ -20,6 +20,13 @@ Optional properties:
               Vsram to fit SoC specific needs. When absent, the voltage scaling
               flow is handled by hardware, hence no software "voltage tracking" is
               needed.
+- mediatek,cci:
+       Used to confirm the link status between cpufreq and mediatek cci, because
+       cpufreq and mediatek cci may share the same regulator in some MediaTek SoCs.
+       To prevent running at a high frequency with a low voltage, use this
+       property to make sure mediatek cci is ready.
+       For details of mediatek cci, please refer to
+       Documentation/devicetree/bindings/interconnect/mediatek,cci.yaml
 - #cooling-cells:
        For details, please refer to
        Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml
index fd2e550..7a224b2 100644 (file)
@@ -20,11 +20,12 @@ properties:
     enum:
       - siliconmitus,sm5502-muic
       - siliconmitus,sm5504-muic
+      - siliconmitus,sm5703-muic
 
   reg:
     maxItems: 1
-    description: I2C slave address of the device. Usually 0x25 for SM5502,
-      0x14 for SM5504.
+    description: I2C slave address of the device. Usually 0x25 for SM5502
+      and SM5703, 0x14 for SM5504.
 
   interrupts:
     maxItems: 1
index 146e554..2a80e27 100644 (file)
@@ -9,8 +9,9 @@ Required properties:
   - The second cell is reserved and is currently unused.
 - gpio-controller : Marks the device node as a GPIO controller.
 - interrupt-controller: Mark the device node as an interrupt controller
-- #interrupt-cells : Should be 1. The interrupt type is fixed in the hardware.
+- #interrupt-cells : Should be 2. The interrupt type is fixed in the hardware.
   - The first cell is the GPIO offset number within the GPIO controller.
+  - The second cell is the interrupt trigger type and level flags.
 - interrupts: Specify the interrupt.
 - altr,interrupt-type: Specifies the interrupt trigger type the GPIO
   hardware is synthesized. This field is required if the Altera GPIO controller
@@ -38,6 +39,6 @@ gpio_altr: gpio@ff200000 {
        altr,interrupt-type = <IRQ_TYPE_EDGE_RISING>;
        #gpio-cells = <2>;
        gpio-controller;
-       #interrupt-cells = <1>;
+       #interrupt-cells = <2>;
        interrupt-controller;
 };
index c301078..f992957 100644 (file)
@@ -46,11 +46,11 @@ properties:
               - renesas,i2c-r8a77980     # R-Car V3H
               - renesas,i2c-r8a77990     # R-Car E3
               - renesas,i2c-r8a77995     # R-Car D3
-              - renesas,i2c-r8a779a0     # R-Car V3U
           - const: renesas,rcar-gen3-i2c # R-Car Gen3 and RZ/G2
 
       - items:
           - enum:
+              - renesas,i2c-r8a779a0     # R-Car V3U
               - renesas,i2c-r8a779f0     # R-Car S4-8
           - const: renesas,rcar-gen4-i2c # R-Car Gen4
 
diff --git a/Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt b/Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt
deleted file mode 100644 (file)
index 3716589..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-Bindings for cadence I3C master block
-=====================================
-
-Required properties:
---------------------
-- compatible: shall be "cdns,i3c-master"
-- clocks: shall reference the pclk and sysclk
-- clock-names: shall contain "pclk" and "sysclk"
-- interrupts: the interrupt line connected to this I3C master
-- reg: I3C master registers
-
-Mandatory properties defined by the generic binding (see
-Documentation/devicetree/bindings/i3c/i3c.yaml for more details):
-
-- #address-cells: shall be set to 1
-- #size-cells: shall be set to 0
-
-Optional properties defined by the generic binding (see
-Documentation/devicetree/bindings/i3c/i3c.yaml for more details):
-
-- i2c-scl-hz
-- i3c-scl-hz
-
-I3C device connected on the bus follow the generic description (see
-Documentation/devicetree/bindings/i3c/i3c.yaml for more details).
-
-Example:
-
-       i3c-master@0d040000 {
-               compatible = "cdns,i3c-master";
-               clocks = <&coreclock>, <&i3csysclock>;
-               clock-names = "pclk", "sysclk";
-               interrupts = <3 0>;
-               reg = <0x0d040000 0x1000>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-               i2c-scl-hz = <100000>;
-
-               nunchuk: nunchuk@52 {
-                       compatible = "nintendo,nunchuk";
-                       reg = <0x52 0x0 0x10>;
-               };
-       };
diff --git a/Documentation/devicetree/bindings/i3c/cdns,i3c-master.yaml b/Documentation/devicetree/bindings/i3c/cdns,i3c-master.yaml
new file mode 100644 (file)
index 0000000..cc40d25
--- /dev/null
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i3c/cdns,i3c-master.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cadence I3C master block
+
+maintainers:
+  - Boris Brezillon <bbrezillon@kernel.org>
+
+allOf:
+  - $ref: i3c.yaml#
+
+properties:
+  compatible:
+    const: cdns,i3c-master
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 2
+
+  clock-names:
+    items:
+      - const: pclk
+      - const: sysclk
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - interrupts
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    i3c-master@d040000 {
+        compatible = "cdns,i3c-master";
+        clocks = <&coreclock>, <&i3csysclock>;
+        clock-names = "pclk", "sysclk";
+        interrupts = <3 0>;
+        reg = <0x0d040000 0x1000>;
+        #address-cells = <3>;
+        #size-cells = <0>;
+        i2c-scl-hz = <100000>;
+
+        eeprom@57{
+            compatible = "atmel,24c01";
+            reg = <0x57 0x0 0x10>;
+            pagesize = <0x8>;
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt b/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
deleted file mode 100644 (file)
index 07f35f3..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-Bindings for Synopsys DesignWare I3C master block
-=================================================
-
-Required properties:
---------------------
-- compatible: shall be "snps,dw-i3c-master-1.00a"
-- clocks: shall reference the core_clk
-- interrupts: the interrupt line connected to this I3C master
-- reg: Offset and length of I3C master registers
-
-Mandatory properties defined by the generic binding (see
-Documentation/devicetree/bindings/i3c/i3c.yaml for more details):
-
-- #address-cells: shall be set to 3
-- #size-cells: shall be set to 0
-
-Optional properties defined by the generic binding (see
-Documentation/devicetree/bindings/i3c/i3c.yaml for more details):
-
-- i2c-scl-hz
-- i3c-scl-hz
-
-I3C device connected on the bus follow the generic description (see
-Documentation/devicetree/bindings/i3c/i3c.yaml for more details).
-
-Example:
-
-       i3c-master@2000 {
-               compatible = "snps,dw-i3c-master-1.00a";
-               #address-cells = <3>;
-               #size-cells = <0>;
-               reg = <0x02000 0x1000>;
-               interrupts = <0>;
-               clocks = <&i3cclk>;
-
-               eeprom@57{
-                       compatible = "atmel,24c01";
-                       reg = <0x57 0x0 0x10>;
-                       pagesize = <0x8>;
-               };
-       };
diff --git a/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml b/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml
new file mode 100644 (file)
index 0000000..7a76fd3
--- /dev/null
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i3c/snps,dw-i3c-master.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare I3C master block
+
+maintainers:
+  - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+allOf:
+  - $ref: i3c.yaml#
+
+properties:
+  compatible:
+    const: snps,dw-i3c-master-1.00a
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - interrupts
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    i3c-master@2000 {
+        compatible = "snps,dw-i3c-master-1.00a";
+        #address-cells = <3>;
+        #size-cells = <0>;
+        reg = <0x02000 0x1000>;
+        interrupts = <0>;
+        clocks = <&i3cclk>;
+
+        eeprom@57{
+            compatible = "atmel,24c01";
+            reg = <0x57 0x0 0x10>;
+            pagesize = <0x8>;
+        };
+    };
+...
index c80201d..d66c24c 100644 (file)
@@ -19,7 +19,8 @@ properties:
   compatible:
     items:
       - enum:
-          - renesas,r9a07g044-adc   # RZ/G2{L,LC}
+          - renesas,r9a07g044-adc   # RZ/G2L
+          - renesas,r9a07g054-adc   # RZ/V2L
       - const: renesas,rzg2l-adc
 
   reg:
index caa3ee0..44aa28b 100644 (file)
@@ -20,6 +20,7 @@ properties:
       - sprd,sc2723-adc
       - sprd,sc2730-adc
       - sprd,sc2731-adc
+      - sprd,ump9620-adc
 
   reg:
     maxItems: 1
@@ -33,13 +34,39 @@ properties:
   hwlocks:
     maxItems: 1
 
-  nvmem-cells:
-    maxItems: 2
+  nvmem-cells: true
 
-  nvmem-cell-names:
-    items:
-      - const: big_scale_calib
-      - const: small_scale_calib
+  nvmem-cell-names: true
+
+allOf:
+  - if:
+      not:
+        properties:
+          compatible:
+            contains:
+              enum:
+                - sprd,ump9620-adc
+    then:
+      properties:
+        nvmem-cells:
+          maxItems: 2
+        nvmem-cell-names:
+          items:
+            - const: big_scale_calib
+            - const: small_scale_calib
+
+    else:
+      properties:
+        nvmem-cells:
+          maxItems: 6
+        nvmem-cell-names:
+          items:
+            - const: big_scale_calib1
+            - const: big_scale_calib2
+            - const: small_scale_calib1
+            - const: small_scale_calib2
+            - const: vbat_det_cal1
+            - const: vbat_det_cal2
 
 required:
   - compatible
@@ -69,4 +96,25 @@ examples:
             nvmem-cell-names = "big_scale_calib", "small_scale_calib";
         };
     };
+
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    pmic {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        adc@504 {
+            compatible = "sprd,ump9620-adc";
+            reg = <0x504>;
+            interrupt-parent = <&ump9620_pmic>;
+            interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+            #io-channel-cells = <1>;
+            hwlocks = <&hwlock 4>;
+            nvmem-cells = <&adc_bcal1>, <&adc_bcal2>,
+                          <&adc_scal1>, <&adc_scal2>,
+                          <&vbat_det_cal1>, <&vbat_det_cal2>;
+            nvmem-cell-names = "big_scale_calib1", "big_scale_calib2",
+                               "small_scale_calib1", "small_scale_calib2",
+                               "vbat_det_cal1", "vbat_det_cal2";
+        };
+    };
 ...
index 2c2d01b..a3b7943 100644 (file)
@@ -4,7 +4,7 @@
 $id: http://devicetree.org/schemas/iio/adc/ti,ads1015.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title: TI ADS1015 4 channel I2C analog to digital converter
+title: TI ADS1015/ADS1115 4 channel I2C analog to digital converter
 
 maintainers:
   - Daniel Baluta <daniel.baluta@nxp.com>
@@ -15,7 +15,10 @@ description: |
 
 properties:
   compatible:
-    const: ti,ads1015
+    enum:
+      - ti,ads1015
+      - ti,ads1115
+      - ti,tla2024
 
   reg:
     maxItems: 1
index 501a463..9c48c76 100644 (file)
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Analog Devices AD3552R DAC device driver
 
 maintainers:
-  - Mihail Chindris <mihail.chindris@analog.com>
+  - Nuno Sá <nuno.sa@analog.com>
 
 description: |
   Bindings for the Analog Devices AD3552R DAC device and similar.
index edf804d..b1eb773 100644 (file)
@@ -68,7 +68,7 @@ examples:
       #size-cells = <0>;
 
       dac@0 {
-        compatible = "lltc,ltc2632";
+        compatible = "lltc,ltc2632-l12";
         reg = <0>;    /* CS0 */
         spi-max-frequency = <1000000>;
         vref-supply = <&vref>;
index d69595a..3ebc652 100644 (file)
@@ -14,21 +14,25 @@ description: |
 
 properties:
   compatible:
-    enum:
-      - invensense,iam20680
-      - invensense,icm20608
-      - invensense,icm20609
-      - invensense,icm20689
-      - invensense,icm20602
-      - invensense,icm20690
-      - invensense,mpu6000
-      - invensense,mpu6050
-      - invensense,mpu6500
-      - invensense,mpu6515
-      - invensense,mpu6880
-      - invensense,mpu9150
-      - invensense,mpu9250
-      - invensense,mpu9255
+    oneOf:
+      - enum:
+          - invensense,iam20680
+          - invensense,icm20608
+          - invensense,icm20609
+          - invensense,icm20689
+          - invensense,icm20602
+          - invensense,icm20690
+          - invensense,mpu6000
+          - invensense,mpu6050
+          - invensense,mpu6500
+          - invensense,mpu6515
+          - invensense,mpu6880
+          - invensense,mpu9150
+          - invensense,mpu9250
+          - invensense,mpu9255
+      - items:
+          - const: invensense,icm20608d
+          - const: invensense,icm20608
 
   reg:
     maxItems: 1
index 0750f70..5d4839f 100644 (file)
@@ -14,23 +14,27 @@ description:
 
 properties:
   compatible:
-    enum:
-      - st,lsm6ds3
-      - st,lsm6ds3h
-      - st,lsm6dsl
-      - st,lsm6dsm
-      - st,ism330dlc
-      - st,lsm6dso
-      - st,asm330lhh
-      - st,lsm6dsox
-      - st,lsm6dsr
-      - st,lsm6ds3tr-c
-      - st,ism330dhcx
-      - st,lsm9ds1-imu
-      - st,lsm6ds0
-      - st,lsm6dsrx
-      - st,lsm6dst
-      - st,lsm6dsop
+    oneOf:
+      - enum:
+          - st,lsm6ds3
+          - st,lsm6ds3h
+          - st,lsm6dsl
+          - st,lsm6dsm
+          - st,ism330dlc
+          - st,lsm6dso
+          - st,asm330lhh
+          - st,lsm6dsox
+          - st,lsm6dsr
+          - st,lsm6ds3tr-c
+          - st,ism330dhcx
+          - st,lsm9ds1-imu
+          - st,lsm6ds0
+          - st,lsm6dsrx
+          - st,lsm6dst
+          - st,lsm6dsop
+      - items:
+          - const: st,asm330lhhx
+          - const: st,lsm6dsr
 
   reg:
     maxItems: 1
index f92bf7b..f6e22dc 100644 (file)
@@ -13,6 +13,9 @@ maintainers:
 description: |
   Ambient light and proximity sensor over an i2c interface.
 
+allOf:
+  - $ref: ../common.yaml#
+
 properties:
   compatible:
     enum:
@@ -26,6 +29,8 @@ properties:
   interrupts:
     maxItems: 1
 
+  proximity-near-level: true
+
 required:
   - compatible
   - reg
@@ -44,6 +49,7 @@ examples:
         stk3310@48 {
                 compatible = "sensortek,stk3310";
                 reg = <0x48>;
+                proximity-near-level = <25>;
                 interrupt-parent = <&gpio1>;
                 interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
         };
index 945a2d6..32e92bc 100644 (file)
@@ -95,7 +95,7 @@ examples:
         #size-cells = <0>;
 
         potentiometer@0 {
-            compatible = "mcp4131-502";
+            compatible = "microchip,mcp4131-502";
             reg = <0>;
             spi-max-frequency = <500000>;
         };
index 9735a20..fcb2902 100644 (file)
@@ -29,6 +29,7 @@ properties:
           - st,lis2dw12
           - st,lis2hh12
           - st,lis2dh12-accel
+          - st,lis302dl
           - st,lis331dl-accel
           - st,lis331dlh-accel
           - st,lis3de
index 116e434..bf538c0 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Operating State Manager (OSM) L3 Interconnect Provider
 
 maintainers:
-  - Sibi Sankar <sibis@codeaurora.org>
+  - Sibi Sankar <quic_sibis@quicinc.com>
 
 description:
   L3 cache bandwidth requirements on Qualcomm SoCs is serviced by the OSM.
index 5a911be..28b3516 100644 (file)
@@ -31,7 +31,6 @@ properties:
       - qcom,sc7180-config-noc
       - qcom,sc7180-dc-noc
       - qcom,sc7180-gem-noc
-      - qcom,sc7180-ipa-virt
       - qcom,sc7180-mc-virt
       - qcom,sc7180-mmss-noc
       - qcom,sc7180-npu-noc
@@ -59,7 +58,20 @@ properties:
       - qcom,sc8180x-ipa-virt
       - qcom,sc8180x-mc-virt
       - qcom,sc8180x-mmss-noc
+      - qcom,sc8180x-qup-virt
       - qcom,sc8180x-system-noc
+      - qcom,sc8280xp-aggre1-noc
+      - qcom,sc8280xp-aggre2-noc
+      - qcom,sc8280xp-clk-virt
+      - qcom,sc8280xp-config-noc
+      - qcom,sc8280xp-dc-noc
+      - qcom,sc8280xp-gem-noc
+      - qcom,sc8280xp-lpass-ag-noc
+      - qcom,sc8280xp-mc-virt
+      - qcom,sc8280xp-mmss-noc
+      - qcom,sc8280xp-nspa-noc
+      - qcom,sc8280xp-nspb-noc
+      - qcom,sc8280xp-system-noc
       - qcom,sdm845-aggre1-noc
       - qcom,sdm845-aggre2-noc
       - qcom,sdm845-config-noc
@@ -68,10 +80,12 @@ properties:
       - qcom,sdm845-mem-noc
       - qcom,sdm845-mmss-noc
       - qcom,sdm845-system-noc
-      - qcom,sdx55-ipa-virt
       - qcom,sdx55-mc-virt
       - qcom,sdx55-mem-noc
       - qcom,sdx55-system-noc
+      - qcom,sdx65-mc-virt
+      - qcom,sdx65-mem-noc
+      - qcom,sdx65-system-noc
       - qcom,sm8150-aggre1-noc
       - qcom,sm8150-aggre2-noc
       - qcom,sm8150-camnoc-noc
index da5381c..76fc2c0 100644 (file)
@@ -37,8 +37,10 @@ properties:
               - qcom,sc7180-smmu-500
               - qcom,sc7280-smmu-500
               - qcom,sc8180x-smmu-500
+              - qcom,sc8280xp-smmu-500
               - qcom,sdm845-smmu-500
               - qcom,sdx55-smmu-500
+              - qcom,sdx65-smmu-500
               - qcom,sm6350-smmu-500
               - qcom,sm8150-smmu-500
               - qcom,sm8250-smmu-500
@@ -62,8 +64,9 @@ properties:
           for improved performance.
         items:
           - enum:
-              - nvidia,tegra194-smmu
               - nvidia,tegra186-smmu
+              - nvidia,tegra194-smmu
+              - nvidia,tegra234-smmu
           - const: nvidia,smmu-500
       - items:
           - const: arm,mmu-500
@@ -157,6 +160,17 @@ properties:
   power-domains:
     maxItems: 1
 
+  nvidia,memory-controller:
+    description: |
+      A phandle to the memory controller on NVIDIA Tegra186 and later SoCs.
+      The memory controller needs to be programmed with a mapping of memory
+      client IDs to ARM SMMU stream IDs.
+
+      If this property is absent, the mapping programmed by early firmware
+      will be used and it is not guaranteed that IOMMU translations will be
+      enabled for any given device.
+    $ref: /schemas/types.yaml#/definitions/phandle
+
 required:
   - compatible
   - reg
@@ -172,13 +186,20 @@ allOf:
         compatible:
           contains:
             enum:
-              - nvidia,tegra194-smmu
               - nvidia,tegra186-smmu
+              - nvidia,tegra194-smmu
+              - nvidia,tegra234-smmu
     then:
       properties:
         reg:
           minItems: 1
           maxItems: 2
+
+      # The reference to the memory controller is required to ensure that the
+      # memory client to stream ID mapping can be done synchronously with the
+      # IOMMU attachment.
+      required:
+        - nvidia,memory-controller
     else:
       properties:
         reg:
index 97e8c47..2ae3bba 100644 (file)
@@ -76,7 +76,11 @@ properties:
           - mediatek,mt8167-m4u  # generation two
           - mediatek,mt8173-m4u  # generation two
           - mediatek,mt8183-m4u  # generation two
+          - mediatek,mt8186-iommu-mm         # generation two
           - mediatek,mt8192-m4u  # generation two
+          - mediatek,mt8195-iommu-vdo        # generation two
+          - mediatek,mt8195-iommu-vpp        # generation two
+          - mediatek,mt8195-iommu-infra      # generation two
 
       - description: mt7623 generation one
         items:
@@ -119,7 +123,9 @@ properties:
       dt-binding/memory/mt8167-larb-port.h for mt8167,
       dt-binding/memory/mt8173-larb-port.h for mt8173,
       dt-binding/memory/mt8183-larb-port.h for mt8183,
+      dt-binding/memory/mt8186-memory-port.h for mt8186,
       dt-binding/memory/mt8192-larb-port.h for mt8192.
+      dt-binding/memory/mt8195-memory-port.h for mt8195.
 
   power-domains:
     maxItems: 1
@@ -128,7 +134,6 @@ required:
   - compatible
   - reg
   - interrupts
-  - mediatek,larbs
   - '#iommu-cells'
 
 allOf:
@@ -140,7 +145,10 @@ allOf:
               - mediatek,mt2701-m4u
               - mediatek,mt2712-m4u
               - mediatek,mt8173-m4u
+              - mediatek,mt8186-iommu-mm
               - mediatek,mt8192-m4u
+              - mediatek,mt8195-iommu-vdo
+              - mediatek,mt8195-iommu-vpp
 
     then:
       required:
@@ -150,12 +158,26 @@ allOf:
       properties:
         compatible:
           enum:
+            - mediatek,mt8186-iommu-mm
             - mediatek,mt8192-m4u
+            - mediatek,mt8195-iommu-vdo
+            - mediatek,mt8195-iommu-vpp
 
     then:
       required:
         - power-domains
 
+  - if: # The IOMMUs don't have larbs.
+      not:
+        properties:
+          compatible:
+            contains:
+              const: mediatek,mt8195-iommu-infra
+
+    then:
+      required:
+        - mediatek,larbs
+
 additionalProperties: false
 
 examples:
@@ -173,13 +195,3 @@ examples:
                              <&larb3>, <&larb4>, <&larb5>;
             #iommu-cells = <1>;
     };
-
-  - |
-    #include <dt-bindings/memory/mt8173-larb-port.h>
-
-    /* Example for a client device */
-    display {
-           compatible = "mediatek,mt8173-disp";
-           iommus = <&iommu M4U_PORT_DISP_OVL0>,
-                    <&iommu M4U_PORT_DISP_RDMA0>;
-     };
index 783c6b3..672a0be 100644 (file)
@@ -86,16 +86,6 @@ examples:
   - |
     #include <dt-bindings/clock/exynos5250.h>
 
-    gsc_0: scaler@13e00000 {
-      compatible = "samsung,exynos5-gsc";
-      reg = <0x13e00000 0x1000>;
-      interrupts = <0 85 0>;
-      power-domains = <&pd_gsc>;
-      clocks = <&clock CLK_GSCL0>;
-      clock-names = "gscl";
-      iommus = <&sysmmu_gsc0>;
-    };
-
     sysmmu_gsc0: iommu@13e80000 {
       compatible = "samsung,exynos-sysmmu";
       reg = <0x13E80000 0x1000>;
diff --git a/Documentation/devicetree/bindings/leds/kinetic,ktd2692.yaml b/Documentation/devicetree/bindings/leds/kinetic,ktd2692.yaml
new file mode 100644 (file)
index 0000000..bac95a5
--- /dev/null
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/kinetic,ktd2692.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: KTD2692 Flash LED Driver from Kinetic Technologies
+
+maintainers:
+  - Markuss Broks <markuss.broks@gmail.com>
+
+description: |
+  KTD2692 is the ideal power solution for high-power flash LEDs.
+  It uses ExpressWire single-wire programming for maximum flexibility.
+
+  The ExpressWire interface through CTRL pin can control LED on/off and
+  enable/disable the IC, Movie(max 1/3 of Flash current) / Flash mode current,
+  Flash timeout, LVP(low voltage protection).
+
+  Also, When the AUX pin is pulled high while CTRL pin is high,
+  LED current will be ramped up to the flash-mode current level.
+
+properties:
+  compatible:
+    const: kinetic,ktd2692
+
+  ctrl-gpios:
+    maxItems: 1
+    description: Specifier of the GPIO connected to CTRL pin.
+
+  aux-gpios:
+    maxItems: 1
+    description: Specifier of the GPIO connected to AUX pin.
+
+  vin-supply:
+    description: LED supply (2.7V to 5.5V).
+
+  led:
+    type: object
+    $ref: common.yaml#
+    description: Properties for the LED.
+    properties:
+      function: true
+      color: true
+      flash-max-timeout-us:
+        description: Flash LED maximum timeout.
+
+      led-max-microamp:
+        maximum: 300000
+        description: Minimum Threshold for Timer protection
+          is defined internally (Maximum 300mA).
+
+      flash-max-microamp:
+        maximum: 300000
+        description: Flash LED maximum current
+          Formula - I(uA) = 15000000 / Rset.
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - ctrl-gpios
+  - led
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/leds/common.h>
+
+    ktd2692 {
+      compatible = "kinetic,ktd2692";
+      ctrl-gpios = <&gpc0 1 0>;
+      aux-gpios = <&gpc0 2 0>;
+      vin-supply = <&vbat>;
+
+      led {
+        function = LED_FUNCTION_FLASH;
+        color = <LED_COLOR_ID_WHITE>;
+        flash-max-timeout-us = <250000>;
+        flash-max-microamp = <150000>;
+        led-max-microamp = <25000>;
+      };
+    };
+
+...
index 37445c6..f41d021 100644 (file)
@@ -20,7 +20,7 @@ description: |
   within this documentation directory.
 
 patternProperties:
-  "^multi-led@([0-9a-f])$":
+  "^multi-led(@[0-9a-f])?$":
     type: object
     description: Represents the LEDs that are to be grouped.
     properties:
diff --git a/Documentation/devicetree/bindings/leds/leds-ktd2692.txt b/Documentation/devicetree/bindings/leds/leds-ktd2692.txt
deleted file mode 100644 (file)
index 8537374..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-* Kinetic Technologies - KTD2692 Flash LED Driver
-
-KTD2692 is the ideal power solution for high-power flash LEDs.
-It uses ExpressWire single-wire programming for maximum flexibility.
-
-The ExpressWire interface through CTRL pin can control LED on/off and
-enable/disable the IC, Movie(max 1/3 of Flash current) / Flash mode current,
-Flash timeout, LVP(low voltage protection).
-
-Also, When the AUX pin is pulled high while CTRL pin is high,
-LED current will be ramped up to the flash-mode current level.
-
-Required properties:
-- compatible : Should be "kinetic,ktd2692".
-- ctrl-gpios : Specifier of the GPIO connected to CTRL pin.
-- aux-gpios : Specifier of the GPIO connected to AUX pin.
-
-Optional properties:
-- vin-supply : "vin" LED supply (2.7V to 5.5V).
-  See Documentation/devicetree/bindings/regulator/regulator.txt
-
-A discrete LED element connected to the device must be represented by a child
-node - See Documentation/devicetree/bindings/leds/common.txt
-
-Required properties for flash LED child nodes:
-  See Documentation/devicetree/bindings/leds/common.txt
-- led-max-microamp : Minimum Threshold for Timer protection
-  is defined internally (Maximum 300mA).
-- flash-max-microamp : Flash LED maximum current
-  Formula : I(mA) = 15000 / Rset.
-- flash-max-timeout-us : Flash LED maximum timeout.
-
-Optional properties for flash LED child nodes:
-- label : See Documentation/devicetree/bindings/leds/common.txt
-
-Example:
-
-ktd2692 {
-       compatible = "kinetic,ktd2692";
-       ctrl-gpios = <&gpc0 1 0>;
-       aux-gpios = <&gpc0 2 0>;
-       vin-supply = <&vbat>;
-
-       flash-led {
-               label = "ktd2692-flash";
-               led-max-microamp = <300000>;
-               flash-max-microamp = <1500000>;
-               flash-max-timeout-us = <1835000>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml b/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml
new file mode 100644 (file)
index 0000000..6625a52
--- /dev/null
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/leds-pwm-multicolor.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Multi-color LEDs connected to PWM
+
+maintainers:
+  - Sven Schwermer <sven.schwermer@disruptive-technologies.com>
+
+description: |
+  This driver combines several monochrome PWM LEDs into one multi-color
+  LED using the multicolor LED class.
+
+properties:
+  compatible:
+    const: pwm-leds-multicolor
+
+  multi-led:
+    type: object
+
+    patternProperties:
+      "^led-[0-9a-z]+$":
+        type: object
+        $ref: common.yaml#
+
+        additionalProperties: false
+
+        properties:
+          pwms:
+            maxItems: 1
+
+          pwm-names: true
+
+          color: true
+
+        required:
+          - pwms
+          - color
+
+required:
+  - compatible
+
+allOf:
+  - $ref: leds-class-multicolor.yaml#
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/leds/common.h>
+
+    led-controller {
+        compatible = "pwm-leds-multicolor";
+
+        multi-led {
+          color = <LED_COLOR_ID_RGB>;
+          function = LED_FUNCTION_INDICATOR;
+          max-brightness = <65535>;
+
+          led-red {
+              pwms = <&pwm1 0 1000000>;
+              color = <LED_COLOR_ID_RED>;
+          };
+
+          led-green {
+              pwms = <&pwm2 0 1000000>;
+              color = <LED_COLOR_ID_GREEN>;
+          };
+
+          led-blue {
+              pwms = <&pwm3 0 1000000>;
+              color = <LED_COLOR_ID_BLUE>;
+          };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml b/Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml
new file mode 100644 (file)
index 0000000..409a4c7
--- /dev/null
@@ -0,0 +1,174 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/leds-qcom-lpg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Light Pulse Generator
+
+maintainers:
+  - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: >
+  The Qualcomm Light Pulse Generator consists of three different hardware blocks:
+  a ramp generator with lookup table, the light pulse generator and a three
+  channel current sink. These blocks are found in a wide range of Qualcomm PMICs.
+
+properties:
+  compatible:
+    enum:
+      - qcom,pm8150b-lpg
+      - qcom,pm8150l-lpg
+      - qcom,pm8350c-pwm
+      - qcom,pm8916-pwm
+      - qcom,pm8941-lpg
+      - qcom,pm8994-lpg
+      - qcom,pmc8180c-lpg
+      - qcom,pmi8994-lpg
+      - qcom,pmi8998-lpg
+
+  "#pwm-cells":
+    const: 2
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 0
+
+  qcom,power-source:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      power-source used to drive the output, as defined in the datasheet.
+      Should be specified if the TRILED block is present
+    enum: [0, 1, 3]
+
+  qcom,dtest:
+    $ref: /schemas/types.yaml#/definitions/uint32-matrix
+    description: >
+      A list of integer pairs, where each pair represents the dtest line the
+      particular channel should be connected to and the flags denoting how the
+      value should be output, as defined in the datasheet. The number of
+      pairs should be the same as the number of channels.
+    items:
+      items:
+        - description: dtest line to attach
+        - description: flags for the attachment
+
+  multi-led:
+    type: object
+    $ref: leds-class-multicolor.yaml#
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+    patternProperties:
+      "^led@[0-9a-f]$":
+        type: object
+        $ref: common.yaml#
+
+patternProperties:
+  "^led@[0-9a-f]$":
+    type: object
+    $ref: common.yaml#
+
+    properties:
+      reg: true
+
+    required:
+      - reg
+
+required:
+  - compatible
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/leds/common.h>
+
+    led-controller {
+      compatible = "qcom,pmi8994-lpg";
+
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      qcom,power-source = <1>;
+
+      qcom,dtest = <0 0>,
+                   <0 0>,
+                   <0 0>,
+                   <4 1>;
+
+      led@1 {
+        reg = <1>;
+        color = <LED_COLOR_ID_GREEN>;
+        function = LED_FUNCTION_INDICATOR;
+        function-enumerator = <1>;
+      };
+
+      led@2 {
+        reg = <2>;
+        color = <LED_COLOR_ID_GREEN>;
+        function = LED_FUNCTION_INDICATOR;
+        function-enumerator = <0>;
+        default-state = "on";
+      };
+
+      led@3 {
+        reg = <3>;
+        color = <LED_COLOR_ID_GREEN>;
+        function = LED_FUNCTION_INDICATOR;
+        function-enumerator = <2>;
+      };
+
+      led@4 {
+        reg = <4>;
+        color = <LED_COLOR_ID_GREEN>;
+        function = LED_FUNCTION_INDICATOR;
+        function-enumerator = <3>;
+      };
+    };
+  - |
+    #include <dt-bindings/leds/common.h>
+
+    led-controller {
+      compatible = "qcom,pmi8994-lpg";
+
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      qcom,power-source = <1>;
+
+      multi-led {
+        color = <LED_COLOR_ID_RGB>;
+        function = LED_FUNCTION_STATUS;
+
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        led@1 {
+          reg = <1>;
+          color = <LED_COLOR_ID_RED>;
+        };
+
+        led@2 {
+          reg = <2>;
+          color = <LED_COLOR_ID_GREEN>;
+        };
+
+        led@3 {
+          reg = <3>;
+          color = <LED_COLOR_ID_BLUE>;
+        };
+      };
+    };
+  - |
+    pwm-controller {
+      compatible = "qcom,pm8916-pwm";
+      #pwm-cells = <2>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/leds/regulator-led.yaml b/Documentation/devicetree/bindings/leds/regulator-led.yaml
new file mode 100644 (file)
index 0000000..3e020d7
--- /dev/null
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/regulator-led.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Device Tree Bindings for Regulator LEDs
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+  Regulator LEDs are powered by a single regulator such that they can
+  be turned on or off by enabling or disabling the regulator. The available
+  brightness settings will be inferred from the available voltages on the
+  regulator, and any constraints on the voltage or current will need to be
+  specified on the regulator.
+
+allOf:
+  - $ref: common.yaml#
+
+properties:
+  $nodename:
+    pattern: '^led.*$'
+
+  compatible:
+    const: regulator-led
+
+  vled-supply:
+    description:
+      The regulator controlling the current to the LED.
+
+  function: true
+  color: true
+  linux,default-trigger: true
+  default-state: true
+
+required:
+  - compatible
+  - vled-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/leds/common.h>
+
+    led-heartbeat {
+        compatible = "regulator-led";
+        vled-supply = <&regulator>;
+        function = LED_FUNCTION_STATUS;
+        color = <LED_COLOR_ID_BLUE>;
+        linux,default-trigger = "heartbeat";
+    };
+...
diff --git a/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc-peripherals.yaml b/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc-peripherals.yaml
new file mode 100644 (file)
index 0000000..b8ed52a
--- /dev/null
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/ingenic,nemc-peripherals.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic SoCs NAND / External Memory Controller (NEMC) devicetree bindings
+
+maintainers:
+  - Paul Cercueil <paul@crapouillou.net>
+
+properties:
+  reg:
+    minItems: 1
+    maxItems: 255
+
+  ingenic,nemc-bus-width:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    enum: [8, 16]
+    description: Specifies the bus width in bits.
+
+  ingenic,nemc-tAS:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Address setup time in nanoseconds.
+
+  ingenic,nemc-tAH:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Address hold time in nanoseconds.
+
+  ingenic,nemc-tBP:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Burst pitch time in nanoseconds.
+
+  ingenic,nemc-tAW:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Address wait time in nanoseconds.
+
+  ingenic,nemc-tSTRV:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Static memory recovery time in nanoseconds.
+
+required:
+  - reg
+
+additionalProperties: true
+...
index 24f9e19..dd13a51 100644 (file)
@@ -39,38 +39,6 @@ properties:
 patternProperties:
   ".*@[0-9]+$":
     type: object
-    properties:
-      reg:
-        minItems: 1
-        maxItems: 255
-
-      ingenic,nemc-bus-width:
-        $ref: /schemas/types.yaml#/definitions/uint32
-        enum: [8, 16]
-        description: Specifies the bus width in bits.
-
-      ingenic,nemc-tAS:
-        $ref: /schemas/types.yaml#/definitions/uint32
-        description: Address setup time in nanoseconds.
-
-      ingenic,nemc-tAH:
-        $ref: /schemas/types.yaml#/definitions/uint32
-        description: Address hold time in nanoseconds.
-
-      ingenic,nemc-tBP:
-        $ref: /schemas/types.yaml#/definitions/uint32
-        description: Burst pitch time in nanoseconds.
-
-      ingenic,nemc-tAW:
-        $ref: /schemas/types.yaml#/definitions/uint32
-        description: Address wait time in nanoseconds.
-
-      ingenic,nemc-tSTRV:
-        $ref: /schemas/types.yaml#/definitions/uint32
-        description: Static memory recovery time in nanoseconds.
-
-    required:
-      - reg
 
 required:
   - compatible
index 91b79a2..aa8b800 100644 (file)
@@ -64,10 +64,13 @@ Sub-nodes:
     and KEY_SLEEP.
 
 - watchdog : This node defines settings for the Watchdog timer associated
-  with the DA9063 and DA9063L. There are currently no entries in this
-  binding, however compatible = "dlg,da9063-watchdog" should be added
-  if a node is created.
+  with the DA9063 and DA9063L. The node should contain the compatible property
+  with the value "dlg,da9063-watchdog".
 
+  Optional watchdog properties:
+  - dlg,use-sw-pm: Add this property to disable the watchdog during suspend.
+  Only use this option if the watchdog's automatic suspend function
+  (see register CONTROL_B) cannot be used during suspend.
 
 Example:
 
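A minimal sketch of such a watchdog sub-node, assuming it sits under the DA9063/DA9063L PMIC node, could look like this:

    watchdog {
        compatible = "dlg,da9063-watchdog";
        dlg,use-sw-pm;      /* optional: keep the watchdog disabled during suspend */
    };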
index 9de8ef6..8c272c8 100644 (file)
@@ -11,6 +11,7 @@ maintainers:
 
 allOf:
   - $ref: nand-controller.yaml#
+  - $ref: /schemas/memory-controllers/ingenic,nemc-peripherals.yaml#
 
 properties:
   compatible:
index 431faac..dd3cd1d 100644 (file)
@@ -11,6 +11,7 @@ maintainers:
 
 allOf:
   - $ref: "nand-chip.yaml#"
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
 
 properties:
   compatible:
index 77750df..929cf8c 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Analog Devices ADIN1200/ADIN1300 PHY
 
 maintainers:
-  - Alexandru Ardelean <alexandru.ardelean@analog.com>
+  - Alexandru Tachici <alexandru.tachici@analog.com>
 
 description: |
   Bindings for Analog Devices Industrial Ethernet PHYs
@@ -37,7 +37,8 @@ properties:
     default: 8
 
   adi,phy-output-clock:
-    description: Select clock output on GP_CLK pin. Two clocks are available:
+    description: |
+      Select clock output on GP_CLK pin. Two clocks are available:
       A 25MHz reference and a free-running 125MHz.
       The phy can alternatively automatically switch between the reference and
       the 125MHz clocks based on its internal state.
index 337cec4..86fc31c 100644 (file)
@@ -191,7 +191,6 @@ examples:
                     clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
                     #address-cells = <1>;
                     #size-cells = <0>;
-                    #stream-id-cells = <1>;
                     iommus = <&smmu 0x875>;
                     power-domains = <&zynqmp_firmware PD_ETH_1>;
                     resets = <&zynqmp_reset ZYNQMP_RESET_GEM1>;
index c3c9388..23114d6 100644 (file)
@@ -6,9 +6,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Broadcom BCM53xx Ethernet switches
 
-allOf:
-  - $ref: dsa.yaml#
-
 maintainers:
   - Florian Fainelli <f.fainelli@gmail.com>
 
@@ -68,53 +65,71 @@ required:
   - compatible
   - reg
 
-# BCM585xx/586xx/88312 SoCs
-if:
-  properties:
-    compatible:
-      contains:
-        enum:
-          - brcm,bcm58522-srab
-          - brcm,bcm58523-srab
-          - brcm,bcm58525-srab
-          - brcm,bcm58622-srab
-          - brcm,bcm58623-srab
-          - brcm,bcm58625-srab
-          - brcm,bcm88312-srab
-then:
-  properties:
-    reg:
-      minItems: 3
-      maxItems: 3
-    reg-names:
-      items:
-        - const: srab
-        - const: mux_config
-        - const: sgmii_config
-    interrupts:
-      minItems: 13
-      maxItems: 13
-    interrupt-names:
-      items:
-        - const: link_state_p0
-        - const: link_state_p1
-        - const: link_state_p2
-        - const: link_state_p3
-        - const: link_state_p4
-        - const: link_state_p5
-        - const: link_state_p7
-        - const: link_state_p8
-        - const: phy
-        - const: ts
-        - const: imp_sleep_timer_p5
-        - const: imp_sleep_timer_p7
-        - const: imp_sleep_timer_p8
-  required:
-    - interrupts
-else:
-  properties:
-    reg:
-      maxItems: 1
+allOf:
+  - $ref: dsa.yaml#
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - brcm,bcm5325
+              - brcm,bcm53115
+              - brcm,bcm53125
+              - brcm,bcm53128
+              - brcm,bcm5365
+              - brcm,bcm5395
+              - brcm,bcm5397
+              - brcm,bcm5398
+    then:
+      $ref: /schemas/spi/spi-peripheral-props.yaml
+
+    # BCM585xx/586xx/88312 SoCs
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - brcm,bcm58522-srab
+              - brcm,bcm58523-srab
+              - brcm,bcm58525-srab
+              - brcm,bcm58622-srab
+              - brcm,bcm58623-srab
+              - brcm,bcm58625-srab
+              - brcm,bcm88312-srab
+    then:
+      properties:
+        reg:
+          minItems: 3
+          maxItems: 3
+        reg-names:
+          items:
+            - const: srab
+            - const: mux_config
+            - const: sgmii_config
+        interrupts:
+          minItems: 13
+          maxItems: 13
+        interrupt-names:
+          items:
+            - const: link_state_p0
+            - const: link_state_p1
+            - const: link_state_p2
+            - const: link_state_p3
+            - const: link_state_p4
+            - const: link_state_p5
+            - const: link_state_p7
+            - const: link_state_p8
+            - const: phy
+            - const: ts
+            - const: imp_sleep_timer_p5
+            - const: imp_sleep_timer_p7
+            - const: imp_sleep_timer_p8
+      required:
+        - interrupts
+    else:
+      properties:
+        reg:
+          maxItems: 1
 
 unevaluatedProperties: false
 
index 1841520..6bbd814 100644 (file)
@@ -12,6 +12,7 @@ maintainers:
 
 allOf:
   - $ref: dsa.yaml#
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
 
 properties:
   # See Documentation/devicetree/bindings/net/dsa/dsa.yaml for a list of additional
index 1ea0bd4..1e26d87 100644 (file)
@@ -14,6 +14,7 @@ description:
 
 allOf:
   - $ref: "dsa.yaml#"
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
 
 maintainers:
   - Vladimir Oltean <vladimir.oltean@nxp.com>
index 99ee4b5..4f99aff 100644 (file)
@@ -108,6 +108,7 @@ if:
     - reg
 
 then:
+  $ref: /schemas/spi/spi-peripheral-props.yaml#
   not:
     required:
       - mdc-gpios
index 699164d..f5564ec 100644 (file)
@@ -27,6 +27,9 @@ properties:
   reg:
     maxItems: 1
 
+  clocks: true
+  clock-names: true
+
   interrupts:
     minItems: 3
     maxItems: 4
index 9019446..61b2fb9 100644 (file)
@@ -58,6 +58,9 @@ properties:
       - const: rmii_internal
       - const: mac_cg
 
+  power-domains:
+    maxItems: 1
+
   mediatek,pericfg:
     $ref: /schemas/types.yaml#/definitions/phandle
     description:
index 249967d..5a12dc3 100644 (file)
@@ -51,7 +51,7 @@ properties:
     description:
       Specify the consys reset for mt7986.
 
-  reset-name:
+  reset-names:
     const: consys
 
   mediatek,infracfg:
diff --git a/Documentation/devicetree/bindings/nvmem/apple,efuses.yaml b/Documentation/devicetree/bindings/nvmem/apple,efuses.yaml
new file mode 100644 (file)
index 0000000..5ec8f2b
--- /dev/null
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/apple,efuses.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple SoC eFuse-based NVMEM
+
+description: |
+  Apple SoCs such as the M1 contain factory-programmed eFuses used to e.g. store
+  calibration data for the PCIe and the Type-C PHY or unique chip identifiers
+  such as the ECID.
+
+maintainers:
+  - Sven Peter <sven@svenpeter.dev>
+
+allOf:
+  - $ref: "nvmem.yaml#"
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - apple,t8103-efuses
+          - apple,t6000-efuses
+      - const: apple,efuses
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    efuse@3d2bc000 {
+        compatible = "apple,t8103-efuses", "apple,efuses";
+        reg = <0x3d2bc000 0x1000>;
+        #address-cells = <1>;
+        #size-cells = <1>;
+
+        ecid: efuse@500 {
+            reg = <0x500 0x8>;
+        };
+    };
+
+...
index 80914b9..3b4e6e9 100644 (file)
@@ -10,7 +10,7 @@ maintainers:
   - Michael Walle <michael@walle.cc>
 
 description: |
-  SFP is the security fuse processor which among other things provide a
+  SFP is the security fuse processor which among other things provides a
   unique identifier per part.
 
 allOf:
@@ -18,21 +18,45 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - fsl,ls1028a-sfp
+    oneOf:
+      - description: Trust architecture 2.1 SFP
+        items:
+          - const: fsl,ls1021a-sfp
+      - description: Trust architecture 3.0 SFP
+        items:
+          - const: fsl,ls1028a-sfp
 
   reg:
     maxItems: 1
 
+  clocks:
+    maxItems: 1
+    description:
+      The SFP clock. Typically, this is the platform clock divided by 4.
+
+  clock-names:
+    const: sfp
+
+  ta-prog-sfp-supply:
+    description:
+      The regulator for the TA_PROG_SFP pin. It will be enabled for programming
+      and disabled for reading.
+
 required:
   - compatible
   - reg
+  - clock-names
+  - clocks
 
 unevaluatedProperties: false
 
 examples:
   - |
+    #include <dt-bindings/clock/fsl,qoriq-clockgen.h>
     efuse@1e80000 {
         compatible = "fsl,ls1028a-sfp";
         reg = <0x1e80000 0x8000>;
+        clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+                            QORIQ_CLK_PLL_DIV(4)>;
+        clock-names = "sfp";
     };
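Under the same schema, a hedged sketch of the Trust Architecture 2.1 variant together with the new supply property might look like this; the register size and regulator phandle are placeholders:

    efuse@1e80000 {
        compatible = "fsl,ls1021a-sfp";
        reg = <0x1e80000 0x10000>;
        clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
                            QORIQ_CLK_PLL_DIV(4)>;
        clock-names = "sfp";
        ta-prog-sfp-supply = <&ta_prog_sfp>;    /* placeholder regulator */
    };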
index 8c2e9ac..30f7b59 100644 (file)
@@ -17,10 +17,10 @@ description: |
   the CPU frequencies subset and voltage value of each OPP varies based on
   the silicon variant in use.
   Qualcomm Technologies, Inc. Process Voltage Scaling Tables
-  defines the voltage and frequency value based on the msm-id in SMEM
-  and speedbin blown in the efuse combination.
-  The qcom-cpufreq-nvmem driver reads the msm-id and efuse value from the SoC
-  to provide the OPP framework with required information (existing HW bitmap).
+  defines the voltage and frequency value based on the speedbin blown in
+  the efuse combination.
+  The qcom-cpufreq-nvmem driver reads the efuse value from the SoC to provide
+  the OPP framework with required information (existing HW bitmap).
   This is used to determine the voltage and frequency value for each OPP of
   operating-points-v2 table when it is parsed by the OPP framework.
 
@@ -50,15 +50,11 @@ patternProperties:
         description: |
           A single 32 bit bitmap value, representing compatible HW.
           Bitmap:
-          0:  MSM8996 V3, speedbin 0
-          1:  MSM8996 V3, speedbin 1
-          2:  MSM8996 V3, speedbin 2
-          3:  unused
-          4:  MSM8996 SG, speedbin 0
-          5:  MSM8996 SG, speedbin 1
-          6:  MSM8996 SG, speedbin 2
-          7-31:  unused
-        maximum: 0x77
+          0:  MSM8996, speedbin 0
+          1:  MSM8996, speedbin 1
+          2:  MSM8996, speedbin 2
+          3-31:  unused
+        maximum: 0x7
 
       clock-latency-ns: true
 
@@ -184,19 +180,19 @@ examples:
             opp-307200000 {
                 opp-hz = /bits/ 64 <307200000>;
                 opp-microvolt = <905000 905000 1140000>;
-                opp-supported-hw = <0x77>;
+                opp-supported-hw = <0x7>;
                 clock-latency-ns = <200000>;
             };
-            opp-1593600000 {
-                opp-hz = /bits/ 64 <1593600000>;
+            opp-1401600000 {
+                opp-hz = /bits/ 64 <1401600000>;
                 opp-microvolt = <1140000 905000 1140000>;
-                opp-supported-hw = <0x71>;
+                opp-supported-hw = <0x5>;
                 clock-latency-ns = <200000>;
             };
-            opp-2188800000 {
-                opp-hz = /bits/ 64 <2188800000>;
+            opp-1593600000 {
+                opp-hz = /bits/ 64 <1593600000>;
                 opp-microvolt = <1140000 905000 1140000>;
-                opp-supported-hw = <0x10>;
+                opp-supported-hw = <0x1>;
                 clock-latency-ns = <200000>;
             };
         };
@@ -209,25 +205,25 @@ examples:
             opp-307200000 {
                 opp-hz = /bits/ 64 <307200000>;
                 opp-microvolt = <905000 905000 1140000>;
-                opp-supported-hw = <0x77>;
+                opp-supported-hw = <0x7>;
                 clock-latency-ns = <200000>;
             };
-            opp-1593600000 {
-                opp-hz = /bits/ 64 <1593600000>;
+            opp-1804800000 {
+                opp-hz = /bits/ 64 <1804800000>;
                 opp-microvolt = <1140000 905000 1140000>;
-                opp-supported-hw = <0x70>;
+                opp-supported-hw = <0x6>;
                 clock-latency-ns = <200000>;
             };
-            opp-2150400000 {
-                opp-hz = /bits/ 64 <2150400000>;
+            opp-1900800000 {
+                opp-hz = /bits/ 64 <1900800000>;
                 opp-microvolt = <1140000 905000 1140000>;
-                opp-supported-hw = <0x31>;
+                opp-supported-hw = <0x4>;
                 clock-latency-ns = <200000>;
             };
-            opp-2342400000 {
-                opp-hz = /bits/ 64 <2342400000>;
+            opp-2150400000 {
+                opp-hz = /bits/ 64 <2150400000>;
                 opp-microvolt = <1140000 905000 1140000>;
-                opp-supported-hw = <0x10>;
+                opp-supported-hw = <0x1>;
                 clock-latency-ns = <200000>;
             };
         };
index daf602a..aa38680 100644 (file)
@@ -68,6 +68,9 @@ properties:
   iommu-map: true
   iommu-map-mask: true
 
+  power-domains:
+    maxItems: 1
+
 required:
   - compatible
   - reg
@@ -134,7 +137,7 @@ examples:
         ranges = <0x43000000 0x6 0xa0000000 0x6 0xa0000000 0x0 0x20000000>,
                  <0x02000000 0x0 0xc0000000 0x6 0xc0000000 0x0 0x40000000>;
 
-        power-domains = <&ps_apcie>, <&ps_apcie_gp>, <&ps_pcie_ref>;
+        power-domains = <&ps_apcie_gp>;
         pinctrl-0 = <&pcie_pins>;
         pinctrl-names = "default";
 
index f5926d0..638b99d 100644 (file)
@@ -51,6 +51,19 @@ properties:
   phy-names:
     const: pcie-phy
 
+  interrupt-controller:
+    type: object
+    additionalProperties: false
+
+    properties:
+      interrupt-controller: true
+
+      '#interrupt-cells':
+        const: 1
+
+      interrupts:
+        maxItems: 1
+
 required:
   - compatible
   - reg
@@ -62,6 +75,13 @@ unevaluatedProperties: false
 
 examples:
   - |
+    bus {
+        gic: interrupt-controller {
+            interrupt-controller;
+            #interrupt-cells = <3>;
+        };
+    };
+
     pcie: pcie@66000000 {
         compatible = "socionext,uniphier-pcie";
         reg-names = "dbi", "link", "config";
@@ -80,6 +100,7 @@ examples:
         phys = <&pcie_phy>;
         #interrupt-cells = <1>;
         interrupt-names = "dma", "msi";
+        interrupt-parent = <&gic>;
         interrupts = <0 224 4>, <0 225 4>;
         interrupt-map-mask = <0 0 0  7>;
         interrupt-map = <0 0 0  1  &pcie_intc 0>,
@@ -87,7 +108,7 @@ examples:
                         <0 0 0  3  &pcie_intc 2>,
                         <0 0 0  4  &pcie_intc 3>;
 
-        pcie_intc: legacy-interrupt-controller {
+        pcie_intc: interrupt-controller {
             interrupt-controller;
             #interrupt-cells = <1>;
             interrupt-parent = <&gic>;
index 32f4641..cca3953 100644 (file)
@@ -18,13 +18,13 @@ properties:
 
   reg:
     items:
-      - description: Configuration space region and bridge registers.
       - description: CPM system level control and status registers.
+      - description: Configuration space region and bridge registers.
 
   reg-names:
     items:
-      - const: cfg
       - const: cpm_slcr
+      - const: cfg
 
   interrupts:
     maxItems: 1
@@ -86,9 +86,9 @@ examples:
                        ranges = <0x02000000 0x0 0xe0000000 0x0 0xe0000000 0x0 0x10000000>,
                                 <0x43000000 0x80 0x00000000 0x80 0x00000000 0x0 0x80000000>;
                        msi-map = <0x0 &its_gic 0x0 0x10000>;
-                       reg = <0x6 0x00000000 0x0 0x10000000>,
-                             <0x0 0xfca10000 0x0 0x1000>;
-                       reg-names = "cfg", "cpm_slcr";
+                       reg = <0x0 0xfca10000 0x0 0x1000>,
+                             <0x6 0x00000000 0x0 0x10000000>;
+                       reg-names = "cpm_slcr", "cfg";
                        pcie_intc_0: interrupt-controller {
                                #address-cells = <0>;
                                #interrupt-cells = <1>;
index d0b541a..22636c9 100644 (file)
@@ -37,6 +37,18 @@ properties:
   resets:
     maxItems: 1
 
+  allwinner,direction:
+    $ref: '/schemas/types.yaml#/definitions/string'
+    description: |
+      Direction of the D-PHY:
+      - "rx" for receiving (e.g. when used with MIPI CSI-2);
+      - "tx" for transmitting (e.g. when used with MIPI DSI).
+
+    enum:
+      - tx
+      - rx
+    default: tx
+
 required:
   - "#phy-cells"
   - compatible
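As a hedged illustration of the new property, a D-PHY used on the receive side for MIPI CSI-2 might be described as below; the compatible, address and clock/reset specifiers are placeholders rather than values from the patch:

    dphy: d-phy@1ca1000 {
        compatible = "allwinner,sun6i-a31-mipi-dphy";
        reg = <0x01ca1000 0x1000>;
        clocks = <&ccu 23>, <&ccu 24>;      /* placeholder specifiers */
        clock-names = "bus", "mod";
        resets = <&ccu 4>;                  /* placeholder specifier */
        #phy-cells = <0>;
        allwinner,direction = "rx";         /* receiving, e.g. MIPI CSI-2 */
    };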
index 2437c36..632d61c 100644 (file)
@@ -45,7 +45,7 @@ additionalProperties: false
 examples:
   - |
     usb2_utmi_host_phy: phy@5f000 {
-      compatible = "marvell,armada-3700-utmi-host-phy";
+      compatible = "marvell,a3700-utmi-host-phy";
       reg = <0x5f000 0x800>;
       marvell,usb-misc-reg = <&usb2_syscon>;
       #phy-cells = <0>;
diff --git a/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.txt b/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.txt
deleted file mode 100644 (file)
index 9b23407..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-Mixel DSI PHY for i.MX8
-
-The Mixel MIPI-DSI PHY IP block is e.g. found on i.MX8 platforms (along the
-MIPI-DSI IP from Northwest Logic). It represents the physical layer for the
-electrical signals for DSI.
-
-Required properties:
-- compatible: Must be:
-  - "fsl,imx8mq-mipi-dphy"
-- clocks: Must contain an entry for each entry in clock-names.
-- clock-names: Must contain the following entries:
-  - "phy_ref": phandle and specifier referring to the DPHY ref clock
-- reg: the register range of the PHY controller
-- #phy-cells: number of cells in PHY, as defined in
-  Documentation/devicetree/bindings/phy/phy-bindings.txt
-  this must be <0>
-
-Optional properties:
-- power-domains: phandle to power domain
-
-Example:
-       dphy: dphy@30a0030 {
-               compatible = "fsl,imx8mq-mipi-dphy";
-               clocks = <&clk IMX8MQ_CLK_DSI_PHY_REF>;
-               clock-names = "phy_ref";
-               reg = <0x30a00300 0x100>;
-               power-domains = <&pd_mipi0>;
-               #phy-cells = <0>;
-        };
diff --git a/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml b/Documentation/devicetree/bindings/phy/mixel,mipi-dsi-phy.yaml
new file mode 100644 (file)
index 0000000..786cfd7
--- /dev/null
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/mixel,mipi-dsi-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mixel DSI PHY for i.MX8
+
+maintainers:
+  - Guido Günther <agx@sigxcpu.org>
+
+description: |
+  The Mixel MIPI-DSI PHY IP block is found on e.g. i.MX8 platforms (alongside the
+  MIPI-DSI IP from Northwest Logic). It represents the physical layer for the
+  electrical signals for DSI.
+
+  The Mixel PHY IP block found on i.MX8qxp is a combo PHY that can work
+  in either MIPI-DSI PHY mode or LVDS PHY mode.
+
+properties:
+  compatible:
+    enum:
+      - fsl,imx8mq-mipi-dphy
+      - fsl,imx8qxp-mipi-dphy
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    const: phy_ref
+
+  assigned-clocks:
+    maxItems: 1
+
+  assigned-clock-parents:
+    maxItems: 1
+
+  assigned-clock-rates:
+    maxItems: 1
+
+  "#phy-cells":
+    const: 0
+
+  fsl,syscon:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: |
+      A phandle which points to the Control and Status Registers (CSR) module.
+
+  power-domains:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - "#phy-cells"
+  - power-domains
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: fsl,imx8mq-mipi-dphy
+    then:
+      properties:
+        fsl,syscon: false
+
+      required:
+        - assigned-clocks
+        - assigned-clock-parents
+        - assigned-clock-rates
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: fsl,imx8qxp-mipi-dphy
+    then:
+      properties:
+        assigned-clocks: false
+        assigned-clock-parents: false
+        assigned-clock-rates: false
+
+      required:
+        - fsl,syscon
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/imx8mq-clock.h>
+    dphy: dphy@30a0030 {
+        compatible = "fsl,imx8mq-mipi-dphy";
+        reg = <0x30a00300 0x100>;
+        clocks = <&clk IMX8MQ_CLK_DSI_PHY_REF>;
+        clock-names = "phy_ref";
+        assigned-clocks = <&clk IMX8MQ_CLK_DSI_PHY_REF>;
+        assigned-clock-parents = <&clk IMX8MQ_VIDEO_PLL1_OUT>;
+        assigned-clock-rates = <24000000>;
+        #phy-cells = <0>;
+        power-domains = <&pgc_mipi>;
+    };
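The example above covers the i.MX8MQ variant only. A hedged sketch of the i.MX8QXP combo PHY, which per the allOf block requires fsl,syscon and must not use the assigned-clock properties, might look like this; the address, clock specifier and phandles are placeholders:

    mipi_dphy: phy@56228300 {
        compatible = "fsl,imx8qxp-mipi-dphy";
        reg = <0x56228300 0x100>;
        clocks = <&clk_dsi_phy_ref>;            /* placeholder clock */
        clock-names = "phy_ref";
        fsl,syscon = <&mipi_csr>;               /* placeholder CSR syscon */
        #phy-cells = <0>;
        power-domains = <&pd_mipi>;             /* placeholder power domain */
    };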
index e20d9b0..8b850c5 100644 (file)
@@ -39,6 +39,7 @@ properties:
       - qcom,sdm845-qmp-usb3-phy
       - qcom,sdm845-qmp-usb3-uni-phy
       - qcom,sm6115-qmp-ufs-phy
+      - qcom,sm6350-qmp-ufs-phy
       - qcom,sm8150-qmp-ufs-phy
       - qcom,sm8150-qmp-usb3-phy
       - qcom,sm8150-qmp-usb3-uni-phy
@@ -57,6 +58,7 @@ properties:
       - qcom,sm8450-qmp-usb3-phy
       - qcom,sdx55-qmp-pcie-phy
       - qcom,sdx55-qmp-usb3-uni-phy
+      - qcom,sdx65-qmp-usb3-uni-phy
 
   reg:
     minItems: 1
@@ -163,6 +165,7 @@ allOf:
           contains:
             enum:
               - qcom,sdx55-qmp-usb3-uni-phy
+              - qcom,sdx65-qmp-usb3-uni-phy
     then:
       properties:
         clocks:
@@ -279,6 +282,7 @@ allOf:
             enum:
               - qcom,msm8998-qmp-ufs-phy
               - qcom,sdm845-qmp-ufs-phy
+              - qcom,sm6350-qmp-ufs-phy
               - qcom,sm8150-qmp-ufs-phy
               - qcom,sm8250-qmp-ufs-phy
               - qcom,sc8180x-qmp-ufs-phy
index 16807bb..f82649a 100644 (file)
@@ -32,6 +32,7 @@ properties:
 
       - items:
           - enum:
+              - renesas,usb2-phy-r9a07g043 # RZ/G2UL
               - renesas,usb2-phy-r9a07g044 # RZ/G2{L,LC}
               - renesas,usb2-phy-r9a07g054 # RZ/V2L
           - const: renesas,rzg2l-usb2-phy
index 3b400a8..a3cd45a 100644 (file)
@@ -30,32 +30,79 @@ properties:
     minItems: 1
     maxItems: 2
 
-  clock-names:
-    oneOf:
-      - items:          # for PXs2
-          - const: link
-      - items:          # for Pro4
-          - const: link
-          - const: gio
-      - items:          # for others
-          - const: link
-          - const: phy
+  clock-names: true
 
   resets:
     minItems: 2
-    maxItems: 5
+    maxItems: 6
 
-  reset-names:
-    oneOf:
-      - items:          # for Pro4
-          - const: link
-          - const: gio
-          - const: pm
-          - const: tx
-          - const: rx
-      - items:          # for others
-          - const: link
-          - const: phy
+  reset-names: true
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: socionext,uniphier-pro4-ahci-phy
+    then:
+      properties:
+        clocks:
+          minItems: 2
+          maxItems: 2
+        clock-names:
+          items:
+            - const: link
+            - const: gio
+        resets:
+          minItems: 6
+          maxItems: 6
+        reset-names:
+          items:
+            - const: link
+            - const: gio
+            - const: phy
+            - const: pm
+            - const: tx
+            - const: rx
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: socionext,uniphier-pxs2-ahci-phy
+    then:
+      properties:
+        clocks:
+          maxItems: 1
+        clock-names:
+          const: link
+        resets:
+          minItems: 2
+          maxItems: 2
+        reset-names:
+          items:
+            - const: link
+            - const: phy
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: socionext,uniphier-pxs3-ahci-phy
+    then:
+      properties:
+        clocks:
+          minItems: 2
+          maxItems: 2
+        clock-names:
+          items:
+            - const: link
+            - const: phy
+        resets:
+          minItems: 2
+          maxItems: 2
+        reset-names:
+          items:
+            - const: link
+            - const: phy
 
 required:
   - compatible
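To make the per-variant constraints concrete, a hedged sketch of the Pro4 branch (two clocks, six resets) could look like this; all phandles and specifier values are placeholders:

    ahci_phy: sata-phy@65700000 {
        compatible = "socionext,uniphier-pro4-ahci-phy";
        reg = <0x65700000 0x40>;
        #phy-cells = <0>;
        clocks = <&sys_clk 12>, <&sys_clk 28>;
        clock-names = "link", "gio";
        resets = <&sys_rst 12>, <&sys_rst 28>, <&ahci_rst 0>,
                 <&ahci_rst 1>, <&ahci_rst 2>, <&ahci_rst 3>;
        reset-names = "link", "gio", "phy", "pm", "tx", "rx";
    };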
index fbb71d6..b3ed2f7 100644 (file)
@@ -31,28 +31,51 @@ properties:
     minItems: 1
     maxItems: 2
 
-  clock-names:
-    oneOf:
-      - items:            # for Pro5
-          - const: gio
-          - const: link
-      - const: link       # for others
+  clock-names: true
 
   resets:
     minItems: 1
     maxItems: 2
 
-  reset-names:
-    oneOf:
-      - items:            # for Pro5
-          - const: gio
-          - const: link
-      - const: link       # for others
+  reset-names: true
 
   socionext,syscon:
     $ref: /schemas/types.yaml#/definitions/phandle
     description: A phandle to system control to set configurations for phy
 
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: socionext,uniphier-pro5-pcie-phy
+    then:
+      properties:
+        clocks:
+          minItems: 2
+          maxItems: 2
+        clock-names:
+          items:
+            - const: gio
+            - const: link
+        resets:
+          minItems: 2
+          maxItems: 2
+        reset-names:
+          items:
+            - const: gio
+            - const: link
+    else:
+      properties:
+        clocks:
+          maxItems: 1
+        clock-names:
+          const: link
+        resets:
+          maxItems: 1
+        reset-names:
+          const: link
+
 required:
   - compatible
   - reg
index 479b203..63dab91 100644 (file)
@@ -43,6 +43,9 @@ patternProperties:
       "#phy-cells":
         const: 0
 
+      vbus-supply:
+        description: A phandle to the regulator for USB VBUS, only for USB host
+
     required:
       - reg
       - "#phy-cells"
index 33946ef..21e4414 100644 (file)
@@ -31,27 +31,15 @@ properties:
     const: 0
 
   clocks:
-    minItems: 1
+    minItems: 2
     maxItems: 3
 
-  clock-names:
-    oneOf:
-      - const: link          # for PXs2
-      - items:               # for PXs3 with phy-ext
-          - const: link
-          - const: phy
-          - const: phy-ext
-      - items:               # for others
-          - const: link
-          - const: phy
+  clock-names: true
 
   resets:
     maxItems: 2
 
-  reset-names:
-    items:
-      - const: link
-      - const: phy
+  reset-names: true
 
   vbus-supply:
     description: A phandle to the regulator for USB VBUS
@@ -74,6 +62,77 @@ properties:
       required for each port, if any one is omitted, the trimming data
       of the port will not be set at all.
 
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: socionext,uniphier-pro5-usb3-hsphy
+    then:
+      properties:
+        clocks:
+          minItems: 2
+          maxItems: 2
+        clock-names:
+          items:
+            - const: gio
+            - const: link
+        resets:
+          minItems: 2
+          maxItems: 2
+        reset-names:
+          items:
+            - const: gio
+            - const: link
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - socionext,uniphier-pxs2-usb3-hsphy
+              - socionext,uniphier-ld20-usb3-hsphy
+    then:
+      properties:
+        clocks:
+          minItems: 2
+          maxItems: 2
+        clock-names:
+          items:
+            - const: link
+            - const: phy
+        resets:
+          minItems: 2
+          maxItems: 2
+        reset-names:
+          items:
+            - const: link
+            - const: phy
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - socionext,uniphier-pxs3-usb3-hsphy
+              - socionext,uniphier-nx1-usb3-hsphy
+    then:
+      properties:
+        clocks:
+          minItems: 2
+          maxItems: 3
+        clock-names:
+          minItems: 2
+          items:
+            - const: link
+            - const: phy
+            - const: phy-ext
+        resets:
+          minItems: 2
+          maxItems: 2
+        reset-names:
+          items:
+            - const: link
+            - const: phy
+
 required:
   - compatible
   - reg
index 92d46eb..4c26d2d 100644 (file)
@@ -35,33 +35,88 @@ properties:
     minItems: 2
     maxItems: 3
 
-  clock-names:
-    oneOf:
-      - items:             # for Pro4, Pro5
-          - const: gio
-          - const: link
-      - items:             # for PXs3 with phy-ext
-          - const: link
-          - const: phy
-          - const: phy-ext
-      - items:             # for others
-          - const: link
-          - const: phy
+  clock-names: true
 
   resets:
     maxItems: 2
 
-  reset-names:
-    oneOf:
-      - items:              # for Pro4,Pro5
-          - const: gio
-          - const: link
-      - items:              # for others
-          - const: link
-          - const: phy
+  reset-names: true
 
   vbus-supply:
-    description: A phandle to the regulator for USB VBUS
+    description: A phandle to the regulator for USB VBUS, only for USB host
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - socionext,uniphier-pro4-usb3-ssphy
+              - socionext,uniphier-pro5-usb3-ssphy
+    then:
+      properties:
+        clocks:
+          minItems: 2
+          maxItems: 2
+        clock-names:
+          items:
+            - const: gio
+            - const: link
+        resets:
+          minItems: 2
+          maxItems: 2
+        reset-names:
+          items:
+            - const: gio
+            - const: link
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - socionext,uniphier-pxs2-usb3-ssphy
+              - socionext,uniphier-ld20-usb3-ssphy
+    then:
+      properties:
+        clocks:
+          minItems: 2
+          maxItems: 2
+        clock-names:
+          items:
+            - const: link
+            - const: phy
+        resets:
+          minItems: 2
+          maxItems: 2
+        reset-names:
+          items:
+            - const: link
+            - const: phy
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - socionext,uniphier-pxs3-usb3-ssphy
+              - socionext,uniphier-nx1-usb3-ssphy
+    then:
+      properties:
+        clocks:
+          minItems: 2
+          maxItems: 3
+        clock-names:
+          minItems: 2
+          items:
+            - const: link
+            - const: phy
+            - const: phy-ext
+        resets:
+          minItems: 2
+          maxItems: 2
+        reset-names:
+          items:
+            - const: link
+            - const: phy
 
 required:
   - compatible
@@ -71,7 +126,6 @@ required:
   - clock-names
   - resets
   - reset-names
-  - vbus-supply
 
 additionalProperties: false
 
diff --git a/Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml b/Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml
new file mode 100644 (file)
index 0000000..ab45df8
--- /dev/null
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2022 Microchip Technology, Inc. and its subsidiaries
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/atmel,at91sam-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel/Microchip PWM controller
+
+maintainers:
+  - Claudiu Beznea <claudiu.beznea@microchip.com>
+
+allOf:
+  - $ref: "pwm.yaml#"
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+          - enum:
+              - atmel,at91sam9rl-pwm
+              - atmel,sama5d3-pwm
+              - atmel,sama5d2-pwm
+              - microchip,sam9x60-pwm
+      - items:
+          - const: microchip,sama7g5-pwm
+          - const: atmel,sama5d2-pwm
+
+  reg:
+    maxItems: 1
+
+  "#pwm-cells":
+    const: 3
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    pwm0: pwm@f8034000 {
+        compatible = "atmel,at91sam9rl-pwm";
+        reg = <0xf8034000 0x400>;
+        #pwm-cells = <3>;
+    };
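The removed text file below also carried a pwm-leds consumer example showing how the three PWM specifier cells are used; adapted here as a sketch (with the missing semicolons added), it is not part of the new schema's examples:

    pwmleds {
        compatible = "pwm-leds";

        d1 {
            label = "d1";
            pwms = <&pwm0 3 5000 0>;
            max-brightness = <255>;
        };

        d2 {
            label = "d2";
            pwms = <&pwm0 1 5000 1>;
            max-brightness = <255>;
        };
    };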
diff --git a/Documentation/devicetree/bindings/pwm/atmel-pwm.txt b/Documentation/devicetree/bindings/pwm/atmel-pwm.txt
deleted file mode 100644 (file)
index fbb5325..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-Atmel PWM controller
-
-Required properties:
-  - compatible: should be one of:
-    - "atmel,at91sam9rl-pwm"
-    - "atmel,sama5d3-pwm"
-    - "atmel,sama5d2-pwm"
-    - "microchip,sam9x60-pwm"
-  - reg: physical base address and length of the controller's registers
-  - #pwm-cells: Should be 3. See pwm.yaml in this directory for a
-    description of the cells format.
-
-Example:
-
-       pwm0: pwm@f8034000 {
-               compatible = "atmel,at91sam9rl-pwm";
-               reg = <0xf8034000 0x400>;
-               #pwm-cells = <3>;
-       };
-
-       pwmleds {
-               compatible = "pwm-leds";
-
-               d1 {
-                       label = "d1";
-                       pwms = <&pwm0 3 5000 0>
-                       max-brightness = <255>;
-               };
-
-               d2 {
-                       label = "d2";
-                       pwms = <&pwm0 1 5000 1>
-                       max-brightness = <255>;
-               };
-       };
index 7ab6912..c8577bd 100644 (file)
@@ -21,7 +21,14 @@ allOf:
 
 properties:
   compatible:
-    const: google,cros-ec-pwm
+    oneOf:
+      - description: PWM controlled using EC_PWM_TYPE_GENERIC channels.
+        items:
+          - const: google,cros-ec-pwm
+      - description: PWM controlled using CROS_EC_PWM_DT_<...> types.
+        items:
+          - const: google,cros-ec-pwm-type
+
   "#pwm-cells":
     description: The cell specifies the PWM index.
     const: 1
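A hedged sketch of a node using the newly added compatible, assumed to sit under the ChromeOS EC parent node, could look like this:

    cros_ec_pwm: pwm {
        compatible = "google,cros-ec-pwm-type";
        #pwm-cells = <1>;
    };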
diff --git a/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml b/Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml
new file mode 100644 (file)
index 0000000..e4fe2d1
--- /dev/null
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/mediatek,pwm-disp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek DISP_PWM Controller Device Tree Bindings
+
+maintainers:
+  - Jitao Shi <jitao.shi@mediatek.com>
+  - Xinlei Lee <xinlei.lee@mediatek.com>
+
+allOf:
+  - $ref: pwm.yaml#
+
+properties:
+  compatible:
+    oneOf:
+      - enum:
+          - mediatek,mt2701-disp-pwm
+          - mediatek,mt6595-disp-pwm
+          - mediatek,mt8173-disp-pwm
+          - mediatek,mt8183-disp-pwm
+      - items:
+          - const: mediatek,mt8167-disp-pwm
+          - const: mediatek,mt8173-disp-pwm
+      - items:
+          - enum:
+              - mediatek,mt8186-disp-pwm
+              - mediatek,mt8192-disp-pwm
+              - mediatek,mt8195-disp-pwm
+          - const: mediatek,mt8183-disp-pwm
+
+  reg:
+    maxItems: 1
+
+  "#pwm-cells":
+    const: 2
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Main clock, used to generate the PWM signals
+      - description: MM clock, sync signals from the mmsys modules
+
+  clock-names:
+    items:
+      - const: main
+      - const: mm
+
+required:
+  - compatible
+  - reg
+  - "#pwm-cells"
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/mt8173-clk.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    pwm0: pwm@1401e000 {
+        compatible = "mediatek,mt8173-disp-pwm";
+        reg = <0x1401e000 0x1000>;
+        #pwm-cells = <2>;
+        clocks = <&mmsys CLK_MM_DISP_PWM026M>,
+                 <&mmsys CLK_MM_DISP_PWM0MM>;
+        clock-names = "main", "mm";
+    };
index 25ed214..033d1fc 100644 (file)
@@ -3,6 +3,7 @@ MediaTek PWM controller
 Required properties:
  - compatible: should be "mediatek,<name>-pwm":
    - "mediatek,mt2712-pwm": found on mt2712 SoC.
+   - "mediatek,mt6795-pwm": found on mt6795 SoC.
    - "mediatek,mt7622-pwm": found on mt7622 SoC.
    - "mediatek,mt7623-pwm": found on mt7623 SoC.
    - "mediatek,mt7628-pwm": found on mt7628 SoC.
diff --git a/Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt b/Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt
deleted file mode 100644 (file)
index 691e58b..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-MediaTek display PWM controller
-
-Required properties:
- - compatible: should be "mediatek,<name>-disp-pwm":
-   - "mediatek,mt2701-disp-pwm": found on mt2701 SoC.
-   - "mediatek,mt6595-disp-pwm": found on mt6595 SoC.
-   - "mediatek,mt8167-disp-pwm", "mediatek,mt8173-disp-pwm": found on mt8167 SoC.
-   - "mediatek,mt8173-disp-pwm": found on mt8173 SoC.
-   - "mediatek,mt8183-disp-pwm": found on mt8183 SoC.$
- - reg: physical base address and length of the controller's registers.
- - #pwm-cells: must be 2. See pwm.yaml in this directory for a description of
-   the cell format.
- - clocks: phandle and clock specifier of the PWM reference clock.
- - clock-names: must contain the following:
-   - "main": clock used to generate PWM signals.
-   - "mm": sync signals from the modules of mmsys.
- - pinctrl-names: Must contain a "default" entry.
- - pinctrl-0: One property must exist for each entry in pinctrl-names.
-   See pinctrl/pinctrl-bindings.txt for details of the property values.
-
-Example:
-       pwm0: pwm@1401e000 {
-               compatible = "mediatek,mt8173-disp-pwm",
-                            "mediatek,mt6595-disp-pwm";
-               reg = <0 0x1401e000 0 0x1000>;
-               #pwm-cells = <2>;
-               clocks = <&mmsys CLK_MM_DISP_PWM026M>,
-                        <&mmsys CLK_MM_DISP_PWM0MM>;
-               clock-names = "main", "mm";
-               pinctrl-names = "default";
-               pinctrl-0 = <&disp_pwm0_pins>;
-       };
-
-       backlight_lcd: backlight_lcd {
-               compatible = "pwm-backlight";
-               pwms = <&pwm0 0 1000000>;
-               brightness-levels = <
-                         0  16  32  48  64  80  96 112
-                       128 144 160 176 192 208 224 240
-                       255
-               >;
-               default-brightness-level = <9>;
-               power-supply = <&mt6397_vio18_reg>;
-               enable-gpios = <&pio 95 GPIO_ACTIVE_HIGH>;
-       };
diff --git a/Documentation/devicetree/bindings/pwm/sunplus,sp7021-pwm.yaml b/Documentation/devicetree/bindings/pwm/sunplus,sp7021-pwm.yaml
new file mode 100644 (file)
index 0000000..d4fc9e8
--- /dev/null
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) Sunplus Co., Ltd. 2021
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/sunplus,sp7021-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sunplus SoC SP7021 PWM Controller
+
+maintainers:
+  - Hammer Hsieh <hammerh0314@gmail.com>
+
+allOf:
+  - $ref: pwm.yaml#
+
+properties:
+  compatible:
+    const: sunplus,sp7021-pwm
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  '#pwm-cells':
+    const: 2
+
+unevaluatedProperties: false
+
+required:
+  - reg
+  - clocks
+
+examples:
+  - |
+    pwm: pwm@9c007a00 {
+      compatible = "sunplus,sp7021-pwm";
+      reg = <0x9c007a00 0x80>;
+      clocks = <&clkc 0xa2>;
+      #pwm-cells = <2>;
+    };
index 5d2d989..37402c3 100644 (file)
@@ -55,7 +55,7 @@ examples:
           regulator-min-microvolt = <300000>;
           regulator-max-microvolt = <1193750>;
           regulator-enable-ramp-delay = <256>;
-          regulator-allowed-modes = <0 1 2 4>;
+          regulator-allowed-modes = <0 1 2>;
         };
 
         vbuck3 {
@@ -63,7 +63,7 @@ examples:
           regulator-min-microvolt = <300000>;
           regulator-max-microvolt = <1193750>;
           regulator-enable-ramp-delay = <256>;
-          regulator-allowed-modes = <0 1 2 4>;
+          regulator-allowed-modes = <0 1 2>;
         };
       };
     };
index fc16d90..3a1f59a 100644 (file)
@@ -15,14 +15,15 @@ maintainers:
 properties:
   compatible:
     enum:
-      - fsl,imx8mq-cm4
+      - fsl,imx6sx-cm4
+      - fsl,imx7d-cm4
+      - fsl,imx7ulp-cm4
       - fsl,imx8mm-cm4
       - fsl,imx8mn-cm7
       - fsl,imx8mp-cm7
+      - fsl,imx8mq-cm4
       - fsl,imx8ulp-cm33
-      - fsl,imx7d-cm4
-      - fsl,imx7ulp-cm4
-      - fsl,imx6sx-cm4
+      - fsl,imx93-cm33
 
   clocks:
     maxItems: 1
index 5b693a2..eec3b9c 100644 (file)
@@ -23,11 +23,13 @@ properties:
 
   reg:
     description:
-      Should contain the address ranges for memory regions SRAM, CFG, and
-      L1TCM.
+      Should contain the address ranges for memory regions SRAM, CFG, and,
+      on some platforms, L1TCM.
+    minItems: 2
     maxItems: 3
 
   reg-names:
+    minItems: 2
     items:
       - const: sram
       - const: cfg
@@ -42,21 +44,48 @@ properties:
   clock-names:
     const: main
 
+  interrupts:
+    maxItems: 1
+
+  firmware-name:
+    $ref: /schemas/types.yaml#/definitions/string
+    description:
+      If present, name (or relative path) of the file within the
+      firmware search path containing the firmware image used when
+      initializing SCP.
+
+  memory-region:
+    maxItems: 1
+
 required:
   - compatible
   - reg
   - reg-names
 
-if:
-  properties:
-    compatible:
-      enum:
-        - mediatek,mt8183-scp
-        - mediatek,mt8192-scp
-then:
-  required:
-    - clocks
-    - clock-names
+allOf:
+  - if:
+      properties:
+        compatible:
+          enum:
+            - mediatek,mt8183-scp
+            - mediatek,mt8192-scp
+    then:
+      required:
+        - clocks
+        - clock-names
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - mediatek,mt8183-scp
+            - mediatek,mt8186-scp
+    then:
+      properties:
+        reg:
+          maxItems: 2
+        reg-names:
+          maxItems: 2
 
 additionalProperties:
   type: object
@@ -76,10 +105,10 @@ additionalProperties:
 
 examples:
   - |
-    #include <dt-bindings/clock/mt8183-clk.h>
+    #include <dt-bindings/clock/mt8192-clk.h>
 
     scp@10500000 {
-        compatible = "mediatek,mt8183-scp";
+        compatible = "mediatek,mt8192-scp";
         reg = <0x10500000 0x80000>,
               <0x10700000 0x8000>,
               <0x10720000 0xe0000>;
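With reg and reg-names now allowing two entries, a hedged sketch of an SCP without the L1TCM region (for example on MT8186) might look like this; the addresses, firmware path and reserved-memory phandle are placeholders:

    scp@10500000 {
        compatible = "mediatek,mt8186-scp";
        reg = <0x10500000 0x40000>,
              <0x10700000 0x8000>;
        reg-names = "sram", "cfg";
        firmware-name = "mediatek/mt8186/scp.img";   /* placeholder path */
        memory-region = <&scp_mem_reserved>;         /* placeholder phandle */
    };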
index a4409c3..947f945 100644 (file)
@@ -16,6 +16,7 @@ description:
 properties:
   compatible:
     enum:
+      - qcom,msm8226-adsp-pil
       - qcom,msm8974-adsp-pil
       - qcom,msm8996-adsp-pil
       - qcom,msm8996-slpi-pil
@@ -29,6 +30,9 @@ properties:
       - qcom,sc8180x-adsp-pas
       - qcom,sc8180x-cdsp-pas
       - qcom,sc8180x-mpss-pas
+      - qcom,sc8280xp-adsp-pas
+      - qcom,sc8280xp-nsp0-pas
+      - qcom,sc8280xp-nsp1-pas
       - qcom,sdm660-adsp-pas
       - qcom,sdm845-adsp-pas
       - qcom,sdm845-cdsp-pas
@@ -159,6 +163,7 @@ allOf:
         compatible:
           contains:
             enum:
+              - qcom,msm8226-adsp-pil
               - qcom,msm8974-adsp-pil
               - qcom,msm8996-adsp-pil
               - qcom,msm8996-slpi-pil
@@ -169,6 +174,9 @@ allOf:
               - qcom,sc8180x-adsp-pas
               - qcom,sc8180x-cdsp-pas
               - qcom,sc8180x-mpss-pas
+              - qcom,sc8280xp-adsp-pas
+              - qcom,sc8280xp-nsp0-pas
+              - qcom,sc8280xp-nsp1-pas
               - qcom,sdm845-adsp-pas
               - qcom,sdm845-cdsp-pas
               - qcom,sm6350-adsp-pas
@@ -274,6 +282,7 @@ allOf:
         compatible:
           contains:
             enum:
+              - qcom,msm8226-adsp-pil
               - qcom,msm8974-adsp-pil
               - qcom,msm8996-adsp-pil
               - qcom,msm8996-slpi-pil
@@ -284,6 +293,9 @@ allOf:
               - qcom,qcs404-wcss-pas
               - qcom,sc8180x-adsp-pas
               - qcom,sc8180x-cdsp-pas
+              - qcom,sc8280xp-adsp-pas
+              - qcom,sc8280xp-nsp0-pas
+              - qcom,sc8280xp-nsp1-pas
               - qcom,sdm845-adsp-pas
               - qcom,sdm845-cdsp-pas
               - qcom,sm6350-adsp-pas
@@ -364,6 +376,7 @@ allOf:
         compatible:
           contains:
             enum:
+              - qcom,msm8226-adsp-pil
               - qcom,msm8996-adsp-pil
               - qcom,msm8998-adsp-pas
     then:
@@ -471,6 +484,7 @@ allOf:
             enum:
               - qcom,sc8180x-adsp-pas
               - qcom,sc8180x-cdsp-pas
+              - qcom,sc8280xp-adsp-pas
               - qcom,sm6350-adsp-pas
               - qcom,sm8150-slpi-pas
               - qcom,sm8250-adsp-pas
@@ -513,6 +527,22 @@ allOf:
         compatible:
           contains:
             enum:
+              - qcom,sc8280xp-nsp0-pas
+              - qcom,sc8280xp-nsp1-pas
+    then:
+      properties:
+        power-domains:
+          items:
+            - description: NSP power domain
+        power-domain-names:
+          items:
+            - const: nsp
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
               - qcom,qcs404-cdsp-pas
     then:
       properties:
@@ -546,6 +576,7 @@ allOf:
         compatible:
           contains:
             enum:
+              - qcom,msm8226-adsp-pil
               - qcom,msm8974-adsp-pil
               - qcom,msm8996-adsp-pil
               - qcom,msm8996-slpi-pil
index be3d9b0..da50f0e 100644 (file)
@@ -43,8 +43,8 @@ properties:
     items:
       - items:
           - description: Phandle of syscon block
-          - description: FIXME
-          - description: FIXME
+          - description: The offset of the trust zone setting register
+          - description: The field mask of the trust zone state
 
   interrupts:
     description: Should contain the WWDG1 watchdog reset interrupt
@@ -101,8 +101,8 @@ properties:
     items:
       - items:
           - description: Phandle of syscon block
-          - description: FIXME
-          - description: FIXME
+          - description: The offset of the power setting register
+          - description: The field mask of the PDDS selection
 
   st,syscfg-m4-state:
     $ref: "/schemas/types.yaml#/definitions/phandle-array"
@@ -111,8 +111,8 @@ properties:
     items:
       - items:
           - description: Phandle of syscon block with the tamp register
-          - description: FIXME
-          - description: FIXME
+          - description: The offset of the tamp register
+          - description: The field mask of the Cortex-M4 state
 
   st,syscfg-rsc-tbl:
     $ref: "/schemas/types.yaml#/definitions/phandle-array"
@@ -122,8 +122,8 @@ properties:
     items:
       - items:
           - description: Phandle of syscon block with the tamp register
-          - description: FIXME
-          - description: FIXME
+          - description: The offset of the tamp register
+          - description: The field mask of the Cortex-M4 resource table address
 
   st,auto-boot:
     $ref: /schemas/types.yaml#/definitions/flag
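Each of these phandle-array properties pairs a syscon phandle with a register offset and a bit-field mask. A hedged fragment showing how the three-cell entries read in a DTS (offsets and masks are illustrative, other required properties of the remoteproc node are omitted):

    m4_rproc: m4@10000000 {
        compatible = "st,stm32mp1-m4";
        /* <syscon phandle, register offset, field mask> */
        st,syscfg-pdds     = <&pwr_mcu 0x0 0x1>;
        st,syscfg-m4-state = <&tamp 0x148 0xffffffff>;
        st,syscfg-rsc-tbl  = <&tamp 0x144 0xffffffff>;
    };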
index a054757..d92e2b3 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm AOSS Reset Controller
 
 maintainers:
-  - Sibi Sankar <sibis@codeaurora.org>
+  - Sibi Sankar <quic_sibis@quicinc.com>
 
 description:
   The bindings describe the reset-controller found on AOSS-CC (always on
index 831ea8d..ca5d793 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm PDC Global
 
 maintainers:
-  - Sibi Sankar <sibis@codeaurora.org>
+  - Sibi Sankar <quic_sibis@quicinc.com>
 
 description:
   The bindings describes the reset-controller found on PDC-Global (Power Domain
index 3f981e8..1aa7336 100644 (file)
@@ -20,6 +20,8 @@ properties:
     items:
       - enum:
           - microchip,mpfs-icicle-kit
+          - microchip,mpfs-icicle-reference-rtlv2203
+          - sundance,polarberry
       - const: microchip,mpfs
 
 additionalProperties: true
index 6439682..217b7cd 100644 (file)
@@ -2,6 +2,7 @@
 
 Required properties:
 - compatible: Should contain one of:
+       "nxp,pca85073a",
        "nxp,pcf85063",
        "nxp,pcf85063a",
        "nxp,pcf85063tp",
diff --git a/Documentation/devicetree/bindings/rtc/renesas,rzn1-rtc.yaml b/Documentation/devicetree/bindings/rtc/renesas,rzn1-rtc.yaml
new file mode 100644 (file)
index 0000000..2d4741f
--- /dev/null
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/renesas,rzn1-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas RZ/N1 SoCs Real-Time Clock DT bindings
+
+maintainers:
+  - Miquel Raynal <miquel.raynal@bootlin.com>
+
+allOf:
+  - $ref: rtc.yaml#
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - renesas,r9a06g032-rtc
+      - const: renesas,rzn1-rtc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    minItems: 3
+    maxItems: 3
+
+  interrupt-names:
+    items:
+      - const: alarm
+      - const: timer
+      - const: pps
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    const: hclk
+
+  power-domains:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - interrupt-names
+  - clocks
+  - clock-names
+  - power-domains
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/r9a06g032-sysctrl.h>
+    rtc@40006000 {
+       compatible = "renesas,r9a06g032-rtc", "renesas,rzn1-rtc";
+       reg = <0x40006000 0x1000>;
+       interrupts = <GIC_SPI 66 IRQ_TYPE_EDGE_RISING>,
+                    <GIC_SPI 67 IRQ_TYPE_EDGE_RISING>,
+                    <GIC_SPI 68 IRQ_TYPE_EDGE_RISING>;
+       interrupt-names = "alarm", "timer", "pps";
+       clocks = <&sysctrl R9A06G032_HCLK_RTC>;
+       clock-names = "hclk";
+       power-domains = <&sysctrl>;
+       start-year = <2000>;
+     };
index ff364bd..30eaa62 100644 (file)
@@ -23,7 +23,9 @@ properties:
           - fsl,imx8qxp-lpuart
           - fsl,imxrt1050-lpuart
       - items:
-          - const: fsl,imx8ulp-lpuart
+          - enum:
+              - fsl,imx93-lpuart
+              - fsl,imx8ulp-lpuart
           - const: fsl,imx7ulp-lpuart
       - items:
           - enum:
diff --git a/Documentation/devicetree/bindings/serial/qcom,serial-geni-qcom.yaml b/Documentation/devicetree/bindings/serial/qcom,serial-geni-qcom.yaml
new file mode 100644 (file)
index 0000000..05a6999
--- /dev/null
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/serial/qcom,serial-geni-qcom.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm Geni based QUP UART interface
+
+maintainers:
+  - Andy Gross <agross@kernel.org>
+  - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+allOf:
+  - $ref: /schemas/serial/serial.yaml#
+
+properties:
+  compatible:
+    enum:
+      - qcom,geni-uart
+      - qcom,geni-debug-uart
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    const: se
+
+  interconnects:
+    maxItems: 2
+
+  interconnect-names:
+    items:
+      - const: qup-core
+      - const: qup-config
+
+  interrupts:
+    minItems: 1
+    items:
+      - description: UART core irq
+      - description: Wakeup irq (RX GPIO)
+
+  operating-points-v2: true
+
+  pinctrl-0: true
+  pinctrl-1: true
+
+  pinctrl-names:
+    minItems: 1
+    items:
+      - const: default
+      - const: sleep
+
+  power-domains:
+    maxItems: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - clocks
+  - clock-names
+  - interrupts
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/qcom,gcc-sc7180.h>
+    #include <dt-bindings/interconnect/qcom,sc7180.h>
+
+    serial@a88000 {
+        compatible = "qcom,geni-uart";
+        reg = <0xa88000 0x7000>;
+        interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+        clock-names = "se";
+        clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+        pinctrl-0 = <&qup_uart0_default>;
+        pinctrl-names = "default";
+        interconnects = <&qup_virt MASTER_QUP_CORE_0 0 &qup_virt SLAVE_QUP_CORE_0 0>,
+                        <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_0 0>;
+        interconnect-names = "qup-core", "qup-config";
+    };
+...
index e98ec48..b25aca7 100644 (file)
@@ -9,12 +9,16 @@ title: Renesas EMMA Mobile UART Interface
 maintainers:
   - Magnus Damm <magnus.damm@gmail.com>
 
-allOf:
-  - $ref: serial.yaml#
-
 properties:
   compatible:
-    const: renesas,em-uart
+    oneOf:
+      - items:
+          - enum:
+              - renesas,r9a09g011-uart    # RZ/V2M
+          - const: renesas,em-uart        # generic EMMA Mobile compatible UART
+
+      - items:
+          - const: renesas,em-uart        # generic EMMA Mobile compatible UART
 
   reg:
     maxItems: 1
@@ -23,10 +27,31 @@ properties:
     maxItems: 1
 
   clocks:
-    maxItems: 1
+    minItems: 1
+    items:
+      - description: UART functional clock
+      - description: Internal clock to access the registers
 
   clock-names:
-    const: sclk
+    minItems: 1
+    items:
+      - const: sclk
+      - const: pclk
+
+allOf:
+  - $ref: serial.yaml#
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: renesas,r9a09g011-uart
+    then:
+      properties:
+        clocks:
+          minItems: 2
+        clock-names:
+          minItems: 2
 
 required:
   - compatible
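A hedged sketch of the RZ/V2M variant with both clocks, matching the new constraints; the address, interrupt and clock specifiers are placeholders:

    uart0: serial@a4040000 {
        compatible = "renesas,r9a09g011-uart", "renesas,em-uart";
        reg = <0xa4040000 0x80>;
        interrupts = <0 240 4>;                  /* placeholder GIC SPI */
        clocks = <&cpg 9>, <&cpg 3>;             /* placeholder specifiers */
        clock-names = "sclk", "pclk";
    };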
index ee9804c..87180d9 100644 (file)
@@ -51,10 +51,16 @@ properties:
               - renesas,hscif-r8a77980     # R-Car V3H
               - renesas,hscif-r8a77990     # R-Car E3
               - renesas,hscif-r8a77995     # R-Car D3
-              - renesas,hscif-r8a779a0     # R-Car V3U
           - const: renesas,rcar-gen3-hscif # R-Car Gen3 and RZ/G2
           - const: renesas,hscif           # generic HSCIF compatible UART
 
+      - items:
+          - enum:
+              - renesas,hscif-r8a779a0     # R-Car V3U
+              - renesas,hscif-r8a779g0     # R-Car V4H
+          - const: renesas,rcar-gen4-hscif # R-Car Gen4
+          - const: renesas,hscif           # generic HSCIF compatible UART
+
   reg:
     maxItems: 1
 
@@ -113,6 +119,7 @@ if:
         enum:
           - renesas,rcar-gen2-hscif
           - renesas,rcar-gen3-hscif
+          - renesas,rcar-gen4-hscif
 then:
   required:
     - resets
index 5d37f8f..90fe452 100644 (file)
@@ -60,12 +60,12 @@ properties:
               - renesas,scif-r8a77980     # R-Car V3H
               - renesas,scif-r8a77990     # R-Car E3
               - renesas,scif-r8a77995     # R-Car D3
-              - renesas,scif-r8a779a0     # R-Car V3U
           - const: renesas,rcar-gen3-scif # R-Car Gen3 and RZ/G2
           - const: renesas,scif           # generic SCIF compatible UART
 
       - items:
           - enum:
+              - renesas,scif-r8a779a0     # R-Car V3U
               - renesas,scif-r8a779f0     # R-Car S4-8
           - const: renesas,rcar-gen4-scif # R-Car Gen4
           - const: renesas,scif           # generic SCIF compatible UART
index 0c9fa69..f2c9c9f 100644 (file)
@@ -33,6 +33,11 @@ properties:
     description: drive RTS low when sending (default is high).
     $ref: /schemas/types.yaml#/definitions/flag
 
+  rs485-rx-active-high:
+    description: Polarity of receiver enable signal (when separate from RTS).
+      True indicates active high (default is low).
+    $ref: /schemas/types.yaml#/definitions/flag
+
   linux,rs485-enabled-at-boot-time:
     description: enables the rs485 feature at boot time. It can be disabled
       later with proper ioctl.
index d490c7c..3d01cc3 100644 (file)
@@ -20,7 +20,10 @@ properties:
     maxItems: 1
 
   clocks:
-    minItems: 1
+    maxItems: 1
+
+  resets:
+    maxItems: 1
 
   auto-flow-control:
     description: enable automatic flow control support.
index 21d3ee4..b246d83 100644 (file)
@@ -88,7 +88,7 @@ examples:
                         <&mediamix_pd>, <&ispdwp_pd>, <&ispdwp_pd>,
                         <&mipi_phy2_pd>;
         power-domain-names = "bus", "mipi-dsi1", "mipi-csi1", "lcdif1", "isi",
-                             "mipi-csi2", "lcdif2", "isp1", "dwe", "mipi-dsi2";
+                             "mipi-csi2", "lcdif2", "isp", "dwe", "mipi-dsi2";
         clocks = <&clk IMX8MP_CLK_MEDIA_APB_ROOT>,
                  <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>,
                  <&clk IMX8MP_CLK_MEDIA_CAM1_PIX_ROOT>,
diff --git a/Documentation/devicetree/bindings/soc/intel/intel,hps-copy-engine.yaml b/Documentation/devicetree/bindings/soc/intel/intel,hps-copy-engine.yaml
new file mode 100644 (file)
index 0000000..8634865
--- /dev/null
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (C) 2022, Intel Corporation
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/soc/intel/intel,hps-copy-engine.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Intel HPS Copy Engine
+
+maintainers:
+  - Matthew Gerlach <matthew.gerlach@linux.intel.com>
+
+description: |
+  The Intel Hard Processor System (HPS) Copy Engine is an IP block used to copy
+  a bootable image from host memory to HPS DDR.  Additionally, there is a
+  register the HPS can use to indicate the boot state of the copied image as
+  well as a keep-alive indication to the host.
+
+properties:
+  compatible:
+    const: intel,hps-copy-engine
+
+  '#dma-cells':
+    const: 1
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    bus@80000000 {
+        compatible = "simple-bus";
+        reg = <0x80000000 0x60000000>,
+              <0xf9000000 0x00100000>;
+        reg-names = "axi_h2f", "axi_h2f_lw";
+        #address-cells = <2>;
+        #size-cells = <1>;
+        ranges = <0x00000000 0x00000000 0xf9000000 0x00001000>;
+
+        dma-controller@0 {
+            compatible = "intel,hps-copy-engine";
+            reg = <0x00000000 0x00000000 0x00001000>;
+            #dma-cells = <1>;
+        };
+    };
index e6f9ffa..bca07bb 100644 (file)
@@ -66,9 +66,7 @@ patternProperties:
           The identifier for the remote processor as known by the rest of the
           system.
 
-    # Binding for edge subnodes is not complete
-    patternProperties:
-      "^rpm-requests$":
+      rpm-requests:
         type: object
         description:
           In turn, subnodes of the "edges" represent devices tied to SMD
index 51ddbc5..c85c257 100644 (file)
@@ -162,6 +162,18 @@ board specific bus parameters.
                    or applicable for the respective data port.
                    More info in MIPI Alliance SoundWire 1.0 Specifications.
 
+- resets:
+       Usage: optional
+       Value type: <prop-encoded-array>
+       Definition: Should specify the SoundWire audio CSR reset controller interface,
+                   which is required for SoundWire version 1.6.0 and above.
+
+- reset-names:
+       Usage: optional
+       Value type: <stringlist>
+       Definition: should be "swr_audio_cgcr" for SoundWire audio CSR reset
+                   controller interface.
+
 Note:
        More Information on detail of encoding of these fields can be
 found in MIPI Alliance SoundWire 1.0 Specifications.
@@ -180,6 +192,8 @@ soundwire: soundwire@c85 {
        interrupts = <20 IRQ_TYPE_EDGE_RISING>;
        clocks = <&wcc>;
        clock-names = "iface";
+       resets = <&lpass_audiocc LPASS_AUDIO_SWR_TX_CGCR>;
+       reset-names = "swr_audio_cgcr";
        #sound-dai-cells = <1>;
        qcom,dports-type = <0>;
        qcom,dout-ports = <6>;
diff --git a/Documentation/devicetree/bindings/timer/hpe,gxp-timer.yaml b/Documentation/devicetree/bindings/timer/hpe,gxp-timer.yaml
new file mode 100644 (file)
index 0000000..d33d90f
--- /dev/null
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/hpe,gxp-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HPE GXP Timer
+
+maintainers:
+  - Nick Hawkins <nick.hawkins@hpe.com>
+  - Jean-Marie Verdun <verdun@hpe.com>
+
+properties:
+  compatible:
+    const: hpe,gxp-timer
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    const: iop
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    timer@c0000000 {
+        compatible = "hpe,gxp-timer";
+        reg = <0x80 0x16>;
+        interrupts = <0>;
+        interrupt-parent = <&vic0>;
+        clocks = <&iopclk>;
+        clock-names = "iop";
+    };
index fbd76a8..6f1f9db 100644 (file)
@@ -23,6 +23,7 @@ Required properties:
 
        For those SoCs that use SYST
        * "mediatek,mt8183-timer" for MT8183 compatible timers (SYST)
+       * "mediatek,mt8186-timer" for MT8186 compatible timers (SYST)
        * "mediatek,mt8192-timer" for MT8192 compatible timers (SYST)
        * "mediatek,mt8195-timer" for MT8195 compatible timers (SYST)
        * "mediatek,mt7629-timer" for MT7629 compatible timers (SYST)
diff --git a/Documentation/devicetree/bindings/timer/xlnx,xps-timer.yaml b/Documentation/devicetree/bindings/timer/xlnx,xps-timer.yaml
new file mode 100644 (file)
index 0000000..dd168d4
--- /dev/null
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/xlnx,xps-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx LogiCORE IP AXI Timer Device Tree Binding
+
+maintainers:
+  - Sean Anderson <sean.anderson@seco.com>
+
+properties:
+  compatible:
+    contains:
+      const: xlnx,xps-timer-1.00.a
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    const: s_axi_aclk
+
+  interrupts:
+    maxItems: 1
+
+  reg:
+    maxItems: 1
+
+  '#pwm-cells': true
+
+  xlnx,count-width:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    enum: [8, 16, 32]
+    default: 32
+    description:
+      The width of the counter(s), in bits.
+
+  xlnx,one-timer-only:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    enum: [ 0, 1 ]
+    description:
+      Whether only one timer is present in this block.
+
+required:
+  - compatible
+  - reg
+  - xlnx,one-timer-only
+
+allOf:
+  - if:
+      required:
+        - '#pwm-cells'
+    then:
+      allOf:
+        - required:
+            - clocks
+        - properties:
+            xlnx,one-timer-only:
+              const: 0
+    else:
+      required:
+        - interrupts
+  - if:
+      required:
+        - clocks
+    then:
+      required:
+        - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    timer@800e0000 {
+        clock-names = "s_axi_aclk";
+        clocks = <&zynqmp_clk 71>;
+        compatible = "xlnx,xps-timer-1.00.a";
+        reg = <0x800e0000 0x10000>;
+        interrupts = <0 39 2>;
+        xlnx,count-width = <16>;
+        xlnx,one-timer-only = <0x0>;
+    };
+
+    timer@800f0000 {
+        #pwm-cells = <0>;
+        clock-names = "s_axi_aclk";
+        clocks = <&zynqmp_clk 71>;
+        compatible = "xlnx,xps-timer-1.00.a";
+        reg = <0x800e0000 0x10000>;
+        xlnx,count-width = <32>;
+        xlnx,one-timer-only = <0x0>;
+    };
diff --git a/Documentation/devicetree/bindings/timestamp/hardware-timestamps-common.yaml b/Documentation/devicetree/bindings/timestamp/hardware-timestamps-common.yaml
new file mode 100644 (file)
index 0000000..fd6a7b5
--- /dev/null
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timestamp/hardware-timestamps-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Hardware timestamp providers
+
+maintainers:
+  - Dipen Patel <dipenp@nvidia.com>
+
+description:
+  Some devices/SoCs have hardware timestamp engines (HTE) which can timestamp
+  entities in hardware in real time. The entity could be anything from GPIOs
+  and IRQs to buses and so on. The hardware timestamp engine presents itself
+  as a provider with the bindings described in this document.
+
+properties:
+  $nodename:
+    pattern: "^timestamp(@.*|-[0-9a-f])?$"
+
+  "#timestamp-cells":
+    description:
+      Number of cells in a HTE specifier.
+
+required:
+  - "#timestamp-cells"
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/timestamp/hte-consumer.yaml b/Documentation/devicetree/bindings/timestamp/hte-consumer.yaml
new file mode 100644 (file)
index 0000000..6456515
--- /dev/null
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timestamp/hte-consumer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HTE Consumer Device Tree Bindings
+
+maintainers:
+  - Dipen Patel <dipenp@nvidia.com>
+
+select: true
+
+properties:
+  timestamps:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    description:
+      The list of HTE provider phandles. In each entry, the first cell is the
+      provider phandle, followed by the line identifiers. The meaning of the
+      line identifiers and the exact number of arguments must be specified in
+      the HTE provider device tree binding document.
+
+  timestamp-names:
+    $ref: /schemas/types.yaml#/definitions/string-array
+    description:
+      An optional string property to label each line specifier present in the
+      timestamps property.
+
+dependencies:
+  timestamp-names: [ timestamps ]
+
+additionalProperties: true
+
+examples:
+  - |
+    hte_tegra_consumer {
+              timestamps = <&tegra_hte_aon 0x9>, <&tegra_hte_lic 0x19>;
+              timestamp-names = "hte-gpio", "hte-i2c";
+    };
diff --git a/Documentation/devicetree/bindings/timestamp/nvidia,tegra194-hte.yaml b/Documentation/devicetree/bindings/timestamp/nvidia,tegra194-hte.yaml
new file mode 100644 (file)
index 0000000..c31e207
--- /dev/null
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timestamp/nvidia,tegra194-hte.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Tegra194 on chip generic hardware timestamping engine (HTE)
+
+maintainers:
+  - Dipen Patel <dipenp@nvidia.com>
+
+description:
+  The Tegra SoC has two instances of the generic hardware timestamping engine
+  (GTE), known as GTE GPIO and GTE IRQ, which monitor a subset of GPIO and
+  on-chip IRQ lines, respectively, for state changes. Upon detection, the
+  engine records a timestamp (taken from the system counter) in its internal
+  hardware FIFO. It has a bitmap array arranged in 32-bit slices where each
+  bit represents a signal/line that can be enabled or disabled for hardware
+  timestamping. The GTE GPIO instance monitors GPIO lines from the AON
+  (always on) GPIO controller.
+
+properties:
+  compatible:
+    enum:
+      - nvidia,tegra194-gte-aon
+      - nvidia,tegra194-gte-lic
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  nvidia,int-threshold:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      HTE device generates its interrupt based on this u32 FIFO threshold
+      value. The recommended value is 1.
+    minimum: 1
+    maximum: 256
+
+  nvidia,slices:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      HTE lines are arranged in 32-bit slices where each bit represents a
+      different line/signal that can be enabled/configured for timestamping.
+      It is a u32 property whose value depends on the HTE instance in the
+      chip: 3 for the GPIO GTE and 11 for the IRQ GTE.
+    enum: [3, 11]
+
+  '#timestamp-cells':
+    description:
+      This represents the number of line ID arguments specified by the
+      consumers. For the GTE IRQ, this is the IRQ number as mentioned in the
+      SoC technical reference manual. For the GTE GPIO, the value is the same
+      as described in the NVIDIA GPIO device tree binding document.
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - nvidia,slices
+  - "#timestamp-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    tegra_hte_aon: timestamp@c1e0000 {
+              compatible = "nvidia,tegra194-gte-aon";
+              reg = <0xc1e0000 0x10000>;
+              interrupts = <0 13 0x4>;
+              nvidia,int-threshold = <1>;
+              nvidia,slices = <3>;
+              #timestamp-cells = <1>;
+    };
+
+  - |
+    tegra_hte_lic: timestamp@3aa0000 {
+              compatible = "nvidia,tegra194-gte-lic";
+              reg = <0x3aa0000 0x10000>;
+              interrupts = <0 11 0x4>;
+              nvidia,int-threshold = <1>;
+              nvidia,slices = <11>;
+              #timestamp-cells = <1>;
+    };
+
+...
index 7a198a3..654ffc6 100644 (file)
@@ -61,8 +61,9 @@ DMA
   endpoint number (0 … 14 for endpoints 1 … 15 on instance 0 and 15 … 29
   for endpoints 1 … 15 on instance 1). The second number is 0 for RX and
   1 for TX transfers.
-- #dma-channels: should be set to 30 representing the 15 endpoints for
+- dma-channels: should be set to 30 representing the 15 endpoints for
   each USB instance.
+- #dma-channels: deprecated
 
 Example:
 ~~~~~~~~
@@ -193,7 +194,7 @@ usb: usb@47400000 {
                interrupts = <17>;
                interrupt-names = "glue";
                #dma-cells = <2>;
-               #dma-channels = <30>;
-               #dma-requests = <256>;
+               dma-channels = <30>;
+               dma-requests = <256>;
        };
 };
index 9ce2255..fb2027a 100644 (file)
@@ -36,7 +36,8 @@ DMA
 - #dma-cells: should be set to 2. The first number represents the
   channel number (0 … 3 for endpoints 1 … 4).
   The second number is 0 for RX and 1 for TX transfers.
-- #dma-channels: should be set to 4 representing the 4 endpoints.
+- dma-channels: should be set to 4 representing the 4 endpoints.
+- #dma-channels: deprecated
 
 Example:
        usb_phy: usb-phy {
@@ -74,7 +75,7 @@ Example:
                        reg-names = "controller", "scheduler", "queuemgr";
                        interrupts = <58>;
                        #dma-cells = <2>;
-                       #dma-channels = <4>;
+                       dma-channels = <4>;
                };
 
        };
index 17fc471..8d22a98 100644 (file)
@@ -17,6 +17,13 @@ properties:
     oneOf:
       - const: brcm,bcm2835-usb
       - const: hisilicon,hi6220-usb
+      - const: ingenic,jz4775-otg
+      - const: ingenic,jz4780-otg
+      - const: ingenic,x1000-otg
+      - const: ingenic,x1600-otg
+      - const: ingenic,x1700-otg
+      - const: ingenic,x1830-otg
+      - const: ingenic,x2000-otg
       - items:
           - const: rockchip,rk3066-usb
           - const: snps,dwc2
index f77c16e..098b731 100644 (file)
@@ -71,6 +71,10 @@ properties:
         - usb2-phy
         - usb3-phy
 
+  reset-gpios:
+    description: GPIO used to reset the ULPI PHY
+    maxItems: 1
+
 # Required child node:
 
 patternProperties:
diff --git a/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml b/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
new file mode 100644 (file)
index 0000000..9473f26
--- /dev/null
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/usb/fcs,fsa4480.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: ON Semiconductor Analog Audio Switch
+
+maintainers:
+  - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+properties:
+  compatible:
+    enum:
+      - fcs,fsa4480
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  vcc-supply:
+    description: power supply (2.7V-5.5V)
+
+  mode-switch:
+    description: Flag the port as possible handler of altmode switching
+    type: boolean
+
+  orientation-switch:
+    description: Flag the port as possible handler of orientation switching
+    type: boolean
+
+  port:
+    $ref: /schemas/graph.yaml#/properties/port
+    description:
+      A port node to link the FSA4480 to a TypeC controller for the purpose of
+      handling altmode muxing and orientation switching.
+
+required:
+  - compatible
+  - reg
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c13 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        fsa4480@42 {
+          compatible = "fcs,fsa4480";
+          reg = <0x42>;
+
+          interrupts-extended = <&tlmm 2 IRQ_TYPE_LEVEL_LOW>;
+
+          vcc-supply = <&vreg_bob>;
+
+          mode-switch;
+          orientation-switch;
+
+          port {
+            fsa4480_ept: endpoint {
+              remote-endpoint = <&typec_controller>;
+            };
+          };
+        };
+    };
+...
index 8913497..0b4524b 100644 (file)
@@ -55,6 +55,7 @@ properties:
               - brcm,bcm7420-ehci
               - brcm,bcm7425-ehci
               - brcm,bcm7435-ehci
+              - hpe,gxp-ehci
               - ibm,476gtr-ehci
               - nxp,lpc1850-ehci
               - qca,ar7100-ehci
index acbf94f..e2ac846 100644 (file)
@@ -42,6 +42,7 @@ properties:
               - brcm,bcm7420-ohci
               - brcm,bcm7425-ohci
               - brcm,bcm7435-ohci
+              - hpe,gxp-ohci
               - ibm,476gtr-ohci
               - ingenic,jz4740-ohci
               - snps,hsdk-v1.0-ohci
index df766f8..37b02a8 100644 (file)
@@ -25,6 +25,7 @@ properties:
           - mediatek,mt8173-mtu3
           - mediatek,mt8183-mtu3
           - mediatek,mt8192-mtu3
+          - mediatek,mt8195-mtu3
       - const: mediatek,mtu3
 
   reg:
index ce252db..e336fe2 100644 (file)
@@ -16,16 +16,21 @@ properties:
           - qcom,ipq4019-dwc3
           - qcom,ipq6018-dwc3
           - qcom,ipq8064-dwc3
+          - qcom,ipq8074-dwc3
           - qcom,msm8953-dwc3
+          - qcom,msm8994-dwc3
           - qcom,msm8996-dwc3
           - qcom,msm8998-dwc3
+          - qcom,qcs404-dwc3
           - qcom,sc7180-dwc3
           - qcom,sc7280-dwc3
           - qcom,sdm660-dwc3
           - qcom,sdm845-dwc3
           - qcom,sdx55-dwc3
+          - qcom,sdx65-dwc3
           - qcom,sm4250-dwc3
           - qcom,sm6115-dwc3
+          - qcom,sm6125-dwc3
           - qcom,sm6350-dwc3
           - qcom,sm8150-dwc3
           - qcom,sm8250-dwc3
@@ -50,26 +55,22 @@ properties:
     maxItems: 1
 
   clocks:
-    description:
-      A list of phandle and clock-specifier pairs for the clocks
-      listed in clock-names.
-    items:
-      - description: System Config NOC clock.
-      - description: Master/Core clock, has to be >= 125 MHz
-          for SS operation and >= 60MHz for HS operation.
-      - description: System bus AXI clock.
-      - description: Mock utmi clock needed for ITP/SOF generation
-          in host mode. Its frequency should be 19.2MHz.
-      - description: Sleep clock, used for wakeup when
-          USB3 core goes into low power mode (U3).
+    description: |
+      Several clocks are used, depending on the variant. Typical ones are::
+       - cfg_noc:: System Config NOC clock.
+       - core:: Master/Core clock, has to be >= 125 MHz for SS operation and >=
+                60MHz for HS operation.
+       - iface:: System bus AXI clock.
+       - sleep:: Sleep clock, used for wakeup when USB3 core goes into low
+                 power mode (U3).
+       - mock_utmi:: Mock utmi clock needed for ITP/SOF generation in host
+                     mode. Its frequency should be 19.2MHz.
+    minItems: 1
+    maxItems: 6
 
   clock-names:
-    items:
-      - const: cfg_noc
-      - const: core
-      - const: iface
-      - const: mock_utmi
-      - const: sleep
+    minItems: 1
+    maxItems: 6
 
   assigned-clocks:
     items:
@@ -132,6 +133,185 @@ required:
   - interrupts
   - interrupt-names
 
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,ipq4019-dwc3
+    then:
+      properties:
+        clocks:
+          maxItems: 3
+        clock-names:
+          items:
+            - const: core
+            - const: sleep
+            - const: mock_utmi
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,ipq8064-dwc3
+    then:
+      properties:
+        clocks:
+          items:
+            - description: Master/Core clock, has to be >= 125 MHz
+                for SS operation and >= 60MHz for HS operation.
+        clock-names:
+          items:
+            - const: core
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,msm8953-dwc3
+              - qcom,msm8996-dwc3
+              - qcom,msm8998-dwc3
+              - qcom,sc7180-dwc3
+              - qcom,sc7280-dwc3
+              - qcom,sdm845-dwc3
+              - qcom,sdx55-dwc3
+              - qcom,sm6350-dwc3
+    then:
+      properties:
+        clocks:
+          maxItems: 5
+        clock-names:
+          items:
+            - const: cfg_noc
+            - const: core
+            - const: iface
+            - const: sleep
+            - const: mock_utmi
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,ipq6018-dwc3
+    then:
+      properties:
+        clocks:
+          minItems: 3
+          maxItems: 4
+        clock-names:
+          oneOf:
+            - items:
+                - const: core
+                - const: sleep
+                - const: mock_utmi
+            - items:
+                - const: cfg_noc
+                - const: core
+                - const: sleep
+                - const: mock_utmi
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,ipq8074-dwc3
+    then:
+      properties:
+        clocks:
+          maxItems: 4
+        clock-names:
+          items:
+            - const: cfg_noc
+            - const: core
+            - const: sleep
+            - const: mock_utmi
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,msm8994-dwc3
+              - qcom,qcs404-dwc3
+    then:
+      properties:
+        clocks:
+          maxItems: 4
+        clock-names:
+          items:
+            - const: core
+            - const: iface
+            - const: sleep
+            - const: mock_utmi
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,sdm660-dwc3
+    then:
+      properties:
+        clocks:
+          minItems: 6
+        clock-names:
+          items:
+            - const: cfg_noc
+            - const: core
+            - const: iface
+            - const: sleep
+            - const: mock_utmi
+            - const: bus
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,sm6125-dwc3
+              - qcom,sm8150-dwc3
+              - qcom,sm8250-dwc3
+              - qcom,sm8450-dwc3
+    then:
+      properties:
+        clocks:
+          minItems: 6
+        clock-names:
+          items:
+            - const: cfg_noc
+            - const: core
+            - const: iface
+            - const: sleep
+            - const: mock_utmi
+            - const: xo
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,sm8350-dwc3
+    then:
+      properties:
+        clocks:
+          minItems: 5
+          maxItems: 6
+        clock-names:
+          minItems: 5
+          items:
+            - const: cfg_noc
+            - const: core
+            - const: iface
+            - const: sleep
+            - const: mock_utmi
+            - const: xo
+
 additionalProperties: false
 
 examples:
@@ -153,10 +333,13 @@ examples:
             clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
                      <&gcc GCC_USB30_PRIM_MASTER_CLK>,
                      <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
-                     <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
-                     <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
-            clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                      "sleep";
+                     <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+                     <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+            clock-names = "cfg_noc",
+                          "core",
+                          "iface",
+                          "sleep",
+                          "mock_utmi";
 
             assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                           <&gcc GCC_USB30_PRIM_MASTER_CLK>;
index 0bb841b..bad55df 100644 (file)
@@ -19,6 +19,7 @@ properties:
       - items:
           - enum:
               - renesas,usbhs-r7s9210   # RZ/A2
+              - renesas,usbhs-r9a07g043 # RZ/G2UL
               - renesas,usbhs-r9a07g044 # RZ/G2{L,LC}
               - renesas,usbhs-r9a07g054 # RZ/V2L
           - const: renesas,rza2-usbhs
@@ -118,6 +119,7 @@ allOf:
         compatible:
           contains:
             enum:
+              - renesas,usbhs-r9a07g043
               - renesas,usbhs-r9a07g044
               - renesas,usbhs-r9a07g054
     then:
@@ -128,6 +130,8 @@ allOf:
             - description: U2P_INT_DMA[0]
             - description: U2P_INT_DMA[1]
             - description: U2P_INT_DMAERR
+      required:
+        - resets
     else:
       properties:
         interrupts:
index 9c92def..caa572d 100644 (file)
@@ -15,9 +15,6 @@ properties:
       - samsung,exynos4210-ehci
       - samsung,exynos4210-ohci
 
-  '#address-cells':
-    const: 1
-
   clocks:
     maxItems: 1
 
@@ -46,15 +43,6 @@ properties:
       Only for controller in EHCI mode, if present, specifies the GPIO that
       needs to be pulled up for the bus to be powered.
 
-  '#size-cells':
-    const: 0
-
-patternProperties:
-  "^.*@[0-9a-f]{1,2}$":
-    description: The hard wired USB devices
-    type: object
-    $ref: /usb/usb-device.yaml
-
 required:
   - compatible
   - clocks
@@ -65,6 +53,7 @@ required:
   - reg
 
 allOf:
+  - $ref: usb-hcd.yaml#
   - if:
       properties:
         compatible:
@@ -74,7 +63,7 @@ allOf:
       properties:
         samsung,vbus-gpio: false
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index f4471f8..d41265b 100644 (file)
@@ -68,6 +68,8 @@ properties:
         - enum: [bus_early, ref, suspend]
         - true
 
+  dma-coherent: true
+
   iommus:
     maxItems: 1
 
diff --git a/Documentation/devicetree/bindings/usb/ti,am62-usb.yaml b/Documentation/devicetree/bindings/usb/ti,am62-usb.yaml
new file mode 100644 (file)
index 0000000..d25fc70
--- /dev/null
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/ti,am62-usb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI's AM62 wrapper module for the Synopsys USBSS-DRD controller
+
+maintainers:
+  - Aswath Govindraju <a-govindraju@ti.com>
+
+properties:
+  compatible:
+    const: ti,am62-usb
+
+  reg:
+    maxItems: 1
+
+  ranges: true
+
+  power-domains:
+    description:
+      PM domain provider node and an args specifier containing
+      the USB ISO device id value. See,
+      Documentation/devicetree/bindings/soc/ti/sci-pm-domain.yaml
+    maxItems: 1
+
+  clocks:
+    description: Clock phandle to usb2_refclk
+    maxItems: 1
+
+  clock-names:
+    items:
+      - const: ref
+
+  ti,vbus-divider:
+    description:
+      Should be present if USB VBUS line is connected to the
+      VBUS pin of the SoC via a 1/3 voltage divider.
+    type: boolean
+
+  ti,syscon-phy-pll-refclk:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    items:
+      - items:
+          - description: Phandle to the SYSCON entry
+          - description: USB phy control register offset within SYSCON
+    description:
+      Specifier for conveying frequency of ref clock input, for the
+      operation of USB2PHY.
+
+  '#address-cells':
+    const: 2
+
+  '#size-cells':
+    const: 2
+
+patternProperties:
+  "^usb@[0-9a-f]+$":
+    $ref: snps,dwc3.yaml#
+    description: Required child node
+
+required:
+  - compatible
+  - reg
+  - power-domains
+  - clocks
+  - clock-names
+  - ti,syscon-phy-pll-refclk
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/soc/ti,sci_pm_domain.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/gpio/gpio.h>
+
+    bus {
+      #address-cells = <2>;
+      #size-cells = <2>;
+
+      usbss1: usb@f910000 {
+        compatible = "ti,am62-usb";
+        reg = <0x00 0x0f910000 0x00 0x800>;
+        clocks = <&k3_clks 162 3>;
+        clock-names = "ref";
+        ti,syscon-phy-pll-refclk = <&wkup_conf 0x4018>;
+        power-domains = <&k3_pds 179 TI_SCI_PD_EXCLUSIVE>;
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        usb@31100000 {
+          compatible = "snps,dwc3";
+          reg =<0x00 0x31100000 0x00 0x50000>;
+          interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>, /* irq.0 */
+                       <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>; /* irq.0 */
+          interrupt-names = "host", "peripheral";
+          maximum-speed = "high-speed";
+          dr_mode = "otg";
+        };
+      };
+    };
index 495a01c..6bb20b4 100644 (file)
@@ -1207,6 +1207,8 @@ patternProperties:
     description: Summit microelectronics
   "^sunchip,.*":
     description: Shenzhen Sunchip Technology Co., Ltd
+  "^sundance,.*":
+    description: Sundance DSP Inc.
   "^sunplus,.*":
     description: Sunplus Technology Co., Ltd.
   "^SUNW,.*":
index 950e4fb..354314d 100644 (file)
@@ -10,6 +10,12 @@ Optional properties:
 - dlg,use-sw-pm: Add this property to disable the watchdog during suspend.
        Only use this option if you can't use the watchdog automatic suspend
        function during a suspend (see register CONTROL_B).
+- dlg,wdt-sd: Set what happens on watchdog timeout. If this bit is set, a
+       watchdog timeout triggers SHUTDOWN; if cleared, it triggers POWERDOWN.
+       Can be 0 or 1. Only use this option if you want to change the chip's
+       default OTP setting for the WATCHDOG_SD bit. If this property is NOT
+       set, the WATCHDOG_SD bit and the watchdog behavior on timeout will
+       match the chip's OTP settings.
 
 Example: DA9062
 
diff --git a/Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.txt b/Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.txt
deleted file mode 100644 (file)
index 9ecdb50..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-Faraday Technology FTWDT010 watchdog
-
-This is an IP part from Faraday Technology found in the Gemini
-SoCs and others.
-
-Required properties:
-- compatible : must be one of
-  "faraday,ftwdt010"
-  "cortina,gemini-watchdog", "faraday,ftwdt010"
-- reg : shall contain base register location and length
-- interrupts : shall contain the interrupt for the watchdog
-
-Optional properties:
-- timeout-sec : the default watchdog timeout in seconds.
-
-Example:
-
-watchdog@41000000 {
-       compatible = "faraday,ftwdt010";
-       reg = <0x41000000 0x1000>;
-       interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
-};
diff --git a/Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.yaml b/Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.yaml
new file mode 100644 (file)
index 0000000..ca9e1be
--- /dev/null
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/faraday,ftwdt010.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Faraday Technology FTWDT010 watchdog
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+  - Corentin Labbe <clabbe@baylibre.com>
+
+description: |
+  This is an IP part from Faraday Technology found in the Gemini
+  SoCs and others.
+
+allOf:
+  - $ref: "watchdog.yaml#"
+
+properties:
+  compatible:
+    oneOf:
+      - const: faraday,ftwdt010
+      - items:
+          - enum:
+              - cortina,gemini-watchdog
+              - moxa,moxart-watchdog
+          - const: faraday,ftwdt010
+
+  reg:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    const: PCLK
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    watchdog@41000000 {
+      compatible = "faraday,ftwdt010";
+      reg = <0x41000000 0x1000>;
+      interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+      timeout-sec = <5>;
+    };
+  - |
+    watchdog: watchdog@98500000 {
+      compatible = "moxa,moxart-watchdog", "faraday,ftwdt010";
+      reg = <0x98500000 0x10>;
+      clocks = <&clk_apb>;
+      clock-names = "PCLK";
+    };
+...
index 4ca8a31..8562978 100644 (file)
@@ -19,6 +19,7 @@ properties:
       - items:
           - const: fsl,imx8ulp-wdt
           - const: fsl,imx7ulp-wdt
+      - const: fsl,imx93-wdt
 
   reg:
     maxItems: 1
index a97418c..762c62e 100644 (file)
@@ -16,6 +16,7 @@ Required properties:
        "mediatek,mt7629-wdt", "mediatek,mt6589-wdt": for MT7629
        "mediatek,mt7986-wdt", "mediatek,mt6589-wdt": for MT7986
        "mediatek,mt8183-wdt": for MT8183
+       "mediatek,mt8186-wdt", "mediatek,mt6589-wdt": for MT8186
        "mediatek,mt8516-wdt", "mediatek,mt6589-wdt": for MT8516
        "mediatek,mt8192-wdt": for MT8192
        "mediatek,mt8195-wdt", "mediatek,mt6589-wdt": for MT8195
index 16c6f82..2bd6b4a 100644 (file)
@@ -14,22 +14,29 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - qcom,apss-wdt-qcs404
-      - qcom,apss-wdt-sc7180
-      - qcom,apss-wdt-sc7280
-      - qcom,apss-wdt-sdm845
-      - qcom,apss-wdt-sdx55
-      - qcom,apss-wdt-sm6350
-      - qcom,apss-wdt-sm8150
-      - qcom,apss-wdt-sm8250
-      - qcom,kpss-timer
-      - qcom,kpss-wdt
-      - qcom,kpss-wdt-apq8064
-      - qcom,kpss-wdt-ipq4019
-      - qcom,kpss-wdt-ipq8064
-      - qcom,kpss-wdt-msm8960
-      - qcom,scss-timer
+    oneOf:
+      - items:
+          - enum:
+              - qcom,apss-wdt-qcs404
+              - qcom,apss-wdt-sc7180
+              - qcom,apss-wdt-sc7280
+              - qcom,apss-wdt-sc8180x
+              - qcom,apss-wdt-sc8280xp
+              - qcom,apss-wdt-sdm845
+              - qcom,apss-wdt-sdx55
+              - qcom,apss-wdt-sm6350
+              - qcom,apss-wdt-sm8150
+              - qcom,apss-wdt-sm8250
+          - const: qcom,kpss-wdt
+      - items:
+          - enum:
+              - qcom,kpss-wdt
+              - qcom,kpss-timer
+              - qcom,kpss-wdt-apq8064
+              - qcom,kpss-wdt-ipq4019
+              - qcom,kpss-wdt-ipq8064
+              - qcom,kpss-wdt-msm8960
+              - qcom,scss-timer
 
   reg:
     maxItems: 1
index d060438..a8d7dde 100644 (file)
@@ -21,8 +21,15 @@ properties:
 
       - items:
           - enum:
+              - renesas,r9a06g032-wdt    # RZ/N1D
+          - const: renesas,rzn1-wdt      # RZ/N1
+
+      - items:
+          - enum:
+              - renesas,r9a07g043-wdt    # RZ/G2UL
               - renesas,r9a07g044-wdt    # RZ/G2{L,LC}
-          - const: renesas,rzg2l-wdt     # RZ/G2L
+              - renesas,r9a07g054-wdt    # RZ/V2L
+          - const: renesas,rzg2l-wdt
 
       - items:
           - enum:
@@ -52,11 +59,11 @@ properties:
               - renesas,r8a77980-wdt     # R-Car V3H
               - renesas,r8a77990-wdt     # R-Car E3
               - renesas,r8a77995-wdt     # R-Car D3
-              - renesas,r8a779a0-wdt     # R-Car V3U
           - const: renesas,rcar-gen3-wdt # R-Car Gen3 and RZ/G2
 
       - items:
           - enum:
+              - renesas,r8a779a0-wdt     # R-Car V3U
               - renesas,r8a779f0-wdt     # R-Car S4-8
           - const: renesas,rcar-gen4-wdt # R-Car Gen4
 
@@ -94,6 +101,7 @@ allOf:
             contains:
               enum:
                 - renesas,rza-wdt
+                - renesas,rzn1-wdt
     then:
       required:
         - power-domains
diff --git a/Documentation/devicetree/bindings/watchdog/sunplus,sp7021-wdt.yaml b/Documentation/devicetree/bindings/watchdog/sunplus,sp7021-wdt.yaml
new file mode 100644 (file)
index 0000000..d902710
--- /dev/null
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) Sunplus Co., Ltd. 2021
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/sunplus,sp7021-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sunplus SoCs Watchdog
+
+maintainers:
+  - XianTao Hu <xt.hu@cqplus1.com>
+
+allOf:
+  - $ref: watchdog.yaml#
+
+properties:
+  compatible:
+    const: sunplus,sp7021-wdt
+
+  reg:
+    items:
+      - description: watchdog registers regions
+      - description: miscellaneous control registers regions
+
+  clocks:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - resets
+
+additionalProperties: false
+
+examples:
+  - |
+    watchdog: watchdog@9c000630 {
+        compatible = "sunplus,sp7021-wdt";
+        reg = <0x9c000630 0x08>, <0x9c000274 0x04>;
+        clocks = <&clkc 0x24>;
+        resets = <&rstc 0x14>;
+    };
+...
diff --git a/Documentation/driver-api/firmware/fw_upload.rst b/Documentation/driver-api/firmware/fw_upload.rst
new file mode 100644 (file)
index 0000000..7692259
--- /dev/null
@@ -0,0 +1,126 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
+Firmware Upload API
+===================
+
+A device driver that registers with the firmware loader will expose
+persistent sysfs nodes to enable users to initiate firmware updates for
+that device.  It is the responsibility of the device driver and/or the
+device itself to perform any validation on the data received. Firmware
+upload uses the same *loading* and *data* sysfs files described in the
+documentation for firmware fallback. It also adds additional sysfs files
+to provide status on the transfer of the firmware image to the device.
+
+Register for firmware upload
+============================
+
+A device driver registers for firmware upload by calling
+firmware_upload_register(). Among the parameter list is a name to
+identify the device under /sys/class/firmware. A user may initiate a
+firmware upload by echoing a 1 to the *loading* sysfs file for the target
+device. Next, the user writes the firmware image to the *data* sysfs
+file. After writing the firmware data, the user echoes 0 to the *loading*
+sysfs file to signal completion. Echoing 0 to *loading* also triggers the
+transfer of the firmware to the lower-level device driver in the context
+of a kernel worker thread.
+
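+A minimal user-space sketch of that sequence, using the sysfs files described
+above. The device name ``example-fw`` is hypothetical; it depends on the name
+passed to firmware_upload_register(), and error handling is trimmed::
+
+        #include <fcntl.h>
+        #include <unistd.h>
+
+        static int upload_firmware(const void *image, size_t size)
+        {
+                int loading, data;
+
+                loading = open("/sys/class/firmware/example-fw/loading", O_WRONLY);
+                data = open("/sys/class/firmware/example-fw/data", O_WRONLY);
+                if (loading < 0 || data < 0)
+                        return -1;
+
+                write(loading, "1", 1);         /* start the update */
+                write(data, image, size);       /* send the firmware image */
+                write(loading, "0", 1);         /* hand off to the kernel worker */
+
+                close(data);
+                close(loading);
+                return 0;
+        }
+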
+To use the firmware upload API, write a driver that implements a set of
+ops.  The probe function calls firmware_upload_register() and the remove
+function calls firmware_upload_unregister() such as::
+
+       static const struct fw_upload_ops m10bmc_ops = {
+               .prepare = m10bmc_sec_prepare,
+               .write = m10bmc_sec_write,
+               .poll_complete = m10bmc_sec_poll_complete,
+               .cancel = m10bmc_sec_cancel,
+               .cleanup = m10bmc_sec_cleanup,
+       };
+
+       static int m10bmc_sec_probe(struct platform_device *pdev)
+       {
+               const char *fw_name, *truncate;
+               struct m10bmc_sec *sec;
+               struct fw_upload *fwl;
+               unsigned int len;
+
+               sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
+               if (!sec)
+                       return -ENOMEM;
+
+               sec->dev = &pdev->dev;
+               sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
+               dev_set_drvdata(&pdev->dev, sec);
+
+               fw_name = dev_name(sec->dev);
+               truncate = strstr(fw_name, ".auto");
+               len = (truncate) ? truncate - fw_name : strlen(fw_name);
+               sec->fw_name = kmemdup_nul(fw_name, len, GFP_KERNEL);
+
+               fwl = firmware_upload_register(sec->dev, sec->fw_name, &m10bmc_ops, sec);
+               if (IS_ERR(fwl)) {
+                       dev_err(sec->dev, "Firmware Upload driver failed to start\n");
+                       kfree(sec->fw_name);
+                       return PTR_ERR(fwl);
+               }
+
+               sec->fwl = fwl;
+               return 0;
+       }
+
+       static int m10bmc_sec_remove(struct platform_device *pdev)
+       {
+               struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev);
+
+               firmware_upload_unregister(sec->fwl);
+               kfree(sec->fw_name);
+               return 0;
+       }
+
+firmware_upload_register
+------------------------
+.. kernel-doc:: drivers/base/firmware_loader/sysfs_upload.c
+   :identifiers: firmware_upload_register
+
+firmware_upload_unregister
+--------------------------
+.. kernel-doc:: drivers/base/firmware_loader/sysfs_upload.c
+   :identifiers: firmware_upload_unregister
+
+Firmware Upload Ops
+-------------------
+.. kernel-doc:: include/linux/firmware.h
+   :identifiers: fw_upload_ops
+
+Firmware Upload Progress Codes
+------------------------------
+The following progress codes are used internally by the firmware loader.
+Corresponding strings are reported through the status sysfs node that
+is described below and are documented in the ABI documentation.
+
+.. kernel-doc:: drivers/base/firmware_loader/sysfs_upload.h
+   :identifiers: fw_upload_prog
+
+Firmware Upload Error Codes
+---------------------------
+The following error codes may be returned by the driver ops in case of
+failure:
+
+.. kernel-doc:: include/linux/firmware.h
+   :identifiers: fw_upload_err
+
+Sysfs Attributes
+================
+
+In addition to the *loading* and *data* sysfs files, there are additional
+sysfs files to monitor the status of the data transfer to the target
+device and to determine the final pass/fail status of the transfer.
+Depending on the device and the size of the firmware image, a firmware
+update could take milliseconds or minutes.
+
+The additional sysfs files are:
+
+* status - provides an indication of the progress of a firmware update
+* error - provides error information for a failed firmware update
+* remaining_size - tracks the data transfer portion of an update
+* cancel - echo 1 to this file to cancel the update
index 57415d6..9d2c19d 100644 (file)
@@ -8,6 +8,7 @@ Linux Firmware API
    core
    efi/index
    request_firmware
+   fw_upload
    other_interfaces
 
 .. only::  subproject and html
index a7b0223..d76a60d 100644 (file)
@@ -101,6 +101,7 @@ available subsections can be seen below.
    surface_aggregator/index
    switchtec
    sync_file
+   tty/index
    vfio-mediated-device
    vfio
    vfio-pci-device-specific-driver-acceptance
index ccb06e4..fd26c3d 100644 (file)
@@ -49,6 +49,12 @@ After being requested, a PWM has to be configured using::
 
 This API controls both the PWM period/duty_cycle config and the
 enable/disable state.
+
+As a consumer, don't rely on the output's state for a disabled PWM. If it's
+easily possible, drivers are supposed to emit the inactive state, but some
+drivers cannot. If you rely on getting the inactive state, use .duty_cycle=0,
+.enabled=true.
+
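+A minimal consumer sketch of that recommendation, assuming ``pwm`` is a valid
+struct pwm_device pointer obtained earlier (e.g. via pwm_get())::
+
+        struct pwm_state state;
+
+        pwm_init_state(pwm, &state);
+        /* keep the PWM enabled, but emit a constant inactive level */
+        state.duty_cycle = 0;
+        state.enabled = true;
+        pwm_apply_state(pwm, &state);
+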
 There is also a usage_power setting: If set, the PWM driver is only required to
 maintain the power output but has more freedom regarding signal form.
 If supported by the driver, the signal can be optimized, for example to improve
index 06ec04b..7ef83fd 100644 (file)
@@ -311,7 +311,7 @@ hardware.
        This call must not sleep
 
   set_ldisc(port,termios)
-       Notifier for discipline change. See Documentation/tty/tty_ldisc.rst.
+       Notifier for discipline change. See ../tty/tty_ldisc.rst.
 
        Locking: caller holds tty_port->mutex
 
index 7eb21a6..03a55b9 100644 (file)
@@ -16,8 +16,6 @@ Serial drivers
 .. toctree::
     :maxdepth: 1
 
-    moxa-smartio
-    n_gsm
     serial-iso7816
     serial-rs485
 
diff --git a/Documentation/driver-api/serial/n_gsm.rst b/Documentation/driver-api/serial/n_gsm.rst
deleted file mode 100644 (file)
index 4995650..0000000
+++ /dev/null
@@ -1,159 +0,0 @@
-==============================
-GSM 0710 tty multiplexor HOWTO
-==============================
-
-This line discipline implements the GSM 07.10 multiplexing protocol
-detailed in the following 3GPP document:
-
-       https://www.3gpp.org/ftp/Specs/archive/07_series/07.10/0710-720.zip
-
-This document give some hints on how to use this driver with GPRS and 3G
-modems connected to a physical serial port.
-
-How to use it
--------------
-1. config initiator
-^^^^^^^^^^^^^^^^^^^^^
-
-1.1 initialize the modem in 0710 mux mode (usually AT+CMUX= command) through
-    its serial port. Depending on the modem used, you can pass more or less
-    parameters to this command.
-
-1.2 switch the serial line to using the n_gsm line discipline by using
-    TIOCSETD ioctl.
-
-1.3 configure the mux using GSMIOC_GETCONF / GSMIOC_SETCONF ioctl.
-
-1.4 obtain base gsmtty number for the used serial port.
-
-Major parts of the initialization program :
-(a good starting point is util-linux-ng/sys-utils/ldattach.c)::
-
-  #include <stdio.h>
-  #include <stdint.h>
-  #include <linux/gsmmux.h>
-  #include <linux/tty.h>
-  #define DEFAULT_SPEED        B115200
-  #define SERIAL_PORT  /dev/ttyS0
-
-       int ldisc = N_GSM0710;
-       struct gsm_config c;
-       struct termios configuration;
-       uint32_t first;
-
-       /* open the serial port connected to the modem */
-       fd = open(SERIAL_PORT, O_RDWR | O_NOCTTY | O_NDELAY);
-
-       /* configure the serial port : speed, flow control ... */
-
-       /* send the AT commands to switch the modem to CMUX mode
-          and check that it's successful (should return OK) */
-       write(fd, "AT+CMUX=0\r", 10);
-
-       /* experience showed that some modems need some time before
-          being able to answer to the first MUX packet so a delay
-          may be needed here in some case */
-       sleep(3);
-
-       /* use n_gsm line discipline */
-       ioctl(fd, TIOCSETD, &ldisc);
-
-       /* get n_gsm configuration */
-       ioctl(fd, GSMIOC_GETCONF, &c);
-       /* we are initiator and need encoding 0 (basic) */
-       c.initiator = 1;
-       c.encapsulation = 0;
-       /* our modem defaults to a maximum size of 127 bytes */
-       c.mru = 127;
-       c.mtu = 127;
-       /* set the new configuration */
-       ioctl(fd, GSMIOC_SETCONF, &c);
-       /* get first gsmtty device node */
-       ioctl(fd, GSMIOC_GETFIRST, &first);
-       printf("first muxed line: /dev/gsmtty%i\n", first);
-
-       /* and wait for ever to keep the line discipline enabled */
-       daemon(0,0);
-       pause();
-
-1.5 use these devices as plain serial ports.
-
-   for example, it's possible:
-
-   - and to use gnokii to send / receive SMS on ttygsm1
-   - to use ppp to establish a datalink on ttygsm2
-
-1.6 first close all virtual ports before closing the physical port.
-
-   Note that after closing the physical port the modem is still in multiplexing
-   mode. This may prevent a successful re-opening of the port later. To avoid
-   this situation either reset the modem if your hardware allows that or send
-   a disconnect command frame manually before initializing the multiplexing mode
-   for the second time. The byte sequence for the disconnect command frame is::
-
-      0xf9, 0x03, 0xef, 0x03, 0xc3, 0x16, 0xf9.
-
-2. config requester
-^^^^^^^^^^^^^^^^^^^^^
-
-2.1 receive string "AT+CMUX= command" through its serial port,initialize
-    mux mode config
-
-2.2 switch the serial line to using the n_gsm line discipline by using
-    TIOCSETD ioctl.
-
-2.3 configure the mux using GSMIOC_GETCONF / GSMIOC_SETCONF ioctl.
-
-2.4 obtain base gsmtty number for the used serial port::
-
-  #include <stdio.h>
-  #include <stdint.h>
-  #include <linux/gsmmux.h>
-  #include <linux/tty.h>
-  #define DEFAULT_SPEED        B115200
-  #define SERIAL_PORT  /dev/ttyS0
-
-       int ldisc = N_GSM0710;
-       struct gsm_config c;
-       struct termios configuration;
-       uint32_t first;
-
-       /* open the serial port */
-       fd = open(SERIAL_PORT, O_RDWR | O_NOCTTY | O_NDELAY);
-
-       /* configure the serial port : speed, flow control ... */
-
-       /* get serial data and check "AT+CMUX=command" parameter ... */
-
-       /* use n_gsm line discipline */
-       ioctl(fd, TIOCSETD, &ldisc);
-
-       /* get n_gsm configuration */
-       ioctl(fd, GSMIOC_GETCONF, &c);
-       /* we are requester and need encoding 0 (basic) */
-       c.initiator = 0;
-       c.encapsulation = 0;
-       /* our modem defaults to a maximum size of 127 bytes */
-       c.mru = 127;
-       c.mtu = 127;
-       /* set the new configuration */
-       ioctl(fd, GSMIOC_SETCONF, &c);
-       /* get first gsmtty device node */
-       ioctl(fd, GSMIOC_GETFIRST, &first);
-       printf("first muxed line: /dev/gsmtty%i\n", first);
-
-       /* and wait for ever to keep the line discipline enabled */
-       daemon(0,0);
-       pause();
-
-Additional Documentation
-------------------------
-More practical details on the protocol and how it's supported by industrial
-modems can be found in the following documents :
-
-- http://www.telit.com/module/infopool/download.php?id=616
-- http://www.u-blox.com/images/downloads/Product_Docs/LEON-G100-G200-MuxImplementation_ApplicationNote_%28GSM%20G1-CS-10002%29.pdf
-- http://www.sierrawireless.com/Support/Downloads/AirPrime/WMP_Series/~/media/Support_Downloads/AirPrime/Application_notes/CMUX_Feature_Application_Note-Rev004.ashx
-- http://wm.sim.com/sim/News/photo/2010721161442.pdf
-
-11-03-08 - Eric Bénard - <eric@eukrea.com>
similarity index 81%
rename from Documentation/tty/index.rst
rename to Documentation/driver-api/tty/index.rst
index 21ea0cb..2d32606 100644 (file)
@@ -36,18 +36,16 @@ In-detail description of the named TTY structures is in separate documents:
    tty_struct
    tty_ldisc
    tty_buffer
-   n_tty
    tty_internals
 
 Writing TTY Driver
 ==================
 
 Before one starts writing a TTY driver, they must consider
-:doc:`Serial <../driver-api/serial/driver>` and :doc:`USB Serial
-<../usb/usb-serial>` layers
-first. Drivers for serial devices can often use one of these specific layers to
-implement a serial driver. Only special devices should be handled directly by
-the TTY Layer. If you are about to write such a driver, read on.
+:doc:`Serial <../serial/driver>` and :doc:`USB Serial <../../usb/usb-serial>`
+layers first. Drivers for serial devices can often use one of these specific
+layers to implement a serial driver. Only special devices should be handled
+directly by the TTY Layer. If you are about to write such a driver, read on.
 
 A *typical* sequence a TTY driver performs is as follows:
 
@@ -61,3 +59,15 @@ A *typical* sequence a TTY driver performs is as follows:
 Steps regarding driver, i.e. 1., 3., and 5. are described in detail in
 :doc:`tty_driver`. For the other two (devices handling), look into
 :doc:`tty_port`.
+
+Other Documentation
+===================
+
+Miscellaneous documentation can be further found in these documents:
+
+.. toctree::
+   :maxdepth: 2
+
+   moxa-smartio
+   n_gsm
+   n_tty
diff --git a/Documentation/driver-api/tty/n_gsm.rst b/Documentation/driver-api/tty/n_gsm.rst
new file mode 100644 (file)
index 0000000..35d7381
--- /dev/null
@@ -0,0 +1,153 @@
+==============================
+GSM 0710 tty multiplexor HOWTO
+==============================
+
+.. contents:: :local:
+
+This line discipline implements the GSM 07.10 multiplexing protocol
+detailed in the following 3GPP document:
+
+       https://www.3gpp.org/ftp/Specs/archive/07_series/07.10/0710-720.zip
+
+This document gives some hints on how to use this driver with GPRS and 3G
+modems connected to a physical serial port.
+
+How to use it
+=============
+
+Config Initiator
+----------------
+
+#. Initialize the modem in 0710 mux mode (usually ``AT+CMUX=`` command) through
+   its serial port. Depending on the modem used, you can pass more or less
+   parameters to this command.
+
+#. Switch the serial line to using the n_gsm line discipline by using
+   ``TIOCSETD`` ioctl.
+
+#. Configure the mux using ``GSMIOC_GETCONF``/``GSMIOC_SETCONF`` ioctl.
+
+#. Obtain base gsmtty number for the used serial port.
+
+   Major parts of the initialization program
+   (a good starting point is util-linux-ng/sys-utils/ldattach.c)::
+
+      #include <stdio.h>
+      #include <stdint.h>
+      #include <linux/gsmmux.h>
+      #include <linux/tty.h>
+
+      #define DEFAULT_SPEED    B115200
+      #define SERIAL_PORT      /dev/ttyS0
+
+      int ldisc = N_GSM0710;
+      struct gsm_config c;
+      struct termios configuration;
+      uint32_t first;
+
+      /* open the serial port connected to the modem */
+      fd = open(SERIAL_PORT, O_RDWR | O_NOCTTY | O_NDELAY);
+
+      /* configure the serial port : speed, flow control ... */
+
+      /* send the AT commands to switch the modem to CMUX mode
+         and check that it's successful (should return OK) */
+      write(fd, "AT+CMUX=0\r", 10);
+
+      /* experience showed that some modems need some time before
+         being able to answer to the first MUX packet so a delay
+         may be needed here in some case */
+      sleep(3);
+
+      /* use n_gsm line discipline */
+      ioctl(fd, TIOCSETD, &ldisc);
+
+      /* get n_gsm configuration */
+      ioctl(fd, GSMIOC_GETCONF, &c);
+      /* we are initiator and need encoding 0 (basic) */
+      c.initiator = 1;
+      c.encapsulation = 0;
+      /* our modem defaults to a maximum size of 127 bytes */
+      c.mru = 127;
+      c.mtu = 127;
+      /* set the new configuration */
+      ioctl(fd, GSMIOC_SETCONF, &c);
+      /* get first gsmtty device node */
+      ioctl(fd, GSMIOC_GETFIRST, &first);
+      printf("first muxed line: /dev/gsmtty%i\n", first);
+
+      /* and wait for ever to keep the line discipline enabled */
+      daemon(0,0);
+      pause();
+
+#. Use these devices as plain serial ports.
+
+   For example, it's possible:
+
+   - to use *gnokii* to send / receive SMS on ``ttygsm1``
+   - to use *ppp* to establish a datalink on ``ttygsm2``
+
+#. First close all virtual ports before closing the physical port.
+
+   Note that after closing the physical port the modem is still in multiplexing
+   mode. This may prevent a successful re-opening of the port later. To avoid
+   this situation either reset the modem if your hardware allows that or send
+   a disconnect command frame manually before initializing the multiplexing
+   mode for the second time, as shown in the sketch after this list. The byte
+   sequence for the disconnect command frame is::
+
+      0xf9, 0x03, 0xef, 0x03, 0xc3, 0x16, 0xf9
+
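+A minimal sketch of sending that frame on the still-open file descriptor
+``fd`` from the code above::
+
+      static const unsigned char disc[] = {
+              0xf9, 0x03, 0xef, 0x03, 0xc3, 0x16, 0xf9
+      };
+
+      write(fd, disc, sizeof(disc));
+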
+Config Requester
+----------------
+
+#. Receive ``AT+CMUX=`` command through its serial port, initialize mux mode
+   config.
+
+#. Switch the serial line to using the *n_gsm* line discipline by using
+   ``TIOCSETD`` ioctl.
+
+#. Configure the mux using ``GSMIOC_GETCONF``/``GSMIOC_SETCONF`` ioctl.
+
+#. Obtain base gsmtty number for the used serial port::
+
+        #include <stdio.h>
+        #include <stdint.h>
+        #include <linux/gsmmux.h>
+        #include <linux/tty.h>
+        #define DEFAULT_SPEED  B115200
+        #define SERIAL_PORT    /dev/ttyS0
+
+       int ldisc = N_GSM0710;
+       struct gsm_config c;
+       struct termios configuration;
+       uint32_t first;
+
+       /* open the serial port */
+       fd = open(SERIAL_PORT, O_RDWR | O_NOCTTY | O_NDELAY);
+
+       /* configure the serial port : speed, flow control ... */
+
+       /* get serial data and check "AT+CMUX=command" parameter ... */
+
+       /* use n_gsm line discipline */
+       ioctl(fd, TIOCSETD, &ldisc);
+
+       /* get n_gsm configuration */
+       ioctl(fd, GSMIOC_GETCONF, &c);
+       /* we are requester and need encoding 0 (basic) */
+       c.initiator = 0;
+       c.encapsulation = 0;
+       /* our modem defaults to a maximum size of 127 bytes */
+       c.mru = 127;
+       c.mtu = 127;
+       /* set the new configuration */
+       ioctl(fd, GSMIOC_SETCONF, &c);
+       /* get first gsmtty device node */
+       ioctl(fd, GSMIOC_GETFIRST, &first);
+       printf("first muxed line: /dev/gsmtty%i\n", first);
+
+       /* and wait for ever to keep the line discipline enabled */
+       daemon(0,0);
+       pause();
+
+11-03-08 - Eric Bénard - <eric@eukrea.com>
index 784bbeb..eded871 100644 (file)
@@ -262,10 +262,10 @@ Translation APIs for Mediated Devices
 The following APIs are provided for translating user pfn to host pfn in a VFIO
 driver::
 
-       extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
+       int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
                                  int npage, int prot, unsigned long *phys_pfn);
 
-       extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
+       int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
                                    int npage);
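+
+A hedged usage sketch from a mediated device driver; ``mdev_state->vdev`` and
+``iova`` are assumed to come from the driver's own context, and error handling
+is trimmed::
+
+        unsigned long user_pfn = iova >> PAGE_SHIFT;
+        unsigned long phys_pfn;
+        int ret;
+
+        /* Pin one guest page for DMA, readable and writable. */
+        ret = vfio_pin_pages(&mdev_state->vdev, &user_pfn, 1,
+                             IOMMU_READ | IOMMU_WRITE, &phys_pfn);
+        if (ret == 1) {
+                /* ... access the page via pfn_to_page(phys_pfn) ... */
+                vfio_unpin_pages(&mdev_state->vdev, &user_pfn, 1);
+        }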
 
 These functions call back into the back-end IOMMU module by using the pin_pages
index bef6d30..05e03d5 100644 (file)
@@ -1,63 +1,82 @@
 .. SPDX-License-Identifier: GPL-2.0
 
 ======================================
-Enhanced Read-Only File System - EROFS
+EROFS - Enhanced Read-Only File System
 ======================================
 
 Overview
 ========
 
-EROFS file-system stands for Enhanced Read-Only File System. Different
-from other read-only file systems, it aims to be designed for flexibility,
-scalability, but be kept simple and high performance.
+EROFS stands for Enhanced Read-Only File System.  It aims to be a generic
+read-only filesystem solution for various read-only use cases, rather than
+focusing only on saving storage space while ignoring the side effects on
+runtime performance.
 
-It is designed as a better filesystem solution for the following scenarios:
+It is designed for flexibility, feature extendability and user-payload
+friendliness.  Apart from that, it is still kept as a simple, random-access
+friendly, high-performance filesystem, avoiding the unneeded I/O amplification
+and memory-resident overhead seen in similar approaches.
+
+It is implemented to be a better choice for the following scenarios:
 
  - read-only storage media or
 
  - part of a fully trusted read-only solution, which means it needs to be
    immutable and bit-for-bit identical to the official golden image for
-   their releases due to security and other considerations and
+   their releases due to security or other considerations and
 
  - hope to minimize extra storage space with guaranteed end-to-end performance
    by using compact layout, transparent file compression and direct access,
    especially for those embedded devices with limited memory and high-density
-   hosts with numerous containers;
+   hosts with numerous containers.
 
 Here are the main features of EROFS:
 
  - Little endian on-disk design;
 
- - Currently 4KB block size (nobh) and therefore maximum 16TB address space;
-
- - Metadata & data could be mixed by design;
+ - 4KiB block size and 32-bit block addresses, therefore 16TiB address space
+   at most for now;
 
- - 2 inode versions for different requirements:
+ - Two inode layouts for different requirements:
 
-   =====================  ============  =====================================
+   =====================  ============  ======================================
                           compact (v1)  extended (v2)
-   =====================  ============  =====================================
+   =====================  ============  ======================================
    Inode metadata size    32 bytes      64 bytes
-   Max file size          4 GB          16 EB (also limited by max. vol size)
+   Max file size          4 GiB         16 EiB (also limited by max. vol size)
    Max uids/gids          65536         4294967296
    Per-inode timestamp    no            yes (64 + 32-bit timestamp)
    Max hardlinks          65536         4294967296
-   Metadata reserved      4 bytes       14 bytes
-   =====================  ============  =====================================
+   Metadata reserved      8 bytes       18 bytes
+   =====================  ============  ======================================
+
+ - Metadata and data could be mixed as an option;
 
  - Support extended attributes (xattrs) as an option;
 
- - Support xattr inline and tail-end data inline for all files;
+ - Support inline tail-packed data and inline xattrs, as an alternative to
+   byte-addressed unaligned metadata or smaller block sizes;
 
  - Support POSIX.1e ACLs by using xattrs;
 
  - Support transparent data compression as an option:
-   LZ4 algorithm with the fixed-sized output compression for high performance;
+   LZ4 and MicroLZMA algorithms can be used on a per-file basis.  In addition,
+   in-place decompression is supported to avoid bounce buffers for compressed
+   data and page cache thrashing.
+
+ - Support direct I/O on uncompressed files to avoid double caching for loop
+   devices;
 
- - Multiple device support for multi-layer container images.
+ - Support FSDAX on uncompressed images for secure containers and ramdisks in
+   order to get rid of unnecessary page cache.
+
+ - Support multiple devices for multi blob container images;
+
+ - Support file-based on-demand loading with the Fscache infrastructure.
 
 The following git tree provides the file system user-space tools under
-development (ex, formatting tool mkfs.erofs):
+development, such as a formatting tool (mkfs.erofs), an on-disk consistency &
+compatibility checking tool (fsck.erofs), and a debugging tool (dump.erofs):
 
 - git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git
 
@@ -91,6 +110,7 @@ dax={always,never}     Use direct access (no page cache).  See
                        Documentation/filesystems/dax.rst.
 dax                    A legacy option which is an alias for ``dax=always``.
 device=%s              Specify a path to an extra device to be used together.
+fsid=%s                Specify a filesystem image ID for Fscache back-end.
 ===================    =========================================================
 
 Sysfs Entries
 Note that apart from the offset of the first filename, nameoff0 also indicates
 the total number of directory entries in this block, since there is no need to
 introduce another on-disk field for that.
 
-Chunk-based file
-----------------
+Chunk-based files
+-----------------
 In order to support chunk-based data deduplication, a new inode data layout has
 been supported since Linux v5.15: Files are split in equal-sized data chunks
 with ``extents`` area of the inode metadata indicating how to get the chunk
diff --git a/Documentation/filesystems/nfs/client-identifier.rst b/Documentation/filesystems/nfs/client-identifier.rst
new file mode 100644 (file)
index 0000000..5147e15
--- /dev/null
@@ -0,0 +1,216 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+NFSv4 client identifier
+=======================
+
+This document explains how the NFSv4 protocol identifies client
+instances in order to maintain file open and lock state during
+system restarts. A special identifier and principal are maintained
+on each client. These can be set by administrators, scripts
+provided by site administrators, or tools provided by Linux
+distributors.
+
+There are risks if a client's NFSv4 identifier and its principal
+are not chosen carefully.
+
+
+Introduction
+------------
+
+The NFSv4 protocol uses "lease-based file locking". Leases help
+NFSv4 servers provide file lock guarantees and manage their
+resources.
+
+Simply put, an NFSv4 server creates a lease for each NFSv4 client.
+The server collects each client's file open and lock state under
+the lease for that client.
+
+The client is responsible for periodically renewing its leases.
+While a lease remains valid, the server holding that lease
+guarantees the file locks the client has created remain in place.
+
+If a client stops renewing its lease (for example, if it crashes),
+the NFSv4 protocol allows the server to remove the client's open
+and lock state after a certain period of time. When a client
+restarts, it indicates to servers that open and lock state
+associated with its previous leases is no longer valid and can be
+destroyed immediately.
+
+In addition, each NFSv4 server manages a persistent list of client
+leases. When the server restarts and clients attempt to recover
+their state, the server uses this list to distinguish amongst
+clients that held state before the server restarted and clients
+sending fresh OPEN and LOCK requests. This enables file locks to
+persist safely across server restarts.
+
+NFSv4 client identifiers
+------------------------
+
+Each NFSv4 client presents an identifier to NFSv4 servers so that
+they can associate the client with its lease. Each client's
+identifier consists of two elements:
+
+  - co_ownerid: An arbitrary but fixed string.
+
+  - boot verifier: A 64-bit incarnation verifier that enables a
+    server to distinguish successive boot epochs of the same client.
+
+The NFSv4.0 specification refers to these two items as an
+"nfs_client_id4". The NFSv4.1 specification refers to these two
+items as a "client_owner4".
+
+NFSv4 servers tie this identifier to the principal and security
+flavor that the client used when presenting it. Servers use this
+principal to authorize subsequent lease modification operations
+sent by the client. Effectively this principal is a third element of
+the identifier.
+
+As part of the identity presented to servers, a good
+"co_ownerid" string has several important properties:
+
+  - The "co_ownerid" string identifies the client during reboot
+    recovery, therefore the string is persistent across client
+    reboots.
+  - The "co_ownerid" string helps servers distinguish the client
+    from others, therefore the string is globally unique. Note
+    that there is no central authority that assigns "co_ownerid"
+    strings.
+  - Because it often appears on the network in the clear, the
+    "co_ownerid" string does not reveal private information about
+    the client itself.
+  - The content of the "co_ownerid" string is set and unchanging
+    before the client attempts NFSv4 mounts after a restart.
+  - The NFSv4 protocol places a 1024-byte limit on the size of the
+    "co_ownerid" string.
+
+Protecting NFSv4 lease state
+----------------------------
+
+NFSv4 servers utilize the "client_owner4" as described above to
+assign a unique lease to each client. Under this scheme, there are
+circumstances where clients can interfere with each other. This is
+referred to as "lease stealing".
+
+If distinct clients present the same "co_ownerid" string and use
+the same principal (for example, AUTH_SYS and UID 0), a server is
+unable to tell that the clients are not the same. Each distinct
+client presents a different boot verifier, so it appears to the
+server as if there is one client that is rebooting frequently.
+Neither client can maintain open or lock state in this scenario.
+
+If distinct clients present the same "co_ownerid" string and use
+distinct principals, the server is likely to allow the first client
+to operate normally but reject subsequent clients with the same
+"co_ownerid" string.
+
+If a client's "co_ownerid" string or principal are not stable,
+state recovery after a server or client reboot is not guaranteed.
+If a client unexpectedly restarts but presents a different
+"co_ownerid" string or principal to the server, the server orphans
+the client's previous open and lock state. This blocks access to
+locked files until the server removes the orphaned state.
+
+If the server restarts and a client presents a changed "co_ownerid"
+string or principal to the server, the server will not allow the
+client to reclaim its open and lock state, and may give those locks
+to other clients in the meantime. This is referred to as "lock
+stealing".
+
+Lease stealing and lock stealing increase the potential for denial
+of service and in rare cases even data corruption.
+
+Selecting an appropriate client identifier
+------------------------------------------
+
+By default, the Linux NFSv4 client implementation constructs its
+"co_ownerid" string starting with the words "Linux NFS" followed by
+the client's UTS node name (the same node name, incidentally, that
+is used as the "machine name" in an AUTH_SYS credential). In small
+deployments, this construction is usually adequate. Often, however,
+the node name by itself is not adequately unique, and can change
+unexpectedly. Problematic situations include:
+
+  - NFS-root (diskless) clients, where the local DHCP server (or
+    equivalent) does not provide a unique host name.
+
+  - "Containers" within a single Linux host.  If each container has
+    a separate network namespace, but does not use the UTS namespace
+    to provide a unique host name, then there can be multiple NFS
+    client instances with the same host name.
+
+  - Clients across multiple administrative domains that access a
+    common NFS server. If hostnames are not assigned centrally
+    then uniqueness cannot be guaranteed unless a domain name is
+    included in the hostname.
+
+Linux provides two mechanisms to add uniqueness to its "co_ownerid"
+string:
+
+    nfs.nfs4_unique_id
+      This module parameter can set an arbitrary uniquifier string
+      via the kernel command line, or when the "nfs" module is
+      loaded.
+
+    /sys/fs/nfs/client/net/identifier
+      This virtual file, available since Linux 5.3, is local to the
+      network namespace in which it is accessed and so can provide
+      distinction between network namespaces (containers) when the
+      hostname remains uniform.
+
+Note that this file is empty on namespace creation. If the
+container system has access to some sort of per-container identity
+then that uniquifier can be used. For example, a uniquifier might
+be formed at boot using the container's internal identifier:
+
+    sha256sum /etc/machine-id | awk '{print $1}' \\
+        > /sys/fs/nfs/client/net/identifier
+
+Security considerations
+-----------------------
+
+The use of cryptographic security for lease management operations
+is strongly encouraged.
+
+If NFS with Kerberos is not configured, a Linux NFSv4 client uses
+AUTH_SYS and UID 0 as the principal part of its client identity.
+This configuration is not only insecure, it increases the risk of
+lease and lock stealing. However, it might be the only choice for
+client configurations that have no local persistent storage.
+"co_ownerid" string uniqueness and persistence is critical in this
+case.
+
+When a Kerberos keytab is present on a Linux NFS client, the client
+attempts to use one of the principals in that keytab when
+identifying itself to servers. The "sec=" mount option does not
+control this behavior. Alternately, a single-user client with a
+Kerberos principal can use that principal in place of the client's
+host principal.
+
+Using Kerberos for this purpose enables the client and server to
+use the same lease for operations covered by all "sec=" settings.
+Additionally, the Linux NFS client uses the RPCSEC_GSS security
+flavor with Kerberos and the integrity QOS to prevent in-transit
+modification of lease modification requests.
+
+Additional notes
+----------------
+
+The Linux NFSv4 client establishes a single lease on each NFSv4
+server it accesses. All NFSv4 mounts of a particular server from a
+Linux NFSv4 client then share that lease.
+
+Once a client establishes open and lock state, the NFSv4 protocol
+enables lease state to transition to other servers, following data
+that has been migrated. This hides data migration completely from
+running applications. The Linux NFSv4 client facilitates state
+migration by presenting the same "client_owner4" to all servers it
+encounters.
+
+See Also
+--------
+
+  - nfs(5)
+  - kerberos(7)
+  - RFC 7530 for the NFSv4.0 specification
+  - RFC 8881 for the NFSv4.1 specification
index 288d8dd..8536134 100644 (file)
@@ -6,6 +6,8 @@ NFS
 .. toctree::
    :maxdepth: 1
 
+   client-identifier
+   exporting
    pnfs
    rpc-cache
    rpc-server-gss
index 6b62425..dbb0302 100644 (file)
@@ -389,6 +389,31 @@ descriptors once the device is released.
 See Documentation/firmware-guide/acpi/gpio-properties.rst for more information
 about the _DSD binding related to GPIOs.
 
+RS-485 support
+==============
+
+ACPI _DSD (Device Specific Data) can be used to describe the RS-485 capability
+of a UART.
+
+For example::
+
+       Device (DEV)
+       {
+               ...
+
+               // ACPI 5.1 _DSD used for RS-485 capabilities
+               Name (_DSD, Package ()
+               {
+                       ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+                       Package ()
+                       {
+                               Package () {"rs485-rts-active-low", Zero},
+                               Package () {"rs485-rx-active-high", Zero},
+                               Package () {"rs485-rx-during-tx", Zero},
+                       }
+               })
+               ...
+       }
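+
+On the Linux side, a UART driver can pick these properties up through the
+generic device property API. The following is only a hypothetical sketch
+(``dev`` stands for the UART's ``struct device``; a real driver would normally
+let the serial core helper ``uart_get_rs485_mode()`` do this work)::
+
+       #include <linux/property.h>
+       #include <linux/serial.h>
+
+       struct serial_rs485 rs485 = { };
+
+       /* "rs485-rts-active-low" selects the RTS polarity used while sending */
+       if (device_property_read_bool(dev, "rs485-rts-active-low"))
+               rs485.flags &= ~SER_RS485_RTS_ON_SEND;
+       else
+               rs485.flags |= SER_RS485_RTS_ON_SEND;
+
+       /* "rs485-rx-during-tx" keeps the receiver enabled while transmitting */
+       if (device_property_read_bool(dev, "rs485-rx-during-tx"))
+               rs485.flags |= SER_RS485_RX_DURING_TX;
+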
 MFD devices
 ===========
 
index ef9eec7..15b6709 100644 (file)
 A developer only needs to provide a sub feature driver with a matching feature id.
 The FME Partial Reconfiguration Sub Feature driver (see drivers/fpga/dfl-fme-pr.c)
 can be used as a reference.
 
+Please refer to the link below for the existing feature id table and for
+guidance on applying for new feature ids:
+https://github.com/OPAE/dfl-feature-id
+
+
 Location of DFLs on a PCI Device
 ================================
 The original method for finding a DFL on a PCI device assumed the start of the
diff --git a/Documentation/hte/hte.rst b/Documentation/hte/hte.rst
new file mode 100644 (file)
index 0000000..153f323
--- /dev/null
@@ -0,0 +1,79 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+============================================
+The Linux Hardware Timestamping Engine (HTE)
+============================================
+
+:Author: Dipen Patel
+
+Introduction
+------------
+
+Certain devices have built-in hardware timestamping engines which can
+monitor sets of system signals, lines, buses etc. in real time for state
+changes; upon detecting a change they automatically store the timestamp at
+the moment of occurrence. Such functionality may achieve better timestamping
+accuracy than software counterparts such as ktime and friends.
+
+This document describes the API that can be used by hardware timestamping
+engine provider and consumer drivers that want to use the hardware timestamping
+engine (HTE) framework. Both consumers and providers must include
+``#include <linux/hte.h>``.
+
+The HTE framework APIs for the providers
+----------------------------------------
+
+.. kernel-doc:: drivers/hte/hte.c
+   :functions: devm_hte_register_chip hte_push_ts_ns
+
+The HTE framework APIs for the consumers
+----------------------------------------
+
+.. kernel-doc:: drivers/hte/hte.c
+   :functions: hte_init_line_attr hte_ts_get hte_ts_put devm_hte_request_ts_ns hte_request_ts_ns hte_enable_ts hte_disable_ts of_hte_req_count hte_get_clk_src_info
+
+The HTE framework public structures
+-----------------------------------
+.. kernel-doc:: include/linux/hte.h
+
+More on the HTE timestamp data
+------------------------------
+``struct hte_ts_data`` is used to pass timestamp details between the consumers
+and the providers. It expresses the timestamp data as a u64 value in
+nanoseconds. A typical timestamp data life cycle for a GPIO line is as
+follows::
+
+ - The provider monitors the GPIO line for changes.
+ - It detects a state change on the GPIO line.
+ - It converts the timestamp to nanoseconds.
+ - It stores the GPIO raw level in the raw_level variable if the provider has
+   that hardware capability.
+ - It pushes this hte_ts_data object to the HTE subsystem.
+ - The HTE subsystem increments the seq counter and invokes the consumer
+   provided callback. Based on the callback return value, the HTE core invokes
+   a secondary callback in the thread context.
+
+HTE subsystem debugfs attributes
+--------------------------------
+HTE subsystem creates debugfs attributes at ``/sys/kernel/debug/hte/``.
+It also creates line/signal-related debugfs attributes at
+``/sys/kernel/debug/hte/<provider>/<label or line id>/``. Note that these
+attributes are read-only.
+
+`ts_requested`
+               The total number of entities requested from the given provider,
+               where an entity is specified by the provider and could represent
+               lines, GPIOs, chip signals, buses etc.
+               The attribute will be available at
+               ``/sys/kernel/debug/hte/<provider>/``.
+
+`total_ts`
+               The total number of entities supported by the provider.
+               The attribute will be available at
+               ``/sys/kernel/debug/hte/<provider>/``.
+
+`dropped_timestamps`
+               The number of dropped timestamps for a given line.
+               The attribute will be available at
+               ``/sys/kernel/debug/hte/<provider>/<label or line id>/``.
diff --git a/Documentation/hte/index.rst b/Documentation/hte/index.rst
new file mode 100644 (file)
index 0000000..9f43301
--- /dev/null
@@ -0,0 +1,22 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================================
+The Linux Hardware Timestamping Engine (HTE)
+============================================
+
+The HTE Subsystem
+=================
+
+.. toctree::
+   :maxdepth: 1
+
+   hte
+
+HTE Tegra Provider
+==================
+
+.. toctree::
+   :maxdepth: 1
+
+   tegra194-hte
+
diff --git a/Documentation/hte/tegra194-hte.rst b/Documentation/hte/tegra194-hte.rst
new file mode 100644 (file)
index 0000000..41983e0
--- /dev/null
@@ -0,0 +1,49 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+HTE Kernel provider driver
+==========================
+
+Description
+-----------
+The Nvidia tegra194 HTE provider driver implements two GTE
+(Generic Timestamping Engine) instances: 1) GPIO GTE and 2) LIC
+(Legacy Interrupt Controller) IRQ GTE. Both GTE instances get their
+timestamps from the system counter TSC, which runs at a 31.25 MHz clock rate;
+the driver converts the clock ticks to nanoseconds before storing the result
+as the timestamp value.
+
+GPIO GTE
+--------
+
+This GTE instance timestamps GPIOs in real time. For that to happen, the GPIO
+needs to be configured as an input. The always-on (AON) GPIO controller
+instance supports timestamping GPIOs in real time and has 39 GPIO lines. The
+GPIO GTE and the AON GPIO controller are tightly coupled, since very specific
+bits have to be set in the GPIO config register before the GPIO GTE can be
+used; for that, GPIOLIB adds the two optional APIs below. The GPIO GTE code
+supports both kernel and userspace consumers. Kernel space consumers can talk
+to the HTE subsystem directly, while userspace timestamp requests go through
+the GPIOLIB CDEV framework to the HTE subsystem.
+
+.. kernel-doc:: drivers/gpio/gpiolib.c
+   :functions: gpiod_enable_hw_timestamp_ns gpiod_disable_hw_timestamp_ns
+
+For userspace consumers, the GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE flag must be
+specified during the IOCTL calls. Refer to ``tools/gpio/gpio-event-mon.c``,
+which returns the timestamp in nanoseconds.
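+
+A minimal userspace sketch, loosely following ``gpio-event-mon``, might request
+edge events with the HTE event clock as follows (the chip path and line offset
+are placeholders, and error handling is omitted)::
+
+       #include <stdio.h>
+       #include <string.h>
+       #include <fcntl.h>
+       #include <unistd.h>
+       #include <sys/ioctl.h>
+       #include <linux/gpio.h>
+
+       struct gpio_v2_line_request req;
+       struct gpio_v2_line_event event;
+       int chip_fd;
+
+       chip_fd = open("/dev/gpiochip0", O_RDONLY);      /* placeholder chip */
+
+       memset(&req, 0, sizeof(req));
+       req.offsets[0] = 0;                              /* placeholder line */
+       req.num_lines = 1;
+       req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
+                          GPIO_V2_LINE_FLAG_EDGE_RISING |
+                          GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
+       strcpy(req.consumer, "hte-monitor");
+
+       ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
+
+       /* each event read from req.fd carries the hardware timestamp */
+       read(req.fd, &event, sizeof(event));
+       printf("timestamp: %llu ns\n", (unsigned long long)event.timestamp_ns);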
+
+LIC (Legacy Interrupt Controller) IRQ GTE
+-----------------------------------------
+
+This GTE instance timestamps LIC IRQ lines in real time. There are 352 IRQ
+lines which this instance can add timestamps to in real time. The hte
+devicetree binding described at ``Documentation/devicetree/bindings/hte/``
+provides an example of how a consumer can request an IRQ line. Since it is a
+one-to-one mapping with IRQ GTE provider, consumers can simply specify the IRQ
+number that they are interested in. There is no userspace consumer support for
+this GTE instance in the HTE framework.
+
+The provider source code of both IRQ and GPIO GTE instances is located at
+``drivers/hte/hte-tegra194.c``. The test driver
+``drivers/hte/hte-tegra194-test.c`` demonstrates HTE API usage for both IRQ
+and GPIO GTE.
index 978cc82..e3b126c 100644 (file)
@@ -46,7 +46,7 @@ driver model device node, and its I2C address.
        },
 
        .id_table       = foo_idtable,
-       .probe          = foo_probe,
+       .probe_new      = foo_probe,
        .remove         = foo_remove,
        /* if device autodetection is needed: */
        .class          = I2C_CLASS_SOMETHING,
@@ -155,8 +155,7 @@ those devices, and a remove() method to unbind.
 
 ::
 
-       static int foo_probe(struct i2c_client *client,
-                            const struct i2c_device_id *id);
+       static int foo_probe(struct i2c_client *client);
        static int foo_remove(struct i2c_client *client);
 
 Remember that the i2c_driver does not create those client handles.  The
@@ -165,8 +164,12 @@ handle may be used during foo_probe().  If foo_probe() reports success
 foo_remove() returns.  That binding model is used by most Linux drivers.
 
 The probe function is called when an entry in the id_table name field
-matches the device's name. It is passed the entry that was matched so
-the driver knows which one in the table matched.
+matches the device's name. If the probe function needs that entry, it
+can retrieve it using
+
+::
+
+       const struct i2c_device_id *id = i2c_match_id(foo_idtable, client);
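+
+Putting the pieces together, a new-style probe routine might look like the
+following sketch (``foo_probe`` and ``foo_idtable`` are the placeholder names
+used throughout this document)::
+
+       static int foo_probe(struct i2c_client *client)
+       {
+               const struct i2c_device_id *id = i2c_match_id(foo_idtable, client);
+
+               /*
+                * id may be NULL, e.g. when the device was instantiated from
+                * devicetree or ACPI rather than from the id_table.
+                */
+               if (id)
+                       dev_info(&client->dev, "matched %s\n", id->name);
+
+               return 0;
+       }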
 
 
 Device Creation
similarity index 64%
rename from Documentation/COPYING-logo
rename to Documentation/images/COPYING-logo
index b21c7cf..6a441d4 100644 (file)
@@ -11,3 +11,11 @@ Larry's web-page:
 
        https://www.isc.tamu.edu/~lewing/linux/
 
+The SVG version was re-illustrated in vector by Garrett LeSage and
+refined and cleaned up by IFo Hancroft. It is also freely usable
+as long as you acknowledge Larry, Garrett and IFo as above.
+
+There are also black-and-white and inverted vector versions at
+Garrett's repository:
+
+       https://github.com/garrett/Tux
diff --git a/Documentation/images/logo.svg b/Documentation/images/logo.svg
new file mode 100644 (file)
index 0000000..58a6881
--- /dev/null
@@ -0,0 +1,2040 @@
+  [The remainder of this new file is the SVG vector source for the Tux logo
+   (roughly 2040 lines of XML/Inkscape markup; see the credits in
+   Documentation/images/COPYING-logo above) and is not reproduced here.]
+       fy="220.53755"
+       fx="336.22372"
+       cy="220.53755"
+       cx="336.22372"
+       gradientTransform="matrix(-0.69844216,0,0,0.76335815,166.3057,50.219935)"
+       gradientUnits="userSpaceOnUse"
+       id="radialGradient18834"
+       xlink:href="#linearGradient4417-0"
+       inkscape:collect="always" />
+    <radialGradient
+       r="27.391165"
+       fy="236.36569"
+       fx="312.14502"
+       cy="236.36569"
+       cx="312.14502"
+       gradientTransform="matrix(1,0,0,0.76335815,-150.00015,-8.142243)"
+       gradientUnits="userSpaceOnUse"
+       id="radialGradient18836"
+       xlink:href="#linearGradient4417-8-5"
+       inkscape:collect="always" />
+    <radialGradient
+       r="10.84542"
+       fy="225.13487"
+       fx="275.55389"
+       cy="225.13487"
+       cx="275.55389"
+       gradientTransform="matrix(1,0,0,1.0692348,-150.00016,-73.222483)"
+       gradientUnits="userSpaceOnUse"
+       id="radialGradient18838"
+       xlink:href="#linearGradient4417-8-5"
+       inkscape:collect="always" />
+    <linearGradient
+       y2="351.48654"
+       x2="341.98224"
+       y1="323.90076"
+       x1="338.28552"
+       gradientTransform="translate(-310)"
+       gradientUnits="userSpaceOnUse"
+       id="linearGradient18840"
+       xlink:href="#linearGradient15103-0"
+       inkscape:collect="always" />
+    <linearGradient
+       gradientTransform="translate(-250.00016,-58.362183)"
+       y2="293.58548"
+       x2="490.12241"
+       y1="371.54401"
+       x1="442.03912"
+       gradientUnits="userSpaceOnUse"
+       id="linearGradient18842"
+       xlink:href="#linearGradient14392-8"
+       inkscape:collect="always" />
+    <linearGradient
+       y2="302.31699"
+       x2="353.74951"
+       y1="289.58905"
+       x1="355.16373"
+       gradientTransform="translate(-150.00016,-60.362183)"
+       gradientUnits="userSpaceOnUse"
+       id="linearGradient18844"
+       xlink:href="#linearGradient17009"
+       inkscape:collect="always" />
+    <radialGradient
+       r="16.845654"
+       fy="303.41541"
+       fx="363.33957"
+       cy="303.41541"
+       cx="363.33957"
+       gradientTransform="matrix(1.3082075,0.35053296,-0.36795399,1.3732236,-150.50951,-298.71133)"
+       gradientUnits="userSpaceOnUse"
+       id="radialGradient18846"
+       xlink:href="#linearGradient14518-6"
+       inkscape:collect="always" />
+    <radialGradient
+       r="16.845654"
+       fy="303.41541"
+       fx="363.33957"
+       cy="303.41541"
+       cx="363.33957"
+       gradientTransform="matrix(1.3082075,0.35053296,-0.36795399,1.3732236,-310.50935,-240.34915)"
+       gradientUnits="userSpaceOnUse"
+       id="radialGradient18848"
+       xlink:href="#linearGradient14518-2-2"
+       inkscape:collect="always" />
+    <radialGradient
+       r="20.537666"
+       fy="246.85757"
+       fx="382.23483"
+       cy="246.85757"
+       cx="382.23483"
+       gradientTransform="matrix(0.36025223,0.15680447,-0.07246786,0.16649214,260.61683,181.93825)"
+       gradientUnits="userSpaceOnUse"
+       id="radialGradient18850"
+       xlink:href="#linearGradient14830-4"
+       inkscape:collect="always" />
+    <linearGradient
+       y2="279.23718"
+       x2="361.5"
+       y1="279.36218"
+       x1="358.5"
+       gradientUnits="userSpaceOnUse"
+       id="linearGradient18852"
+       xlink:href="#linearGradient14830-8-1"
+       inkscape:collect="always" />
+    <linearGradient
+       gradientTransform="translate(-80.00015,-58.362183)"
+       y2="381.62027"
+       x2="170.86368"
+       y1="301.54044"
+       x1="123.13397"
+       gradientUnits="userSpaceOnUse"
+       id="linearGradient18854"
+       xlink:href="#linearGradient14132-6"
+       inkscape:collect="always" />
+    <linearGradient
+       y2="352.27536"
+       x2="186.5968"
+       y1="323.99109"
+       x1="171.57079"
+       gradientTransform="translate(-80.53048,-60.12995)"
+       gradientUnits="userSpaceOnUse"
+       id="linearGradient18856"
+       xlink:href="#linearGradient14168-5"
+       inkscape:collect="always" />
+    <clipPath
+       id="clipPath391"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:label="Clip - Left Foot Brighter Highlights"
+         transform="matrix(1,0,0,1.0182804,0,-4.0313444)"
+         style="fill:url(#linearGradient395);fill-opacity:1;stroke:none"
+         d="m 137.57703,281.0191 c 1.59929,-0.66295 3.3982,-0.78361 5.10074,-0.46963 1.70253,0.31398 3.31141,1.04948 4.74342,2.02239 2.86402,1.94583 4.98821,4.77774 7.02263,7.57952 4.67189,6.43406 9.16868,13.00227 13.24488,19.8293 3.30635,5.53766 6.34352,11.25685 10.16415,16.45304 2.49398,3.3919 5.3066,6.53947 7.813,9.92221 2.50639,3.38273 4.72794,7.05586 5.83931,11.11662 1.44411,5.27653 0.88463,11.09291 -1.62666,15.95302 -1.76663,3.41896 -4.47646,6.35228 -7.77242,8.33898 -3.29595,1.9867 -7.17064,3.01444 -11.01635,2.87021 -6.11413,-0.2293 -11.69944,-3.28515 -17.38362,-5.54906 -11.58097,-4.6125 -24.15978,-6.0594 -36.09666,-9.65174 -3.66859,-1.10404 -7.27582,-2.4107 -10.96988,-3.42629 -1.64125,-0.45122 -3.30866,-0.8482 -4.85875,-1.55144 -1.55008,-0.70325 -2.999548,-1.7491 -3.86171,-3.21675 -0.666391,-1.13439 -0.948386,-2.47002 -0.930187,-3.78554 0.0182,-1.31552 0.325889,-2.61453 0.773815,-3.85158 0.895851,-2.47409 2.343262,-4.71374 3.320162,-7.15696 1.59511,-3.98935 1.88169,-8.38839 1.66657,-12.67942 -0.21511,-4.29103 -0.91078,-8.54478 -1.20454,-12.83115 -0.13118,-1.91406 -0.18066,-3.85256 0.18479,-5.73598 0.36545,-1.88343 1.17577,-3.72459 2.55771,-5.05541 1.27406,-1.22693 2.96492,-1.95531 4.69643,-2.31651 1.73151,-0.3612 3.51533,-0.37747 5.28367,-0.33762 1.76833,0.0399 3.54067,0.13425 5.30351,-0.0106 1.76284,-0.14488 3.53347,-0.54055 5.06911,-1.41828 1.45996,-0.83447 2.65433,-2.0745 3.64374,-3.43424 0.9894,-1.35974 1.78909,-2.84573 2.60891,-4.31396 0.81983,-1.46823 1.66834,-2.93151 2.74157,-4.22611 1.07324,-1.2946 2.38923,-2.42304 3.94266,-3.06698"
+         id="path393"
+         inkscape:connector-curvature="0"
+         sodipodi:nodetypes="ssssssssssssssss" />
+    </clipPath>
+    <linearGradient
+       y2="381.62027"
+       x2="170.86368"
+       y1="301.54044"
+       x1="123.13397"
+       gradientUnits="userSpaceOnUse"
+       id="linearGradient395"
+       xlink:href="#linearGradient14132-6"
+       inkscape:collect="always" />
+    <clipPath
+       id="clipPath401"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         sodipodi:nodetypes="ssssssssssssssss"
+         inkscape:connector-curvature="0"
+         id="path403"
+         d="m 137.57703,281.0191 c 1.59929,-0.66295 3.3982,-0.78361 5.10074,-0.46963 1.70253,0.31398 3.31141,1.04948 4.74342,2.02239 2.86402,1.94583 4.98821,4.77774 7.02263,7.57952 4.67189,6.43406 9.16868,13.00227 13.24488,19.8293 3.30635,5.53766 6.34352,11.25685 10.16415,16.45304 2.49398,3.3919 5.3066,6.53947 7.813,9.92221 2.50639,3.38273 4.72794,7.05586 5.83931,11.11662 1.44411,5.27653 0.88463,11.09291 -1.62666,15.95302 -1.76663,3.41896 -4.47646,6.35228 -7.77242,8.33898 -3.29595,1.9867 -7.17064,3.01444 -11.01635,2.87021 -6.11413,-0.2293 -11.69944,-3.28515 -17.38362,-5.54906 -11.58097,-4.6125 -24.15978,-6.0594 -36.09666,-9.65174 -3.66859,-1.10404 -7.27582,-2.4107 -10.96988,-3.42629 -1.64125,-0.45122 -3.30866,-0.8482 -4.85875,-1.55144 -1.55008,-0.70325 -2.999548,-1.7491 -3.86171,-3.21675 -0.666391,-1.13439 -0.948386,-2.47002 -0.930187,-3.78554 0.0182,-1.31552 0.325889,-2.61453 0.773815,-3.85158 0.895851,-2.47409 2.343262,-4.71374 3.320162,-7.15696 1.59511,-3.98935 1.88169,-8.38839 1.66657,-12.67942 -0.21511,-4.29103 -0.91078,-8.54478 -1.20454,-12.83115 -0.13118,-1.91406 -0.18066,-3.85256 0.18479,-5.73598 0.36545,-1.88343 1.17577,-3.72459 2.55771,-5.05541 1.27406,-1.22693 2.96492,-1.95531 4.69643,-2.31651 1.73151,-0.3612 3.51533,-0.37747 5.28367,-0.33762 1.76833,0.0399 3.54067,0.13425 5.30351,-0.0106 1.76284,-0.14488 3.53347,-0.54055 5.06911,-1.41828 1.45996,-0.83447 2.65433,-2.0745 3.64374,-3.43424 0.9894,-1.35974 1.78909,-2.84573 2.60891,-4.31396 0.81983,-1.46823 1.66834,-2.93151 2.74157,-4.22611 1.07324,-1.2946 2.38923,-2.42304 3.94266,-3.06698"
+         style="fill:url(#linearGradient405);fill-opacity:1;stroke:none"
+         transform="translate(-240.00015,-1)"
+         inkscape:label="Clip - Left Foot Highlights" />
+    </clipPath>
+    <linearGradient
+       y2="381.62027"
+       x2="170.86368"
+       y1="301.54044"
+       x1="123.13397"
+       gradientUnits="userSpaceOnUse"
+       id="linearGradient405"
+       xlink:href="#linearGradient14132-6"
+       inkscape:collect="always" />
+    <clipPath
+       id="clipPath419"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:label="Clip - Right Foot Highlights"
+         clip-path="none"
+         sodipodi:nodetypes="aaaaaaaaaaaaaaaaaaaaaaacscaascca"
+         inkscape:connector-curvature="0"
+         id="path421"
+         d="m 513.18983,336.61385 c -2.6238,3.11482 -6.268,5.17039 -9.89648,7.01985 -6.1886,3.15437 -12.60169,5.92177 -18.41964,9.71654 -3.89802,2.54249 -7.4959,5.52671 -10.86016,8.74238 -2.87719,2.75012 -5.60582,5.68745 -8.83247,8.01771 -3.25567,2.35122 -7.01915,4.05426 -10.99061,4.6502 -4.83026,0.72481 -9.82134,-0.21289 -14.29898,-2.16416 -3.13754,-1.36728 -6.15569,-3.3229 -7.96301,-6.22931 -1.81425,-2.91754 -2.22807,-6.48813 -2.23266,-9.92375 -0.008,-6.07666 1.11824,-12.09004 2.17848,-18.07349 0.88097,-4.97177 1.71949,-9.95483 2.26013,-14.97502 0.98337,-9.13118 0.9763,-18.35278 0.3199,-27.51327 -0.10993,-1.53416 -0.23754,-3.0832 -0.008,-4.60412 0.22922,-1.52092 0.85475,-3.0367 2.02069,-4.03986 1.07696,-0.9266 2.52093,-1.33598 3.93947,-1.4145 1.41854,-0.0785 2.83404,0.14655 4.23982,0.35197 3.31254,0.48405 6.65159,0.8649 9.88917,1.71656 2.04284,0.53738 4.03315,1.25925 6.0722,1.81081 3.40258,0.92039 6.96639,1.36144 10.46739,0.95192 3.76917,-0.44089 7.42987,-1.85678 11.22363,-1.76474 1.55658,0.0378 3.1015,0.33171 4.58649,0.79985 1.51539,0.47772 3.00914,1.16182 4.12281,2.29512 0.84639,0.8613 1.43579,1.94539 1.87872,3.06879 0.65982,1.67352 1.01492,3.457 1.16703,5.24945 0.13475,1.58788 0.11343,3.19441 0.41433,4.75933 0.49503,2.57458 1.84746,4.92305 3.52848,6.93494 1.68102,2.01189 3.68982,3.72048 5.69641,5.40783 1.99908,1.68103 4.0106,3.35469 6.16708,4.82839 1.0121,0.69165 2.05642,1.33949 3.01736,2.10062 0.96094,0.76113 1.84466,1.6468 2.44543,2.71535 0.81492,1.44944 1.06377,3.2077 0.53758,4.87655 -0.5262,1.66885 -1.48162,3.27659 -2.67059,4.68806 z"
+         style="display:inline;fill:url(#linearGradient423);fill-opacity:1;stroke:none" />
+    </clipPath>
+    <linearGradient
+       y2="293.58548"
+       x2="490.12241"
+       y1="371.54401"
+       x1="442.03912"
+       gradientTransform="translate(-250.00016,-58.362187)"
+       gradientUnits="userSpaceOnUse"
+       id="linearGradient423"
+       xlink:href="#linearGradient14392-8"
+       inkscape:collect="always" />
+    <clipPath
+       id="clipPath426"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:label="Clip - Right Foot Brighter Highlights"
+         clip-path="none"
+         sodipodi:nodetypes="aaaaaaaaaaaaaaaaaaaaaaacscaascca"
+         inkscape:connector-curvature="0"
+         id="path428"
+         d="m 509.36312,335.7449 c -2.29559,2.52764 -5.48394,4.19571 -8.65854,5.69652 -5.41448,2.55973 -11.02537,4.80544 -16.11557,7.88485 -3.41042,2.0632 -6.55825,4.48486 -9.50168,7.09433 -2.51729,2.23169 -4.9046,4.6153 -7.72764,6.50627 -2.84842,1.90799 -6.14114,3.28999 -9.61581,3.77358 -4.22606,0.58818 -8.59281,-0.17275 -12.51035,-1.75618 -2.74507,-1.10954 -5.38569,-2.6965 -6.96694,-5.05501 -1.5873,-2.36755 -1.94936,-5.26504 -1.95338,-8.053 -0.007,-4.93114 0.97837,-9.81092 1.90598,-14.66641 0.77077,-4.03453 1.5044,-8.07822 1.97742,-12.15205 0.86036,-7.40983 0.85417,-14.89305 0.27988,-22.32667 -0.0962,-1.24495 -0.20783,-2.50198 -0.007,-3.73619 0.20055,-1.23421 0.74783,-2.46424 1.76793,-3.27829 0.94224,-0.75193 2.20559,-1.08414 3.44669,-1.14785 1.24109,-0.0637 2.47953,0.11892 3.70947,0.28562 2.89818,0.3928 5.81955,0.70185 8.65215,1.39296 1.78731,0.43608 3.52865,1.02187 5.31264,1.46945 2.97696,0.74689 6.09498,1.10479 9.15805,0.77247 3.29769,-0.35777 6.50048,-1.50675 9.81968,-1.43206 1.36187,0.0307 2.71354,0.26918 4.01278,0.64907 1.32583,0.38766 2.63273,0.9428 3.6071,1.86246 0.74051,0.69893 1.25619,1.57866 1.64371,2.49028 0.57728,1.35804 0.88797,2.80532 1.02105,4.25987 0.11789,1.28854 0.0992,2.59222 0.3625,3.86213 0.43311,2.08924 1.61637,3.995 3.08711,5.62762 1.47074,1.63263 3.22827,3.01913 4.98386,4.38839 1.74902,1.36413 3.50892,2.72229 5.39565,3.91818 0.8855,0.56126 1.79919,1.08698 2.63992,1.70462 0.84074,0.61765 1.61392,1.33636 2.13954,2.20348 0.71298,1.1762 0.93071,2.60301 0.47034,3.95726 -0.46038,1.35425 -1.29629,2.65891 -2.33654,3.8043 z"
+         style="display:inline;fill:url(#linearGradient430);fill-opacity:1;stroke:none;stroke-width:0.84260321" />
+    </clipPath>
+    <linearGradient
+       y2="293.58548"
+       x2="490.12241"
+       y1="371.54401"
+       x1="442.03912"
+       gradientTransform="matrix(0.87491199,0,0,0.81148755,-158.36095,15.22676)"
+       gradientUnits="userSpaceOnUse"
+       id="linearGradient430"
+       xlink:href="#linearGradient14392-8"
+       inkscape:collect="always" />
+    <clipPath
+       id="clipPath433"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:label="Clip - Right Foot Brightest Highlights"
+         clip-path="none"
+         sodipodi:nodetypes="aaaaaaaaaaaaaaaaaaaaaaacscaascca"
+         inkscape:connector-curvature="0"
+         id="path435"
+         d="m 263.18967,278.25167 c -2.6238,3.11482 -6.268,5.17039 -9.89648,7.01985 -6.1886,3.15437 -12.60169,5.92177 -18.41964,9.71654 -3.89802,2.54249 -7.4959,5.52671 -10.86016,8.74238 -2.87719,2.75012 -5.60582,5.68745 -8.83247,8.01771 -3.25567,2.35122 -7.01915,4.05426 -10.99061,4.6502 -4.83026,0.72481 -9.82134,-0.21289 -14.29898,-2.16416 -3.13754,-1.36728 -6.15569,-3.3229 -7.96301,-6.22931 -1.81425,-2.91754 -2.22807,-6.48813 -2.23266,-9.92375 -0.008,-6.07666 1.11824,-12.09004 2.17848,-18.07349 0.88097,-4.97177 1.71949,-9.95483 2.26013,-14.97502 0.98337,-9.13118 0.9763,-18.35278 0.3199,-27.51327 -0.10993,-1.53416 -0.23754,-3.0832 -0.008,-4.60412 0.22922,-1.52092 0.85475,-3.0367 2.02069,-4.03986 1.07696,-0.9266 2.52093,-1.33598 3.93947,-1.4145 1.41854,-0.0785 2.83404,0.14655 4.23982,0.35197 3.31254,0.48405 6.65159,0.8649 9.88917,1.71656 2.04284,0.53738 4.03315,1.25925 6.0722,1.81081 3.40258,0.92039 6.96639,1.36144 10.46739,0.95192 3.76917,-0.44089 7.42987,-1.85678 11.22363,-1.76474 1.55658,0.0378 3.1015,0.33171 4.58649,0.79985 1.51539,0.47772 3.00914,1.16182 4.12281,2.29512 0.84639,0.8613 1.43579,1.94539 1.87872,3.06879 0.65982,1.67352 1.01492,3.457 1.16703,5.24945 0.13475,1.58788 0.11343,3.19441 0.41433,4.75933 0.49503,2.57458 1.84746,4.92305 3.52848,6.93494 1.68102,2.01189 3.68982,3.72048 5.69641,5.40783 1.99908,1.68103 4.0106,3.35469 6.16708,4.82839 1.0121,0.69165 2.05642,1.33949 3.01736,2.10062 0.96094,0.76113 1.84466,1.6468 2.44543,2.71535 0.81492,1.44944 1.06377,3.2077 0.53758,4.87655 -0.5262,1.66885 -1.48162,3.27659 -2.67059,4.68806 z"
+         style="display:inline;fill:url(#linearGradient437);fill-opacity:1;stroke:none" />
+    </clipPath>
+    <linearGradient
+       y2="293.58548"
+       x2="490.12241"
+       y1="371.54401"
+       x1="442.03912"
+       gradientTransform="translate(-500.00032,-116.72437)"
+       gradientUnits="userSpaceOnUse"
+       id="linearGradient437"
+       xlink:href="#linearGradient14392-8"
+       inkscape:collect="always" />
+    <clipPath
+       id="clipPath504"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         sodipodi:nodetypes="aaaaassssaaaasssscssssc"
+         inkscape:connector-curvature="0"
+         d="m 304.84727,225.44951 c 5.97679,4.89463 9.76903,12.28597 10.94319,20.00305 0.91574,6.01859 0.32054,12.19496 -1.0124,18.13223 -1.33294,5.93726 -3.39093,11.67615 -5.43351,17.40051 -0.81452,2.2827 -1.63269,4.5871 -1.95634,6.9933 -0.32365,2.40621 -0.1187,4.95426 1.02109,7.08777 1.3066,2.44578 3.74526,4.13021 6.36677,4.92292 2.58816,0.78263 5.38374,0.76618 8.00354,0.10153 2.61979,-0.66466 5.06582,-1.96341 7.19828,-3.64929 5.41763,-4.28306 8.68657,-10.94871 9.95201,-17.81211 1.26545,-6.86339 0.68401,-13.95038 -0.49258,-20.83014 -1.60443,-9.38136 -4.30394,-18.55105 -7.74003,-27.40773 -2.52746,-6.51466 -5.7653,-12.74244 -9.61753,-18.52016 -3.77934,-5.66839 -9.14163,-10.09303 -13.10336,-15.63502 -1.37643,-1.92547 -3.03189,-3.93159 -4.38419,-5.87845 -2.91575,-4.19771 -2.25544,-3.41451 -4.06424,-6.13155 -1.31235,-1.9713 -3.38449,-2.6487 -5.56491,-3.51096 -2.18041,-0.86226 -4.629,-1.11623 -6.88065,-0.47108 -2.96781,0.85034 -5.39233,3.23113 -6.68215,6.08208 -1.28982,2.85095 -1.51545,6.12313 -1.01363,9.2201 0.64739,3.99536 2.44215,7.70258 4.46569,11.18873 2.28537,3.93724 4.93283,7.72707 8.38442,10.65407 3.60205,3.05459 7.95771,5.06875 11.61053,8.0602"
+         id="path506"
+         style="display:inline;fill:#020204;fill-opacity:1;stroke:none;stroke-width:0.9910841"
+         inkscape:label="Clip - Right Arm Shadow" />
+    </clipPath>
+    <clipPath
+       id="clipPath508"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         style="display:inline;fill:#020204;fill-opacity:1;stroke:none"
+         d="m 240.47307,195.03592 c -7.07309,8.03686 -14.35222,15.81627 -18.34577,24.50506 -1.97625,4.41329 -2.91077,9.20725 -4.26498,13.84932 -1.5379,5.27176 -3.62609,10.3703 -5.97071,15.33612 -2.16496,4.58531 -4.54982,9.06291 -6.93891,13.53553 -1.7382,3.25409 -3.50514,6.58104 -4.10782,10.22071 -0.47628,2.87632 -0.1985,5.84423 0.53375,8.66626 0.73225,2.82202 1.90965,5.5106 3.23776,8.10601 5.66725,11.07504 14.17003,20.62168 24.24176,27.92472 4.57063,3.31418 9.46669,6.18109 14.60245,8.52595 2.78247,1.27041 5.71355,2.40436 8.77186,2.45744 1.52915,0.0265 3.0741,-0.22544 4.47434,-0.84055 1.40023,-0.6151 2.65068,-1.60373 3.48254,-2.88709 1.02278,-1.5779 1.36992,-3.53829 1.16461,-5.40743 -0.2053,-1.86914 -0.93484,-3.65294 -1.91324,-5.25873 -2.38997,-3.92251 -6.1652,-6.76055 -9.79642,-9.57343 -7.84055,-6.07358 -15.42465,-12.48039 -22.68212,-19.23996 -2.04912,-1.90854 -4.09841,-3.87759 -5.53019,-6.28412 -1.3943,-2.34352 -2.1476,-5.01376 -2.65783,-7.69253 -1.39972,-7.34873 -1.04092,-15.08286 1.45958,-22.13343 0.97822,-2.75826 2.27118,-5.39201 3.51815,-8.03965 2.16133,-4.58906 4.20725,-9.26564 7.04933,-13.46723 3.53798,-5.23037 8.26749,-9.66049 11.15147,-15.27803 2.43423,-4.74149 3.41994,-10.07236 4.36185,-15.31831 0.73693,-4.10434 2.15042,-8.12437 2.86923,-12.23193 -1.40611,2.66567 -5.93796,7.04283 -8.71069,10.5253 z"
+         id="path510"
+         inkscape:connector-curvature="0"
+         sodipodi:nodetypes="cczzzzzzzzzzzzszzzzzcssscc"
+         inkscape:label="Clip - Left Arm Shadow" />
+    </clipPath>
+    <clipPath
+       id="clipPath533"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         sodipodi:nodetypes="aaaasssaccaa"
+         inkscape:connector-curvature="0"
+         d="m 386.1875,285.32775 c -0.40516,-1.10369 -1.11845,-2.08156 -1.9907,-2.86987 -0.87226,-0.78832 -1.90049,-1.39229 -2.98278,-1.85155 -2.16459,-0.91852 -4.52053,-1.26149 -6.83152,-1.69556 -2.17919,-0.40931 -4.34179,-0.90631 -6.52782,-1.27734 -2.27136,-0.38551 -4.6179,-0.63213 -6.8653,-0.1253 -1.96583,0.44333 -3.7845,1.45879 -5.27172,2.81864 -1.48723,1.35984 -2.64911,3.0564 -3.48499,4.89007 -1.47218,3.22952 -1.93451,6.86503 -1.65394,10.40316 0.20881,2.63325 0.87532,5.34594 2.60877,7.33912 1.40065,1.61052 3.38733,2.61526 5.43398,3.22092 3.52502,1.04316 7.36663,0.98822 10.86038,-0.1553 5.76689,-1.93113 10.87568,-5.77387 14.33034,-10.77903 1.13861,-1.64963 2.11217,-3.44809 2.5532,-5.4034 0.33597,-1.48955 0.34831,-3.08112 -0.1779,-4.51456"
+         id="path535"
+         style="display:inline;fill:#020204;fill-opacity:1;stroke:none"
+         inkscape:label="Clip - Hand Lower Hightlight" />
+    </clipPath>
+    <clipPath
+       id="clipPath538"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:label="Clip - Hand Upper Highlight"
+         style="display:inline;fill:#020204;fill-opacity:1;stroke:none"
+         id="path540"
+         d="m 386.1875,285.32775 c -0.40516,-1.10369 -1.11845,-2.08156 -1.9907,-2.86987 -0.87226,-0.78832 -1.90049,-1.39229 -2.98278,-1.85155 -2.16459,-0.91852 -4.52053,-1.26149 -6.83152,-1.69556 -2.17919,-0.40931 -4.34179,-0.90631 -6.52782,-1.27734 -2.27136,-0.38551 -4.6179,-0.63213 -6.8653,-0.1253 -1.96583,0.44333 -3.7845,1.45879 -5.27172,2.81864 -1.48723,1.35984 -2.64911,3.0564 -3.48499,4.89007 -1.47218,3.22952 -1.93451,6.86503 -1.65394,10.40316 0.20881,2.63325 0.87532,5.34594 2.60877,7.33912 1.40065,1.61052 3.38733,2.61526 5.43398,3.22092 3.52502,1.04316 7.36663,0.98822 10.86038,-0.1553 5.76689,-1.93113 10.87568,-5.77387 14.33034,-10.77903 1.13861,-1.64963 2.11217,-3.44809 2.5532,-5.4034 0.33597,-1.48955 0.34831,-3.08112 -0.1779,-4.51456"
+         inkscape:connector-curvature="0"
+         sodipodi:nodetypes="aaaasssaccaa" />
+    </clipPath>
+    <clipPath
+       id="clipPath622"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         sodipodi:nodetypes="aaaaaaaa"
+         inkscape:connector-curvature="0"
+         id="path624"
+         d="m 85.75,122.36218 c -2.78042,1.91023 -5.11057,4.57487 -6.25,7.75 -1.43603,4.00163 -0.88584,8.48071 0.5,12.5 1.41949,4.11688 3.79379,8.04098 7.37932,10.51234 1.79277,1.23567 3.86809,2.08301 6.0304,2.33859 2.16231,0.25558 4.40928,-0.0949 6.34028,-1.10093 2.35312,-1.22596 4.14782,-3.37278 5.26217,-5.78076 1.11436,-2.40798 1.5888,-5.0701 1.73783,-7.71924 0.18989,-3.37546 -0.14047,-6.80646 -1.25,-10 -1.20527,-3.46909 -3.39005,-6.67055 -6.47275,-8.6666 -1.54136,-0.99803 -3.29195,-1.68356 -5.11089,-1.93515 -1.81893,-0.25158 -3.70476,-0.0633 -5.41636,0.60175 -0.97547,0.37901 -1.88744,0.9074 -2.75,1.5"
+         style="display:inline;fill:url(#radialGradient626);fill-opacity:1;stroke:none"
+         inkscape:label="Clip - Right Eyelid" />
+    </clipPath>
+    <radialGradient
+       r="14.572236"
+       fy="137.66095"
+       fx="223.19559"
+       cy="137.66095"
+       cx="223.19559"
+       gradientTransform="matrix(1.0857794,-0.03431182,0.03943781,1.2479887,-15.5421,-75.904827)"
+       gradientUnits="userSpaceOnUse"
+       id="radialGradient626"
+       xlink:href="#linearGradient28799-3"
+       inkscape:collect="always" />
+    <clipPath
+       id="clipPath631"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         sodipodi:nodetypes="zzzzzzz"
+         inkscape:connector-curvature="0"
+         id="path633"
+         d="m 54.23244,122.36218 c -1.78096,0.097 -3.48461,0.91899 -4.78785,2.1367 -1.30323,1.21771 -2.22137,2.81176 -2.78618,4.50357 -1.12962,3.38363 -0.87548,7.05177 -0.6187,10.60973 0.23251,3.22162 0.47041,6.50533 1.67679,9.50158 0.60319,1.49813 1.45024,2.91021 2.58034,4.06395 1.13009,1.15374 2.55173,2.04189 4.11829,2.43447 1.46884,0.36809 3.03816,0.29183 4.48279,-0.16209 1.44462,-0.45392 2.76391,-1.27887 3.84623,-2.33791 1.57904,-1.54507 2.64326,-3.5662 3.25345,-5.68947 0.61019,-2.12328 0.78416,-4.35155 0.7524,-6.56053 -0.0397,-2.76435 -0.40091,-5.53851 -1.26575,-8.16439 -0.86485,-2.62588 -2.24575,-5.10327 -4.1728,-7.08561 -0.93331,-0.96009 -1.99776,-1.80513 -3.19858,-2.39747 -1.20082,-0.59233 -2.54344,-0.92535 -3.88043,-0.85253"
+         style="display:inline;fill:url(#radialGradient635);fill-opacity:1;stroke:none"
+         inkscape:label="Clip - Left Eyelid" />
+    </clipPath>
+    <radialGradient
+       r="14.572236"
+       fy="137.66095"
+       fx="223.19559"
+       cy="137.66095"
+       cx="223.19559"
+       gradientTransform="matrix(0.81524244,-0.03431182,0.02961133,1.2479887,9.5624,-75.904827)"
+       gradientUnits="userSpaceOnUse"
+       id="radialGradient635"
+       xlink:href="#linearGradient28799-5-5"
+       inkscape:collect="always" />
+    <clipPath
+       id="clipPath697"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:label="Clip - Beak Side Highlight"
+         inkscape:connector-curvature="0"
+         id="path699"
+         d="m 214.63605,148.03815 c -2.51029,-0.0409 -4.99135,0.28921 -7.27146,0.88384 -4.05254,1.05688 -7.57367,2.93934 -10.08468,5.39315 -1.62814,0.85539 -3.05003,1.89919 -4.20722,3.08639 -0.66186,0.67901 -1.24391,1.41694 -1.50131,2.24757 -0.20244,0.65333 -0.19857,1.34469 -0.28524,2.01986 -0.0324,0.25293 -0.0778,0.5073 -0.0362,0.76548 0.0208,0.12909 0.0631,0.25809 0.13756,0.38081 0.0221,0.0364 0.0528,0.0707 0.0806,0.1055 0.0825,0.15031 0.18297,0.29681 0.31473,0.43099 0.28806,0.29334 0.68023,0.53107 1.09417,0.73203 0.41394,0.20097 0.85255,0.36757 1.2815,0.54936 2.28006,0.96628 4.22773,2.32456 5.9925,3.75924 2.3677,1.92485 4.52941,4.06099 7.5099,5.46004 2.10465,0.98794 4.52773,1.552 6.92602,1.72396 2.81637,0.20193 5.58521,-0.12293 8.18167,-0.69344 2.40631,-0.52873 4.69673,-1.27132 6.75202,-2.25401 3.90702,-1.86802 6.98699,-4.60634 11.42445,-5.83442 0.96876,-0.2681 1.99041,-0.45921 2.91317,-0.78993 0.92276,-0.33072 1.76305,-0.8265 2.11948,-1.52711 0.34261,-0.67347 0.2049,-1.45031 0.23569,-2.18968 0.0329,-0.791 0.26357,-1.5559 0.33312,-2.34278 0.0695,-0.78687 -0.0382,-1.6289 -0.62199,-2.35178 -0.12955,-0.16043 -0.28324,-0.31001 -0.45163,-0.45157 -0.0509,-0.29235 -0.22134,-0.58029 -0.46622,-0.83239 -0.50487,-0.51975 -1.29334,-0.87172 -2.09515,-1.11671 -1.09824,-0.33555 -2.25599,-0.50211 -3.39891,-0.69601 -3.51093,-0.59565 -6.96955,-1.47539 -10.29467,-2.60394 -1.65302,-0.56104 -3.27073,-1.18327 -4.90416,-1.77928 -1.67927,-0.61273 -3.38672,-1.20103 -5.16515,-1.57729 -1.49109,-0.31546 -3.00643,-0.47332 -4.51259,-0.49788 z"
+         style="display:inline;fill:url(#radialGradient701);fill-opacity:1;stroke:none;stroke-width:0.77538049" />
+    </clipPath>
+    <radialGradient
+       r="31.111488"
+       fy="193.09949"
+       fx="294.48483"
+       cy="193.09949"
+       cx="294.48483"
+       gradientTransform="matrix(0.81494503,-0.25452614,0.31491054,0.43302392,-75.371375,150.73818)"
+       gradientUnits="userSpaceOnUse"
+       id="radialGradient701"
+       xlink:href="#linearGradient28469-0"
+       inkscape:collect="always" />
+    <clipPath
+       id="clipPath816"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         sodipodi:nodetypes="zssacaaaaaaaaaacsszzz"
+         inkscape:connector-curvature="0"
+         id="path818"
+         d="m -26.29567,154.81433 c -1.0464,1.31136 -1.72773,2.88726 -2.13927,4.51369 -0.41153,1.62642 -0.56228,3.30801 -0.62653,4.98446 -0.12849,3.35291 0.0765,6.77015 -0.81096,10.00604 -0.94874,3.4595 -3.07595,6.45649 -5.15761,9.37795 -3.60485,5.05916 -7.248548,10.25011 -9.027058,16.20217 -1.077103,3.60469 -1.435613,7.42255 -1.04841,11.16474 -4.035298,5.9262 -7.528852,12.22112 -10.4229,18.78069 -4.386197,9.94163 -7.396115,20.5265 -8.454552,31.34105 -1.296051,13.24236 0.397579,26.86184 5.627472,39.09655 3.781309,8.84592 9.417708,16.94379 16.68566,23.2466 3.695408,3.20468 7.799668,5.93944 12.189498,8.09709 15.21252,7.47713 34.01348,7.49101 48.97296,-0.48031 7.81838,-4.16611 14.41789,-10.2582 20.78084,-16.42232 3.83183,-3.71209 7.64353,-7.51249 10.56653,-11.97551 5.62746,-8.59236 7.58747,-19.03566 8.80544,-29.23436 2.12971,-17.83321 2.1984,-36.66998 -5.62137,-52.83816 -2.69219,-5.56638 -6.27896,-10.69891 -10.58065,-15.14052 -1.14547,-7.78087 -3.40638,-15.39666 -6.69212,-22.54215 -2.37045,-5.15502 -5.2683,-10.06187 -7.47079,-15.29085 -0.90422,-2.14672 -1.68995,-4.34486 -2.69346,-6.44699 -1.00352,-2.10213 -2.24145,-4.12498 -3.92446,-5.73541 -1.72343,-1.6491 -3.87096,-2.81824 -6.13593,-3.56631 -2.26498,-0.74806 -4.64917,-1.08697 -7.03147,-1.2068 -4.7646,-0.23966 -9.53872,0.38348 -14.30559,0.19423 -3.79476,-0.15066 -7.57776,-0.81566 -11.36892,-0.59186 -1.89557,0.1119 -3.79087,0.45058 -5.55026,1.1649 -1.7594,0.71432 -3.38173,1.81713 -4.56609,3.30139"
+         style="display:inline;fill:#fdfdfb;fill-opacity:1;stroke:none"
+         inkscape:label="Clip - Lower Beak Shadow" />
+    </clipPath>
+    <clipPath
+       id="clipPath820"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:label="Clip - Upper Beak Shadow"
+         style="display:inline;fill:#fdfdfb;fill-opacity:1;stroke:none;stroke-width:1.04194593"
+         d="m -47.29567,161.44377 c -1.0464,1.42368 -1.72773,3.13456 -2.13927,4.9003 -0.41153,1.76572 -0.56228,3.59134 -0.62653,5.41138 -0.12849,3.64009 0.0765,7.35002 -0.81096,10.86307 -0.94874,3.75581 -3.07595,7.0095 -5.15761,10.18119 -3.60485,5.49248 -7.248548,11.12804 -9.027058,17.58991 -1.077103,3.91343 -1.435613,8.0583 -1.04841,12.12101 -4.035298,6.43379 -7.528852,13.26788 -10.4229,20.38928 -4.386197,10.79315 -7.396115,22.28463 -8.454552,34.02546 -1.296051,14.37658 0.397579,29.16259 5.627472,42.44522 3.781309,9.60359 9.417708,18.39505 16.68566,25.23771 3.695408,3.47916 7.799668,6.44816 12.189498,8.79061 15.21252,8.11756 34.01348,8.13263 48.97296,-0.52145 7.81838,-4.52294 14.41789,-11.13683 20.78084,-17.82891 3.83183,-4.03004 7.64353,-8.15595 10.56653,-13.00123 5.62746,-9.32831 7.58747,-20.66609 8.80544,-31.73832 2.12971,-19.36065 2.1984,-39.81082 -5.62137,-57.36383 -2.69219,-6.04314 -6.27896,-11.61528 -10.58065,-16.43732 -1.14547,-8.44732 -3.40638,-16.71541 -6.69212,-24.47292 -2.37045,-5.59655 -5.2683,-10.92368 -7.47079,-16.60053 -0.90422,-2.33059 -1.68995,-4.71701 -2.69346,-6.99919 -1.00352,-2.28218 -2.24145,-4.47829 -3.92446,-6.22665 -1.72343,-1.79035 -3.87096,-3.05963 -6.13593,-3.87177 -2.26498,-0.81213 -4.64917,-1.18007 -7.03147,-1.31016 -4.7646,-0.26019 -9.53872,0.41632 -14.30559,0.21086 -3.79476,-0.16356 -7.57776,-0.88552 -11.36892,-0.64255 -1.89557,0.12148 -3.79087,0.48917 -5.55026,1.26467 -1.7594,0.77551 -3.38173,1.97277 -4.56609,3.58416"
+         id="path822"
+         inkscape:connector-curvature="0"
+         sodipodi:nodetypes="zssacaaaaaaaaaacsszzz" />
+    </clipPath>
+  </defs>
+  <metadata
+     id="metadata27455">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title>Tux</dc:title>
+        <dc:date>20 June 2012</dc:date>
+        <dc:creator>
+          <cc:Agent>
+            <dc:title>Garrett LeSage</dc:title>
+          </cc:Agent>
+        </dc:creator>
+        <cc:license
+           rdf:resource="http://creativecommons.org/publicdomain/zero/1.0/" />
+        <dc:contributor>
+          <cc:Agent>
+            <dc:title>Larry Ewing, the creator of the original Tux graphic</dc:title>
+          </cc:Agent>
+        </dc:contributor>
+        <dc:subject>
+          <rdf:Bag>
+            <rdf:li>tux</rdf:li>
+            <rdf:li>Linux</rdf:li>
+            <rdf:li>penguin</rdf:li>
+            <rdf:li>logo</rdf:li>
+          </rdf:Bag>
+        </dc:subject>
+        <dc:rights>
+          <cc:Agent>
+            <dc:title>Larry Ewing, Garrett LeSage</dc:title>
+          </cc:Agent>
+        </dc:rights>
+        <dc:source>https://github.com/garrett/Tux</dc:source>
+      </cc:Work>
+      <cc:License
+         rdf:about="http://creativecommons.org/publicdomain/zero/1.0/">
+        <cc:permits
+           rdf:resource="http://creativecommons.org/ns#Reproduction" />
+        <cc:permits
+           rdf:resource="http://creativecommons.org/ns#Distribution" />
+        <cc:permits
+           rdf:resource="http://creativecommons.org/ns#DerivativeWorks" />
+      </cc:License>
+    </rdf:RDF>
+  </metadata>
+  <g
+     transform="translate(-16.987948,-19.575154)"
+     style="display:inline"
+     inkscape:label="Tux"
+     id="layer2"
+     inkscape:groupmode="layer">
+    <g
+       inkscape:label="Tux"
+       id="g1212">
+      <g
+         id="g463"
+         inkscape:label="Body">
+        <path
+           inkscape:label="Body Without Tummy"
+           sodipodi:nodetypes="csssccssssccsscccsccscccccscsscscsssc"
+           inkscape:connector-curvature="0"
+           id="path28712-2"
+           d="m 140.8125,19.578125 c -7.16795,-0.07795 -14.42402,1.374646 -20.73438,4.775391 -6.70663,3.614308 -12.20088,9.395485 -15.58593,16.220703 -3.38347,6.822028 -4.712926,14.108148 -4.914065,22.132808 -0.382163,15.24684 0.34393,31.23872 1.494145,45.730473 0.3054,4.41258 0.85369,6.99499 0.29297,11.52344 -1.88652,9.62986 -10.313201,16.11178 -14.80468,24.57031 -4.954704,9.33089 -7.043403,19.88101 -10.783203,29.76172 -3.422488,9.04236 -8.227578,17.52067 -11.470703,26.62891 -4.534864,12.73604 -5.890504,26.73088 -2.894532,39.91406 2.283855,10.04965 7.054597,19.47291 13.484375,27.5332 -0.930503,1.67688 -1.832233,3.3716 -2.792968,5.03125 -2.979452,5.14693 -6.619557,10.02667 -8.316407,15.72656 -0.848425,2.84995 -1.182417,5.88981 -0.634765,8.8125 0.547652,2.92268 2.02651,5.71858 4.351562,7.57227 1.522028,1.21346 3.357446,1.99485 5.253906,2.43359 1.896461,0.43879 3.856625,0.54531 5.802735,0.50391 7.394587,-0.15718 14.559024,-2.40522 21.71289,-4.2832 4.23946,-1.11291 8.51036,-2.10105 12.80273,-2.98829 15.24055,-3.12209 32.25031,-1.87591 46.39844,0.17579 4.79197,0.72368 9.54981,1.67102 14.25977,2.8125 7.37714,1.78788 14.72878,4.06701 22.3164,4.2832 1.99729,0.0569 4.0106,-0.0306 5.96094,-0.46484 1.95034,-0.43429 3.84211,-1.22688 5.4043,-2.47266 2.32922,-1.85746 3.80834,-4.65745 4.35546,-7.58594 0.54713,-2.9285 0.20917,-5.97702 -0.64843,-8.83008 -1.71521,-5.70613 -5.38873,-10.5749 -8.43555,-15.69531 -1.20215,-2.0203 -2.32023,-4.0926 -3.51367,-6.11719 9.16873,-10.29563 16.54824,-22.20278 20.8164,-35.28125 4.65874,-14.27524 5.51426,-29.64566 3.55274,-44.5332 -1.96148,-14.88754 -6.6821,-29.32114 -12.8984,-42.99023 -7.79769,-17.13839 -14.35278,-23.331 -19.10351,-38.38086 -5.13471,-16.266273 -0.8948,-35.514213 -4.71094,-50.267583 -1.3618,-5.0173 -3.53277,-9.80681 -6.32617,-14.191405 -3.27306,-5.137474 -7.42457,-9.742407 -12.35743,-13.316406 -7.87066,-5.702527 -17.61519,-8.638455 -27.33398,-8.744141 z"
+           style="display:inline;fill:#020204;fill-opacity:1;stroke:none" />
+        <path
+           inkscape:label="Tummy"
+           style="display:inline;fill:#fdfdfb;fill-opacity:1;stroke:none"
+           d="m 112.70417,105.45215 c -1.0464,1.31136 -1.72773,2.88726 -2.13927,4.51369 -0.41153,1.62642 -0.56228,3.30801 -0.62653,4.98446 -0.12849,3.35291 0.0765,6.77015 -0.81096,10.00604 -0.94874,3.4595 -3.07595,6.45649 -5.15761,9.37795 -3.60485,5.05916 -7.248548,10.25011 -9.027058,16.20217 -1.077103,3.60469 -1.435613,7.42255 -1.04841,11.16474 -4.035298,5.9262 -7.528852,12.22112 -10.4229,18.78069 -4.386197,9.94163 -7.396115,20.5265 -8.454552,31.34105 -1.296051,13.24236 0.397579,26.86184 5.627472,39.09655 3.781309,8.84592 9.417708,16.94379 16.68566,23.2466 3.695408,3.20468 7.799668,5.93944 12.189498,8.09709 15.21252,7.47713 34.01348,7.49101 48.97296,-0.48031 7.81838,-4.16611 14.41789,-10.2582 20.78084,-16.42232 3.83183,-3.71209 7.64353,-7.51249 10.56653,-11.97551 5.62746,-8.59236 7.58747,-19.03566 8.80544,-29.23436 2.12971,-17.83321 2.1984,-36.66998 -5.62137,-52.83816 -2.69219,-5.56638 -6.27896,-10.69891 -10.58065,-15.14052 -1.14547,-7.78087 -3.40638,-15.39666 -6.69212,-22.54215 -2.37045,-5.15502 -5.2683,-10.06187 -7.47079,-15.29085 -0.90422,-2.14672 -1.68995,-4.34486 -2.69346,-6.44699 -1.00352,-2.10213 -2.24145,-4.12498 -3.92446,-5.73541 -1.72343,-1.6491 -3.87096,-2.81824 -6.13593,-3.56631 -2.26498,-0.74806 -4.64917,-1.08697 -7.03147,-1.2068 -4.7646,-0.23966 -9.53872,0.38348 -14.30559,0.19423 -3.79476,-0.15066 -7.57776,-0.81566 -11.36892,-0.59186 -1.89557,0.1119 -3.79087,0.45058 -5.55026,1.1649 -1.7594,0.71432 -3.38173,1.81713 -4.56609,3.30139"
+           id="path29719-5"
+           inkscape:connector-curvature="0"
+           sodipodi:nodetypes="zssacaaaaaaaaaacsszzz" />
+      </g>
+      <g
+         style="display:inline"
+         id="g562"
+         inkscape:label="Body Shadows">
+        <path
+           inkscape:label="Left Pec Shadow"
+           style="display:inline;opacity:0.25;fill:url(#radialGradient18834);fill-opacity:1;stroke:none;filter:url(#filter4487-6)"
+           d="m -61.00266,211.59308 c 0.88005,1.52387 -0.54737,6.77829 19.96381,3.4153 0,0 -3.60202,0.4573 -7.15281,1.40419 -5.52127,2.1334 -10.33021,4.51706 -14.04019,7.67524 -3.67553,3.12167 -6.36707,7.19694 -9.73973,10.69705 0,0 5.46173,-11.5187 6.82331,-14.98742 1.36157,-3.46872 -0.22795,-3.30999 0.84893,-8.4136 1.07688,-5.1036 3.71346,-10.00699 3.71346,-10.00699 0,0 -2.15241,7.21088 -0.41678,10.21623 z"
+           id="path4400-1"
+           inkscape:connector-curvature="0"
+           sodipodi:nodetypes="scccczzcs"
+           transform="matrix(1.1543044,0,0,1,166.33231,-58.362183)" />
+        <path
+           inkscape:label="Right Pec Shadow"
+           style="display:inline;opacity:0.42000002;fill:url(#radialGradient18836);fill-opacity:1;stroke:none;filter:url(#filter4427-5-7)"
+           d="m 172.04993,151.8559 c -4.82509,3.36138 -7.65241,2.96341 -13.50685,3.62087 -5.85444,0.65746 -21.69838,0.41943 -21.69838,0.41943 0,0 2.29371,-0.0427 7.37759,0.90419 5.08388,0.94693 15.45307,1.85232 21.29176,4.07468 5.83869,2.22236 7.96846,2.8566 11.51723,5.10056 5.05107,3.19388 8.75817,8.19694 13.587,11.69705 0,0 0.23377,-4.6437 -1.71568,-8.11242 -1.94945,-3.46872 -7.19037,-8.93499 -8.7322,-14.0386 -1.54183,-5.1036 -2.27429,-15.13199 -2.27429,-15.13199 0,0 -1.02108,8.10485 -5.84618,11.46623 z"
+           id="path4400-2-8"
+           inkscape:connector-curvature="0"
+           sodipodi:nodetypes="zzczzaczzcz" />
+        <path
+           inkscape:label="Middle Pec Shadow"
+           style="display:inline;opacity:0.2;fill:url(#radialGradient18838);fill-opacity:1;stroke:none;filter:url(#filter4538-7)"
+           d="m 126.66974,144.67794 c -0.17937,1.45594 -0.41189,2.90533 -0.69695,4.34431 -0.14052,0.70936 -0.2949,1.41989 -0.55905,2.09306 -0.26414,0.67317 -0.64419,1.31214 -1.18125,1.79639 -0.47071,0.42443 -1.04439,0.71595 -1.62069,0.97975 -2.24827,1.02916 -4.6544,1.71261 -7.10798,2.01899 0.97993,0.0719 1.95856,0.16127 2.93528,0.2682 0.61534,0.0674 1.23207,0.14208 1.83169,0.29586 0.59961,0.15377 1.18472,0.38955 1.68422,0.75518 0.54781,0.40099 0.97799,0.94833 1.29931,1.54636 0.64023,1.19159 0.85435,2.56281 0.97272,3.91031 0.15139,1.72336 0.16244,3.45904 0.033,5.18419 0.11585,-1.15429 0.35775,-2.29589 0.72,-3.39797 0.65284,-1.98614 1.70416,-3.84789 3.11974,-5.38642 0.56171,-0.6105 1.18038,-1.17036 1.85876,-1.6479 2.07821,-1.46294 4.71804,-2.1055 7.23612,-1.76133 -2.55897,0.11302 -5.14896,-0.69089 -7.19419,-2.23302 -1.04161,-0.78539 -1.94875,-1.76287 -2.57976,-2.90463 -0.97579,-1.76561 -1.25012,-3.90675 -0.75097,-5.86133"
+           id="path4491-9"
+           inkscape:connector-curvature="0"
+           sodipodi:nodetypes="cssacaaaacaaacssc" />
+        <path
+           inkscape:label="Tummy Shadow"
+           style="color:#000000;display:inline;overflow:visible;visibility:visible;opacity:0.11000001;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:2;marker:none;filter:url(#filter4592-2);enable-background:accumulate"
+           d="m 120.49984,178.71875 c -1.22954,4.67934 -2.07519,9.45949 -2.52566,14.27665 -0.63702,6.81216 -0.48368,13.6725 -0.84934,20.5046 -0.31029,5.79753 -0.99107,11.65587 0.0159,17.3737 0.48017,2.72655 1.34273,5.38547 2.55456,7.87467 0.19249,-0.95006 0.33356,-1.91054 0.42239,-2.87583 0.42661,-4.63604 -0.3541,-9.28689 -0.61781,-13.93504 -0.46225,-8.14744 0.66569,-16.2899 1.125,-24.4375 0.3526,-6.25476 0.31082,-12.53173 -0.125,-18.78125 h -4e-5"
+           id="path4542-7"
+           inkscape:connector-curvature="0"
+           sodipodi:nodetypes="cssscasacc" />
+        <path
+           inkscape:label="Left Pec Upper Shadow"
+           transform="matrix(-0.06991927,0.95700905,-0.56450744,-0.11853409,236.56758,-180.37928)"
+           style="display:inline;opacity:0.25;fill:#7c7c7c;fill-opacity:1;stroke:none;filter:url(#filter15211-9)"
+           d="m 351.9604,200.85653 c -1.45162,0.38883 -1.23008,3.99417 -0.29604,5.49789 0.78886,1.26999 3.07235,2.27109 3.75853,1.00504 1.11412,-2.05562 -1.47192,-7.03613 -3.46249,-6.50293 z"
+           id="path28767-9-3"
+           inkscape:connector-curvature="0"
+           sodipodi:nodetypes="aaaa" />
+        <path
+           inkscape:label="Right Pec Side Shadow"
+           sodipodi:nodetypes="zzzzsz"
+           inkscape:connector-curvature="0"
+           id="path15189-4"
+           d="m 361.75,209.34296 c 0.002,-1.53313 -7.56474,-10.0564 -9.7896,-8.48643 -2.22486,1.56998 -0.49172,3.7842 -0.29604,5.49789 0.19568,1.71368 -0.94537,6.60933 0.23849,7.25934 1.18386,0.65001 3.36607,-2.5198 5.30111,-4.27697 1.55818,-1.41494 4.54398,1.53929 4.54604,0.006 z"
+           style="display:inline;opacity:0.75;fill:#7c7c7c;fill-opacity:1;stroke:none;filter:url(#filter14706-3)"
+           transform="matrix(-0.09596121,-0.95700905,-0.77476232,0.11853409,398.90188,493.24449)" />
+        <path
+           inkscape:label="Head Shadow"
+           transform="matrix(1.1522137,0,0,1.1522137,-163.02721,-72.199565)"
+           style="display:inline;fill:#7c7c7c;fill-opacity:1;stroke:none;filter:url(#filter15133-1)"
+           d="m 277.9604,90.856536 c -2.22486,1.569973 -1.25289,3.530477 -0.29604,5.497884 0.95685,1.967407 -2.10429,7.63969 -2.13651,7.88434 -0.0322,0.24465 6.02534,-2.8754 7.67611,-4.901967 1.94956,-2.393373 6.87703,3.237917 6.60851,2.381167 0.002,-1.53312 -9.62721,-12.431397 -11.85207,-10.861424 z"
+           id="path28767-3"
+           inkscape:connector-curvature="0"
+           sodipodi:nodetypes="zzzscz" />
+        <path
+           transform="translate(160,-57.362183)"
+           sodipodi:nodetypes="asassa"
+           inkscape:connector-curvature="0"
+           id="path4596-3"
+           d="m 16.687345,165.86218 c -2.16217,1.96937 1.01359,4.92767 2.51966,8.40429 0.93626,2.16126 3.52677,5.20509 6.03244,4.7175 1.8848,-0.36677 3.05427,-3.07936 2.87588,-4.99121 -0.34416,-3.68852 -3.45669,-4.55256 -5.7172,-5.81949 -1.79139,-1.00401 -4.19258,-3.69391 -5.71078,-2.31109 z"
+           style="display:inline;fill:#838384;fill-opacity:1;stroke:none;filter:url(#filter15185-2)"
+           inkscape:label="Neck Shadow" />
+        <path
+           sodipodi:nodetypes="aaaasssaaaaaaaaaaa"
+           inkscape:connector-curvature="0"
+           d="m -28.632498,172.60136 c 1.702936,4.93775 5.13035,9.15199 9.185848,12.44354 1.348656,1.0946 2.782167,2.10442 4.366233,2.817 1.584067,0.71257 3.331648,1.11945 5.062377,0.97245 1.6949733,-0.14396 3.3074706,-0.80936 4.788721,-1.64575 1.4812505,-0.8364 2.8560173,-1.84688 4.29298914,-2.75725 2.46262056,-1.56015 5.09983966,-2.82139 7.65715996,-4.22092 3.0824622,-1.68692 6.0695999,-3.59014 8.6646899,-5.95927 1.187948,-1.08451 2.295748,-2.26795 3.607519,-3.19888 1.31177,-0.93094 2.882987,-1.60572 4.487811,-1.49651 1.203853,0.0819 2.332908,0.59386 3.51249,0.84794 0.589792,0.12704 1.200784,0.18932 1.797215,0.0984 0.596431,-0.0909 1.179727,-0.34439 1.597895,-0.77928 0.512367,-0.53286 0.736406,-1.29981 0.709607,-2.03855 -0.0268,-0.73874 -0.284448,-1.45303 -0.628853,-2.10712 -0.68881,-1.30819 -1.734547,-2.43513 -2.200224,-3.83833 -0.414395,-1.24867 -0.330451,-2.59887 -0.293929,-3.91401 0.03652,-1.31513 0.0075,-2.68902 -0.598601,-3.85671 -0.461591,-0.88922 -1.236126,-1.59525 -2.12164,-2.06391 -0.885513,-0.46867 -1.878578,-0.71001 -2.876081,-0.80365 -1.995007,-0.18727 -3.993929,0.19997 -5.994489,0.31349 -2.655817,0.1507 -5.321957,-0.18176 -7.9772499,-0.0221 -3.3112912,0.1991 -6.5570138,1.16053 -9.87428,1.16645 -3.7859765,0.007 -7.5681223,-1.23192 -11.3075401,-0.63996 -1.60458,0.25401 -3.134778,0.8376 -4.675685,1.35219 -1.540906,0.5146 -3.132742,0.96724 -4.757133,0.94371 -1.844198,-0.0267 -3.629272,-0.66537 -5.468985,-0.79666 -0.919856,-0.0656 -1.86247,-1.8e-4 -2.726086,0.32326 -0.863615,0.32344 -1.644513,0.92357 -2.068349,1.7426 -0.242869,0.46932 -0.363194,0.99683 -0.385859,1.52479 -0.02266,0.52795 0.05026,1.05702 0.177828,1.56983 0.255132,1.02563 0.724233,1.98285 1.109821,2.96688 1.392508,3.55373 1.692361,7.44806 2.93678,11.05632"
+           id="path28461-8-7"
+           style="display:inline;fill:#000000;fill-opacity:0.25882353;stroke:none;filter:url(#filter30479-2)"
+           transform="translate(138.99984,-49.362181)"
+           inkscape:label="Lower Beak Shadow"
+           clip-path="url(#clipPath816)" />
+        <path
+           transform="matrix(1,0,0,0.92110599,159.99984,-43.254675)"
+           sodipodi:nodetypes="caaaaaaaasssaaaaaac"
+           inkscape:connector-curvature="0"
+           d="m -54.3809,165.4735 c 3.308481,2.21892 6.276719,4.94413 8.76949,8.0515 2.313244,2.88358 4.281072,6.1543 7.29931,8.28886 2.132561,1.50819 4.694875,2.3578 7.29406,2.61606 3.051509,0.3032 6.139761,-0.18685 9.08171,-1.05205 2.72664,-0.80188 5.363225,-1.92931 7.78216,-3.4214 4.5982326,-2.83636 8.4392136,-6.99279 13.51002,-8.85709 1.1070251,-0.407 2.2592345,-0.69817 3.3265087,-1.20024 1.0672741,-0.50208 2.071356,-1.25404 2.5810913,-2.31768 0.489979,-1.02241 0.4709637,-2.20249 0.63053,-3.32496 0.1707072,-1.20085 0.5537633,-2.36184 0.7638732,-3.55642 0.2101099,-1.19458 0.2351735,-2.47234 -0.2814032,-3.56975 -0.4277722,-0.90876 -1.2053869,-1.6278 -2.0998754,-2.08466 -0.8944886,-0.45686 -1.9010816,-0.6644 -2.9042801,-0.71362 -2.00639693,-0.0985 -3.9875479,0.41519 -5.9880545,0.59766 -2.649555,0.24167 -5.3179008,-0.0991 -7.97725,-0.019 -3.308278,0.0996 -6.568191,0.84884 -9.87428,1.00503 -3.771652,0.17818 -7.534056,-0.41751 -11.30754,-0.55139 -1.632251,-0.0579 -3.2754,-0.0286 -4.884302,0.25254 -1.608902,0.28112 -3.188197,0.82168 -4.548518,1.72563 -1.319979,0.87714 -2.396737,2.06728 -3.606567,3.09101 -0.604916,0.51187 -1.247757,0.98508 -1.953748,1.34495 -0.705991,0.35987 -1.478799,0.60451 -2.270305,0.64257 -0.40728,0.0196 -0.818345,-0.0152 -1.2213,0.0472 -0.676172,0.10463 -1.303709,0.49355 -1.698284,1.05254 -0.394576,0.55899 -0.550896,1.28053 -0.423046,1.9527 v 1e-5"
+           id="path28461-84-3"
+           style="display:inline;opacity:0.3;fill:#000000;fill-opacity:1;stroke:none;filter:url(#filter30475-4)"
+           inkscape:label="Upper Beak Shadow"
+           clip-path="url(#clipPath820)" />
+      </g>
+      <g
+         id="g481"
+         inkscape:label="Arms"
+         style="display:inline">
+        <path
+           inkscape:label="Right Arm"
+           style="display:inline;fill:#020204;fill-opacity:1;stroke:none"
+           id="path29705-5-0"
+           d="m 45.10134,224.44951 c 6.084796,4.89463 9.945575,12.28597 11.14096,20.00305 0.932288,6.01859 0.326343,12.19496 -1.030694,18.13223 -1.357038,5.93726 -3.452212,11.67615 -5.531716,17.40051 -0.829244,2.2827 -1.662186,4.5871 -1.991686,6.9933 -0.3295,2.40621 -0.120849,4.95426 1.039536,7.08777 1.330223,2.44578 3.812954,4.13021 6.48184,4.92292 2.634941,0.78263 5.481042,0.76618 8.148186,0.10153 2.667145,-0.66466 7.157372,-1.52591 9.328374,-3.21179 5.515551,-4.28306 6.82474,-11.71935 8.13188,-18.24961 1.363195,-6.8103 0.69637,-13.95038 -0.50149,-20.83014 -1.63342,-9.38136 -4.38172,-18.55105 -7.87991,-27.40773 -2.573144,-6.51466 -5.8695,-12.74244 -9.79135,-18.52016 -3.847635,-5.66839 -9.306853,-10.09303 -13.34018,-15.63502 -1.401311,-1.92547 -3.086675,-3.93159 -4.463417,-5.87845 -2.968456,-4.19771 -2.296208,-3.41451 -4.137707,-6.13155 -1.336068,-1.9713 -3.445653,-2.6487 -5.665474,-3.51096 -2.219821,-0.86226 -4.71266,-1.11623 -7.005012,-0.47108 -3.021449,0.85034 -5.489775,3.23113 -6.802912,6.08208 -1.313136,2.85095 -1.54284,6.12313 -1.031948,9.2201 0.659095,3.99536 2.486278,7.70258 4.54639,11.18873 2.326679,3.93724 5.021993,7.72707 8.53596,10.65407 3.667149,3.05459 8.101532,5.06875 11.82037,8.0602"
+           inkscape:connector-curvature="0"
+           sodipodi:nodetypes="aaaaassssaaaasssscssssc"
+           transform="translate(160,-57.362183)" />
+        <path
+           inkscape:label="Left Arm"
+           sodipodi:nodetypes="cczzzzzzzzzzzzszzzzzcssscc"
+           inkscape:connector-curvature="0"
+           id="path14967-1-0"
+           d="m -69.527091,194.03592 c -7.073089,8.03686 -14.352222,15.81627 -18.345769,24.50506 -1.976249,4.41329 -2.910774,9.20725 -4.26498,13.84932 -1.537901,5.27176 -3.626089,10.3703 -5.97071,15.33612 -2.16496,4.58531 -4.54982,9.06291 -6.93891,13.53553 -1.7382,3.25409 -3.50514,6.58104 -4.10782,10.22071 -0.47628,2.87632 -0.1985,5.84423 0.53375,8.66626 0.73225,2.82202 1.90965,5.5106 3.23776,8.10601 5.667249,11.07504 14.170032,20.62168 24.24176,27.92472 4.570626,3.31418 9.466691,6.18109 14.60245,8.52595 2.782468,1.27041 5.713552,2.40436 8.771859,2.45744 1.529154,0.0265 3.074104,-0.22544 4.47434,-0.84055 1.400236,-0.6151 2.650677,-1.60373 3.482541,-2.88709 1.022778,-1.5779 1.369917,-3.53829 1.164614,-5.40743 -0.205303,-1.86914 -0.934843,-3.65294 -1.913244,-5.25873 -2.389971,-3.92251 -6.165196,-6.76055 -9.79642,-9.57343 -7.840549,-6.07358 -15.424654,-12.48039 -22.68212,-19.23996 -2.049117,-1.90854 -4.098407,-3.87759 -5.53019,-6.28412 -1.394295,-2.34352 -2.147602,-5.01376 -2.65783,-7.69253 -1.399719,-7.34873 -1.040921,-15.08286 1.45958,-22.13343 0.978222,-2.75826 2.271183,-5.39201 3.51815,-8.03965 2.161326,-4.58906 4.207248,-9.26564 7.04933,-13.46723 3.537978,-5.23037 8.267489,-9.66049 11.15147,-15.27803 2.434229,-4.74149 3.419942,-10.07236 4.36185,-15.31831 0.736933,-4.10434 2.150416,-8.12437 2.869234,-12.23193 -1.406111,2.66567 -5.937961,7.04283 -8.710695,10.5253 z"
+           style="display:inline;fill:#020204;fill-opacity:1;stroke:none"
+           transform="translate(160,-57.362183)" />
+      </g>
+      <g
+         style="display:inline"
+         id="g514"
+         inkscape:label="Arm Shadows">
+        <path
+           inkscape:label="Right Arm Shadow"
+           sodipodi:nodetypes="cssssssccsssscc"
+           transform="matrix(1.0180731,0,0,1,-105.25547,-58.362183)"
+           inkscape:connector-curvature="0"
+           clip-path="url(#clipPath504)"
+           id="path29705-9"
+           d="m 290.78125,216.01843 c 0.48482,0.46774 0.98091,0.94261 1.5,1.375 3.66715,3.05459 5.61879,6.48526 9.33763,9.47671 6.0848,4.89463 12.25895,13.34358 13.45434,21.06066 0.93229,6.01859 -0.30093,9.28947 -1.80468,16.3878 -1.50374,7.09832 -5.76944,17.14832 -8.07376,23.99211 -0.9189,2.7291 1.86121,1.60306 1.49609,4.4798 -0.17944,1.41384 -0.19766,2.84238 -0.0346,4.25917 0.0227,-0.27104 0.0388,-0.5525 0.0693,-0.82194 0.44281,-3.92274 1.62331,-7.69479 2.90878,-11.39514 2.47416,-7.12214 5.31434,-14.10109 7.27196,-21.40792 1.95763,-7.30683 1.74028,-12.56443 0.71875,-18.84375 -1.28459,-7.89637 -5.79703,-15.18702 -12.1875,-20 -4.51852,-3.40313 -9.84688,-5.58465 -14.65625,-8.5625 z"
+           style="display:inline;fill:#838384;fill-opacity:1;stroke:none;filter:url(#filter14666-9)" />
+        <path
+           inkscape:label="Left Arm Shadow"
+           transform="translate(-150.00016,-58.362183)"
+           clip-path="url(#clipPath508)"
+           sodipodi:nodetypes="csczzczzsccczc"
+           inkscape:connector-curvature="0"
+           id="path15007-0"
+           d="m 232.33049,224.26954 c -2.32126,2.13749 -4.33307,4.61051 -5.95338,7.31823 -2.66801,4.45854 -4.23905,9.46835 -6.17809,14.28882 -1.44362,3.58886 -3.12519,7.19314 -3.32662,11.05622 -0.10346,1.98418 0.19056,3.96588 0.25671,5.95165 0.0662,1.98578 -0.11756,4.05108 -1.08967,5.78391 -0.81338,1.44988 -2.1659,2.58902 -3.73298,3.14402 2.11547,0.70686 4.00453,2.07546 5.33532,3.86539 1.11451,1.49902 1.82759,3.25366 2.79609,4.85091 0.78716,1.29818 1.75335,2.50124 2.94285,3.44463 1.1895,0.94339 2.61141,1.61974 4.11726,1.81293 2.06623,0.26508 4.23574,-0.42815 5.76541,-1.84225 -1.92538,-18.0357 -0.16195,-36.4572 5.15013,-53.80008 0.33544,-1.09515 0.68725,-2.19828 0.77034,-3.34063 0.0831,-1.14235 -0.12896,-2.34792 -0.82414,-3.2582 -0.37014,-0.48467 -0.86838,-0.87059 -1.4302,-1.1078 -0.56182,-0.2372 -1.18588,-0.32512 -1.79136,-0.25236 -0.60549,0.0727 -1.19096,0.306 -1.68059,0.66954 -0.48964,0.36355 -0.88227,0.85651 -1.12706,1.41507 h -2e-5"
+           style="display:inline;opacity:0.95;fill:#7c7c7c;fill-opacity:1;stroke:none;filter:url(#filter15053-7)" />
+      </g>
+      <g
+         style="display:inline"
+         id="g457"
+         inkscape:label="Feet">
+        <g
+           inkscape:label="Right Foot"
+           id="g444">
+          <path
+             sodipodi:nodetypes="aaaaaaaaaaaaaaaaaaaaaaaacscaa"
+             inkscape:connector-curvature="0"
+             id="path14483-7-4"
+             d="m 86.05618,328.13191 c -0.45671,1.54919 -1.15216,3.04585 -2.04962,4.41089 -1.9805,3.01237 -4.85449,5.2794 -7.7268,7.37015 -4.89889,3.56589 -10.00272,6.83785 -14.56318,10.89029 -3.05551,2.71513 -5.84112,5.75937 -8.42278,8.96491 -2.20789,2.74146 -4.2839,5.61929 -6.80089,8.05729 -2.53964,2.45993 -5.53049,4.44676 -8.75187,5.52933 -3.91796,1.31667 -8.05795,1.2533 -11.83233,0.25938 -2.64475,-0.69647 -5.22365,-1.90703 -6.86216,-4.0969 -1.6448,-2.19828 -2.1777,-5.15218 -2.36802,-8.05186 -0.33651,-5.12875 0.25967,-10.36956 0.8034,-15.57549 0.45167,-4.3257 0.86825,-8.65475 1.03855,-12.97167 0.30984,-7.85202 -0.19668,-15.63586 -1.23186,-23.27336 -0.17336,-1.27909 -0.36202,-2.56818 -0.25656,-3.88562 0.10519,-1.31741 0.5354,-2.6883 1.43615,-3.70529 0.83202,-0.93937 1.9928,-1.49566 3.15071,-1.76892 1.15792,-0.27325 2.32983,-0.2898 3.4927,-0.3215 2.74018,-0.0747 5.49647,-0.24039 8.19521,0.006 1.70282,0.15559 3.37264,0.47459 5.07313,0.6427 2.83764,0.28052 5.78134,0.13286 8.62739,-0.72369 3.06405,-0.92215 5.98631,-2.65158 9.09944,-3.12742 1.27732,-0.19521 2.559,-0.17251 3.80104,0.006 1.26746,0.18218 2.5284,0.54175 3.50235,1.33598 0.74019,0.60362 1.28194,1.43281 1.70583,2.31655 0.63144,1.31651 1.01921,2.77031 1.24115,4.2613 0.19663,1.32082 0.26639,2.68017 0.59789,3.95737 0.54536,2.10123 1.78089,3.88647 3.26736,5.33963 1.48646,1.45316 3.22499,2.60245 4.96058,3.73413 1.72907,1.12743 3.46794,2.24684 5.31472,3.17629 0.86675,0.43621 1.75751,0.83074 2.58612,1.33307 0.8286,0.50234 1.60071,1.12107 2.15093,1.93549 0.74634,1.10471 1.04569,2.55273 0.82168,3.97474 l 2e-5,-1e-5"
+             style="display:inline;opacity:0.2;fill:url(#linearGradient18840);fill-opacity:1;stroke:none;filter:url(#filter15115-3)"
+             inkscape:transform-center-x="-21.512644"
+             inkscape:transform-center-y="-26.916075"
+             transform="matrix(1.0268828,0,0,1,157.6864,-58.362183)"
+             inkscape:label="Right Foot Shadow" />
+          <path
+             style="display:inline;fill:url(#linearGradient18842);fill-opacity:1;stroke:none"
+             d="m 263.18967,278.25167 c -2.6238,3.11482 -6.268,5.17039 -9.89648,7.01985 -6.1886,3.15437 -12.60169,5.92177 -18.41964,9.71654 -3.89802,2.54249 -7.4959,5.52671 -10.86016,8.74238 -2.87719,2.75012 -5.60582,5.68745 -8.83247,8.01771 -3.25567,2.35122 -7.01915,4.05426 -10.99061,4.6502 -4.83026,0.72481 -9.82134,-0.21289 -14.29898,-2.16416 -3.13754,-1.36728 -6.15569,-3.3229 -7.96301,-6.22931 -1.81425,-2.91754 -2.22807,-6.48813 -2.23266,-9.92375 -0.008,-6.07666 1.11824,-12.09004 2.17848,-18.07349 0.88097,-4.97177 1.71949,-9.95483 2.26013,-14.97502 0.98337,-9.13118 0.9763,-18.35278 0.3199,-27.51327 -0.10993,-1.53416 -0.23754,-3.0832 -0.008,-4.60412 0.22922,-1.52092 0.85475,-3.0367 2.02069,-4.03986 1.07696,-0.9266 2.52093,-1.33598 3.93947,-1.4145 1.41854,-0.0785 2.83404,0.14655 4.23982,0.35197 3.31254,0.48405 6.65159,0.8649 9.88917,1.71656 2.04284,0.53738 4.03315,1.25925 6.0722,1.81081 3.40258,0.92039 6.96639,1.36144 10.46739,0.95192 3.76917,-0.44089 7.42987,-1.85678 11.22363,-1.76474 1.55658,0.0378 3.1015,0.33171 4.58649,0.79985 1.51539,0.47772 3.00914,1.16182 4.12281,2.29512 0.84639,0.8613 1.43579,1.94539 1.87872,3.06879 0.65982,1.67352 1.01492,3.457 1.16703,5.24945 0.13475,1.58788 0.11343,3.19441 0.41433,4.75933 0.49503,2.57458 1.84746,4.92305 3.52848,6.93494 1.68102,2.01189 3.68982,3.72048 5.69641,5.40783 1.99908,1.68103 4.0106,3.35469 6.16708,4.82839 1.0121,0.69165 2.05642,1.33949 3.01736,2.10062 0.96094,0.76113 1.84466,1.6468 2.44543,2.71535 0.81492,1.44944 1.06377,3.2077 0.53758,4.87655 -0.5262,1.66885 -1.48162,3.27659 -2.67059,4.68806 z"
+             id="path14296-0"
+             inkscape:connector-curvature="0"
+             sodipodi:nodetypes="aaaaaaaaaaaaaaaaaaaaaaacscaascca"
+             clip-path="none"
+             inkscape:label="Right Foot" />
+          <path
+             style="display:inline;fill:#cd8907;fill-opacity:1;stroke:none;filter:url(#filter14416-8)"
+             d="m 512.89128,328.72435 c -0.61724,1.54745 -1.48971,2.99275 -2.57146,4.2598 -2.40248,2.814 -5.72921,4.65444 -9.03774,6.31099 -5.65305,2.83043 -11.50277,5.31761 -16.82133,8.73539 -3.55362,2.28361 -6.84076,4.96564 -9.9178,7.85959 -2.62917,2.47273 -5.12496,5.116 -8.06607,7.20809 -2.98093,2.12042 -6.41793,3.6468 -10.03693,4.18063 -4.40931,0.65041 -8.96019,-0.19314 -13.05822,-1.94562 -2.85719,-1.22185 -5.61733,-2.97002 -7.27205,-5.60029 -1.64629,-2.61688 -2.0313,-5.83002 -2.03893,-8.92166 -0.0135,-5.46467 1.01827,-10.87076 1.98945,-16.24846 0.80703,-4.46875 1.57531,-8.9482 2.06402,-13.46287 0.88853,-8.20825 0.8481,-16.49756 0.29214,-24.73502 -0.0931,-1.38017 -0.20023,-2.77381 0.0118,-4.14077 0.21204,-1.36695 0.77803,-2.72737 1.82595,-3.63036 0.9828,-0.84687 2.30304,-1.21795 3.5986,-1.28594 1.29556,-0.068 2.58744,0.14181 3.87096,0.3307 3.02315,0.4449 6.07241,0.77918 9.03106,1.54323 1.86541,0.48173 3.68372,1.13165 5.54531,1.62795 3.10947,0.82898 6.36227,1.22486 9.55911,0.8558 3.44127,-0.39728 6.78665,-1.67148 10.24974,-1.58654 1.42063,0.0348 2.83052,0.30037 4.1885,0.71908 1.38179,0.42605 2.74909,1.03446 3.76507,2.06337 0.76566,0.7754 1.29538,1.75352 1.7157,2.75891 0.62574,1.49674 1.03256,3.09742 1.06577,4.71936 0.0347,1.69374 -0.33552,3.39491 -0.10594,5.07338 0.18638,1.36264 0.7635,2.64802 1.50064,3.80912 0.73713,1.1611 1.634,2.2109 2.52251,3.26069 1.71726,2.02897 3.4393,4.09674 5.5931,5.65457 2.45218,1.77364 5.36188,2.81145 7.89508,4.46732 0.75511,0.49359 1.48596,1.05215 2.01814,1.78058 0.8972,1.22806 1.1387,2.90791 0.62379,4.33898 h 2e-5"
+             id="path14296-3-7"
+             inkscape:connector-curvature="0"
+             sodipodi:nodetypes="caaaaaaaaaaaaaaaaaaaaaasssaaac"
+             clip-path="url(#clipPath419)"
+             transform="translate(-250.00016,-58.362183)"
+             inkscape:label="Right Foot Hightlights" />
+          <path
+             style="display:inline;fill:#f5c021;fill-opacity:1;stroke:none;filter:url(#filter14432-2)"
+             d="m 508.79285,327.92545 c -0.60151,1.26455 -1.38215,2.44372 -2.31134,3.49133 -2.15335,2.42776 -5.06099,4.09917 -8.12349,5.1725 -5.04166,1.76698 -10.54565,2.00437 -15.49471,4.01618 -3.01615,1.22607 -5.73063,3.07339 -8.47914,4.81871 -2.22174,1.41082 -4.49246,2.76887 -6.93206,3.75622 -2.75548,1.1152 -5.68568,1.74047 -8.62582,2.17857 -1.87082,0.27876 -3.76259,0.48423 -5.65156,0.38704 -1.88898,-0.0972 -3.78418,-0.50735 -5.45127,-1.40092 -1.26399,-0.6775 -2.40126,-1.6529 -3.07596,-2.91839 -0.74956,-1.4059 -0.87959,-3.05603 -0.86243,-4.64917 0.0457,-4.24592 1.02557,-8.4458 0.99617,-12.69186 -0.0256,-3.69614 -0.81525,-7.34495 -1.04231,-11.03419 -0.43665,-7.09457 1.2047,-14.31322 -0.23989,-21.27287 -0.23125,-1.11413 -0.54212,-2.22686 -0.52701,-3.36463 0.008,-0.56889 0.0988,-1.14101 0.31541,-1.66709 0.21661,-0.52609 0.56289,-1.00508 1.02461,-1.33751 0.38878,-0.27992 0.85044,-0.45024 1.32336,-0.52677 0.47292,-0.0765 0.95748,-0.0616 1.43166,0.007 0.94836,0.13656 1.85188,0.48215 2.77546,0.73718 2.64193,0.72952 5.43254,0.71432 8.11748,1.26484 1.68527,0.34555 3.31679,0.91149 4.98436,1.33427 2.80028,0.70996 5.72013,1.0133 8.59212,0.70142 3.0885,-0.33539 6.10714,-1.37534 9.21289,-1.30034 1.27305,0.0307 2.53741,0.25005 3.76479,0.58936 1.22771,0.3394 2.45538,0.81951 3.38421,1.69114 0.6693,0.62809 1.15135,1.4307 1.54214,2.26121 0.5703,1.21202 0.96726,2.52854 0.95796,3.868 -0.005,0.6968 -0.11899,1.38758 -0.18672,2.0811 -0.0677,0.69352 -0.0878,1.40368 0.0914,2.07705 0.18009,0.67656 0.55415,1.2867 0.98269,1.84033 0.42854,0.55364 0.91471,1.06002 1.35819,1.60176 1.24195,1.51713 2.12961,3.28544 3.09724,4.99067 0.96764,1.70523 2.05232,3.39266 3.58036,4.62117 2.0797,1.67204 4.77798,2.34016 7.09642,3.66141 0.67877,0.38682 1.33676,0.84082 1.81399,1.45937 0.38231,0.49552 0.63762,1.0882 0.73509,1.70642 0.0975,0.61822 0.0369,1.26071 -0.1744,1.84982 h 9e-5"
+             id="path14296-3-1-9"
+             inkscape:connector-curvature="0"
+             sodipodi:nodetypes="caaaaaaaaaaaaaaaaaaaaaaaaaaaac"
+             transform="matrix(1.1429721,0,0,1.2323048,-318.99817,-135.48838)"
+             clip-path="url(#clipPath426)"
+             inkscape:label="Right Foot Brighter Highlights" />
+          <path
+             style="display:inline;fill:url(#linearGradient18844);fill-opacity:1;stroke:none;filter:url(#filter17044)"
+             d="m 187.30911,230.28754 c 3.27611,-0.88704 6.0662,1.5972 8.44228,3.47233 1.53527,1.30928 3.75348,0.97992 5.63665,1.04213 3.12069,-0.11321 6.22535,0.52281 9.34708,0.13577 6.14462,-0.51932 12.16847,-2.02966 18.34236,-2.28984 2.94948,-0.18579 6.25992,-0.35725 8.80813,1.36517 1.03299,0.7155 2.54702,3.74139 3.56647,2.60489 -0.42031,-3.17821 -2.77748,-6.25589 -5.93906,-7.10224 -2.47492,-0.38942 -4.98985,0.29134 -7.48947,0.0711 -7.42294,-0.17706 -14.79344,-1.5554 -22.23396,-1.16015 -5.17644,0.0448 -10.34657,-0.19501 -15.51546,-0.39662 -2.03057,-0.41489 -2.74674,1.38901 -3.8489,2.08085"
+             id="path16493"
+             inkscape:connector-curvature="0"
+             inkscape:label="Right Foot Brightest Highlights"
+             clip-path="url(#clipPath433)" />
+        </g>
+        <g
+           style="display:inline"
+           inkscape:label="Left Foot"
+           id="g411">
+          <path
+             sodipodi:nodetypes="ssssssssssssssss"
+             inkscape:connector-curvature="0"
+             id="path4635-1"
+             d="m 57.57688,222.65692 c 1.59929,-0.66295 3.3982,-0.78361 5.10074,-0.46963 1.70253,0.31398 3.31141,1.04948 4.74342,2.02239 2.86402,1.94583 4.98821,4.77774 7.02263,7.57952 4.67189,6.43406 9.16868,13.00227 13.24488,19.8293 3.30635,5.53766 6.34352,11.25685 10.16415,16.45304 2.49398,3.3919 5.3066,6.53947 7.813,9.92221 2.50639,3.38273 4.72794,7.05586 5.83931,11.11662 1.44411,5.27653 0.88463,11.09291 -1.62666,15.95302 -1.76663,3.41896 -4.47646,6.35228 -7.77242,8.33898 -3.29595,1.9867 -7.17064,3.01444 -11.01635,2.87021 -6.11413,-0.2293 -11.69944,-3.28515 -17.38362,-5.54906 -11.58097,-4.6125 -24.15978,-6.0594 -36.09666,-9.65174 -3.66859,-1.10404 -7.27582,-2.4107 -10.96988,-3.42629 -1.64125,-0.45122 -3.30866,-0.8482 -4.85875,-1.55144 -1.55008,-0.70325 -2.999548,-1.7491 -3.86171,-3.21675 -0.666391,-1.13439 -0.948386,-2.47002 -0.930187,-3.78554 0.0182,-1.31552 0.325889,-2.61453 0.773815,-3.85158 0.895851,-2.47409 2.343262,-4.71374 3.320162,-7.15696 1.59511,-3.98935 1.88169,-8.38839 1.66657,-12.67942 -0.21511,-4.29103 -0.91078,-8.54478 -1.20454,-12.83115 -0.13118,-1.91406 -0.18066,-3.85256 0.18479,-5.73598 0.36545,-1.88343 1.17577,-3.72459 2.55771,-5.05541 1.27406,-1.22693 2.96492,-1.95531 4.69643,-2.31651 1.73151,-0.3612 3.51533,-0.37747 5.28367,-0.33762 1.76833,0.0399 3.54067,0.13425 5.30351,-0.0106 1.76284,-0.14488 3.53347,-0.54055 5.06911,-1.41828 1.45996,-0.83447 2.65433,-2.0745 3.64374,-3.43424 0.9894,-1.35974 1.78909,-2.84573 2.60891,-4.31396 0.81983,-1.46823 1.66834,-2.93151 2.74157,-4.22611 1.07324,-1.2946 2.38923,-2.42304 3.94266,-3.06698"
+             style="display:inline;fill:url(#linearGradient18854);fill-opacity:1;stroke:none"
+             inkscape:label="Left Foot" />
+          <path
+             inkscape:connector-curvature="0"
+             style="display:inline;fill:#d99a03;fill-opacity:1;stroke:none;filter:url(#filter14148-8)"
+             d="m -99.89049,282.77885 c 1.45515,-0.58619 3.09423,-0.65064 4.62272,-0.30406 1.52849,0.34657 2.94957,1.09015 4.18836,2.047 2.47758,1.91371 4.19983,4.61379 5.85419,7.26861 3.97009,6.43306 7.8514,12.93381 11.5161,19.56716 2.7769,4.99324 5.4247,10.09253 8.83749,14.67892 2.26379,3.04154 4.84735,5.83139 7.15787,8.83675 2.31051,3.00536 4.37126,6.28214 5.3928,9.93347 1.31626,4.70582 0.78265,9.91001 -1.49541,14.23282 -1.63755,3.10576 -4.15203,5.74644 -7.18609,7.5126 -3.03406,1.76617 -6.57924,2.64923 -10.08655,2.48791 -5.59831,-0.25772 -10.71129,-3.05353 -15.98089,-4.95071 -10.10307,-3.66572 -21.05344,-4.15754 -31.41615,-7.02001 -3.71479,-1.00833 -7.33661,-2.35276 -11.06955,-3.29396 -1.65162,-0.41658 -3.33303,-0.75712 -4.90217,-1.4193 -1.56914,-0.66219 -3.04681,-1.68866 -3.89752,-3.16474 -0.63282,-1.09717 -0.88561,-2.38838 -0.84651,-3.65421 0.0391,-1.26584 0.35915,-2.51035 0.80992,-3.69386 0.90155,-2.36701 2.32025,-4.51029 3.22912,-6.87464 1.3787,-3.57425 1.54994,-7.50412 1.29397,-11.32617 -0.25597,-3.82205 -0.9211,-7.60949 -1.15326,-11.4336 -0.10374,-1.70896 -0.11933,-3.43899 0.22634,-5.11576 0.34564,-1.67677 1.07606,-3.30971 2.29486,-4.512 1.32089,-1.30904 3.14116,-2.02413 4.97727,-2.30427 1.83611,-0.28013 3.70601,-0.15808 5.55479,0.007 1.84877,0.16495 3.70503,0.37271 5.56113,0.26163 1.85609,-0.11109 3.7357,-0.56331 5.26886,-1.60694 1.39737,-0.94461 2.44584,-2.32407 3.24439,-3.79842 0.79856,-1.47435 1.3676,-3.05544 1.97644,-4.61656 0.60885,-1.56113 1.26672,-3.12189 2.2218,-4.50973 0.95509,-1.38785 2.23612,-2.60467 3.80568,-3.23473"
+             id="path13596-6"
+             clip-path="url(#clipPath401)"
+             transform="translate(160,-57.362183)"
+             inkscape:label="Left Foot Hightlights" />
+          <path
+             sodipodi:nodetypes="aaassssssssssssa"
+             inkscape:connector-curvature="0"
+             id="path4635-2-4"
+             d="m 138.7532,281.23531 c 1.40907,-0.7122 3.07062,-0.85812 4.61642,-0.53681 1.54579,0.3213 2.97823,1.09063 4.19572,2.09584 2.43498,2.0104 3.98026,4.8747 5.41939,7.68535 3.30494,6.45466 6.3834,13.04983 10.33791,19.12824 2.86875,4.40952 6.17965,8.51701 9.08155,12.90479 3.93557,5.95071 7.13582,12.4957 8.45639,19.50682 0.88822,4.71571 0.85899,9.80955 -1.37244,14.05779 -1.46869,2.79611 -3.85002,5.08988 -6.66339,6.52522 -2.81337,1.43533 -6.0432,2.01701 -9.18889,1.73441 -4.95423,-0.44507 -9.4537,-2.92512 -14.11748,-4.65475 -8.27469,-3.06879 -17.21809,-3.80325 -25.73435,-6.1187 -3.59196,-0.9766 -7.10999,-2.23521 -10.7509,-3.00963 -1.60616,-0.34163 -3.24361,-0.59125 -4.77675,-1.17943 -1.53313,-0.58818 -2.98623,-1.56578 -3.76965,-3.00894 -0.55139,-1.01573 -0.73656,-2.20459 -0.65433,-3.3574 0.0822,-1.15282 0.42084,-2.27486 0.86462,-3.34201 0.88755,-2.13429 2.20087,-4.08935 2.89035,-6.29561 1.01321,-3.24214 0.59672,-6.75718 -0.1636,-10.06777 -0.76031,-3.31059 -1.85667,-6.56127 -2.19448,-9.94121 -0.15046,-1.50543 -0.14681,-3.03993 0.19136,-4.51458 0.33818,-1.47465 1.02687,-2.89176 2.10855,-3.94955 1.3932,-1.36244 3.34372,-2.03997 5.28315,-2.22925 1.93944,-0.18927 3.89217,0.0689 5.82027,0.3512 1.9281,0.28227 3.86824,0.59003 5.8148,0.49986 1.94656,-0.0902 3.92849,-0.61081 5.45316,-1.82432 1.50782,-1.20011 2.45577,-2.98713 2.99939,-4.83599 0.54362,-1.84885 0.71997,-3.78191 0.94267,-5.69612 0.2227,-1.91421 0.50044,-3.8462 1.22971,-5.63 0.72928,-1.7838 1.96094,-3.42814 3.68085,-4.29745"
+             style="display:inline;fill:#f5bd0c;fill-opacity:1;stroke:none;filter:url(#filter14140-3)"
+             transform="matrix(1,0,0,0.98204782,-80.00015,-54.40321)"
+             clip-path="url(#clipPath391)"
+             inkscape:label="Left Foot Brighter Highlights" />
+          <path
+             id="path4792-8"
+             d="m 76.40702,237.60723 c 2.60622,4.71337 4.1958,10.12156 6.78125,14.875 2.3781,4.37223 5.08446,8.87379 7.5,12.90625 1.07545,1.79534 3.58329,4.5546 6.11895,8.83731 2.29771,3.88081 4.61826,9.29715 5.91658,11.1158 -0.74552,-2.12877 -2.27926,-7.84655 -4.10875,-11.92255 -1.70955,-3.80877 -3.69976,-5.98219 -4.92678,-8.03056 -2.41553,-4.03246 -5.01691,-7.65647 -7.5,-11.5 -3.42521,-5.30181 -6.03558,-11.23523 -9.78125,-16.28125 z"
+             style="display:inline;fill:url(#linearGradient18856);fill-opacity:1;stroke:none;filter:url(#filter14176-5)"
+             inkscape:connector-curvature="0"
+             sodipodi:nodetypes="cssscsssc"
+             inkscape:label="Left Foot Brightest Highlights" />
+        </g>
+      </g>
+      <g
+         style="display:inline"
+         id="g526"
+         inkscape:label="Hand Shadows">
+        <path
+           inkscape:label="Hand Lower Shadow"
+           style="opacity:0.35;fill:url(#radialGradient18846);fill-opacity:1;stroke:none;filter:url(#filter14897-2)"
+           id="path29714-8-2"
+           d="m 231.4835,237.27796 c -0.56258,-1.10201 -1.58692,-1.92585 -2.72873,-2.40251 -1.1418,-0.47667 -2.39692,-0.6289 -3.63419,-0.61936 -2.47454,0.0191 -4.93459,0.66357 -7.39999,0.45028 -2.0826,-0.18018 -4.05875,-0.96301 -6.08982,-1.45739 -2.09726,-0.51049 -4.32188,-0.70969 -6.40465,-0.14297 -2.22595,0.60568 -4.18942,2.09362 -5.41915,4.04541 -1.08426,1.72091 -1.59909,3.75274 -1.76111,5.78028 -0.16202,2.02754 0.013,4.06578 0.21815,6.08941 0.1484,1.46363 0.31354,2.93079 0.66764,4.35866 0.35411,1.42788 0.90422,2.82282 1.76608,4.01506 1.24071,1.71632 3.08337,2.9395 5.06938,3.67497 3.24183,1.20053 6.9338,1.13597 10.13167,-0.17718 5.65885,-2.45702 10.44922,-6.8639 13.36879,-12.29857 1.04539,-1.94596 1.8574,-4.01932 2.38189,-6.16513 0.20845,-0.85283 0.37215,-1.72236 0.37977,-2.60027 0.008,-0.8779 -0.14655,-1.76875 -0.54573,-2.55069"
+           inkscape:connector-curvature="0"
+           sodipodi:nodetypes="aaaasssaccaa" />
+        <path
+           inkscape:label="Hand Upper Shadow"
+           transform="matrix(1,0,0,0.72292525,159.99984,20.396294)"
+           style="opacity:0.35;fill:url(#radialGradient18848);fill-opacity:1;stroke:none;filter:url(#filter14951-8)"
+           id="path29714-8-3-0"
+           d="m 71.483661,295.64014 c -0.562575,-1.10201 -1.586921,-1.92585 -2.728725,-2.40251 -1.141803,-0.47667 -2.396928,-0.6289 -3.634197,-0.61936 -2.474537,0.0191 -4.934587,0.66357 -7.399988,0.45028 -2.082597,-0.18018 -4.058745,-0.96301 -6.08982,-1.45739 -2.097262,-0.51049 -4.321879,-0.70969 -6.40465,-0.14297 -2.225952,0.60568 -4.189424,2.09362 -5.41915,4.04541 -1.084262,1.72091 -1.599093,3.75274 -1.76111,5.78028 -0.162016,2.02754 0.01297,4.06578 0.21815,6.08941 0.148398,1.46363 0.31354,2.93079 0.667645,4.35866 0.354105,1.42788 0.904219,2.82282 1.766075,4.01506 1.240713,1.71632 3.083374,2.9395 5.06938,3.67497 3.241832,1.20053 6.933796,1.13597 10.13167,-0.17718 5.658851,-2.45702 10.449216,-6.8639 13.36879,-12.29857 1.045394,-1.94596 1.857401,-4.01932 2.38189,-6.16513 0.208453,-0.85283 0.372151,-1.72236 0.379775,-2.60027 0.0076,-0.8779 -0.146556,-1.76875 -0.545735,-2.55069"
+           inkscape:connector-curvature="0"
+           sodipodi:nodetypes="aaaasssaccaa" />
+      </g>
+      <g
+         style="display:inline"
+         id="g545"
+         inkscape:label="Hand">
+        <path
+           inkscape:label="Hand"
+           transform="translate(159.99984,-58.362183)"
+           style="display:inline;fill:#020204;fill-opacity:1;stroke:none"
+           id="path29714-5-4"
+           d="m 76.1875,285.32775 c -0.405158,-1.10369 -1.118445,-2.08156 -1.990705,-2.86987 -0.872259,-0.78832 -1.900482,-1.39229 -2.982775,-1.85155 -2.164587,-0.91852 -4.520525,-1.26149 -6.83152,-1.69556 -2.179187,-0.40931 -4.34179,-0.90631 -6.52782,-1.27734 -2.27136,-0.38551 -4.617897,-0.63213 -6.8653,-0.1253 -1.965827,0.44333 -3.784499,1.45879 -5.271724,2.81864 -1.487225,1.35984 -2.649109,3.0564 -3.484986,4.89007 -1.472176,3.22952 -1.934512,6.86503 -1.65394,10.40316 0.208815,2.63325 0.875323,5.34594 2.60877,7.33912 1.400654,1.61052 3.387329,2.61526 5.43398,3.22092 3.525017,1.04316 7.366632,0.98822 10.86038,-0.1553 5.766894,-1.93113 10.875681,-5.77387 14.33034,-10.77903 1.138609,-1.64963 2.112174,-3.44809 2.5532,-5.4034 0.335973,-1.48955 0.348308,-3.08112 -0.1779,-4.51456"
+           inkscape:connector-curvature="0"
+           sodipodi:nodetypes="aaaasssaccaa" />
+        <path
+           inkscape:label="Hand Lower Hightlight"
+           transform="translate(-150.00016,-58.362183)"
+           clip-path="url(#clipPath533)"
+           inkscape:connector-curvature="0"
+           id="path29714-3-9-9"
+           d="m 362.21875,276.45593 c -0.54933,0.0306 -1.08144,0.0909 -1.625,0.1875 -3.46951,0.61686 -6.64705,2.80857 -8.4375,5.84375 -1.26396,2.14267 -1.83985,4.67634 -1.65625,7.15625 0.0732,-1.74163 0.52946,-3.44685 1.375,-4.96875 1.43442,-2.58185 4.03238,-4.52979 6.9375,-5.0625 1.78976,-0.32819 3.63182,-0.13095 5.4375,0.0937 1.73256,0.2156 3.48115,0.44287 5.1875,0.8125 2.64101,0.57209 5.25428,1.45135 7.46875,3 0.51646,0.36118 0.99955,0.76857 1.40625,1.25 0.40669,0.48143 0.72188,1.03792 0.84375,1.65625 0.17824,0.90428 -0.0794,1.85295 -0.53125,2.65625 -0.45189,0.8033 -1.06491,1.50665 -1.71875,2.15625 -0.52923,0.5258 -1.09482,1.03417 -1.65625,1.53125 2.559,-0.49571 5.15199,-1.19766 7.28125,-2.6875 0.89975,-0.62955 1.71523,-1.38464 2.25,-2.34375 0.53477,-0.95912 0.76245,-2.1212 0.5,-3.1875 -0.17714,-0.71971 -0.57137,-1.3824 -1.0625,-1.9375 -0.49114,-0.55511 -1.0805,-1.01217 -1.6875,-1.4375 -2.67877,-1.87701 -5.81493,-3.07854 -9.0625,-3.46875 -2.08149,-0.38286 -4.18122,-0.70597 -6.28125,-0.96875 -1.64344,-0.20564 -3.32077,-0.37313 -4.96875,-0.28125 z"
+           style="display:inline;fill:url(#radialGradient18850);fill-opacity:1;stroke:none;filter:url(#filter14812-5)" />
+        <path
+           inkscape:label="Hand Upper Highlight"
+           transform="translate(-150.00016,-58.36218)"
+           clip-path="url(#clipPath538)"
+           inkscape:connector-curvature="0"
+           id="path29714-3-9-3-8"
+           d="m 362.21875,276.45593 c -0.54933,0.0306 -1.08144,0.0909 -1.625,0.1875 -3.46951,0.61686 -6.64705,2.80857 -8.4375,5.84375 -1.26396,2.14267 -1.83985,4.67634 -1.65625,7.15625 0.0732,-1.74163 0.52946,-3.44685 1.375,-4.96875 1.43442,-2.58185 4.03238,-4.52979 6.9375,-5.0625 1.78976,-0.32819 3.63182,-0.13095 5.4375,0.0937 1.73256,0.2156 3.48115,0.44287 5.1875,0.8125 2.64101,0.57209 5.25428,1.45135 7.46875,3 0.51646,0.36118 0.99955,0.76857 1.40625,1.25 0.40669,0.48143 0.72188,1.03792 0.84375,1.65625 0.17824,0.90428 -0.0794,1.85295 -0.53125,2.65625 -0.45189,0.8033 -1.06491,1.50665 -1.71875,2.15625 -0.52923,0.5258 -1.09482,1.03417 -1.65625,1.53125 2.559,-0.49571 5.15199,-1.19766 7.28125,-2.6875 0.89975,-0.62955 1.71523,-1.38464 2.25,-2.34375 0.53477,-0.95912 0.76245,-2.1212 0.5,-3.1875 -0.17714,-0.71971 -0.57137,-1.3824 -1.0625,-1.9375 -0.49114,-0.55511 -1.0805,-1.01217 -1.6875,-1.4375 -2.67877,-1.87701 -5.81493,-3.07854 -9.0625,-3.46875 -2.08149,-0.38286 -4.18122,-0.70597 -6.28125,-0.96875 -1.64344,-0.20564 -3.32077,-0.37313 -4.96875,-0.28125 z"
+           style="display:inline;fill:url(#linearGradient18852);fill-opacity:1;stroke:none;filter:url(#filter14812-0-9)" />
+      </g>
+      <g
+         style="display:inline"
+         id="g777"
+         inkscape:label="Face">
+        <g
+           inkscape:label="Eyes"
+           id="g669">
+          <g
+             id="g643"
+             inkscape:label="Left Eye"
+             style="display:inline">
+            <path
+               inkscape:label="Left Eyeball"
+               transform="translate(138.99984,-49.362181)"
+               style="display:inline;fill:url(#radialGradient18806);fill-opacity:1;stroke:none"
+               d="m -24.767558,113.36218 c -1.780966,0.097 -3.484616,0.91899 -4.787852,2.1367 -1.303235,1.21771 -2.221372,2.81176 -2.786181,4.50357 -1.129618,3.38363 -0.87548,7.05177 -0.618697,10.60973 0.23251,3.22162 0.470404,6.50533 1.676785,9.50158 0.60319,1.49813 1.450246,2.91021 2.580338,4.06395 1.130092,1.15374 2.551736,2.04189 4.118297,2.43447 1.468838,0.36809 3.03816,0.29183 4.482783,-0.16209 1.444622,-0.45392 2.763916,-1.27887 3.846235,-2.33791 1.57904,-1.54507 2.643262,-3.5662 3.253449,-5.68947 0.610186,-2.12328 0.784157,-4.35155 0.752401,-6.56053 -0.03974,-2.76435 -0.400909,-5.53851 -1.265755,-8.16439 -0.864846,-2.62588 -2.245743,-5.10327 -4.172795,-7.08561 -0.933308,-0.96009 -1.997765,-1.80513 -3.198585,-2.39747 -1.200819,-0.59233 -2.543439,-0.92535 -3.880423,-0.85253"
+               id="path28795-9-5"
+               inkscape:connector-curvature="0"
+               sodipodi:nodetypes="zzzzzzz" />
+            <g
+               style="display:inline"
+               inkscape:label="Left Eye Pupil"
+               id="g605">
+              <path
+                 sodipodi:nodetypes="aaaaaaaaa"
+                 inkscape:connector-curvature="0"
+                 d="m 159.93889,137.11161 c -0.37211,2.24574 -0.38563,4.60199 0.3864,6.74344 0.50979,1.41404 1.35041,2.69692 2.37218,3.79935 0.66903,0.72184 1.42824,1.37779 2.31576,1.80318 0.88752,0.42539 1.91578,0.60638 2.87035,0.36671 0.88113,-0.22123 1.65156,-0.78859 2.22013,-1.49715 0.56856,-0.70857 0.9476,-1.55295 1.2177,-2.42034 0.7974,-2.56075 0.66926,-5.36165 -0.12241,-7.92418 -0.5768,-1.86701 -1.53208,-3.66794 -3.02664,-4.9268 -0.71307,-0.60061 -1.54773,-1.07115 -2.45479,-1.28664 -0.90707,-0.2155 -1.88874,-0.16505 -2.73754,0.22063 -0.9423,0.42817 -1.67159,1.24304 -2.14907,2.16134 -0.47749,0.91829 -0.72288,1.93936 -0.89207,2.96046"
+                 id="path29453-9"
+                 style="fill:#020204;fill-opacity:1;stroke:none"
+                 transform="translate(-50.00015,-58.362183)"
+                 inkscape:label="Left Eye Pupil" />
+              <path
+                 sodipodi:nodetypes="aaaaaaaaa"
+                 inkscape:connector-curvature="0"
+                 id="path29465-8"
+                 d="m 114.68735,77.124997 c 0.24185,0.6337 1.05418,0.86381 1.5,1.375 0.43302,0.49651 0.88735,1.01055 1.125,1.625 0.4549,1.17616 -0.4488,2.91931 0.5,3.75 0.29782,0.26075 0.89472,0.26639 1.1875,0 1.14539,-1.04215 0.89094,-3.14433 0.4375,-4.625 -0.4115,-1.34371 -1.42747,-2.61637 -2.67923,-3.25512 -0.57882,-0.29536 -1.45077,-0.54089 -1.94577,-0.11988 -0.31898,0.2713 -0.27431,0.85878 -0.125,1.25 z"
+                 style="fill:url(#linearGradient18820);fill-opacity:1;stroke:none;filter:url(#filter29493-2)"
+                 inkscape:label="Left Eye Pupil Highlight" />
+            </g>
+            <path
+               inkscape:label="Left Eyelid"
+               sodipodi:nodetypes="aaaaaaaaaaaaa"
+               inkscape:connector-curvature="0"
+               id="path29551-9"
+               d="m 50.39208,129.52717 c 2.68537,-1.59933 5.95507,-1.97034 9.066699,-1.67565 3.111629,0.29468 6.125434,1.20847 9.141301,2.02921 2.211625,0.60188 4.451579,1.16149 6.525325,2.13777 2.073747,0.97627 3.99989,2.41568 5.141935,4.40296 0.183191,0.31877 0.345257,0.6497 0.539254,0.96201 0.193996,0.31232 0.42311,0.60867 0.716456,0.83031 0.293346,0.22164 0.656994,0.3643 1.024107,0.34424 0.183557,-0.01 0.365612,-0.0609 0.524176,-0.15388 0.158563,-0.093 0.292945,-0.22871 0.377987,-0.39169 0.09778,-0.18739 0.128079,-0.40446 0.117139,-0.61554 -0.01094,-0.21108 -0.06122,-0.41805 -0.117139,-0.62189 -0.755202,-2.75296 -2.53499,-5.08832 -3.88909,-7.6014 -0.8126,-1.5081 -1.476963,-3.09273 -2.2981,-4.5962 -2.81829,-5.16019 -7.443597,-9.21564 -12.701405,-11.84733 -5.257808,-2.6317 -11.127445,-3.89613 -16.997075,-4.23934 -6.801182,-0.39768 -13.619761,0.40945 -20.32932,1.59099 -2.908599,0.5122 -5.86079,1.11511 -8.435686,2.56156 -1.287447,0.72322 -2.467452,1.65662 -3.388474,2.81087 -0.921022,1.15425 -1.576477,2.53523 -1.78765,3.99673 -0.203522,1.40855 0.0088,2.86057 0.501301,4.19582 0.492484,1.33524 1.258246,2.5585 2.156537,3.66236 1.796584,2.20771 4.100665,3.93361 6.222432,5.83092 2.121308,1.8969 4.09001,3.99204 6.462948,5.56282 1.186469,0.78539 2.472664,1.43499 3.843385,1.81666 1.37072,0.38166 2.829918,0.48917 4.223827,0.20358 1.444987,-0.29606 2.782689,-1.005 3.953624,-1.90197 1.170934,-0.89697 2.186129,-1.98006 3.148417,-3.09793 1.924576,-2.23575 3.722539,-4.68648 6.257089,-6.19599"
+               style="display:inline;fill:url(#radialGradient18832);fill-opacity:1;stroke:none"
+               clip-path="url(#clipPath631)"
+               transform="translate(59.99984,-58.362183)" />
+            <path
+               sodipodi:nodetypes="ccccccccc"
+               inkscape:label="Left Eyebrow"
+               style="display:inline;fill:url(#linearGradient18818);fill-opacity:1;stroke:none;filter:url(#filter29447-1)"
+               d="m -38.437655,119.37798 c 2.5037,2.34533 4.36502,5.2397 5.625,8.30939 -0.550665,-3.38469 -1.423402,-6.10373 -3.625,-8.30939 -1.35129,-1.26581 -2.88639,-2.37775 -4.625,-3.1587 -1.52128,-0.68334 -3.213598,-1.10788 -4.180828,-1.12552 -0.96723,-0.0176 -1.2022,0.004 -1.40094,0.0134 -0.19874,0.009 -0.35739,0.0162 0.27185,0.0877 0.62924,0.0715 2.03368,0.45118 3.541104,1.12827 1.507424,0.6771 3.042524,1.78904 4.393814,3.05485 z"
+               id="path28795-9-2-2"
+               inkscape:connector-curvature="0"
+               transform="translate(160,-57.362183)" />
+          </g>
+          <g
+             id="g652"
+             inkscape:label="Right Eye">
+            <path
+               inkscape:label="Right Eyeball"
+               transform="translate(138.99984,-49.362181)"
+               style="display:inline;fill:url(#radialGradient18808);fill-opacity:1;stroke:none"
+               d="m 6.7500001,113.36218 c -2.780425,1.91023 -5.110569,4.57487 -6.24999996,7.75 -1.4360294,4.00163 -0.88583807,8.48071 0.49999996,12.5 1.4194877,4.11688 3.793788,8.04098 7.37932,10.51234 1.7927659,1.23567 3.8680909,2.08301 6.0304019,2.33859 2.162311,0.25558 4.409274,-0.0949 6.340278,-1.10093 2.353116,-1.22596 4.147816,-3.37278 5.262172,-5.78076 1.114356,-2.40798 1.588797,-5.0701 1.737828,-7.71924 0.189892,-3.37546 -0.140469,-6.80646 -1.25,-10 -1.205266,-3.46909 -3.390051,-6.67055 -6.472754,-8.6666 -1.541351,-0.99803 -3.291947,-1.68356 -5.110883,-1.93515 -1.818936,-0.25158 -3.704766,-0.0633 -5.4163629,0.60175 -0.9754713,0.37901 -1.8874384,0.9074 -2.75,1.5"
+               id="path28795-3"
+               inkscape:connector-curvature="0"
+               sodipodi:nodetypes="aaaaaaaa" />
+            <g
+               style="display:inline"
+               inkscape:label="Right Eye Pupil"
+               id="g613">
+              <path
+                 sodipodi:nodetypes="zzzzzzz"
+                 inkscape:connector-curvature="0"
+                 d="m 302.16152,130.75695 c -1.04548,0.0749 -2.06437,0.4318 -2.95135,0.99028 -0.88699,0.55848 -1.64327,1.31521 -2.23701,2.17899 -1.18748,1.72757 -1.70894,3.84675 -1.793,5.94139 -0.0631,1.5723 0.11098,3.16512 0.63245,4.64977 0.52147,1.48465 1.40089,2.85877 2.61276,3.86251 1.24011,1.02713 2.81647,1.64364 4.42485,1.72094 1.60838,0.0773 3.23948,-0.38665 4.56105,-1.3066 1.05288,-0.73292 1.9021,-1.74168 2.50666,-2.87315 0.60455,-1.13148 0.96879,-2.38348 1.1353,-3.65549 0.29411,-2.24678 -0.0385,-4.59295 -1.07692,-6.60695 -1.03841,-2.01401 -2.80051,-3.67269 -4.92674,-4.45606 -0.92093,-0.3393 -1.90911,-0.51576 -2.88805,-0.44563"
+                 id="path28879-6"
+                 style="fill:#020204;fill-opacity:1;stroke:none"
+                 transform="translate(-150.00015,-58.362183)"
+                 inkscape:label="Right Eye Pupil" />
+              <path
+                 sodipodi:nodetypes="aaaaaa"
+                 inkscape:connector-curvature="0"
+                 id="path28891-5"
+                 d="m 154.6561,79.249997 c -0.86591,0.34162 -2.23657,0.12677 -2.61622,0.9767 -0.22493,0.50357 0.0927,1.33252 0.60343,1.54061 1.03244,0.42063 2.63193,-0.34111 3.04876,-1.3751 0.18104,-0.4491 -0.0934,-1.16101 -0.53974,-1.34865 -0.16515,-0.0694 -0.32958,0.14069 -0.49623,0.20644 z"
+                 style="fill:#141413;fill-opacity:1;stroke:none;filter:url(#filter28927-8)"
+                 inkscape:label="Right Eye Pupil Lower Hightlight" />
+              <path
+                 sodipodi:nodetypes="sazas"
+                 inkscape:connector-curvature="0"
+                 id="path28887-6"
+                 d="m 158.62485,81.499997 c 1.16113,-1.16113 -0.82613,-4.23951 -2.375,-5.5 -1.12184,-0.91296 -4.39063,-1.86851 -4.25,-0.875 0.14063,0.99351 1.60988,2.26647 2.59467,3.23744 1.21236,1.19533 3.47886,3.68903 4.03033,3.13756 z"
+                 style="fill:url(#linearGradient18812);fill-opacity:1;stroke:none;filter:url(#filter28949-8)"
+                 inkscape:label="Right Eye Pupil Upper Highlight" />
+            </g>
+            <path
+               inkscape:label="Right Eyelid"
+               style="display:inline;fill:url(#linearGradient18814);fill-opacity:1;stroke:none"
+               d="m 75.25,132.48718 c 2.383746,-1.98014 5.160908,-3.48474 8.12154,-4.40008 6.085564,-1.88147 12.999677,-1.13706 18.37846,2.27508 1.85708,1.17808 3.51244,2.64192 5.23935,4.00367 1.72691,1.36176 3.56115,2.64122 5.63565,3.37133 1.12086,0.39448 2.31818,0.62345 3.5,0.5 1.06768,-0.11153 2.09928,-0.5118 2.98444,-1.11915 0.88515,-0.60736 1.62476,-1.4185 2.18064,-2.33686 1.11176,-1.8367 1.47001,-4.06457 1.27839,-6.20298 -0.38324,-4.27682 -2.79556,-8.05341 -4.81847,-11.84101 -0.63342,-1.18598 -1.23642,-2.39333 -2,-3.5 -2.34327,-3.39616 -6.07312,-5.63562 -9.98498,-6.94794 -3.91185,-1.31233 -8.046257,-1.78639 -12.14002,-2.30206 -1.825736,-0.22998 -3.673032,-0.46998 -5.5,-0.25 -2.099797,0.25283 -4.075978,1.101 -6.125,1.625 -0.972648,0.24874 -1.963662,0.42478 -2.928029,0.70391 -0.964366,0.27912 -1.912957,0.669 -2.696971,1.29609 -1.144817,0.91567 -1.865056,2.29088 -2.176504,3.72338 -0.311449,1.4325 -0.240517,2.92444 -0.01161,4.37242 0.457809,2.89597 1.540886,5.72407 1.438116,8.6542 -0.07058,2.01227 -0.702287,3.98797 -0.625,6 0.02266,0.58987 0.106588,1.17738 0.25,1.75"
+               id="path28972-5"
+               inkscape:connector-curvature="0"
+               sodipodi:nodetypes="cssssssssssssc"
+               clip-path="url(#clipPath622)"
+               transform="translate(59.99984,-58.362183)" />
+            <path
+               inkscape:label="Right Eyebrow"
+               style="display:inline;fill:url(#linearGradient18816);fill-opacity:1;stroke:none;filter:url(#filter29350-1)"
+               d="m -4.593905,113.125 c -0.47695,0.59985 -0.90798,1.25231 -1.25,1.96875 2.14641,0.46247 4.19906,1.34575 6.03125,2.5625 3.54507,2.35427 6.237,5.7965 8.125,9.625 0.44076,-0.48807 0.84202,-1.01184 1.1875,-1.59375 -1.89751,-3.9878 -4.64382,-7.5949 -8.3125,-10.03125 -1.76231,-1.17035 -3.72465,-2.05369 -5.78125,-2.53125 z"
+               id="path28795-92-2"
+               inkscape:connector-curvature="0"
+               transform="translate(160,-57.362183)" />
+          </g>
+        </g>
+        <g
+           inkscape:label="Beak"
+           id="g735"
+           style="display:inline">
+          <path
+             sodipodi:nodetypes="zzzzzzzz"
+             inkscape:connector-curvature="0"
+             id="path28849-2"
+             d="m -16.39938,136.86218 c 1.767366,-1.98662 2.976192,-4.41053 4.674142,-6.45679 0.848975,-1.02314 1.8284211,-1.95533 2.9816817,-2.61681 1.1532606,-0.66147 2.4919769,-1.0411 3.8165164,-0.9264 1.4744902,0.12769 2.8545436,0.86228 3.93407466,1.87472 1.07953103,1.01244 1.8797683,2.29027 2.51864534,3.62528 0.6117397,1.27831 1.0977635,2.64027 1.97912,3.75 0.940326,1.18398 2.2595274,1.99218 3.4510909,2.92288 0.5957818,0.46535 1.167477,0.96911 1.6383978,1.5605 0.4709209,0.59139 0.8396117,1.27595 0.9909913,2.01662 0.1537234,0.75214 0.077153,1.54506 -0.1851792,2.26653 -0.2623326,0.72148 -0.7066964,1.37174 -1.2596263,1.90429 -1.1058598,1.0651 -2.6135811,1.63957 -4.1338116,1.85466 -3.04046123,0.43016 -6.1146629,-0.47583 -9.1842429,-0.39142 -3.1068902,0.0854 -6.1415551,1.18366 -9.2475441,1.07007 -1.552994,-0.0568 -3.128063,-0.43624 -4.404252,-1.32301 -0.638094,-0.44339 -1.194008,-1.01055 -1.595831,-1.6756 -0.401824,-0.66505 -0.646688,-1.42894 -0.672863,-2.20552 -0.02497,-0.74092 0.148043,-1.48088 0.444075,-2.16055 0.296033,-0.67967 0.712681,-1.30175 1.182123,-1.87552 0.938883,-1.14753 2.086993,-2.10617 3.072492,-3.21393"
+             style="display:inline;fill:url(#radialGradient18810);fill-opacity:1;stroke:none"
+             transform="translate(138.99984,-49.362181)"
+             inkscape:label="Under Beak" />
+          <g
+             style="display:inline"
+             id="g712"
+             inkscape:label="Beak">
+            <path
+               inkscape:label="Lower Beak"
+               transform="translate(59.99984,-58.362183)"
+               style="display:inline;fill:url(#radialGradient18822);fill-opacity:1;stroke:none"
+               id="path28461-2"
+               d="m 45.751683,165.03156 c 0.06146,0.29539 0.172509,0.58039 0.32709,0.8395 0.265683,0.44533 0.653935,0.80631 1.073256,1.1114 0.419321,0.30509 0.872799,0.55947 1.311827,0.83545 2.333646,1.46695 4.235362,3.52905 5.924734,5.70709 2.266543,2.92217 4.271913,6.16491 7.29931,8.28886 2.137781,1.49982 4.695713,2.35501 7.29406,2.61606 3.051317,0.30656 6.139876,-0.18595 9.08171,-1.05205 2.726384,-0.80267 5.363099,-1.92956 7.78216,-3.4214 4.598507,-2.83591 8.439249,-6.99271 13.51002,-8.85709 1.10702,-0.40702 2.25922,-0.69819 3.3265,-1.20026 1.06727,-0.50207 2.07136,-1.25403 2.5811,-2.31766 0.48998,-1.02241 0.47097,-2.20249 0.63053,-3.32496 0.1707,-1.20084 0.55374,-2.36184 0.76385,-3.55642 0.2101,-1.19458 0.23517,-2.47233 -0.28138,-3.56975 -0.42775,-0.90878 -1.20535,-1.62786 -2.09983,-2.08475 -0.89448,-0.4569 -1.90108,-0.66447 -2.90429,-0.71372 -2.006415,-0.0985 -3.987581,0.41519 -5.98809,0.59785 -2.649534,0.24193 -5.317874,-0.0982 -7.97725,-0.019 -3.308296,0.0986 -6.568402,0.84468 -9.87428,1.00503 -3.771518,0.18294 -7.534685,-0.39851 -11.30754,-0.55139 -1.634066,-0.0662 -3.279962,-0.0512 -4.891819,0.22531 -1.611857,0.27654 -3.195234,0.82363 -4.541001,1.75286 -1.311442,0.90553 -2.355916,2.14022 -3.560189,3.18405 -0.602137,0.52192 -1.249488,0.99929 -1.966273,1.3474 -0.716785,0.34812 -1.50749,0.564 -2.304158,0.54708 -0.409601,-0.009 -0.830861,-0.0769 -1.2213,0.0472 -0.243915,0.0775 -0.460478,0.22705 -0.643532,0.40593 -0.183054,0.17888 -0.334787,0.38705 -0.477798,0.59931 -0.332537,0.49356 -0.623066,1.01541 -0.867417,1.55807"
+               inkscape:connector-curvature="0"
+               sodipodi:nodetypes="csssaaaaaaasssaaaaaac" />
+            <path
+               inkscape:label="Lower Beak Highlight"
+               transform="translate(59.99984,-58.362183)"
+               style="display:inline;fill:#d9b30d;fill-opacity:1;stroke:none;filter:url(#filter28502-8)"
+               id="path28487-2"
+               d="m 60.55673,169.09742 c -0.386462,1.59605 -0.151992,3.33408 0.64359,4.77067 0.795582,1.43659 2.144391,2.5575 3.70231,3.07676 1.977755,0.65919 4.206575,0.33635 6.05477,-0.62813 1.071362,-0.55909 2.051171,-1.34588 2.669379,-2.38425 0.309105,-0.51918 0.523981,-1.09707 0.604518,-1.69591 0.08054,-0.59884 0.02471,-1.2185 -0.184887,-1.78522 -0.229715,-0.62112 -0.640261,-1.16849 -1.146053,-1.59596 -0.505791,-0.42748 -1.104668,-0.7378 -1.733436,-0.94568 -1.257537,-0.41575 -2.610936,-0.42405 -3.933891,-0.36051 -2.005209,0.0963 -4.002918,0.34837 -5.9692,0.75318"
+               inkscape:connector-curvature="0"
+               sodipodi:nodetypes="caaaac" />
+            <path
+               inkscape:label="Upper Beak Undershadow"
+               transform="translate(59.99984,-58.362183)"
+               style="display:inline;fill:#604405;fill-opacity:1;stroke:none;filter:url(#filter15145-6)"
+               d="m 54.0663,156.67992 c -1.338955,0.79147 -2.628584,1.66369 -3.8975,2.56317 -0.656705,0.46551 -1.334168,0.96895 -1.68056,1.69557 -0.245501,0.51498 -0.301768,1.09903 -0.309586,1.66948 -0.0078,0.57045 0.02884,1.14399 -0.04618,1.70954 -0.05124,0.38625 -0.154326,0.76619 -0.171537,1.15544 -0.0086,0.19463 0.0047,0.39145 0.05602,0.57938 0.05134,0.18793 0.141902,0.36704 0.275482,0.50885 0.172556,0.18318 0.407931,0.29591 0.64865,0.36931 0.240719,0.0734 0.490638,0.1112 0.73562,0.16878 1.174662,0.27611 2.196917,0.99676 3.094125,1.80366 0.897208,0.8069 1.702883,1.71487 2.638865,2.47645 2.537255,2.06449 5.890478,2.91872 9.161088,2.97254 3.27061,0.0538 6.504204,-0.63066 9.695302,-1.34946 2.506322,-0.56456 5.014978,-1.15472 7.42544,-2.04356 3.702752,-1.36537 7.140748,-3.43167 10.11819,-6.02193 1.349968,-1.17442 2.617219,-2.46364 4.13251,-3.41525 1.340926,-0.84211 2.842622,-1.39796 4.206331,-2.20265 0.12193,-0.072 0.24321,-0.14621 0.35213,-0.23665 0.10893,-0.0905 0.20574,-0.1981 0.26892,-0.3248 0.10917,-0.21894 0.10937,-0.48123 0.0389,-0.71552 -0.0704,-0.23429 -0.20633,-0.44389 -0.36,-0.63425 -0.16999,-0.21058 -0.36336,-0.40158 -0.568951,-0.57756 -1.424379,-1.21921 -3.356756,-1.66245 -5.22581,-1.81067 -1.869053,-0.14822 -3.760672,-0.0434 -5.60996,-0.35238 -1.738647,-0.29048 -3.393268,-0.93881 -5.07175,-1.4773 -1.761942,-0.56527 -3.562776,-1.01251 -5.38903,-1.31044 -4.294756,-0.70063 -8.71732,-0.56641 -12.97748,0.32063 -4.057685,0.84488 -7.971287,2.37056 -11.53927,4.47962"
+               id="path27476-7-8"
+               inkscape:connector-curvature="0"
+               sodipodi:nodetypes="ssssssssssssssssaaas" />
+            <path
+               inkscape:label="Upper Beak"
+               transform="translate(59.99984,-58.362183)"
+               style="display:inline;fill:url(#linearGradient18824);fill-opacity:1;stroke:none"
+               d="m 53.63941,152.15408 c -1.929391,1.2986 -3.666135,2.88291 -5.13602,4.68523 -0.840698,1.03083 -1.603727,2.15084 -2.02709,3.41185 -0.332996,0.99185 -0.446478,2.04153 -0.65633,3.06652 -0.07861,0.38398 -0.171386,0.76923 -0.169741,1.16118 8.22e-4,0.19597 0.02568,0.39281 0.08646,0.57912 0.06079,0.18631 0.15831,0.36204 0.294069,0.50337 0.224679,0.23391 0.540409,0.36101 0.858102,0.42632 0.317692,0.0653 0.643798,0.0751 0.966058,0.11177 1.454637,0.16535 2.794463,0.87199 4.000333,1.70216 1.205869,0.83017 2.317112,1.79543 3.554437,2.57795 2.733893,1.72899 5.994554,2.49829 9.226902,2.62285 3.232347,0.12456 6.457354,-0.36641 9.629488,-0.99977 2.520903,-0.50334 5.033924,-1.10072 7.42544,-2.04356 3.662411,-1.44389 6.963507,-3.66693 10.11819,-6.02193 1.43301,-1.06976 2.84598,-2.17318 4.13251,-3.41525 0.43668,-0.42159 0.859162,-0.85947 1.327567,-1.24551 0.468404,-0.38603 0.988159,-0.72177 1.565973,-0.90766 0.880766,-0.28336 1.835622,-0.20203 2.748192,-0.0495 0.68732,0.11488 1.376,0.26902 2.07229,0.24128 0.34815,-0.0139 0.69661,-0.0742 1.02006,-0.2037 0.32345,-0.12954 0.62155,-0.33028 0.8433,-0.59903 0.29139,-0.35317 0.43996,-0.81445 0.4416,-1.2723 0.002,-0.45786 -0.1387,-0.91095 -0.37105,-1.30548 -0.4647,-0.78905 -1.26825,-1.32311 -2.10504,-1.69503 -1.14614,-0.50941 -2.3863,-0.76136 -3.605512,-1.05573 -3.745289,-0.90427 -7.384752,-2.24056 -10.83577,-3.95385 -1.715597,-0.85173 -3.383551,-1.79555 -5.07175,-2.70037 -1.735567,-0.93021 -3.504569,-1.82415 -5.38903,-2.39536 -4.21332,-1.27713 -8.818528,-0.85829 -12.97748,0.58609 -4.619909,1.60447 -8.797447,4.46312 -11.96616,8.18832 v 2e-5"
+               id="path27476-4"
+               inkscape:connector-curvature="0"
+               sodipodi:nodetypes="cssssssssssssssssaaasc" />
+            <path
+               inkscape:label="Upper Beak Highlight"
+               transform="translate(59.99984,-58.362183)"
+               style="display:inline;fill:#f6da4a;fill-opacity:1;stroke:none;filter:url(#filter14963-7)"
+               d="m 83.23853,153.07989 c -0.226496,-0.28623 -0.551139,-0.48799 -0.901294,-0.59103 -0.350155,-0.10304 -0.724669,-0.1104 -1.084432,-0.0488 -0.719527,0.12322 -1.364496,0.51049 -1.965744,0.9245 -1.708552,1.17648 -3.218864,2.62822 -4.53731,4.22977 -1.745223,2.11996 -3.18499,4.57171 -3.66755,7.27489 -0.08131,0.45547 -0.135106,0.92132 -0.07821,1.38049 0.0569,0.45916 0.232792,0.91479 0.558708,1.24319 0.286171,0.28835 0.675727,0.46425 1.077847,0.52203 0.40212,0.0578 0.815901,0.002 1.200757,-0.12836 0.769713,-0.26019 1.408987,-0.79942 2.014436,-1.34126 3.335973,-2.98548 6.352522,-6.56776 7.55957,-10.87877 0.121128,-0.43261 0.224012,-0.87566 0.221233,-1.3249 -0.0028,-0.44924 -0.119237,-0.90947 -0.398013,-1.26176"
+               id="path28357-7"
+               inkscape:connector-curvature="0"
+               sodipodi:nodetypes="zzzzzzzz" />
+            <g
+               style="display:inline"
+               inkscape:label="Nostrils"
+               id="g681">
+              <path
+                 sodipodi:nodetypes="aaaaaaa"
+                 inkscape:connector-curvature="0"
+                 id="path28396-7"
+                 d="m 135.25114,88.527667 c 0.23129,0.7424 1.42778,0.61935 2.11906,0.97542 0.60659,0.31244 1.09447,0.99723 1.77651,1.01692 0.65093,0.0188 1.66398,-0.22542 1.74866,-0.87109 0.11187,-0.85303 -1.13379,-1.39511 -1.93536,-1.70762 -1.03148,-0.40216 -2.35301,-0.6062 -3.3206,-0.0682 -0.22173,0.12328 -0.46373,0.41238 -0.38827,0.65458 z"
+                 style="display:inline;opacity:0.8;fill:url(#radialGradient18826);fill-opacity:1;stroke:none;filter:url(#filter15177-1)"
+                 inkscape:label="Right Nostril" />
+              <path
+                 sodipodi:nodetypes="asssa"
+                 inkscape:connector-curvature="0"
+                 id="path28398-7"
+                 d="m 123.82694,88.107827 c -0.88816,-0.28854 -2.35748,1.27746 -1.87806,2.07886 0.13167,0.22009 0.53491,0.49916 0.80641,0.34992 0.40925,-0.22497 0.74404,-1.02958 1.18746,-1.34496 0.29608,-0.21058 0.22974,-0.97156 -0.11581,-1.08382 z"
+                 style="display:inline;opacity:0.8;fill:url(#radialGradient18828);fill-opacity:1;stroke:none;filter:url(#filter15173-0)"
+                 inkscape:label="Left Nostril" />
+            </g>
+            <path
+               clip-path="url(#clipPath697)"
+               inkscape:label="Beak Side Highlight"
+               inkscape:connector-curvature="0"
+               id="path28570-8"
+               d="m 245.90496,158.28406 a 2.608083,2.328125 0 0 1 -2.60809,2.32812 2.608083,2.328125 0 0 1 -2.60808,-2.32812 2.608083,2.328125 0 0 1 2.60808,-2.32813 2.608083,2.328125 0 0 1 2.60809,2.32813 z"
+               style="color:#000000;display:inline;overflow:visible;visibility:visible;fill:url(#linearGradient18830);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:0.25;marker:none;filter:url(#filter28584-4);enable-background:accumulate"
+               transform="matrix(1.0956223,0,-0.17017853,1.5181314,-76.24447,-140.47964)" />
+          </g>
+        </g>
+      </g>
+    </g>
+  </g>
+</svg>
index 062cf88..8f9be0e 100644 (file)
@@ -136,8 +136,8 @@ needed).
    misc-devices/index
    scheduler/index
    mhi/index
-   tty/index
    peci/index
+   hte/index
 
 Architecture-agnostic documentation
 -----------------------------------
index 2638dce..c926481 100644 (file)
@@ -85,15 +85,15 @@ accepted by this input device. Our example device can only generate EV_KEY
 type events, and from those only BTN_0 event code. Thus we only set these
 two bits. We could have used::
 
-       set_bit(EV_KEY, button_dev.evbit);
-       set_bit(BTN_0, button_dev.keybit);
+       set_bit(EV_KEY, button_dev->evbit);
+       set_bit(BTN_0, button_dev->keybit);
 
 as well, but with more than single bits the first approach tends to be
 shorter.
 
 Then the example driver registers the input device structure by calling::
 
-       input_register_device(&button_dev);
+       input_register_device(button_dev);
 
 This adds the button_dev structure to linked lists of the input driver and
 calls device handler modules _connect functions to tell them a new input
diff --git a/Documentation/leds/leds-qcom-lpg.rst b/Documentation/leds/leds-qcom-lpg.rst
new file mode 100644 (file)
index 0000000..de7ceea
--- /dev/null
@@ -0,0 +1,78 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================
+Kernel driver for Qualcomm LPG
+==============================
+
+Description
+-----------
+
+The Qualcomm LPG can be found in a variety of Qualcomm PMICs and consists of a
+number of PWM channels, a programmable pattern lookup table and an RGB LED
+current sink.
+
+To facilitate the various use cases, the LPG channels can be exposed as
+individual LEDs, grouped together as RGB LEDs or otherwise accessed as PWM
+channels. The output of each PWM channel is routed to other hardware
+blocks, such as the RGB current sink, GPIO pins, etc.
+
+Each PWM channel can operate with a period between 27us and 384 seconds and
+has a 9-bit resolution of the duty cycle.
+
+In order to support status notifications while the CPU subsystem is in deeper
+idle states, the LPG provides hardware pattern support. This consists of a
+shared lookup table of brightness values and per-channel properties that
+select the range within the table to use, the pattern rate and whether the
+pattern should repeat.
+
+The pattern for a channel can be programmed using the "pattern" trigger, using
+the hw_pattern attribute.
+
+/sys/class/leds/<led>/hw_pattern
+--------------------------------
+
+Specify a hardware pattern for a Qualcomm LPG LED.
+
+The pattern is a series of brightness and hold-time pairs, with the hold-time
+expressed in milliseconds. The hold-time is a property of the pattern and must
+therefore be identical for each element in the pattern (except for the pauses
+described below). As the LPG hardware is not able to perform the linear
+transitions expected by the leds-trigger-pattern format, each entry in the
+pattern must be followed by a zero-length entry of the same brightness.
+
+Simple pattern::
+
+    "255 500 255 0 0 500 0 0"
+
+        ^
+        |
+    255 +----+    +----+
+        |    |    |    |      ...
+      0 |    +----+    +----
+        +---------------------->
+        0    5   10   15     time (100ms)
+
+The LPG supports specifying a longer hold-time for the first and last element
+in the pattern, the so-called "low pause" and "high pause".
+
+Low-pause pattern::
+
+    "255 1000 255 0 0 500 0 0 255 500 255 0 0 500 0 0"
+
+        ^
+        |
+    255 +--------+    +----+    +----+    +--------+
+        |        |    |    |    |    |    |        |      ...
+      0 |        +----+    +----+    +----+        +----
+        +----------------------------->
+        0    5   10   15  20   25   time (100ms)
+
+Similarly, the last entry can be stretched by giving it a higher hold-time
+(the "high pause").
+
+In order to save space in the shared lookup table, the LPG supports a
+"ping-pong" mode, in which each run through the pattern is performed by first
+running the pattern forward, then backwards. This mode is automatically used
+by the driver when the given pattern is a palindrome. In this case the "high
+pause" denotes the wait time before the pattern is run in reverse and, as
+such, the middle item in the pattern is allowed to have a different hold-time.
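+
+As an illustration, a minimal userspace sketch that selects the "pattern"
+trigger and programs the "Simple pattern" above might look as follows (the
+LED name "status" is made up for the example, and error handling is kept
+minimal)::
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <unistd.h>
+
+    /* Write a string to a sysfs attribute and report any failure. */
+    static int sysfs_write(const char *path, const char *val)
+    {
+            ssize_t ret;
+            int fd = open(path, O_WRONLY);
+
+            if (fd < 0) {
+                    perror(path);
+                    return -1;
+            }
+            ret = write(fd, val, strlen(val));
+            close(fd);
+            return ret < 0 ? -1 : 0;
+    }
+
+    int main(void)
+    {
+            /* Select the "pattern" trigger so hw_pattern becomes available. */
+            if (sysfs_write("/sys/class/leds/status/trigger", "pattern"))
+                    return 1;
+
+            /* 500 ms at full brightness, 500 ms off, repeating. */
+            return sysfs_write("/sys/class/leds/status/hw_pattern",
+                               "255 500 255 0 0 500 0 0") ? 1 : 0;
+    }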
diff --git a/Documentation/loongarch/features.rst b/Documentation/loongarch/features.rst
new file mode 100644 (file)
index 0000000..ebacade
--- /dev/null
@@ -0,0 +1,3 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. kernel-feat:: $srctree/Documentation/features loongarch
diff --git a/Documentation/loongarch/index.rst b/Documentation/loongarch/index.rst
new file mode 100644 (file)
index 0000000..aaba648
--- /dev/null
@@ -0,0 +1,21 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
+LoongArch Architecture
+======================
+
+.. toctree::
+   :maxdepth: 2
+   :numbered:
+
+   introduction
+   irq-chip-model
+
+   features
+
+.. only::  subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`
diff --git a/Documentation/loongarch/introduction.rst b/Documentation/loongarch/introduction.rst
new file mode 100644 (file)
index 0000000..2bf40ad
--- /dev/null
@@ -0,0 +1,387 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================
+Introduction to LoongArch
+=========================
+
+LoongArch is a new RISC ISA, which is a bit like MIPS or RISC-V. There are
+currently 3 variants: a reduced 32-bit version (LA32R), a standard 32-bit
+version (LA32S) and a 64-bit version (LA64). There are 4 privilege levels
+(PLVs) defined in LoongArch: PLV0~PLV3, from high to low. The kernel runs at
+PLV0 while applications run at PLV3. This document introduces the registers,
+basic instruction set, virtual memory and some other topics of LoongArch.
+
+Registers
+=========
+
+LoongArch registers include general purpose registers (GPRs), floating point
+registers (FPRs), vector registers (VRs) and control status registers (CSRs)
+used in privileged mode (PLV0).
+
+GPRs
+----
+
+LoongArch has 32 GPRs ( ``$r0`` ~ ``$r31`` ); each one is 32-bit wide in LA32
+and 64-bit wide in LA64. ``$r0`` is hard-wired to zero, and the other registers
+are not architecturally special. (Except ``$r1``, which is hard-wired as the
+link register of the BL instruction.)
+
+The kernel uses a variant of the LoongArch register convention, as described in
+the LoongArch ELF psABI spec, in :ref:`References <loongarch-references>`:
+
+================= =============== =================== ============
+Name              Alias           Usage               Preserved
+                                                      across calls
+================= =============== =================== ============
+``$r0``           ``$zero``       Constant zero       Unused
+``$r1``           ``$ra``         Return address      No
+``$r2``           ``$tp``         TLS/Thread pointer  Unused
+``$r3``           ``$sp``         Stack pointer       Yes
+``$r4``-``$r11``  ``$a0``-``$a7`` Argument registers  No
+``$r4``-``$r5``   ``$v0``-``$v1`` Return value        No
+``$r12``-``$r20`` ``$t0``-``$t8`` Temp registers      No
+``$r21``          ``$u0``         Percpu base address Unused
+``$r22``          ``$fp``         Frame pointer       Yes
+``$r23``-``$r31`` ``$s0``-``$s8`` Static registers    Yes
+================= =============== =================== ============
+
+Note: The register ``$r21`` is reserved in the ELF psABI, but used by the Linux
+kernel for storing the percpu base address. It normally has no ABI name, but is
+called ``$u0`` in the kernel. You may also see ``$v0`` or ``$v1`` in some old
+code; however, they are deprecated aliases of ``$a0`` and ``$a1`` respectively.
+
+FPRs
+----
+
+LoongArch has 32 FPRs ( ``$f0`` ~ ``$f31`` ) when the FPU is present. Each one
+is 64-bit wide on the LA64 cores.
+
+The floating-point register convention is the same as described in the
+LoongArch ELF psABI spec:
+
+================= ================== =================== ============
+Name              Alias              Usage               Preserved
+                                                         across calls
+================= ================== =================== ============
+``$f0``-``$f7``   ``$fa0``-``$fa7``  Argument registers  No
+``$f0``-``$f1``   ``$fv0``-``$fv1``  Return value        No
+``$f8``-``$f23``  ``$ft0``-``$ft15`` Temp registers      No
+``$f24``-``$f31`` ``$fs0``-``$fs7``  Static registers    Yes
+================= ================== =================== ============
+
+Note: You may see ``$fv0`` or ``$fv1`` in some old code; however, they are
+deprecated aliases of ``$fa0`` and ``$fa1`` respectively.
+
+VRs
+----
+
+There are currently 2 vector extensions to LoongArch:
+
+- LSX (Loongson SIMD eXtension) with 128-bit vectors,
+- LASX (Loongson Advanced SIMD eXtension) with 256-bit vectors.
+
+LSX brings ``$v0`` ~ ``$v31`` while LASX brings ``$x0`` ~ ``$x31`` as the vector
+registers.
+
+The VRs overlap with FPRs: for example, on a core implementing LSX and LASX,
+the lower 128 bits of ``$x0`` are shared with ``$v0``, and the lower 64 bits of
+``$v0`` are shared with ``$f0``; the same holds for all other VRs.
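+
+A conceptual C sketch of this aliasing might look as follows (illustrative
+only, not a structure used by the kernel; LoongArch is little-endian, so the
+first bytes of each view correspond to the low-order bits)::
+
+    #include <stdint.h>
+
+    /* One 256-bit LASX register: the LSX view covers its low 128 bits and
+     * the FPR view its low 64 bits, as described above. */
+    union loongarch_vreg {
+            uint8_t  x[32];   /* $xN: full 256-bit LASX view */
+            uint8_t  v[16];   /* $vN: low 128 bits (LSX)     */
+            uint64_t f;       /* $fN: low 64 bits (FPR)      */
+    };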
+
+CSRs
+----
+
+CSRs can only be accessed from privileged mode (PLV0):
+
+================= ===================================== ==============
+Address           Full Name                             Abbrev Name
+================= ===================================== ==============
+0x0               Current Mode Information              CRMD
+0x1               Pre-exception Mode Information        PRMD
+0x2               Extension Unit Enable                 EUEN
+0x3               Miscellaneous Control                 MISC
+0x4               Exception Configuration               ECFG
+0x5               Exception Status                      ESTAT
+0x6               Exception Return Address              ERA
+0x7               Bad (Faulting) Virtual Address        BADV
+0x8               Bad (Faulting) Instruction Word       BADI
+0xC               Exception Entrypoint Address          EENTRY
+0x10              TLB Index                             TLBIDX
+0x11              TLB Entry High-order Bits             TLBEHI
+0x12              TLB Entry Low-order Bits 0            TLBELO0
+0x13              TLB Entry Low-order Bits 1            TLBELO1
+0x18              Address Space Identifier              ASID
+0x19              Page Global Directory Address for     PGDL
+                  Lower-half Address Space
+0x1A              Page Global Directory Address for     PGDH
+                  Higher-half Address Space
+0x1B              Page Global Directory Address         PGD
+0x1C              Page Walk Control for Lower-          PWCL
+                  half Address Space
+0x1D              Page Walk Control for Higher-         PWCH
+                  half Address Space
+0x1E              STLB Page Size                        STLBPS
+0x1F              Reduced Virtual Address Configuration RVACFG
+0x20              CPU Identifier                        CPUID
+0x21              Privileged Resource Configuration 1   PRCFG1
+0x22              Privileged Resource Configuration 2   PRCFG2
+0x23              Privileged Resource Configuration 3   PRCFG3
+0x30+n (0≤n≤15)   Saved Data register                   SAVEn
+0x40              Timer Identifier                      TID
+0x41              Timer Configuration                   TCFG
+0x42              Timer Value                           TVAL
+0x43              Compensation of Timer Count           CNTC
+0x44              Timer Interrupt Clearing              TICLR
+0x60              LLBit Control                         LLBCTL
+0x80              Implementation-specific Control 1     IMPCTL1
+0x81              Implementation-specific Control 2     IMPCTL2
+0x88              TLB Refill Exception Entrypoint       TLBRENTRY
+                  Address
+0x89              TLB Refill Exception BAD (Faulting)   TLBRBADV
+                  Virtual Address
+0x8A              TLB Refill Exception Return Address   TLBRERA
+0x8B              TLB Refill Exception Saved Data       TLBRSAVE
+                  Register
+0x8C              TLB Refill Exception Entry Low-order  TLBRELO0
+                  Bits 0
+0x8D              TLB Refill Exception Entry Low-order  TLBRELO1
+                  Bits 1
+0x8E              TLB Refill Exception Entry High-order TLBREHI
+                  Bits
+0x8F              TLB Refill Exception Pre-exception    TLBRPRMD
+                  Mode Information
+0x90              Machine Error Control                 MERRCTL
+0x91              Machine Error Information 1           MERRINFO1
+0x92              Machine Error Information 2           MERRINFO2
+0x93              Machine Error Exception Entrypoint    MERRENTRY
+                  Address
+0x94              Machine Error Exception Return        MERRERA
+                  Address
+0x95              Machine Error Exception Saved Data    MERRSAVE
+                  Register
+0x98              Cache TAGs                            CTAG
+0x180+n (0≤n≤3)   Direct Mapping Configuration Window n DMWn
+0x200+2n (0≤n≤31) Performance Monitor Configuration n   PMCFGn
+0x201+2n (0≤n≤31) Performance Monitor Overall Counter n PMCNTn
+0x300             Memory Load/Store WatchPoint          MWPC
+                  Overall Control
+0x301             Memory Load/Store WatchPoint          MWPS
+                  Overall Status
+0x310+8n (0≤n≤7)  Memory Load/Store WatchPoint n        MWPnCFG1
+                  Configuration 1
+0x311+8n (0≤n≤7)  Memory Load/Store WatchPoint n        MWPnCFG2
+                  Configuration 2
+0x312+8n (0≤n≤7)  Memory Load/Store WatchPoint n        MWPnCFG3
+                  Configuration 3
+0x313+8n (0≤n≤7)  Memory Load/Store WatchPoint n        MWPnCFG4
+                  Configuration 4
+0x380             Instruction Fetch WatchPoint          FWPC
+                  Overall Control
+0x381             Instruction Fetch WatchPoint          FWPS
+                  Overall Status
+0x390+8n (0≤n≤7)  Instruction Fetch WatchPoint n        FWPnCFG1
+                  Configuration 1
+0x391+8n (0≤n≤7)  Instruction Fetch WatchPoint n        FWPnCFG2
+                  Configuration 2
+0x392+8n (0≤n≤7)  Instruction Fetch WatchPoint n        FWPnCFG3
+                  Configuration 3
+0x393+8n (0≤n≤7)  Instruction Fetch WatchPoint n        FWPnCFG4
+                  Configuration 4
+0x500             Debug Register                        DBG
+0x501             Debug Exception Return Address        DERA
+0x502             Debug Exception Saved Data Register   DSAVE
+================= ===================================== ==============
+
+ERA, TLBRERA, MERRERA and DERA are sometimes also known as EPC, TLBREPC, MERREPC
+and DEPC respectively.
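+
+The CSRs above are accessed with the privileged CSRRD/CSRWR/CSRXCHG
+instructions listed among the privileged instructions below. As a minimal,
+illustrative sketch only (assuming a LoongArch GCC toolchain and execution
+at PLV0; the helper name is made up for this document), reading the CPUID
+CSR (0x20) from C could look like::
+
+    /* Read CSR 0x20 (CPUID) with the privileged CSRRD instruction.  The
+     * CSR number is an immediate operand, so it is spelled directly in
+     * the asm template.  Executing this at PLV1~3 raises an exception.
+     */
+    static inline unsigned long read_csr_cpuid(void)
+    {
+            unsigned long val;
+
+            __asm__ volatile("csrrd %0, 0x20" : "=r"(val));
+            return val;
+    }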
+
+Basic Instruction Set
+=====================
+
+Instruction formats
+-------------------
+
+LoongArch instructions are 32 bits wide, belonging to 9 basic instruction
+formats (and variants of them):
+
+=========== ==========================
+Format name Composition
+=========== ==========================
+2R          Opcode + Rj + Rd
+3R          Opcode + Rk + Rj + Rd
+4R          Opcode + Ra + Rk + Rj + Rd
+2RI8        Opcode + I8 + Rj + Rd
+2RI12       Opcode + I12 + Rj + Rd
+2RI14       Opcode + I14 + Rj + Rd
+2RI16       Opcode + I16 + Rj + Rd
+1RI21       Opcode + I21L + Rj + I21H
+I26         Opcode + I26L + I26H
+=========== ==========================
+
+Rd is the destination register operand, while Rj, Rk and Ra ("a" stands for
+"additional") are the source register operands. I8/I12/I16/I21/I26 are
+immediate operands of respective width. The longer I21 and I26 are stored
+in separate higher and lower parts in the instruction word, denoted by the "L"
+and "H" suffixes.
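+
+As an illustration of how an instruction word is split into fields, here is
+a small C sketch that decodes a 2RI12-format word. The exact bit positions
+(Rd in bits 0-4, Rj in bits 5-9, the 12-bit immediate in bits 10-21) are
+taken from the ISA manual listed in the References, not from the table
+above, and the helper names are made up for this document::
+
+    #include <stdint.h>
+
+    /* Field layout of a 2RI12 word (e.g. ADDI.W, LD.W, ST.D): the opcode
+     * occupies the high bits, si12 is in bits 21..10, Rj in bits 9..5 and
+     * Rd in bits 4..0.
+     */
+    struct insn_2ri12 {
+            unsigned int rd;
+            unsigned int rj;
+            int32_t si12;   /* sign-extended 12-bit immediate */
+    };
+
+    static struct insn_2ri12 decode_2ri12(uint32_t word)
+    {
+            struct insn_2ri12 d;
+
+            d.rd = word & 0x1f;
+            d.rj = (word >> 5) & 0x1f;
+            /* Move bit 21 up to bit 31, then arithmetic-shift back down
+             * so that the 12-bit immediate is sign-extended.
+             */
+            d.si12 = (int32_t)(word << 10) >> 20;
+            return d;
+    }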
+
+List of Instructions
+--------------------
+
+For brevity, only instruction names (mnemonics) are listed here; please see the
+:ref:`References <loongarch-references>` for details.
+
+
+1. Arithmetic Instructions::
+
+    ADD.W SUB.W ADDI.W ADD.D SUB.D ADDI.D
+    SLT SLTU SLTI SLTUI
+    AND OR NOR XOR ANDN ORN ANDI ORI XORI
+    MUL.W MULH.W MULH.WU DIV.W DIV.WU MOD.W MOD.WU
+    MUL.D MULH.D MULH.DU DIV.D DIV.DU MOD.D MOD.DU
+    PCADDI PCADDU12I PCADDU18I
+    LU12I.W LU32I.D LU52I.D ADDU16I.D
+
+2. Bit-shift Instructions::
+
+    SLL.W SRL.W SRA.W ROTR.W SLLI.W SRLI.W SRAI.W ROTRI.W
+    SLL.D SRL.D SRA.D ROTR.D SLLI.D SRLI.D SRAI.D ROTRI.D
+
+3. Bit-manipulation Instructions::
+
+    EXT.W.B EXT.W.H CLO.W CLO.D CLZ.W CLZ.D CTO.W CTO.D CTZ.W CTZ.D
+    BYTEPICK.W BYTEPICK.D BSTRINS.W BSTRINS.D BSTRPICK.W BSTRPICK.D
+    REVB.2H REVB.4H REVB.2W REVB.D REVH.2W REVH.D BITREV.4B BITREV.8B BITREV.W BITREV.D
+    MASKEQZ MASKNEZ
+
+4. Branch Instructions::
+
+    BEQ BNE BLT BGE BLTU BGEU BEQZ BNEZ B BL JIRL
+
+5. Load/Store Instructions::
+
+    LD.B LD.BU LD.H LD.HU LD.W LD.WU LD.D ST.B ST.H ST.W ST.D
+    LDX.B LDX.BU LDX.H LDX.HU LDX.W LDX.WU LDX.D STX.B STX.H STX.W STX.D
+    LDPTR.W LDPTR.D STPTR.W STPTR.D
+    PRELD PRELDX
+
+6. Atomic Operation Instructions::
+
+    LL.W SC.W LL.D SC.D
+    AMSWAP.W AMSWAP.D AMADD.W AMADD.D AMAND.W AMAND.D AMOR.W AMOR.D AMXOR.W AMXOR.D
+    AMMAX.W AMMAX.D AMMIN.W AMMIN.D
+
+7. Barrier Instructions::
+
+    IBAR DBAR
+
+8. Special Instructions::
+
+    SYSCALL BREAK CPUCFG NOP IDLE ERTN(ERET) DBCL(DBGCALL) RDTIMEL.W RDTIMEH.W RDTIME.D
+    ASRTLE.D ASRTGT.D
+
+9. Privileged Instructions::
+
+    CSRRD CSRWR CSRXCHG
+    IOCSRRD.B IOCSRRD.H IOCSRRD.W IOCSRRD.D IOCSRWR.B IOCSRWR.H IOCSRWR.W IOCSRWR.D
+    CACOP TLBP(TLBSRCH) TLBRD TLBWR TLBFILL TLBCLR TLBFLUSH INVTLB LDDIR LDPTE
+
+Virtual Memory
+==============
+
+LoongArch supports direct-mapped virtual memory and page-mapped virtual memory.
+
+Direct-mapped virtual memory is configured by CSR.DMWn (n=0~3); it has a simple
+relationship between virtual address (VA) and physical address (PA)::
+
+ VA = PA + FixedOffset
+
+Page-mapped virtual memory has an arbitrary relationship between VA and PA,
+which is recorded in the TLB and page tables. LoongArch's TLB includes a
+fully-associative MTLB (Multiple Page Size TLB) and a set-associative STLB
+(Single Page Size TLB).
+
+By default, the whole virtual address space of LA32 is configured like this:
+
+============ =========================== =============================
+Name         Address Range               Attributes
+============ =========================== =============================
+``UVRANGE``  ``0x00000000 - 0x7FFFFFFF`` Page-mapped, Cached, PLV0~3
+``KPRANGE0`` ``0x80000000 - 0x9FFFFFFF`` Direct-mapped, Uncached, PLV0
+``KPRANGE1`` ``0xA0000000 - 0xBFFFFFFF`` Direct-mapped, Cached, PLV0
+``KVRANGE``  ``0xC0000000 - 0xFFFFFFFF`` Page-mapped, Cached, PLV0
+============ =========================== =============================
+
+User mode (PLV3) can only access UVRANGE. For direct-mapped KPRANGE0 and
+KPRANGE1, PA is equal to VA with bits 29~31 cleared (both windows map the
+lowest 512 MB of the physical address space). For example, the uncached
+direct-mapped VA of PA 0x00001000 is 0x80001000, and the cached direct-mapped
+VA of PA 0x00001000 is 0xA0001000.
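+
+A minimal C sketch of this mapping (the helper names are invented for this
+document; both windows alias the lowest 512 MB of physical memory, as in
+the example above)::
+
+    #include <stdint.h>
+
+    #define KPRANGE0_BASE 0x80000000u   /* direct-mapped, uncached */
+    #define KPRANGE1_BASE 0xA0000000u   /* direct-mapped, cached   */
+
+    /* PA equals VA with bits 29..31 cleared. */
+    static inline uint32_t la32_kpr_to_pa(uint32_t va)
+    {
+            return va & 0x1FFFFFFFu;
+    }
+
+    static inline uint32_t la32_pa_to_uncached(uint32_t pa)
+    {
+            return KPRANGE0_BASE | (pa & 0x1FFFFFFFu);
+    }
+
+    static inline uint32_t la32_pa_to_cached(uint32_t pa)
+    {
+            return KPRANGE1_BASE | (pa & 0x1FFFFFFFu);
+    }
+
+With the example above, ``la32_pa_to_cached(0x00001000)`` yields 0xA0001000.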
+
+By default, the whole virtual address space of LA64 is configured like this:
+
+============ ====================== ======================================
+Name         Address Range          Attributes
+============ ====================== ======================================
+``XUVRANGE`` ``0x0000000000000000 - Page-mapped, Cached, PLV0~3
+             0x3FFFFFFFFFFFFFFF``
+``XSPRANGE`` ``0x4000000000000000 - Direct-mapped, Cached / Uncached, PLV0
+             0x7FFFFFFFFFFFFFFF``
+``XKPRANGE`` ``0x8000000000000000 - Direct-mapped, Cached / Uncached, PLV0
+             0xBFFFFFFFFFFFFFFF``
+``XKVRANGE`` ``0xC000000000000000 - Page-mapped, Cached, PLV0
+             0xFFFFFFFFFFFFFFFF``
+============ ====================== ======================================
+
+User mode (PLV3) can only access XUVRANGE. For direct-mapped XSPRANGE and
+XKPRANGE, PA is equal to VA with bits 60~63 cleared, and the cache attribute
+is configured by bits 60~61 in VA: 0 is for strongly-ordered uncached, 1 is
+for coherent cached, and 2 is for weakly-ordered uncached.
+
+Currently we only use XKPRANGE for direct mapping and XSPRANGE is reserved.
+
+To illustrate: the strongly-ordered uncached direct-mapped VA (in XKPRANGE)
+of PA 0x00000000_00001000 is 0x80000000_00001000, the coherent cached
+direct-mapped VA of the same PA is 0x90000000_00001000, and its weakly-ordered
+uncached direct-mapped VA is 0xA0000000_00001000.
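+
+A hedged C sketch of the XKPRANGE mapping (the names are invented for this
+document; only the three cache attributes listed above are handled)::
+
+    #include <stdint.h>
+
+    enum xkp_cache_attr {
+            XKP_SUC = 0,    /* strongly-ordered uncached */
+            XKP_CC  = 1,    /* coherent cached           */
+            XKP_WUC = 2,    /* weakly-ordered uncached   */
+    };
+
+    /* XKPRANGE starts at 0x8000000000000000; bits 60..61 select the cache
+     * attribute and the PA is the VA with bits 60..63 cleared.
+     */
+    static inline uint64_t pa_to_xkprange(uint64_t pa, enum xkp_cache_attr attr)
+    {
+            return (8ULL << 60) | ((uint64_t)attr << 60) |
+                   (pa & ((1ULL << 60) - 1));
+    }
+
+    static inline uint64_t xkprange_to_pa(uint64_t va)
+    {
+            return va & ((1ULL << 60) - 1);
+    }
+
+For the PA 0x00000000_00001000 used above, ``pa_to_xkprange(pa, XKP_CC)``
+returns 0x90000000_00001000, matching the second example.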
+
+Relationship of Loongson and LoongArch
+======================================
+
+LoongArch is a RISC ISA which is distinct from any other existing ISA, while
+Loongson is a family of processors. Loongson includes 3 series: Loongson-1 is
+the 32-bit processor series, Loongson-2 is the low-end 64-bit processor series,
+and Loongson-3 is the high-end 64-bit processor series. Older Loongson
+processors are based on MIPS, while newer ones are based on LoongArch. Take
+Loongson-3 as an example: Loongson-3A1000/3B1500/3A2000/3A3000/3A4000 are
+MIPS-compatible, while Loongson-3A5000 (and future revisions) are all based on
+LoongArch.
+
+.. _loongarch-references:
+
+References
+==========
+
+Official web site of Loongson Technology Corp. Ltd.:
+
+  http://www.loongson.cn/
+
+Developer web site of Loongson and LoongArch (Software and Documentation):
+
+  http://www.loongnix.cn/
+
+  https://github.com/loongson/
+
+  https://loongson.github.io/LoongArch-Documentation/
+
+Documentation of LoongArch ISA:
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.00-CN.pdf (in Chinese)
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.00-EN.pdf (in English)
+
+Documentation of LoongArch ELF psABI:
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v1.00-CN.pdf (in Chinese)
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v1.00-EN.pdf (in English)
+
+Linux kernel repository of Loongson and LoongArch:
+
+  https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson.git
diff --git a/Documentation/loongarch/irq-chip-model.rst b/Documentation/loongarch/irq-chip-model.rst
new file mode 100644 (file)
index 0000000..8d88f7a
--- /dev/null
@@ -0,0 +1,156 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================================
+IRQ chip model (hierarchy) of LoongArch
+=======================================
+
+Currently, LoongArch based processors (e.g. Loongson-3A5000) can only work together
+with LS7A chipsets. The irq chips in LoongArch computers include CPUINTC (CPU Core
+Interrupt Controller), LIOINTC (Legacy I/O Interrupt Controller), EIOINTC (Extended
+I/O Interrupt Controller), HTVECINTC (Hyper-Transport Vector Interrupt Controller),
+PCH-PIC (Main Interrupt Controller in LS7A chipset), PCH-LPC (LPC Interrupt Controller
+in LS7A chipset) and PCH-MSI (MSI Interrupt Controller).
+
+CPUINTC is a per-core controller (in the CPU), LIOINTC/EIOINTC/HTVECINTC are
+per-package controllers (in the CPU), while PCH-PIC/PCH-LPC/PCH-MSI are
+controllers outside of the CPU (i.e., in the chipset). These controllers (in
+other words, irqchips) are linked in a hierarchy, and there are two models of
+hierarchy (legacy model and extended model).
+
+Legacy IRQ model
+================
+
+In this model, IPIs (Inter-Processor Interrupts) and CPU local timer interrupts
+go to CPUINTC directly, CPU UART interrupts go to LIOINTC, while interrupts
+from all other devices go to PCH-PIC/PCH-LPC/PCH-MSI, are gathered by
+HTVECINTC, then go to LIOINTC, and finally to CPUINTC::
+
+     +-----+     +---------+     +-------+
+     | IPI | --> | CPUINTC | <-- | Timer |
+     +-----+     +---------+     +-------+
+                      ^
+                      |
+                 +---------+     +-------+
+                 | LIOINTC | <-- | UARTs |
+                 +---------+     +-------+
+                      ^
+                      |
+                +-----------+
+                | HTVECINTC |
+                +-----------+
+                 ^         ^
+                 |         |
+           +---------+ +---------+
+           | PCH-PIC | | PCH-MSI |
+           +---------+ +---------+
+             ^     ^           ^
+             |     |           |
+     +---------+ +---------+ +---------+
+     | PCH-LPC | | Devices | | Devices |
+     +---------+ +---------+ +---------+
+          ^
+          |
+     +---------+
+     | Devices |
+     +---------+
+
+Extended IRQ model
+==================
+
+In this model, IPIs (Inter-Processor Interrupts) and CPU local timer interrupts
+go to CPUINTC directly, CPU UART interrupts go to LIOINTC, while interrupts
+from all other devices go to PCH-PIC/PCH-LPC/PCH-MSI, are gathered by EIOINTC,
+and then go to CPUINTC directly::
+
+          +-----+     +---------+     +-------+
+          | IPI | --> | CPUINTC | <-- | Timer |
+          +-----+     +---------+     +-------+
+                       ^       ^
+                       |       |
+                +---------+ +---------+     +-------+
+                | EIOINTC | | LIOINTC | <-- | UARTs |
+                +---------+ +---------+     +-------+
+                 ^       ^
+                 |       |
+          +---------+ +---------+
+          | PCH-PIC | | PCH-MSI |
+          +---------+ +---------+
+            ^     ^           ^
+            |     |           |
+    +---------+ +---------+ +---------+
+    | PCH-LPC | | Devices | | Devices |
+    +---------+ +---------+ +---------+
+         ^
+         |
+    +---------+
+    | Devices |
+    +---------+
+
+ACPI-related definitions
+========================
+
+CPUINTC::
+
+  ACPI_MADT_TYPE_CORE_PIC;
+  struct acpi_madt_core_pic;
+  enum acpi_madt_core_pic_version;
+
+LIOINTC::
+
+  ACPI_MADT_TYPE_LIO_PIC;
+  struct acpi_madt_lio_pic;
+  enum acpi_madt_lio_pic_version;
+
+EIOINTC::
+
+  ACPI_MADT_TYPE_EIO_PIC;
+  struct acpi_madt_eio_pic;
+  enum acpi_madt_eio_pic_version;
+
+HTVECINTC::
+
+  ACPI_MADT_TYPE_HT_PIC;
+  struct acpi_madt_ht_pic;
+  enum acpi_madt_ht_pic_version;
+
+PCH-PIC::
+
+  ACPI_MADT_TYPE_BIO_PIC;
+  struct acpi_madt_bio_pic;
+  enum acpi_madt_bio_pic_version;
+
+PCH-MSI::
+
+  ACPI_MADT_TYPE_MSI_PIC;
+  struct acpi_madt_msi_pic;
+  enum acpi_madt_msi_pic_version;
+
+PCH-LPC::
+
+  ACPI_MADT_TYPE_LPC_PIC;
+  struct acpi_madt_lpc_pic;
+  enum acpi_madt_lpc_pic_version;
+
+References
+==========
+
+Documentation of Loongson-3A5000:
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-3A5000-usermanual-1.02-CN.pdf (in Chinese)
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-3A5000-usermanual-1.02-EN.pdf (in English)
+
+Documentation of Loongson's LS7A chipset:
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-7A1000-usermanual-2.00-CN.pdf (in Chinese)
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-7A1000-usermanual-2.00-EN.pdf (in English)
+
+Note: CPUINTC is CSR.ECFG/CSR.ESTAT and its interrupt controller described
+in Section 7.4 of "LoongArch Reference Manual, Vol 1"; LIOINTC is "Legacy I/O
+Interrupts" described in Section 11.1 of "Loongson 3A5000 Processor Reference
+Manual"; EIOINTC is "Extended I/O Interrupts" described in Section 11.2 of
+"Loongson 3A5000 Processor Reference Manual"; HTVECINTC is "HyperTransport
+Interrupts" described in Section 14.3 of "Loongson 3A5000 Processor Reference
+Manual"; PCH-PIC/PCH-MSI is "Interrupt Controller" described in Section 5 of
+"Loongson 7A1000 Bridge User Manual"; PCH-LPC is "LPC Interrupts" described in
+Section 24.3 of "Loongson 7A1000 Bridge User Manual".
index 30ac58f..756be15 100644 (file)
@@ -25,6 +25,7 @@ fit into other categories.
    isl29003
    lis3lv02d
    max6875
+   oxsemi-tornado
    pci-endpoint-test
    spear-pcie-gadget
    uacce
diff --git a/Documentation/misc-devices/oxsemi-tornado.rst b/Documentation/misc-devices/oxsemi-tornado.rst
new file mode 100644 (file)
index 0000000..b33351b
--- /dev/null
@@ -0,0 +1,131 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================================================================
+Notes on Oxford Semiconductor PCIe (Tornado) 950 serial port devices
+====================================================================
+
+Oxford Semiconductor PCIe (Tornado) 950 serial port devices are driven
+by a fixed 62.5MHz clock input derived from the 100MHz PCI Express clock.
+
+The baud rate produced by the baud generator is obtained from this input
+frequency by dividing it by the clock prescaler, which can be set to any
+value from 1 to 63.875 in increments of 0.125, and then the usual 16-bit
+divisor is used, as with the original 8250, to divide the frequency by a
+value from 1 to 65535.  Finally, a programmable oversampling rate is used
+that can take any value from 4 to 16 to divide the frequency further and
+determine the actual baud rate used.  Baud rates from 15625000bps down
+to 0.933bps can be obtained this way.
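+
+To make this arithmetic concrete, here is a small, illustrative C sketch
+(the names are invented for this document; the driver's actual logic lives
+in the new ``get_divisor`` handler mentioned below) computing the rate
+produced by a given combination of the three dividers::
+
+    #include <stdio.h>
+
+    /* baud = 62500000 / prescaler / divisor / oversampling
+     * cpr_eighths: the prescaler in units of 1/8 (8..511, i.e. 1..63.875)
+     * divisor:     the usual 16-bit DLM/DLL value (1..65535)
+     * oversamp:    the TCR oversampling rate (4..16)
+     */
+    static double tornado_baud(unsigned int cpr_eighths, unsigned int divisor,
+                               unsigned int oversamp)
+    {
+            return 62500000.0 / (cpr_eighths / 8.0) / divisor / oversamp;
+    }
+
+    int main(void)
+    {
+            /* Default setup: prescaler 33.875 (271/8), divisor 1,
+             * oversampling 16 -> ~115313.653, as quoted below.
+             */
+            printf("%.3f\n", tornado_baud(271, 1, 16));
+            return 0;
+    }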
+
+By default the oversampling rate is set to 16 and the clock prescaler is
+set to 33.875, meaning that the frequency to be used as the reference
+for the usual 16-bit divisor is 115313.653, which is close enough to the
+frequency of 115200 used by the original 8250 for the same values to be
+used for the divisor to obtain the requested baud rates by software that
+is unaware of the extra clock controls available.
+
+The oversampling rate is programmed with the TCR register and the clock
+prescaler is programmed with the CPR/CPR2 register pair [OX200]_ [OX952]_
+[OX954]_ [OX958]_.  To switch away from the default value of 33.875 for
+the prescaler, though, the enhanced mode has to be explicitly enabled by
+setting bit 4 of the EFR.  In that mode, setting bit 7 in the MCR enables
+the prescaler; otherwise it is bypassed, as if the value of 1 was used.
+Additionally, writing any value to CPR clears CPR2, for compatibility with
+old software written for older conventional PCI Oxford Semiconductor
+devices that do not have the extra prescaler's 9th bit in CPR2, so the
+CPR/CPR2 register pair has to be programmed in the right order.
+
+By using these parameters rates from 15625000bps down to 1bps can be
+obtained, with either exact or highly-accurate actual bit rates for
+standard and many non-standard rates.
+
+Here are the figures for the standard and some non-standard baud rates
+(including those quoted in Oxford Semiconductor documentation), giving
+the requested rate (r), the actual rate yielded (a) and its deviation
+from the requested rate (d), and the values of the oversampling rate
+(tcr), the clock prescaler (cpr) and the divisor (div) produced by the
+new ``get_divisor`` handler:
+
+::
+
+ r: 15625000, a: 15625000.00, d:  0.0000%, tcr:  4, cpr:  1.000, div:     1
+ r: 12500000, a: 12500000.00, d:  0.0000%, tcr:  5, cpr:  1.000, div:     1
+ r: 10416666, a: 10416666.67, d:  0.0000%, tcr:  6, cpr:  1.000, div:     1
+ r:  8928571, a:  8928571.43, d:  0.0000%, tcr:  7, cpr:  1.000, div:     1
+ r:  7812500, a:  7812500.00, d:  0.0000%, tcr:  8, cpr:  1.000, div:     1
+ r:  4000000, a:  4000000.00, d:  0.0000%, tcr:  5, cpr:  3.125, div:     1
+ r:  3686400, a:  3676470.59, d: -0.2694%, tcr:  8, cpr:  2.125, div:     1
+ r:  3500000, a:  3496503.50, d: -0.0999%, tcr: 13, cpr:  1.375, div:     1
+ r:  3000000, a:  2976190.48, d: -0.7937%, tcr: 14, cpr:  1.500, div:     1
+ r:  2500000, a:  2500000.00, d:  0.0000%, tcr: 10, cpr:  2.500, div:     1
+ r:  2000000, a:  2000000.00, d:  0.0000%, tcr: 10, cpr:  3.125, div:     1
+ r:  1843200, a:  1838235.29, d: -0.2694%, tcr: 16, cpr:  2.125, div:     1
+ r:  1500000, a:  1492537.31, d: -0.4975%, tcr:  5, cpr:  8.375, div:     1
+ r:  1152000, a:  1152073.73, d:  0.0064%, tcr: 14, cpr:  3.875, div:     1
+ r:   921600, a:   919117.65, d: -0.2694%, tcr: 16, cpr:  2.125, div:     2
+ r:   576000, a:   576036.87, d:  0.0064%, tcr: 14, cpr:  3.875, div:     2
+ r:   460800, a:   460829.49, d:  0.0064%, tcr:  7, cpr:  3.875, div:     5
+ r:   230400, a:   230414.75, d:  0.0064%, tcr: 14, cpr:  3.875, div:     5
+ r:   115200, a:   115207.37, d:  0.0064%, tcr: 14, cpr:  1.250, div:    31
+ r:    57600, a:    57603.69, d:  0.0064%, tcr:  8, cpr:  3.875, div:    35
+ r:    38400, a:    38402.46, d:  0.0064%, tcr: 14, cpr:  3.875, div:    30
+ r:    19200, a:    19201.23, d:  0.0064%, tcr:  8, cpr:  3.875, div:   105
+ r:     9600, a:     9600.06, d:  0.0006%, tcr:  9, cpr:  1.125, div:   643
+ r:     4800, a:     4799.98, d: -0.0004%, tcr:  7, cpr:  2.875, div:   647
+ r:     2400, a:     2400.02, d:  0.0008%, tcr:  9, cpr:  2.250, div:  1286
+ r:     1200, a:     1200.00, d:  0.0000%, tcr: 14, cpr:  2.875, div:  1294
+ r:      300, a:      300.00, d:  0.0000%, tcr: 11, cpr:  2.625, div:  7215
+ r:      200, a:      200.00, d:  0.0000%, tcr: 16, cpr:  1.250, div: 15625
+ r:      150, a:      150.00, d:  0.0000%, tcr: 13, cpr:  2.250, div: 14245
+ r:      134, a:      134.00, d:  0.0000%, tcr: 11, cpr:  2.625, div: 16153
+ r:      110, a:      110.00, d:  0.0000%, tcr: 12, cpr:  1.000, div: 47348
+ r:       75, a:       75.00, d:  0.0000%, tcr:  4, cpr:  5.875, div: 35461
+ r:       50, a:       50.00, d:  0.0000%, tcr: 16, cpr:  1.250, div: 62500
+ r:       25, a:       25.00, d:  0.0000%, tcr: 16, cpr:  2.500, div: 62500
+ r:        4, a:        4.00, d:  0.0000%, tcr: 16, cpr: 20.000, div: 48828
+ r:        2, a:        2.00, d:  0.0000%, tcr: 16, cpr: 40.000, div: 48828
+ r:        1, a:        1.00, d:  0.0000%, tcr: 16, cpr: 63.875, div: 61154
+
+With the baud base set to 15625000 and the unsigned 16-bit UART_DIV_MAX
+limitation imposed by ``serial8250_get_baud_rate``, standard baud rates
+below 300bps become unavailable in the regular way, e.g. the rate of
+200bps requires the baud base to be divided by 78125 and that is beyond
+the unsigned 16-bit range.  The historic spd_cust feature can still be
+used to obtain such rates if so required, by encoding the values for the
+prescaler, the oversampling rate and the clock divisor (DLM/DLL) as
+follows:
+
+::
+
+  31 29 28             20 19   16 15                            0
+ +-----+-----------------+-------+-------------------------------+
+ |0 0 0|    CPR2:CPR     |  TCR  |            DLM:DLL            |
+ +-----+-----------------+-------+-------------------------------+
+
+Use a value encoded this way for the ``custom_divisor`` field, along with
+the ASYNC_SPD_CUST flag set in the ``flags`` field of
+``struct serial_struct`` passed with the TIOCSSERIAL ioctl(2), such as
+with the setserial(8) utility and its ``divisor`` and ``spd_cust``
+parameters, and then select the baud rate of 38400bps.  Note that the
+value of 0 in TCR sets the oversampling rate to 16, and that prescaler
+values below 1 in CPR2/CPR are clamped by the driver to 1.
+
+For example, the value of 0x1f4004e2 will set CPR2/CPR, TCR and DLM/DLL
+respectively to 0x1f4, 0x0 and 0x04e2, choosing a prescaler value,
+oversampling rate and clock divisor of 62.500, 16 and 1250 respectively.
+These parameters will set the baud rate for the serial port to
+62500000 / 62.500 / 1250 / 16 = 50bps.
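+
+A hedged C sketch of this encoding (the helper name is made up; the field
+layout is the one shown in the diagram above), which reproduces the
+0x1f4004e2 value from the preceding paragraph::
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    /* Pack CPR2:CPR (bits 28..20, the prescaler in 1/8 units), TCR
+     * (bits 19..16, where 0 selects an oversampling rate of 16) and
+     * DLM:DLL (bits 15..0) into the custom_divisor layout shown above.
+     */
+    static uint32_t tornado_custom_divisor(unsigned int cpr_eighths,
+                                           unsigned int tcr,
+                                           unsigned int dlm_dll)
+    {
+            return ((cpr_eighths & 0x1ff) << 20) |
+                   ((tcr & 0xf) << 16) |
+                   (dlm_dll & 0xffff);
+    }
+
+    int main(void)
+    {
+            /* Prescaler 62.500 (500/8), TCR 0 (16x), divisor 1250:
+             * 62500000 / 62.5 / 1250 / 16 = 50bps.
+             */
+            printf("0x%08x\n", tornado_custom_divisor(500, 0, 1250));
+            return 0;
+    }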
+
+Maciej W. Rozycki  <macro@orcam.me.uk>
+
+.. [OX200] "OXPCIe200 PCI Express Multi-Port Bridge", Oxford Semiconductor,
+   Inc., DS-0045, 10 Nov 2008, Section "950 Mode", pp. 64-65
+
+.. [OX952] "OXPCIe952 PCI Express Bridge to Dual Serial & Parallel Port",
+   Oxford Semiconductor, Inc., DS-0046, Mar 06 08, Section "950 Mode",
+   p. 20
+
+.. [OX954] "OXPCIe954 PCI Express Bridge to Quad Serial Port", Oxford
+   Semiconductor, Inc., DS-0047, Feb 08, Section "950 Mode", p. 20
+
+.. [OX958] "OXPCIe958 PCI Express Bridge to Octal Serial Port", Oxford
+   Semiconductor, Inc., DS-0048, Feb 08, Section "950 Mode", p. 20
index b882d42..0421656 100644 (file)
@@ -2474,21 +2474,16 @@ drop_unsolicited_na - BOOLEAN
 
        By default this is turned off.
 
-accept_unsolicited_na - BOOLEAN
-       Add a new neighbour cache entry in STALE state for routers on receiving an
-       unsolicited neighbour advertisement with target link-layer address option
-       specified. This is as per router-side behavior documented in RFC9131.
-       This has lower precedence than drop_unsolicited_na.
+accept_untracked_na - BOOLEAN
+       Add a new neighbour cache entry in STALE state for routers on receiving a
+       neighbour advertisement (either solicited or unsolicited) with target
+       link-layer address option specified if no neighbour entry is already
+       present for the advertised IPv6 address. Without this knob, NAs received
+       for untracked addresses (absent in neighbour cache) are silently ignored.
+
+       This is as per router-side behaviour documented in RFC9131.
 
-        ====   ======  ======  ==============================================
-        drop   accept  fwding                   behaviour
-        ----   ------  ------  ----------------------------------------------
-           1        X       X  Drop NA packet and don't pass up the stack
-           0        0       X  Pass NA packet up the stack, don't update NC
-           0        1       0  Pass NA packet up the stack, don't update NC
-           0        1       1  Pass NA packet up the stack, and add a STALE
-                               NC entry
-        ====   ======  ======  ==============================================
+       This has lower precedence than drop_unsolicited_na.
 
        This will optimize the return path for the initial off-link communication
        that is initiated by a directly connected host, by ensuring that
index 1bd687b..5b36e45 100644 (file)
@@ -61,3 +61,39 @@ RISC-V Linux Kernel SV39
    ffffffff00000000 |   -4    GB | ffffffff7fffffff |    2 GB | modules, BPF
    ffffffff80000000 |   -2    GB | ffffffffffffffff |    2 GB | kernel
   __________________|____________|__________________|_________|____________________________________________________________
+
+
+RISC-V Linux Kernel SV48
+------------------------
+
+::
+
+ ========================================================================================================================
+      Start addr    |   Offset   |     End addr     |  Size   | VM area description
+ ========================================================================================================================
+                    |            |                  |         |
+   0000000000000000 |    0       | 00007fffffffffff |  128 TB | user-space virtual memory, different per mm
+  __________________|____________|__________________|_________|___________________________________________________________
+                    |            |                  |         |
+   0000800000000000 | +128    TB | ffff7fffffffffff | ~16M TB | ... huge, almost 64 bits wide hole of non-canonical
+                    |            |                  |         | virtual memory addresses up to the -128 TB
+                    |            |                  |         | starting offset of kernel mappings.
+  __________________|____________|__________________|_________|___________________________________________________________
+                                                              |
+                                                              | Kernel-space virtual memory, shared between all processes:
+  ____________________________________________________________|___________________________________________________________
+                    |            |                  |         |
+   ffff8d7ffee00000 |  -114.5 TB | ffff8d7ffeffffff |    2 MB | fixmap
+   ffff8d7fff000000 |  -114.5 TB | ffff8d7fffffffff |   16 MB | PCI io
+   ffff8d8000000000 |  -114.5 TB | ffff8f7fffffffff |    2 TB | vmemmap
+   ffff8f8000000000 |  -112.5 TB | ffffaf7fffffffff |   32 TB | vmalloc/ioremap space
+   ffffaf8000000000 |  -80.5  TB | ffffef7fffffffff |   64 TB | direct mapping of all physical memory
+   ffffef8000000000 |  -16.5  TB | fffffffeffffffff | 16.5 TB | kasan
+  __________________|____________|__________________|_________|____________________________________________________________
+                                                              |
+                                                              | Identical layout to the 39-bit one from here on:
+  ____________________________________________________________|____________________________________________________________
+                    |            |                  |         |
+   ffffffff00000000 |   -4    GB | ffffffff7fffffff |    2 GB | modules, BPF
+   ffffffff80000000 |   -2    GB | ffffffffffffffff |    2 GB | kernel
+  __________________|____________|__________________|_________|____________________________________________________________
index ac32d8e..ad7bb8c 100644 (file)
@@ -171,6 +171,7 @@ TODOList:
    riscv/index
    openrisc/index
    parisc/index
+   loongarch/index
 
 TODOList:
 
diff --git a/Documentation/translations/zh_CN/loongarch/features.rst b/Documentation/translations/zh_CN/loongarch/features.rst
new file mode 100644 (file)
index 0000000..3886e63
--- /dev/null
@@ -0,0 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/loongarch/features.rst
+:Translator: Huacai Chen <chenhuacai@loongson.cn>
+
+.. kernel-feat:: $srctree/Documentation/features loongarch
diff --git a/Documentation/translations/zh_CN/loongarch/index.rst b/Documentation/translations/zh_CN/loongarch/index.rst
new file mode 100644 (file)
index 0000000..7d23eb7
--- /dev/null
@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/loongarch/index.rst
+:Translator: Huacai Chen <chenhuacai@loongson.cn>
+
+=================
+LoongArch体系结构
+=================
+
+.. toctree::
+   :maxdepth: 2
+   :numbered:
+
+   introduction
+   irq-chip-model
+
+   features
+
+.. only::  subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`
diff --git a/Documentation/translations/zh_CN/loongarch/introduction.rst b/Documentation/translations/zh_CN/loongarch/introduction.rst
new file mode 100644 (file)
index 0000000..e31a1a9
--- /dev/null
@@ -0,0 +1,351 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/loongarch/introduction.rst
+:Translator: Huacai Chen <chenhuacai@loongson.cn>
+
+=============
+LoongArch介绍
+=============
+
+LoongArch是一种新的RISC ISA,在一定程度上类似于MIPS和RISC-V。LoongArch指令集
+包括一个精简32位版(LA32R)、一个标准32位版(LA32S)、一个64位版(LA64)。
+LoongArch定义了四个特权级(PLV0~PLV3),其中PLV0是最高特权级,用于内核;而PLV3
+是最低特权级,用于应用程序。本文档介绍了LoongArch的寄存器、基础指令集、虚拟内
+存以及其他一些主题。
+
+寄存器
+======
+
+LoongArch的寄存器包括通用寄存器(GPRs)、浮点寄存器(FPRs)、向量寄存器(VRs)
+和用于特权模式(PLV0)的控制状态寄存器(CSRs)。
+
+通用寄存器
+----------
+
+LoongArch包括32个通用寄存器( ``$r0`` ~ ``$r31`` ),LA32中每个寄存器为32位宽,
+LA64中每个寄存器为64位宽。 ``$r0`` 的内容总是固定为0,而其他寄存器在体系结构层面
+没有特殊功能。( ``$r1`` 算是一个例外,在BL指令中固定用作链接返回寄存器。)
+
+内核使用了一套LoongArch寄存器约定,定义在LoongArch ELF psABI规范中,详细描述参见
+:ref:`参考文献 <loongarch-references-zh_CN>`:
+
+================= =============== =================== ==========
+寄存器名          别名            用途                跨调用保持
+================= =============== =================== ==========
+``$r0``           ``$zero``       常量0               不使用
+``$r1``           ``$ra``         返回地址            否
+``$r2``           ``$tp``         TLS/线程信息指针    不使用
+``$r3``           ``$sp``         栈指针              是
+``$r4``-``$r11``  ``$a0``-``$a7`` 参数寄存器          否
+``$r4``-``$r5``   ``$v0``-``$v1`` 返回值              否
+``$r12``-``$r20`` ``$t0``-``$t8`` 临时寄存器          否
+``$r21``          ``$u0``         每CPU变量基地址     不使用
+``$r22``          ``$fp``         帧指针              是
+``$r23``-``$r31`` ``$s0``-``$s8`` 静态寄存器          是
+================= =============== =================== ==========
+
+注意:``$r21``寄存器在ELF psABI中保留未使用,但是在Linux内核用于保存每CPU
+变量基地址。该寄存器没有ABI命名,不过在内核中称为``$u0``。在一些遗留代码
+中有时可能见到``$v0``和``$v1``,它们是``$a0``和``$a1``的别名,属于已经废弃
+的用法。
+
+浮点寄存器
+----------
+
+当系统中存在FPU时,LoongArch有32个浮点寄存器( ``$f0`` ~ ``$f31`` )。在LA64
+的CPU核上,每个寄存器均为64位宽。
+
+浮点寄存器的使用约定与LoongArch ELF psABI规范的描述相同:
+
+================= ================== =================== ==========
+寄存器名          别名               用途                跨调用保持
+================= ================== =================== ==========
+``$f0``-``$f7``   ``$fa0``-``$fa7``  参数寄存器          否
+``$f0``-``$f1``   ``$fv0``-``$fv1``  返回值              否
+``$f8``-``$f23``  ``$ft0``-``$ft15`` 临时寄存器          否
+``$f24``-``$f31`` ``$fs0``-``$fs7``  静态寄存器          是
+================= ================== =================== ==========
+
+注意:在一些遗留代码中有时可能见到 ``$v0`` 和 ``$v1`` ,它们是 ``$a0``
+和 ``$a1`` 的别名,属于已经废弃的用法。
+
+
+向量寄存器
+----------
+
+LoongArch现有两种向量扩展:
+
+- 128位向量扩展LSX(全称Loongson SIMD eXtention),
+- 256位向量扩展LASX(全称Loongson Advanced SIMD eXtention)。
+
+LSX使用 ``$v0`` ~ ``$v31`` 向量寄存器,而LASX则使用 ``$x0`` ~ ``$x31`` 。
+
+浮点寄存器和向量寄存器是复用的,比如:在一个实现了LSX和LASX的核上, ``$x0`` 的
+低128位与 ``$v0`` 共用, ``$v0`` 的低64位与 ``$f0`` 共用,其他寄存器依此类推。
+
+控制状态寄存器
+--------------
+
+控制状态寄存器只能在特权模式(PLV0)下访问:
+
+================= ==================================== ==========
+地址              全称描述                             简称
+================= ==================================== ==========
+0x0               当前模式信息                         CRMD
+0x1               异常前模式信息                       PRMD
+0x2               扩展部件使能                         EUEN
+0x3               杂项控制                             MISC
+0x4               异常配置                             ECFG
+0x5               异常状态                             ESTAT
+0x6               异常返回地址                         ERA
+0x7               出错(Faulting)虚拟地址               BADV
+0x8               出错(Faulting)指令字                 BADI
+0xC               异常入口地址                         EENTRY
+0x10              TLB索引                              TLBIDX
+0x11              TLB表项高位                          TLBEHI
+0x12              TLB表项低位0                         TLBELO0
+0x13              TLB表项低位1                         TLBELO1
+0x18              地址空间标识符                       ASID
+0x19              低半地址空间页全局目录基址           PGDL
+0x1A              高半地址空间页全局目录基址           PGDH
+0x1B              页全局目录基址                       PGD
+0x1C              页表遍历控制低半部分                 PWCL
+0x1D              页表遍历控制高半部分                 PWCH
+0x1E              STLB页大小                           STLBPS
+0x1F              缩减虚地址配置                       RVACFG
+0x20              CPU编号                              CPUID
+0x21              特权资源配置信息1                    PRCFG1
+0x22              特权资源配置信息2                    PRCFG2
+0x23              特权资源配置信息3                    PRCFG3
+0x30+n (0≤n≤15)   数据保存寄存器                       SAVEn
+0x40              定时器编号                           TID
+0x41              定时器配置                           TCFG
+0x42              定时器值                             TVAL
+0x43              计时器补偿                           CNTC
+0x44              定时器中断清除                       TICLR
+0x60              LLBit相关控制                        LLBCTL
+0x80              实现相关控制1                        IMPCTL1
+0x81              实现相关控制2                        IMPCTL2
+0x88              TLB重填异常入口地址                  TLBRENTRY
+0x89              TLB重填异常出错(Faulting)虚地址      TLBRBADV
+0x8A              TLB重填异常返回地址                  TLBRERA
+0x8B              TLB重填异常数据保存                  TLBRSAVE
+0x8C              TLB重填异常表项低位0                 TLBRELO0
+0x8D              TLB重填异常表项低位1                 TLBRELO1
+0x8E              TLB重填异常表项高位                  TLBEHI
+0x8F              TLB重填异常前模式信息                TLBRPRMD
+0x90              机器错误控制                         MERRCTL
+0x91              机器错误信息1                        MERRINFO1
+0x92              机器错误信息2                        MERRINFO2
+0x93              机器错误异常入口地址                 MERRENTRY
+0x94              机器错误异常返回地址                 MERRERA
+0x95              机器错误异常数据保存                 MERRSAVE
+0x98              高速缓存标签                         CTAG
+0x180+n (0≤n≤3)   直接映射配置窗口n                    DMWn
+0x200+2n (0≤n≤31) 性能监测配置n                        PMCFGn
+0x201+2n (0≤n≤31) 性能监测计数器n                      PMCNTn
+0x300             内存读写监视点整体控制               MWPC
+0x301             内存读写监视点整体状态               MWPS
+0x310+8n (0≤n≤7)  内存读写监视点n配置1                 MWPnCFG1
+0x311+8n (0≤n≤7)  内存读写监视点n配置2                 MWPnCFG2
+0x312+8n (0≤n≤7)  内存读写监视点n配置3                 MWPnCFG3
+0x313+8n (0≤n≤7)  内存读写监视点n配置4                 MWPnCFG4
+0x380             取指监视点整体控制                   FWPC
+0x381             取指监视点整体状态                   FWPS
+0x390+8n (0≤n≤7)  取指监视点n配置1                     FWPnCFG1
+0x391+8n (0≤n≤7)  取指监视点n配置2                     FWPnCFG2
+0x392+8n (0≤n≤7)  取指监视点n配置3                     FWPnCFG3
+0x393+8n (0≤n≤7)  取指监视点n配置4                     FWPnCFG4
+0x500             调试寄存器                           DBG
+0x501             调试异常返回地址                     DERA
+0x502             调试数据保存                         DSAVE
+================= ==================================== ==========
+
+ERA,TLBRERA,MERRERA和DERA有时也分别称为EPC,TLBREPC,MERREPC和DEPC。
+
+基础指令集
+==========
+
+指令格式
+--------
+
+LoongArch的指令字长为32位,一共有9种基本指令格式(以及一些变体):
+
+=========== ==========================
+格式名称    指令构成
+=========== ==========================
+2R          Opcode + Rj + Rd
+3R          Opcode + Rk + Rj + Rd
+4R          Opcode + Ra + Rk + Rj + Rd
+2RI8        Opcode + I8 + Rj + Rd
+2RI12       Opcode + I12 + Rj + Rd
+2RI14       Opcode + I14 + Rj + Rd
+2RI16       Opcode + I16 + Rj + Rd
+1RI21       Opcode + I21L + Rj + I21H
+I26         Opcode + I26L + I26H
+=========== ==========================
+
+Opcode是指令操作码,Rj和Rk是源操作数(寄存器),Rd是目标操作数(寄存器),Ra是
+4R-type格式特有的附加操作数(寄存器)。I8/I12/I16/I21/I26分别是8位/12位/16位/
+21位/26位的立即数。其中较长的21位和26位立即数在指令字中被分割为高位部分与低位
+部分,所以你们在这里的格式描述中能够看到I21L/I21H和I26L/I26H这样带后缀的表述。
+
+指令列表
+--------
+
+为了简便起见,我们在此只罗列一下指令名称(助记符),需要详细信息请阅读
+:ref:`参考文献 <loongarch-references-zh_CN>` 中的文档。
+
+1. 算术运算指令::
+
+    ADD.W SUB.W ADDI.W ADD.D SUB.D ADDI.D
+    SLT SLTU SLTI SLTUI
+    AND OR NOR XOR ANDN ORN ANDI ORI XORI
+    MUL.W MULH.W MULH.WU DIV.W DIV.WU MOD.W MOD.WU
+    MUL.D MULH.D MULH.DU DIV.D DIV.DU MOD.D MOD.DU
+    PCADDI PCADDU12I PCADDU18I
+    LU12I.W LU32I.D LU52I.D ADDU16I.D
+
+2. 移位运算指令::
+
+    SLL.W SRL.W SRA.W ROTR.W SLLI.W SRLI.W SRAI.W ROTRI.W
+    SLL.D SRL.D SRA.D ROTR.D SLLI.D SRLI.D SRAI.D ROTRI.D
+
+3. 位域操作指令::
+
+    EXT.W.B EXT.W.H CLO.W CLO.D CLZ.W CLZ.D CTO.W CTO.D CTZ.W CTZ.D
+    BYTEPICK.W BYTEPICK.D BSTRINS.W BSTRINS.D BSTRPICK.W BSTRPICK.D
+    REVB.2H REVB.4H REVB.2W REVB.D REVH.2W REVH.D BITREV.4B BITREV.8B BITREV.W BITREV.D
+    MASKEQZ MASKNEZ
+
+4. 分支转移指令::
+
+    BEQ BNE BLT BGE BLTU BGEU BEQZ BNEZ B BL JIRL
+
+5. 访存读写指令::
+
+    LD.B LD.BU LD.H LD.HU LD.W LD.WU LD.D ST.B ST.H ST.W ST.D
+    LDX.B LDX.BU LDX.H LDX.HU LDX.W LDX.WU LDX.D STX.B STX.H STX.W STX.D
+    LDPTR.W LDPTR.D STPTR.W STPTR.D
+    PRELD PRELDX
+
+6. 原子操作指令::
+
+    LL.W SC.W LL.D SC.D
+    AMSWAP.W AMSWAP.D AMADD.W AMADD.D AMAND.W AMAND.D AMOR.W AMOR.D AMXOR.W AMXOR.D
+    AMMAX.W AMMAX.D AMMIN.W AMMIN.D
+
+7. 栅障指令::
+
+    IBAR DBAR
+
+8. 特殊指令::
+
+    SYSCALL BREAK CPUCFG NOP IDLE ERTN(ERET) DBCL(DBGCALL) RDTIMEL.W RDTIMEH.W RDTIME.D
+    ASRTLE.D ASRTGT.D
+
+9. 特权指令::
+
+    CSRRD CSRWR CSRXCHG
+    IOCSRRD.B IOCSRRD.H IOCSRRD.W IOCSRRD.D IOCSRWR.B IOCSRWR.H IOCSRWR.W IOCSRWR.D
+    CACOP TLBP(TLBSRCH) TLBRD TLBWR TLBFILL TLBCLR TLBFLUSH INVTLB LDDIR LDPTE
+
+虚拟内存
+========
+
+LoongArch可以使用直接映射虚拟内存和分页映射虚拟内存。
+
+直接映射虚拟内存通过CSR.DMWn(n=0~3)来进行配置,虚拟地址(VA)和物理地址(PA)
+之间有简单的映射关系::
+
+ VA = PA + 固定偏移
+
+分页映射的虚拟地址(VA)和物理地址(PA)有任意的映射关系,这种关系记录在TLB和页
+表中。LoongArch的TLB包括一个全相联的MTLB(Multiple Page Size TLB,多样页大小TLB)
+和一个组相联的STLB(Single Page Size TLB,单一页大小TLB)。
+
+缺省状态下,LA32的整个虚拟地址空间配置如下:
+
+============ =========================== ===========================
+区段名       地址范围                    属性
+============ =========================== ===========================
+``UVRANGE``  ``0x00000000 - 0x7FFFFFFF`` 分页映射, 可缓存, PLV0~3
+``KPRANGE0`` ``0x80000000 - 0x9FFFFFFF`` 直接映射, 非缓存, PLV0
+``KPRANGE1`` ``0xA0000000 - 0xBFFFFFFF`` 直接映射, 可缓存, PLV0
+``KVRANGE``  ``0xC0000000 - 0xFFFFFFFF`` 分页映射, 可缓存, PLV0
+============ =========================== ===========================
+
+用户态(PLV3)只能访问UVRANGE,对于直接映射的KPRANGE0和KPRANGE1,将虚拟地址的第
+29~31位清零就等于物理地址。例如:物理地址0x00001000对应的非缓存直接映射虚拟地址
+是0x80001000,而其可缓存直接映射虚拟地址是0xA0001000。
+
+缺省状态下,LA64的整个虚拟地址空间配置如下:
+
+============ ====================== ==================================
+区段名       地址范围               属性
+============ ====================== ==================================
+``XUVRANGE`` ``0x0000000000000000 - 分页映射, 可缓存, PLV0~3
+             0x3FFFFFFFFFFFFFFF``
+``XSPRANGE`` ``0x4000000000000000 - 直接映射, 可缓存 / 非缓存, PLV0
+             0x7FFFFFFFFFFFFFFF``
+``XKPRANGE`` ``0x8000000000000000 - 直接映射, 可缓存 / 非缓存, PLV0
+             0xBFFFFFFFFFFFFFFF``
+``XKVRANGE`` ``0xC000000000000000 - 分页映射, 可缓存, PLV0
+             0xFFFFFFFFFFFFFFFF``
+============ ====================== ==================================
+
+用户态(PLV3)只能访问XUVRANGE,对于直接映射的XSPRANGE和XKPRANGE,将虚拟地址的第
+60~63位清零就等于物理地址,而其缓存属性是通过虚拟地址的第60~61位配置的(0表示强序
+非缓存,1表示一致可缓存,2表示弱序非缓存)。
+
+目前,我们仅用XKPRANGE来进行直接映射,XSPRANGE保留给以后用。
+
+此处给出一个直接映射的例子:物理地址0x00000000_00001000的强序非缓存直接映射虚拟地址
+(在XKPRANGE中)是0x80000000_00001000,其一致可缓存直接映射虚拟地址(在XKPRANGE中)
+是0x90000000_00001000,而其弱序非缓存直接映射虚拟地址(在XKPRANGE中)是0xA0000000_
+00001000。
+
+Loongson与LoongArch的关系
+=========================
+
+LoongArch是一种RISC指令集架构(ISA),不同于现存的任何一种ISA,而Loongson(即龙
+芯)是一个处理器家族。龙芯包括三个系列:Loongson-1(龙芯1号)是32位处理器系列,
+Loongson-2(龙芯2号)是低端64位处理器系列,而Loongson-3(龙芯3号)是高端64位处理
+器系列。旧的龙芯处理器基于MIPS架构,而新的龙芯处理器基于LoongArch架构。以龙芯3号
+为例:龙芯3A1000/3B1500/3A2000/3A3000/3A4000都是兼容MIPS的,而龙芯3A5000(以及将
+来的型号)都是基于LoongArch的。
+
+.. _loongarch-references-zh_CN:
+
+参考文献
+========
+
+Loongson官方网站(龙芯中科技术股份有限公司):
+
+  http://www.loongson.cn/
+
+Loongson与LoongArch的开发者网站(软件与文档资源):
+
+  http://www.loongnix.cn/
+
+  https://github.com/loongson/
+
+  https://loongson.github.io/LoongArch-Documentation/
+
+LoongArch指令集架构的文档:
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.00-CN.pdf (中文版)
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.00-EN.pdf (英文版)
+
+LoongArch的ELF psABI文档:
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v1.00-CN.pdf (中文版)
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v1.00-EN.pdf (英文版)
+
+Loongson与LoongArch的Linux内核源码仓库:
+
+  https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson.git
diff --git a/Documentation/translations/zh_CN/loongarch/irq-chip-model.rst b/Documentation/translations/zh_CN/loongarch/irq-chip-model.rst
new file mode 100644 (file)
index 0000000..2a4c3ad
--- /dev/null
@@ -0,0 +1,155 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/loongarch/irq-chip-model.rst
+:Translator: Huacai Chen <chenhuacai@loongson.cn>
+
+==================================
+LoongArch的IRQ芯片模型(层级关系)
+==================================
+
+目前,基于LoongArch的处理器(如龙芯3A5000)只能与LS7A芯片组配合工作。LoongArch计算机
+中的中断控制器(即IRQ芯片)包括CPUINTC(CPU Core Interrupt Controller)、LIOINTC(
+Legacy I/O Interrupt Controller)、EIOINTC(Extended I/O Interrupt Controller)、
+HTVECINTC(Hyper-Transport Vector Interrupt Controller)、PCH-PIC(LS7A芯片组的主中
+断控制器)、PCH-LPC(LS7A芯片组的LPC中断控制器)和PCH-MSI(MSI中断控制器)。
+
+CPUINTC是一种CPU内部的每个核本地的中断控制器,LIOINTC/EIOINTC/HTVECINTC是CPU内部的
+全局中断控制器(每个芯片一个,所有核共享),而PCH-PIC/PCH-LPC/PCH-MSI是CPU外部的中
+断控制器(在配套芯片组里面)。这些中断控制器(或者说IRQ芯片)以一种层次树的组织形式
+级联在一起,一共有两种层级关系模型(传统IRQ模型和扩展IRQ模型)。
+
+传统IRQ模型
+===========
+
+在这种模型里面,IPI(Inter-Processor Interrupt)和CPU本地时钟中断直接发送到CPUINTC,
+CPU串口(UARTs)中断发送到LIOINTC,而其他所有设备的中断则分别发送到所连接的PCH-PIC/
+PCH-LPC/PCH-MSI,然后被HTVECINTC统一收集,再发送到LIOINTC,最后到达CPUINTC::
+
+     +-----+     +---------+     +-------+
+     | IPI | --> | CPUINTC | <-- | Timer |
+     +-----+     +---------+     +-------+
+                      ^
+                      |
+                 +---------+     +-------+
+                 | LIOINTC | <-- | UARTs |
+                 +---------+     +-------+
+                      ^
+                      |
+                +-----------+
+                | HTVECINTC |
+                +-----------+
+                 ^         ^
+                 |         |
+           +---------+ +---------+
+           | PCH-PIC | | PCH-MSI |
+           +---------+ +---------+
+             ^     ^           ^
+             |     |           |
+     +---------+ +---------+ +---------+
+     | PCH-LPC | | Devices | | Devices |
+     +---------+ +---------+ +---------+
+          ^
+          |
+     +---------+
+     | Devices |
+     +---------+
+
+扩展IRQ模型
+===========
+
+在这种模型里面,IPI(Inter-Processor Interrupt)和CPU本地时钟中断直接发送到CPUINTC,
+CPU串口(UARTs)中断发送到LIOINTC,而其他所有设备的中断则分别发送到所连接的PCH-PIC/
+PCH-LPC/PCH-MSI,然后被EIOINTC统一收集,再直接到达CPUINTC::
+
+          +-----+     +---------+     +-------+
+          | IPI | --> | CPUINTC | <-- | Timer |
+          +-----+     +---------+     +-------+
+                       ^       ^
+                       |       |
+                +---------+ +---------+     +-------+
+                | EIOINTC | | LIOINTC | <-- | UARTs |
+                +---------+ +---------+     +-------+
+                 ^       ^
+                 |       |
+          +---------+ +---------+
+          | PCH-PIC | | PCH-MSI |
+          +---------+ +---------+
+            ^     ^           ^
+            |     |           |
+    +---------+ +---------+ +---------+
+    | PCH-LPC | | Devices | | Devices |
+    +---------+ +---------+ +---------+
+         ^
+         |
+    +---------+
+    | Devices |
+    +---------+
+
+ACPI相关的定义
+==============
+
+CPUINTC::
+
+  ACPI_MADT_TYPE_CORE_PIC;
+  struct acpi_madt_core_pic;
+  enum acpi_madt_core_pic_version;
+
+LIOINTC::
+
+  ACPI_MADT_TYPE_LIO_PIC;
+  struct acpi_madt_lio_pic;
+  enum acpi_madt_lio_pic_version;
+
+EIOINTC::
+
+  ACPI_MADT_TYPE_EIO_PIC;
+  struct acpi_madt_eio_pic;
+  enum acpi_madt_eio_pic_version;
+
+HTVECINTC::
+
+  ACPI_MADT_TYPE_HT_PIC;
+  struct acpi_madt_ht_pic;
+  enum acpi_madt_ht_pic_version;
+
+PCH-PIC::
+
+  ACPI_MADT_TYPE_BIO_PIC;
+  struct acpi_madt_bio_pic;
+  enum acpi_madt_bio_pic_version;
+
+PCH-MSI::
+
+  ACPI_MADT_TYPE_MSI_PIC;
+  struct acpi_madt_msi_pic;
+  enum acpi_madt_msi_pic_version;
+
+PCH-LPC::
+
+  ACPI_MADT_TYPE_LPC_PIC;
+  struct acpi_madt_lpc_pic;
+  enum acpi_madt_lpc_pic_version;
+
+参考文献
+========
+
+龙芯3A5000的文档:
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-3A5000-usermanual-1.02-CN.pdf (中文版)
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-3A5000-usermanual-1.02-EN.pdf (英文版)
+
+龙芯LS7A芯片组的文档:
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-7A1000-usermanual-2.00-CN.pdf (中文版)
+
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-7A1000-usermanual-2.00-EN.pdf (英文版)
+
+注:CPUINTC即《龙芯架构参考手册卷一》第7.4节所描述的CSR.ECFG/CSR.ESTAT寄存器及其中断
+控制逻辑;LIOINTC即《龙芯3A5000处理器使用手册》第11.1节所描述的“传统I/O中断”;EIOINTC
+即《龙芯3A5000处理器使用手册》第11.2节所描述的“扩展I/O中断”;HTVECINTC即《龙芯3A5000
+处理器使用手册》第14.3节所描述的“HyperTransport中断”;PCH-PIC/PCH-MSI即《龙芯7A1000桥
+片用户手册》第5章所描述的“中断控制器”;PCH-LPC即《龙芯7A1000桥片用户手册》第24.3节所
+描述的“LPC中断”。
index c6d034a..1c37159 100644 (file)
@@ -787,6 +787,7 @@ The uvc function provides these attributes in its function directory:
        streaming_maxpacket maximum packet size this endpoint is capable of
                            sending or receiving when this configuration is
                            selected
+       function_name       name of the interface
        =================== ================================================
 
 There are also "control" and "streaming" subdirectories, each of which contain
index b85ee59..a6d3bd9 100644 (file)
@@ -382,7 +382,7 @@ F:  include/acpi/
 F:     tools/power/acpi/
 
 ACPI FOR ARM64 (ACPI/arm64)
-M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M:     Lorenzo Pieralisi <lpieralisi@kernel.org>
 M:     Hanjun Guo <guohanjun@huawei.com>
 M:     Sudeep Holla <sudeep.holla@arm.com>
 L:     linux-acpi@vger.kernel.org
@@ -1090,6 +1090,14 @@ W:       https://ez.analog.com/linux-software-drivers
 F:     Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml
 F:     drivers/iio/adc/ad7292.c
 
+ANALOG DEVICES INC AD3552R DRIVER
+M:     Nuno Sá <nuno.sa@analog.com>
+L:     linux-iio@vger.kernel.org
+S:     Supported
+W:     https://ez.analog.com/linux-software-drivers
+F:     Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
+F:     drivers/iio/dac/ad3552r.c
+
 ANALOG DEVICES INC AD7293 DRIVER
 M:     Antoniu Miclaus <antoniu.miclaus@analog.com>
 L:     linux-iio@vger.kernel.org
@@ -1375,14 +1383,6 @@ L:       linux-input@vger.kernel.org
 S:     Odd fixes
 F:     drivers/input/mouse/bcm5974.c
 
-APPLE DART IOMMU DRIVER
-M:     Sven Peter <sven@svenpeter.dev>
-R:     Alyssa Rosenzweig <alyssa@rosenzweig.io>
-L:     iommu@lists.linux-foundation.org
-S:     Maintained
-F:     Documentation/devicetree/bindings/iommu/apple,dart.yaml
-F:     drivers/iommu/apple-dart.c
-
 APPLE PCIE CONTROLLER DRIVER
 M:     Alyssa Rosenzweig <alyssa@rosenzweig.io>
 M:     Marc Zyngier <maz@kernel.org>
@@ -1834,9 +1834,11 @@ F:       Documentation/devicetree/bindings/arm/apple/*
 F:     Documentation/devicetree/bindings/clock/apple,nco.yaml
 F:     Documentation/devicetree/bindings/i2c/apple,i2c.yaml
 F:     Documentation/devicetree/bindings/interrupt-controller/apple,*
+F:     Documentation/devicetree/bindings/iommu/apple,dart.yaml
 F:     Documentation/devicetree/bindings/iommu/apple,sart.yaml
 F:     Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml
 F:     Documentation/devicetree/bindings/nvme/apple,nvme-ans.yaml
+F:     Documentation/devicetree/bindings/nvmem/apple,efuses.yaml
 F:     Documentation/devicetree/bindings/pci/apple,pcie.yaml
 F:     Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml
 F:     Documentation/devicetree/bindings/power/apple*
@@ -1845,9 +1847,11 @@ F:       arch/arm64/boot/dts/apple/
 F:     drivers/clk/clk-apple-nco.c
 F:     drivers/i2c/busses/i2c-pasemi-core.c
 F:     drivers/i2c/busses/i2c-pasemi-platform.c
+F:     drivers/iommu/apple-dart.c
 F:     drivers/irqchip/irq-apple-aic.c
 F:     drivers/mailbox/apple-mailbox.c
 F:     drivers/nvme/host/apple.c
+F:     drivers/nvmem/apple-efuses.c
 F:     drivers/pinctrl/pinctrl-apple-gpio.c
 F:     drivers/soc/apple/*
 F:     drivers/watchdog/apple_wdt.c
@@ -2131,6 +2135,18 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
 F:     arch/arm/mach-sa1100/include/mach/jornada720.h
 F:     arch/arm/mach-sa1100/jornada720.c
 
+ARM/HPE GXP ARCHITECTURE
+M:     Jean-Marie Verdun <verdun@hpe.com>
+M:     Nick Hawkins <nick.hawkins@hpe.com>
+S:     Maintained
+F:     Documentation/devicetree/bindings/arm/hpe,gxp.yaml
+F:     Documentation/devicetree/bindings/timer/hpe,gxp-timer.yaml
+F:     arch/arm/boot/dts/hpe-bmc*
+F:     arch/arm/boot/dts/hpe-gxp*
+F:     arch/arm/mach-hpe/
+F:     drivers/clocksource/timer-gxp.c
+F:     drivers/watchdog/gxp-wdt.c
+
 ARM/IGEP MACHINE SUPPORT
 M:     Enric Balletbo i Serra <eballetbo@gmail.com>
 M:     Javier Martinez Canillas <javier@dowhile0.org>
@@ -2549,7 +2565,7 @@ F:        drivers/pci/controller/dwc/pcie-qcom.c
 F:     drivers/phy/qualcomm/
 F:     drivers/power/*/msm*
 F:     drivers/reset/reset-qcom-*
-F:     drivers/scsi/ufs/ufs-qcom*
+F:     drivers/ufs/host/ufs-qcom*
 F:     drivers/spi/spi-geni-qcom.c
 F:     drivers/spi/spi-qcom-qspi.c
 F:     drivers/spi/spi-qup.c
@@ -2946,7 +2962,7 @@ N:        uniphier
 ARM/VERSATILE EXPRESS PLATFORM
 M:     Liviu Dudau <liviu.dudau@arm.com>
 M:     Sudeep Holla <sudeep.holla@arm.com>
-M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M:     Lorenzo Pieralisi <lpieralisi@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     */*/*/vexpress*
@@ -3517,10 +3533,14 @@ R:      Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 R:     Rasmus Villemoes <linux@rasmusvillemoes.dk>
 S:     Maintained
 F:     include/linux/bitmap.h
+F:     include/linux/cpumask.h
 F:     include/linux/find.h
+F:     include/linux/nodemask.h
 F:     lib/bitmap.c
+F:     lib/cpumask.c
 F:     lib/find_bit.c
 F:     lib/find_bit_benchmark.c
+F:     lib/nodemask.c
 F:     lib/test_bitmap.c
 F:     tools/include/linux/bitmap.h
 F:     tools/include/linux/find.h
@@ -4572,8 +4592,8 @@ F:        drivers/power/supply/cw2015_battery.c
 
 CEPH COMMON CODE (LIBCEPH)
 M:     Ilya Dryomov <idryomov@gmail.com>
-M:     Jeff Layton <jlayton@kernel.org>
 M:     Xiubo Li <xiubli@redhat.com>
+R:     Jeff Layton <jlayton@kernel.org>
 L:     ceph-devel@vger.kernel.org
 S:     Supported
 W:     http://ceph.com/
@@ -4583,9 +4603,9 @@ F:        include/linux/crush/
 F:     net/ceph/
 
 CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
-M:     Jeff Layton <jlayton@kernel.org>
 M:     Xiubo Li <xiubli@redhat.com>
 M:     Ilya Dryomov <idryomov@gmail.com>
+R:     Jeff Layton <jlayton@kernel.org>
 L:     ceph-devel@vger.kernel.org
 S:     Supported
 W:     http://ceph.com/
@@ -5162,7 +5182,7 @@ F:        arch/x86/kernel/cpuid.c
 F:     arch/x86/kernel/msr.c
 
 CPUIDLE DRIVER - ARM BIG LITTLE
-M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M:     Lorenzo Pieralisi <lpieralisi@kernel.org>
 M:     Daniel Lezcano <daniel.lezcano@linaro.org>
 L:     linux-pm@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -5182,7 +5202,7 @@ F:        drivers/cpuidle/cpuidle-exynos.c
 F:     include/linux/platform_data/cpuidle-exynos.h
 
 CPUIDLE DRIVER - ARM PSCI
-M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M:     Lorenzo Pieralisi <lpieralisi@kernel.org>
 M:     Sudeep Holla <sudeep.holla@arm.com>
 L:     linux-pm@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -7711,6 +7731,7 @@ F:        include/linux/arm_ffa.h
 
 FIRMWARE LOADER (request_firmware)
 M:     Luis Chamberlain <mcgrof@kernel.org>
+M:     Russ Weight <russell.h.weight@intel.com>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/firmware_class/
@@ -7779,7 +7800,7 @@ R:        Tom Rix <trix@redhat.com>
 L:     linux-fpga@vger.kernel.org
 S:     Maintained
 Q:     http://patchwork.kernel.org/project/linux-fpga/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mdf/linux-fpga.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/fpga/linux-fpga.git
 F:     Documentation/devicetree/bindings/fpga/
 F:     Documentation/driver-api/fpga/
 F:     Documentation/fpga/
@@ -8405,7 +8426,7 @@ M:        Mika Westerberg <mika.westerberg@linux.intel.com>
 M:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 L:     linux-gpio@vger.kernel.org
 L:     linux-acpi@vger.kernel.org
-S:     Maintained
+S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
 F:     Documentation/firmware-guide/acpi/gpio-properties.rst
 F:     drivers/gpio/gpiolib-acpi.c
@@ -9056,8 +9077,16 @@ L:       linux-input@vger.kernel.org
 S:     Maintained
 F:     drivers/input/touchscreen/htcpen.c
 
+HTE SUBSYSTEM
+M:     Dipen Patel <dipenp@nvidia.com>
+S:     Maintained
+F:     Documentation/devicetree/bindings/timestamp/
+F:     Documentation/hte/
+F:     drivers/hte/
+F:     include/linux/hte.h
+
 HTS221 TEMPERATURE-HUMIDITY IIO DRIVER
-M:     Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+M:     Lorenzo Bianconi <lorenzo@kernel.org>
 L:     linux-iio@vger.kernel.org
 S:     Maintained
 W:     http://www.st.com/
@@ -9325,13 +9354,13 @@ F:      drivers/i2c/i2c-stub.c
 I3C DRIVER FOR CADENCE I3C MASTER IP
 M:     Przemysław Gaj <pgaj@cadence.com>
 S:     Maintained
-F:     Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt
+F:     Documentation/devicetree/bindings/i3c/cdns,i3c-master.yaml
 F:     drivers/i3c/master/i3c-master-cdns.c
 
 I3C DRIVER FOR SYNOPSYS DESIGNWARE
 M:     Vitor Soares <vitor.soares@synopsys.com>
 S:     Maintained
-F:     Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
+F:     Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml
 F:     drivers/i3c/master/dw*
 
 I3C SUBSYSTEM
@@ -9870,7 +9899,7 @@ F:        drivers/video/fbdev/intelfb/
 INTEL GPIO DRIVERS
 M:     Andy Shevchenko <andy@kernel.org>
 L:     linux-gpio@vger.kernel.org
-S:     Maintained
+S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
 F:     drivers/gpio/gpio-ich.c
 F:     drivers/gpio/gpio-merrifield.c
@@ -10091,7 +10120,7 @@ F:      drivers/platform/x86/intel/pmc/
 
 INTEL PMIC GPIO DRIVERS
 M:     Andy Shevchenko <andy@kernel.org>
-S:     Maintained
+S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
 F:     drivers/gpio/gpio-*cove.c
 
@@ -11440,8 +11469,6 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching.g
 F:     Documentation/ABI/testing/sysfs-kernel-livepatch
 F:     Documentation/livepatch/
 F:     arch/powerpc/include/asm/livepatch.h
-F:     arch/s390/include/asm/livepatch.h
-F:     arch/x86/include/asm/livepatch.h
 F:     include/linux/livepatch.h
 F:     kernel/livepatch/
 F:     kernel/module/livepatch.c
@@ -11550,6 +11577,16 @@ S:     Maintained
 F:     Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml
 F:     drivers/gpu/drm/bridge/lontium-lt8912b.c
 
+LOONGARCH
+M:     Huacai Chen <chenhuacai@kernel.org>
+R:     WANG Xuerui <kernel@xen0n.name>
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson.git
+F:     arch/loongarch/
+F:     drivers/*/*loongarch*
+F:     Documentation/loongarch/
+F:     Documentation/translations/zh_CN/loongarch/
+
 LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
 M:     Sathya Prakash <sathya.prakash@broadcom.com>
 M:     Sreekanth Reddy <sreekanth.reddy@broadcom.com>
@@ -12898,7 +12935,7 @@ F:      arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
 
 MHI BUS
 M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-R:     Hemant Kumar <hemantk@codeaurora.org>
+R:     Hemant Kumar <quic_hemantk@quicinc.com>
 L:     mhi@lists.linux.dev
 L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
@@ -13068,7 +13105,7 @@ M:      Claudiu Beznea <claudiu.beznea@microchip.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-pwm@vger.kernel.org
 S:     Supported
-F:     Documentation/devicetree/bindings/pwm/atmel-pwm.txt
+F:     Documentation/devicetree/bindings/pwm/atmel,at91sam-pwm.yaml
 F:     drivers/pwm/pwm-atmel.c
 
 MICROCHIP SAMA5D2-COMPATIBLE ADC DRIVER
@@ -13407,7 +13444,7 @@ F:      drivers/net/phy/motorcomm.c
 MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
 M:     Jiri Slaby <jirislaby@kernel.org>
 S:     Maintained
-F:     Documentation/driver-api/serial/moxa-smartio.rst
+F:     Documentation/driver-api/tty/moxa-smartio.rst
 F:     drivers/tty/mxser.*
 
 MR800 AVERMEDIA USB FM RADIO DRIVER
@@ -15290,7 +15327,7 @@ F:      drivers/pci/controller/pci-v3-semi.c
 
 PCI ENDPOINT SUBSYSTEM
 M:     Kishon Vijay Abraham I <kishon@ti.com>
-M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M:     Lorenzo Pieralisi <lpieralisi@kernel.org>
 R:     Krzysztof Wilczyński <kw@linux.com>
 L:     linux-pci@vger.kernel.org
 S:     Supported
@@ -15353,7 +15390,7 @@ F:      Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
 F:     drivers/pci/controller/pci-xgene-msi.c
 
 PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
-M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M:     Lorenzo Pieralisi <lpieralisi@kernel.org>
 R:     Rob Herring <robh@kernel.org>
 R:     Krzysztof Wilczyński <kw@linux.com>
 L:     linux-pci@vger.kernel.org
@@ -15906,7 +15943,7 @@ F:      include/linux/dtpm.h
 
 POWER STATE COORDINATION INTERFACE (PSCI)
 M:     Mark Rutland <mark.rutland@arm.com>
-M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M:     Lorenzo Pieralisi <lpieralisi@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     drivers/firmware/psci/
@@ -17001,6 +17038,14 @@ S:     Supported
 F:     Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml
 F:     drivers/iio/adc/rzg2l_adc.c
 
+RENESAS RZ/N1 RTC CONTROLLER DRIVER
+M:     Miquel Raynal <miquel.raynal@bootlin.com>
+L:     linux-rtc@vger.kernel.org
+L:     linux-renesas-soc@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/rtc/renesas,rzn1-rtc.yaml
+F:     drivers/rtc/rtc-rzn1.c
+
 RENESAS R-CAR GEN3 & RZ/N1 NAND CONTROLLER DRIVER
 M:     Miquel Raynal <miquel.raynal@bootlin.com>
 L:     linux-mtd@lists.infradead.org
@@ -17710,6 +17755,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
 F:     Documentation/devicetree/bindings/scsi/
 F:     drivers/scsi/
+F:     drivers/ufs/
 F:     include/scsi/
 
 SCSI TAPE DRIVER
@@ -18285,7 +18331,7 @@ F:      drivers/net/ethernet/smsc/smc91x.*
 
 SECURE MONITOR CALL(SMC) CALLING CONVENTION (SMCCC)
 M:     Mark Rutland <mark.rutland@arm.com>
-M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M:     Lorenzo Pieralisi <lpieralisi@kernel.org>
 M:     Sudeep Holla <sudeep.holla@arm.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -18782,7 +18828,7 @@ S:      Maintained
 F:     arch/alpha/kernel/srm_env.c
 
 ST LSM6DSx IMU IIO DRIVER
-M:     Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+M:     Lorenzo Bianconi <lorenzo@kernel.org>
 L:     linux-iio@vger.kernel.org
 S:     Maintained
 W:     http://www.st.com/
@@ -19037,6 +19083,12 @@ S:     Maintained
 F:     Documentation/devicetree/bindings/nvmem/sunplus,sp7021-ocotp.yaml
 F:     drivers/nvmem/sunplus-ocotp.c
 
+SUNPLUS PWM DRIVER
+M:     Hammer Hsieh <hammerh0314@gmail.com>
+S:     Maintained
+F:     Documentation/devicetree/bindings/pwm/sunplus,sp7021-pwm.yaml
+F:     drivers/pwm/pwm-sunplus.c
+
 SUNPLUS RTC DRIVER
 M:     Vincent Shih <vincent.sunplus@gmail.com>
 L:     linux-rtc@vger.kernel.org
@@ -19057,6 +19109,13 @@ S:     Maintained
 F:     Documentation/devicetree/bindings/serial/sunplus,sp7021-uart.yaml
 F:     drivers/tty/serial/sunplus-uart.c
 
+SUNPLUS WATCHDOG DRIVER
+M:     Xiantao Hu <xt.hu@cqplus1.com>
+L:     linux-watchdog@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/watchdog/sunplus,sp7021-wdt.yaml
+F:     drivers/watchdog/sunplus_wdt.c
+
 SUPERH
 M:     Yoshinori Sato <ysato@users.sourceforge.jp>
 M:     Rich Felker <dalias@libc.org>
@@ -20353,35 +20412,28 @@ F:    drivers/cdrom/cdrom.c
 F:     include/linux/cdrom.h
 F:     include/uapi/linux/cdrom.h
 
-UNISYS S-PAR DRIVERS
-M:     David Kershner <david.kershner@unisys.com>
-L:     sparmaintainer@unisys.com (Unisys internal)
-S:     Supported
-F:     drivers/staging/unisys/
-F:     drivers/visorbus/
-F:     include/linux/visorbus.h
-
 UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER
 R:     Alim Akhtar <alim.akhtar@samsung.com>
 R:     Avri Altman <avri.altman@wdc.com>
+R:     Bart Van Assche <bvanassche@acm.org>
 L:     linux-scsi@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/ufs/
 F:     Documentation/scsi/ufs.rst
-F:     drivers/scsi/ufs/
+F:     drivers/ufs/core/
 
 UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
 M:     Pedro Sousa <pedrom.sousa@synopsys.com>
 L:     linux-scsi@vger.kernel.org
 S:     Supported
-F:     drivers/scsi/ufs/*dwc*
+F:     drivers/ufs/host/*dwc*
 
 UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER MEDIATEK HOOKS
 M:     Stanley Chu <stanley.chu@mediatek.com>
 L:     linux-scsi@vger.kernel.org
 L:     linux-mediatek@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-F:     drivers/scsi/ufs/ufs-mediatek*
+F:     drivers/ufs/host/ufs-mediatek*
 
 UNSORTED BLOCK IMAGES (UBI)
 M:     Richard Weinberger <richard@nod.at>
@@ -21029,6 +21081,7 @@ F:      include/uapi/linux/virtio_crypto.h
 VIRTIO DRIVERS FOR S390
 M:     Cornelia Huck <cohuck@redhat.com>
 M:     Halil Pasic <pasic@linux.ibm.com>
+M:     Eric Farman <farman@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     virtualization@lists.linux-foundation.org
 L:     kvm@vger.kernel.org
@@ -21179,7 +21232,7 @@ L:      linux-kernel@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
 F:     Documentation/driver-api/vme.rst
-F:     drivers/staging/vme/
+F:     drivers/staging/vme_user/
 F:     drivers/vme/
 F:     include/linux/vme*
 
@@ -21705,23 +21758,29 @@ F:    arch/arm64/include/asm/xen/
 F:     arch/arm64/xen/
 
 XEN HYPERVISOR INTERFACE
-M:     Boris Ostrovsky <boris.ostrovsky@oracle.com>
 M:     Juergen Gross <jgross@suse.com>
-R:     Stefano Stabellini <sstabellini@kernel.org>
+M:     Stefano Stabellini <sstabellini@kernel.org>
+R:     Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
 F:     Documentation/ABI/stable/sysfs-hypervisor-xen
 F:     Documentation/ABI/testing/sysfs-hypervisor-xen
-F:     arch/x86/include/asm/pvclock-abi.h
-F:     arch/x86/include/asm/xen/
-F:     arch/x86/platform/pvh/
-F:     arch/x86/xen/
 F:     drivers/*/xen-*front.c
 F:     drivers/xen/
 F:     include/uapi/xen/
 F:     include/xen/
 
+XEN HYPERVISOR X86
+M:     Juergen Gross <jgross@suse.com>
+R:     Boris Ostrovsky <boris.ostrovsky@oracle.com>
+L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
+S:     Supported
+F:     arch/x86/include/asm/pvclock-abi.h
+F:     arch/x86/include/asm/xen/
+F:     arch/x86/platform/pvh/
+F:     arch/x86/xen/
+
 XEN NETWORK BACKEND DRIVER
 M:     Wei Liu <wei.liu@kernel.org>
 M:     Paul Durrant <paul@xen.org>
@@ -21826,6 +21885,12 @@ F:     drivers/misc/Makefile
 F:     drivers/misc/xilinx_sdfec.c
 F:     include/uapi/misc/xilinx_sdfec.h
 
+XILINX PWM DRIVER
+M:     Sean Anderson <sean.anderson@seco.com>
+S:     Maintained
+F:     drivers/pwm/pwm-xilinx.c
+F:     include/clocksource/timer-xilinx.h
+
 XILINX UARTLITE SERIAL DRIVER
 M:     Peter Korsgaard <jacmet@sunsite.dk>
 L:     linux-serial@vger.kernel.org
@@ -21937,6 +22002,13 @@ L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zbud.c
 
+Z3FOLD COMPRESSED PAGE ALLOCATOR
+M:     Vitaly Wool <vitaly.wool@konsulko.com>
+R:     Miaohe Lin <linmiaohe@huawei.com>
+L:     linux-mm@kvack.org
+S:     Maintained
+F:     mm/z3fold.c
+
 ZD1211RW WIRELESS DRIVER
 M:     Ulrich Kunitz <kune@deine-taler.de>
 L:     linux-wireless@vger.kernel.org
index edc3f44..c43d825 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
-PATCHLEVEL = 18
+PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Superb Owl
 
 # *DOCUMENTATION*
@@ -1490,7 +1490,7 @@ CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
 
 # Directories & files removed with 'make mrproper'
 MRPROPER_FILES += include/config include/generated          \
-                 arch/$(SRCARCH)/include/generated .tmp_objdiff \
+                 arch/$(SRCARCH)/include/generated .objdiff \
                  debian snap tar-install \
                  .config .config.old .version \
                  Module.symvers \
@@ -1857,7 +1857,7 @@ clean: $(clean-dirs)
                -o -name '*.lex.c' -o -name '*.tab.[ch]' \
                -o -name '*.asn1.[ch]' \
                -o -name '*.symtypes' -o -name 'modules.order' \
-               -o -name '.tmp_*.o.*' \
+               -o -name '.tmp_*' \
                -o -name '*.c.[012]*.*' \
                -o -name '*.ll' \
                -o -name '*.gcno' \
index 81a3439..fcf9a41 100644 (file)
@@ -1048,6 +1048,10 @@ config HAVE_NOINSTR_HACK
 config HAVE_NOINSTR_VALIDATION
        bool
 
+config HAVE_UACCESS_VALIDATION
+       bool
+       select OBJTOOL
+
 config HAVE_STACK_VALIDATION
        bool
        help
index 4575ba3..f1290b2 100644 (file)
@@ -2,10 +2,8 @@
 #ifndef _ALPHA_TERMBITS_H
 #define _ALPHA_TERMBITS_H
 
-#include <linux/posix_types.h>
+#include <asm-generic/termbits-common.h>
 
-typedef unsigned char  cc_t;
-typedef unsigned int   speed_t;
 typedef unsigned int   tcflag_t;
 
 /*
@@ -53,76 +51,58 @@ struct ktermios {
 };
 
 /* c_cc characters */
-#define VEOF 0
-#define VEOL 1
-#define VEOL2 2
-#define VERASE 3
-#define VWERASE 4
-#define VKILL 5
-#define VREPRINT 6
-#define VSWTC 7
-#define VINTR 8
-#define VQUIT 9
-#define VSUSP 10
-#define VSTART 12
-#define VSTOP 13
-#define VLNEXT 14
-#define VDISCARD 15
-#define VMIN 16
-#define VTIME 17
+#define VEOF            0
+#define VEOL            1
+#define VEOL2           2
+#define VERASE          3
+#define VWERASE                 4
+#define VKILL           5
+#define VREPRINT        6
+#define VSWTC           7
+#define VINTR           8
+#define VQUIT           9
+#define VSUSP          10
+#define VSTART         12
+#define VSTOP          13
+#define VLNEXT         14
+#define VDISCARD       15
+#define VMIN           16
+#define VTIME          17
 
 /* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK  0000020
-#define ISTRIP 0000040
-#define INLCR  0000100
-#define IGNCR  0000200
-#define ICRNL  0000400
-#define IXON   0001000
-#define IXOFF  0002000
-#define IXANY  0004000
-#define IUCLC  0010000
-#define IMAXBEL        0020000
-#define IUTF8  0040000
+#define IXON   0x0200
+#define IXOFF  0x0400
+#define IUCLC  0x1000
+#define IMAXBEL        0x2000
+#define IUTF8  0x4000
 
 /* c_oflag bits */
-#define OPOST  0000001
-#define ONLCR  0000002
-#define OLCUC  0000004
-
-#define OCRNL  0000010
-#define ONOCR  0000020
-#define ONLRET 0000040
-
-#define OFILL  00000100
-#define OFDEL  00000200
-#define NLDLY  00001400
-#define   NL0  00000000
-#define   NL1  00000400
-#define   NL2  00001000
-#define   NL3  00001400
-#define TABDLY 00006000
-#define   TAB0 00000000
-#define   TAB1 00002000
-#define   TAB2 00004000
-#define   TAB3 00006000
-#define CRDLY  00030000
-#define   CR0  00000000
-#define   CR1  00010000
-#define   CR2  00020000
-#define   CR3  00030000
-#define FFDLY  00040000
-#define   FF0  00000000
-#define   FF1  00040000
-#define BSDLY  00100000
-#define   BS0  00000000
-#define   BS1  00100000
-#define VTDLY  00200000
-#define   VT0  00000000
-#define   VT1  00200000
+#define ONLCR  0x00002
+#define OLCUC  0x00004
+#define NLDLY  0x00300
+#define   NL0  0x00000
+#define   NL1  0x00100
+#define   NL2  0x00200
+#define   NL3  0x00300
+#define TABDLY 0x00c00
+#define   TAB0 0x00000
+#define   TAB1 0x00400
+#define   TAB2 0x00800
+#define   TAB3 0x00c00
+#define CRDLY  0x03000
+#define   CR0  0x00000
+#define   CR1  0x01000
+#define   CR2  0x02000
+#define   CR3  0x03000
+#define FFDLY  0x04000
+#define   FF0  0x00000
+#define   FF1  0x04000
+#define BSDLY  0x08000
+#define   BS0  0x00000
+#define   BS1  0x08000
+#define VTDLY  0x10000
+#define   VT0  0x00000
+#define   VT1  0x10000
 /*
  * Should be equivalent to TAB3, see description of TAB3 in
  * POSIX.1-2008, Ch. 11.2.3 "Output Modes"
@@ -130,61 +110,36 @@ struct ktermios {
 #define XTABS  TAB3
 
 /* c_cflag bit meaning */
-#define CBAUD  0000037
-#define  B0    0000000         /* hang up */
-#define  B50   0000001
-#define  B75   0000002
-#define  B110  0000003
-#define  B134  0000004
-#define  B150  0000005
-#define  B200  0000006
-#define  B300  0000007
-#define  B600  0000010
-#define  B1200 0000011
-#define  B1800 0000012
-#define  B2400 0000013
-#define  B4800 0000014
-#define  B9600 0000015
-#define  B19200        0000016
-#define  B38400        0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CBAUDEX 0000000
-#define  B57600   00020
-#define  B115200  00021
-#define  B230400  00022
-#define  B460800  00023
-#define  B500000  00024
-#define  B576000  00025
-#define  B921600  00026
-#define B1000000  00027
-#define B1152000  00030
-#define B1500000  00031
-#define B2000000  00032
-#define B2500000  00033
-#define B3000000  00034
-#define B3500000  00035
-#define B4000000  00036
-#define BOTHER    00037
-
-#define CSIZE  00001400
-#define   CS5  00000000
-#define   CS6  00000400
-#define   CS7  00001000
-#define   CS8  00001400
-
-#define CSTOPB 00002000
-#define CREAD  00004000
-#define PARENB 00010000
-#define PARODD 00020000
-#define HUPCL  00040000
-
-#define CLOCAL 00100000
-#define CMSPAR   010000000000          /* mark or space (stick) parity */
-#define CRTSCTS          020000000000          /* flow control */
-
-#define CIBAUD 07600000
-#define IBSHIFT        16
+#define CBAUD          0x0000001f
+#define CBAUDEX                0x00000000
+#define BOTHER         0x0000001f
+#define     B57600     0x00000010
+#define    B115200     0x00000011
+#define    B230400     0x00000012
+#define    B460800     0x00000013
+#define    B500000     0x00000014
+#define    B576000     0x00000015
+#define    B921600     0x00000016
+#define   B1000000     0x00000017
+#define   B1152000     0x00000018
+#define   B1500000     0x00000019
+#define   B2000000     0x0000001a
+#define   B2500000     0x0000001b
+#define   B3000000     0x0000001c
+#define   B3500000     0x0000001d
+#define   B4000000     0x0000001e
+#define CSIZE          0x00000300
+#define   CS5          0x00000000
+#define   CS6          0x00000100
+#define   CS7          0x00000200
+#define   CS8          0x00000300
+#define CSTOPB         0x00000400
+#define CREAD          0x00000800
+#define PARENB         0x00001000
+#define PARODD         0x00002000
+#define HUPCL          0x00004000
+#define CLOCAL         0x00008000
+#define CIBAUD         0x001f0000
 
 /* c_lflag bits */
 #define ISIG   0x00000080
@@ -204,17 +159,6 @@ struct ktermios {
 #define IEXTEN 0x00000400
 #define EXTPROC        0x10000000
 
-/* Values for the ACTION argument to `tcflow'.  */
-#define        TCOOFF          0
-#define        TCOON           1
-#define        TCIOFF          2
-#define        TCION           3
-
-/* Values for the QUEUE_SELECTOR argument to `tcflush'.  */
-#define        TCIFLUSH        0
-#define        TCOFLUSH        1
-#define        TCIOFLUSH       2
-
 /* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'.  */
 #define        TCSANOW         0
 #define        TCSADRAIN       1
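The alpha termbits.h conversion above is intended to be value-preserving: the shared c_cc/speed definitions move into <asm-generic/termbits-common.h>, and the constants that remain are only rewritten from octal to hexadecimal. A tiny standalone check (constants copied from the removed and added lines above; not part of the patch itself) makes that equivalence concrete:

#include <assert.h>

int main(void)
{
	/* old octal value (removed line)  ==  new hex value (added line) */
	assert(0001000  == 0x0200);       /* IXON   */
	assert(00006000 == 0x00c00);      /* TABDLY */
	assert(00001400 == 0x00000300);   /* CSIZE  */
	assert(00002000 == 0x00000400);   /* CSTOPB */
	return 0;
}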
index 8bbeebb..d257293 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/types.h>
 #include <linux/ipc.h>
 #include <linux/namei.h>
+#include <linux/mount.h>
 #include <linux/uio.h>
 #include <linux/vfs.h>
 #include <linux/rcupdate.h>
index 5f85270..e2e25f8 100644 (file)
@@ -125,7 +125,7 @@ common_shutdown_1(void *generic_ptr)
        /* Wait for the secondaries to halt. */
        set_cpu_present(boot_cpuid, false);
        set_cpu_possible(boot_cpuid, false);
-       while (cpumask_weight(cpu_present_mask))
+       while (!cpumask_empty(cpu_present_mask))
                barrier();
 #endif
 
@@ -233,10 +233,11 @@ release_thread(struct task_struct *dead_task)
 /*
  * Copy architecture-specific thread state
  */
-int copy_thread(unsigned long clone_flags, unsigned long usp,
-               unsigned long kthread_arg, struct task_struct *p,
-               unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long usp = args->stack;
+       unsigned long tls = args->tls;
        extern void ret_from_fork(void);
        extern void ret_from_kernel_thread(void);
 
@@ -249,13 +250,13 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        childti->pcb.ksp = (unsigned long) childstack;
        childti->pcb.flags = 1; /* set FEN, clear everything else */
 
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                /* kernel thread */
                memset(childstack, 0,
                        sizeof(struct switch_stack) + sizeof(struct pt_regs));
                childstack->r26 = (unsigned long) ret_from_kernel_thread;
-               childstack->r9 = usp;   /* function */
-               childstack->r10 = kthread_arg;
+               childstack->r9 = (unsigned long) args->fn;
+               childstack->r10 = (unsigned long) args->fn_arg;
                childregs->hae = alpha_mv.hae_cache;
                childti->pcb.usp = 0;
                return 0;
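The copy_thread() hunks here and in the ARC file that follows share one pattern: the old (clone_flags, usp, kthread_arg, p, tls) parameter list collapses into struct kernel_clone_args, and "is this a kernel thread?" becomes a test of args->fn instead of p->flags & (PF_KTHREAD | PF_IO_WORKER). A condensed sketch of that shape for an imaginary architecture (reg_fn, reg_arg, sp and tls are placeholder register names, not a real pt_regs layout):

/* Sketch only: the kernel_clone_args-based copy_thread() shape. */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	struct pt_regs *childregs = task_pt_regs(p);

	if (unlikely(args->fn)) {
		/* Kernel thread: no user register state to copy; arrange for
		 * the fork return path to call args->fn(args->fn_arg). */
		memset(childregs, 0, sizeof(*childregs));
		childregs->reg_fn  = (unsigned long)args->fn;
		childregs->reg_arg = (unsigned long)args->fn_arg;
		return 0;
	}

	/* User thread: start from the parent's registers, then apply the
	 * requested stack pointer and TLS from the clone arguments. */
	*childregs = *current_pt_regs();
	if (args->stack)
		childregs->sp = args->stack;
	if (args->flags & CLONE_SETTLS)
		childregs->tls = args->tls;
	return 0;
}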
index 5f7f5aa..3369f07 100644 (file)
@@ -162,10 +162,11 @@ asmlinkage void ret_from_fork(void);
  * |    user_r25    |
  * ------------------  <===== END of PAGE
  */
-int copy_thread(unsigned long clone_flags, unsigned long usp,
-               unsigned long kthread_arg, struct task_struct *p,
-               unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long usp = args->stack;
+       unsigned long tls = args->tls;
        struct pt_regs *c_regs;        /* child's pt_regs */
        unsigned long *childksp;       /* to unwind out of __switch_to() */
        struct callee_regs *c_callee;  /* child's callee regs */
@@ -191,11 +192,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        childksp[0] = 0;                        /* fp */
        childksp[1] = (unsigned long)ret_from_fork; /* blink */
 
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                memset(c_regs, 0, sizeof(struct pt_regs));
 
-               c_callee->r13 = kthread_arg;
-               c_callee->r14 = usp;  /* function */
+               c_callee->r13 = (unsigned long)args->fn_arg;
+               c_callee->r14 = (unsigned long)args->fn;
 
                return 0;
        }
index 903165a..7630ba9 100644 (file)
@@ -357,25 +357,6 @@ config ARCH_FOOTBRIDGE
          Support for systems based on the DC21285 companion chip
          ("FootBridge"), such as the Simtec CATS and the Rebel NetWinder.
 
-config ARCH_PXA
-       bool "PXA2xx/PXA3xx-based"
-       depends on CPU_LITTLE_ENDIAN
-       select ARCH_MTD_XIP
-       select ARM_CPU_SUSPEND if PM
-       select AUTO_ZRELADDR
-       select COMMON_CLK
-       select CLKSRC_PXA
-       select CLKSRC_MMIO
-       select TIMER_OF
-       select CPU_XSCALE if !CPU_XSC3
-       select GPIO_PXA
-       select GPIOLIB
-       select IRQ_DOMAIN
-       select PLAT_PXA
-       select SPARSE_IRQ
-       help
-         Support for Intel/Marvell's PXA2xx/PXA3xx processor line.
-
 config ARCH_RPC
        bool "RiscPC"
        depends on !CC_IS_CLANG && GCC_VERSION < 90100 && GCC_VERSION >= 60000
@@ -415,19 +396,6 @@ config ARCH_SA1100
        help
          Support for StrongARM 11x0 based boards.
 
-config ARCH_OMAP1
-       bool "TI OMAP1"
-       depends on CPU_LITTLE_ENDIAN
-       select CLKSRC_MMIO
-       select FORCE_PCI if PCCARD
-       select GENERIC_IRQ_CHIP
-       select GPIOLIB
-       select HAVE_LEGACY_CLK
-       select IRQ_DOMAIN
-       select SPARSE_IRQ
-       help
-         Support for older TI OMAP1 (omap7xx, omap15xx or omap16xx)
-
 endchoice
 
 menu "Multiple platform selection"
@@ -550,6 +518,8 @@ source "arch/arm/mach-highbank/Kconfig"
 
 source "arch/arm/mach-hisi/Kconfig"
 
+source "arch/arm/mach-hpe/Kconfig"
+
 source "arch/arm/mach-imx/Kconfig"
 
 source "arch/arm/mach-iop32x/Kconfig"
@@ -593,7 +563,6 @@ source "arch/arm/mach-orion5x/Kconfig"
 source "arch/arm/mach-oxnas/Kconfig"
 
 source "arch/arm/mach-pxa/Kconfig"
-source "arch/arm/plat-pxa/Kconfig"
 
 source "arch/arm/mach-qcom/Kconfig"
 
@@ -672,9 +641,6 @@ config PLAT_ORION_LEGACY
        bool
        select PLAT_ORION
 
-config PLAT_PXA
-       bool
-
 config PLAT_VERSATILE
        bool
 
index 954ec70..c8e3633 100644 (file)
@@ -179,6 +179,7 @@ machine-$(CONFIG_ARCH_FOOTBRIDGE)   += footbridge
 machine-$(CONFIG_ARCH_GEMINI)          += gemini
 machine-$(CONFIG_ARCH_HIGHBANK)                += highbank
 machine-$(CONFIG_ARCH_HISI)            += hisi
+machine-$(CONFIG_ARCH_HPE)             += hpe
 machine-$(CONFIG_ARCH_IOP32X)          += iop32x
 machine-$(CONFIG_ARCH_IXP4XX)          += ixp4xx
 machine-$(CONFIG_ARCH_KEYSTONE)                += keystone
@@ -225,7 +226,6 @@ machine-$(CONFIG_PLAT_SPEAR)                += spear
 # Platform directory name.  This list is sorted alphanumerically
 # by CONFIG_* macro name.
 plat-$(CONFIG_PLAT_ORION)      += orion
-plat-$(CONFIG_PLAT_PXA)                += pxa
 
 # The byte offset of the kernel image in RAM from the start of RAM.
 TEXT_OFFSET := $(textofs-y)
index edfbeda..1848998 100644 (file)
@@ -259,6 +259,8 @@ dtb-$(CONFIG_ARCH_HISI) += \
        hi3519-demb.dtb
 dtb-$(CONFIG_ARCH_HIX5HD2) += \
        hisi-x5hd2-dkb.dtb
+dtb-$(CONFIG_ARCH_HPE_GXP) += \
+       hpe-bmc-dl360gen10.dtb
 dtb-$(CONFIG_ARCH_INTEGRATOR) += \
        integratorap.dtb \
        integratorap-im-pd1.dtb \
@@ -1584,6 +1586,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
        aspeed-bmc-lenovo-hr630.dtb \
        aspeed-bmc-lenovo-hr855xg2.dtb \
        aspeed-bmc-microsoft-olympus.dtb \
+       aspeed-bmc-nuvia-dc-scm.dtb \
        aspeed-bmc-opp-lanyang.dtb \
        aspeed-bmc-opp-mihawk.dtb \
        aspeed-bmc-opp-mowgli.dtb \
index b7eb552..5a6063b 100644 (file)
                reg = <0x80000000 0x80000000>;
        };
 
+       reserved-memory {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               video_engine_memory: video {
+                       size = <0x04000000>;
+                       alignment = <0x01000000>;
+                       compatible = "shared-dma-pool";
+                       reusable;
+               };
+
+               gfx_memory: framebuffer {
+                       size = <0x01000000>;
+                       alignment = <0x01000000>;
+                       compatible = "shared-dma-pool";
+                       reusable;
+               };
+       };
+
        vcc_sdhci0: regulator-vcc-sdhci0 {
                compatible = "regulator-fixed";
                regulator-name = "SDHCI0 Vcc";
 &mac0 {
        status = "okay";
 
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-rxid";
        phy-handle = <&ethphy0>;
 
        pinctrl-names = "default";
 &mac1 {
        status = "okay";
 
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-rxid";
        phy-handle = <&ethphy1>;
 
        pinctrl-names = "default";
        vqmmc-supply = <&vccq_sdhci1>;
        clk-phase-sd-hs = <7>, <200>;
 };
+
+&vhub {
+       status = "okay";
+       pinctrl-names = "default";
+};
+
+&video {
+       status = "okay";
+       memory-region = <&video_engine_memory>;
+};
+
+&gfx {
+       status = "okay";
+       memory-region = <&gfx_memory>;
+};
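The reserved-memory pools added above are consumed through the new memory-region properties on the video and gfx nodes. A driver normally claims such a shared-dma-pool in probe() via of_reserved_mem_device_init(); the fragment below is a minimal illustration of that call sequence (demo_probe is a made-up example, not the actual ASPEED video/gfx driver code):

#include <linux/dma-mapping.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static int demo_probe(struct platform_device *pdev)
{
	dma_addr_t dma;
	void *buf;
	int ret;

	/* Attach the node's "memory-region" pool to this device so coherent
	 * DMA allocations are satisfied from the reserved region. */
	ret = of_reserved_mem_device_init(&pdev->dev);
	if (ret)
		dev_warn(&pdev->dev, "no reserved memory, using system memory\n");

	buf = dma_alloc_coherent(&pdev->dev, SZ_1M, &dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... hand buf/dma to the hardware ... */

	dma_free_coherent(&pdev->dev, SZ_1M, buf, dma);
	of_reserved_mem_device_release(&pdev->dev);
	return 0;
}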
index eaf1bc2..41d2b15 100644 (file)
                };
        };
 
-       spi2_gpio: spi2-gpio {
-               compatible = "spi-gpio";
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               gpio-sck = <&gpio0 ASPEED_GPIO(X, 3) GPIO_ACTIVE_HIGH>;
-               gpio-mosi = <&gpio0 ASPEED_GPIO(X, 4) GPIO_ACTIVE_HIGH>;
-               gpio-miso = <&gpio0 ASPEED_GPIO(X, 5) GPIO_ACTIVE_HIGH>;
-               num-chipselects = <1>;
-               cs-gpios = <&gpio0 ASPEED_GPIO(X, 0) GPIO_ACTIVE_LOW>;
-
-               flash@0 {
-                       reg = <0>;
-                       compatible = "jedec,spi-nor";
-                       m25p,fast-read;
-                       label = "pnor";
-                       spi-max-frequency = <100000000>;
-               };
-       };
-
        switchphy: ethernet-phy@0 {
                // Fixed link
        };
        };
 };
 
+&spi2 {
+       status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_spi2_default>;
+
+       flash@0 {
+               status = "okay";
+               m25p,fast-read;
+               label = "pnor";
+               spi-max-frequency = <50000000>;
+       };
+};
+
 &i2c0 {
        status = "okay";
        ina230@45 {
                reg = <0x4f>;
        };
 
+       sled1_ioexp41: pca9536@41 {
+               compatible = "nxp,pca9536";
+               reg = <0x41>;
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               gpio-line-names =
+               "SLED1_SWD_MUX", "SLED1_XRES_SWD_N",
+               "SLED1_CLKREQ_N", "SLED1_PCIE_PWR_EN";
+       };
+
        sled1_ioexp: pca9539@76 {
                compatible = "nxp,pca9539";
                reg = <0x76>;
                        op-sink-microwatt = <10000000>;
                };
        };
+
+       eeprom@54 {
+               compatible = "atmel,24c64";
+               reg = <0x54>;
+       };
 };
 
 &i2c1 {
                reg = <0x4f>;
        };
 
+       sled2_ioexp41: pca9536@41 {
+               compatible = "nxp,pca9536";
+               reg = <0x41>;
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               gpio-line-names =
+               "SLED2_SWD_MUX", "SLED2_XRES_SWD_N",
+               "SLED2_CLKREQ_N", "SLED2_PCIE_PWR_EN";
+       };
+
        sled2_ioexp: pca9539@76 {
                compatible = "nxp,pca9539";
                reg = <0x76>;
                        op-sink-microwatt = <10000000>;
                };
        };
+
+       eeprom@54 {
+               compatible = "atmel,24c64";
+               reg = <0x54>;
+       };
 };
 
 &i2c2 {
                reg = <0x4f>;
        };
 
+       sled3_ioexp41: pca9536@41 {
+               compatible = "nxp,pca9536";
+               reg = <0x41>;
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               gpio-line-names =
+               "SLED3_SWD_MUX", "SLED3_XRES_SWD_N",
+               "SLED3_CLKREQ_N", "SLED3_PCIE_PWR_EN";
+       };
+
        sled3_ioexp: pca9539@76 {
                compatible = "nxp,pca9539";
                reg = <0x76>;
                        op-sink-microwatt = <10000000>;
                };
        };
+
+       eeprom@54 {
+               compatible = "atmel,24c64";
+               reg = <0x54>;
+       };
 };
 
 &i2c3 {
                reg = <0x4f>;
        };
 
+       sled4_ioexp41: pca9536@41 {
+               compatible = "nxp,pca9536";
+               reg = <0x41>;
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               gpio-line-names =
+               "SLED4_SWD_MUX", "SLED4_XRES_SWD_N",
+               "SLED4_CLKREQ_N", "SLED4_PCIE_PWR_EN";
+       };
+
        sled4_ioexp: pca9539@76 {
                compatible = "nxp,pca9539";
                reg = <0x76>;
                        op-sink-microwatt = <10000000>;
                };
        };
+
+       eeprom@54 {
+               compatible = "atmel,24c64";
+               reg = <0x54>;
+       };
 };
 
 &i2c4 {
                reg = <0x4f>;
        };
 
+       sled5_ioexp41: pca9536@41 {
+               compatible = "nxp,pca9536";
+               reg = <0x41>;
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               gpio-line-names =
+               "SLED5_SWD_MUX", "SLED5_XRES_SWD_N",
+               "SLED5_CLKREQ_N", "SLED5_PCIE_PWR_EN";
+       };
+
        sled5_ioexp: pca9539@76 {
                compatible = "nxp,pca9539";
                reg = <0x76>;
                        op-sink-microwatt = <10000000>;
                };
        };
+
+       eeprom@54 {
+               compatible = "atmel,24c64";
+               reg = <0x54>;
+       };
 };
 
 &i2c5 {
                reg = <0x4f>;
        };
 
+       sled6_ioexp41: pca9536@41 {
+               compatible = "nxp,pca9536";
+               reg = <0x41>;
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               gpio-line-names =
+               "SLED6_SWD_MUX", "SLED6_XRES_SWD_N",
+               "SLED6_CLKREQ_N", "SLED6_PCIE_PWR_EN";
+       };
+
        sled6_ioexp: pca9539@76 {
                compatible = "nxp,pca9539";
                reg = <0x76>;
                        op-sink-microwatt = <10000000>;
                };
        };
+
+       eeprom@54 {
+               compatible = "atmel,24c64";
+               reg = <0x54>;
+       };
 };
 
 &i2c6 {
                compatible = "adi,adm1278";
                reg = <0x11>;
                shunt-resistor-micro-ohms = <300>;
+               adi,volt-curr-sample-average = <128>;
+               adi,power-sample-average = <128>;
        };
 
        tmp421@4c {
 &gpio0 {
        gpio-line-names =
        /*A0-A7*/       "","","","","","","","",
-       /*B0-B7*/       "","","SEL_SPI2_MUX","SPI2_MUX1",
-                       "SPI2_MUX2","SPI2_MUX3","","",
+       /*B0-B7*/       "FUSB302_SLED1_INT_N","FUSB302_SLED2_INT_N",
+                       "SEL_SPI2_MUX","SPI2_MUX1",
+                       "SPI2_MUX2","SPI2_MUX3",
+                       "","FUSB302_SLED3_INT_N",
        /*C0-C7*/       "","","","","","","","",
        /*D0-D7*/       "","","","","","","","",
        /*E0-E7*/       "","","","","","","","",
-       /*F0-F7*/       "","","","","","","","",
-       /*G0-G7*/       "BSM_FRU_WP","SWITCH_FRU_MUX","","",
+       /*F0-F7*/       "BMC_SLED1_STCK","BMC_SLED2_STCK",
+                       "BMC_SLED3_STCK","BMC_SLED4_STCK",
+                       "BMC_SLED5_STCK","BMC_SLED6_STCK",
+                       "","",
+       /*G0-G7*/       "BSM_FRU_WP","SWITCH_FRU_MUX","","FM_SOL_UART_CH_SEL",
                        "PWRGD_P1V05_VDDCORE","PWRGD_P1V5_VDD","","",
        /*H0-H7*/       "presence-riser1","presence-riser2",
                        "presence-sled1","presence-sled2",
                        "presence-sled3","presence-sled4",
                        "presence-sled5","presence-sled6",
-       /*I0-I7*/       "REV_ID0","","REV_ID1","REV_ID2",
-                       "","BSM_FLASH_WP_STATUS","BMC_TPM_PRES","",
+       /*I0-I7*/       "REV_ID0","",
+                       "REV_ID1","REV_ID2",
+                       "","BSM_FLASH_WP_STATUS",
+                       "BMC_TPM_PRES_N","FUSB302_SLED6_INT_N",
        /*J0-J7*/       "","","","","","","","",
        /*K0-K7*/       "","","","","","","","",
        /*L0-L7*/       "","","","","","BMC_RTC_INT","","",
-       /*M0-M7*/       "ALERT_SLED1","ALERT_SLED2",
-                       "ALERT_SLED3","ALERT_SLED4",
-                       "ALERT_SLED5","ALERT_SLED6",
-                       "P12V_AUX_ALERT1","",
-       /*N0-N7*/       "","","","","","","","",
+       /*M0-M7*/       "ALERT_SLED1_N","ALERT_SLED2_N",
+                       "ALERT_SLED3_N","ALERT_SLED4_N",
+                       "ALERT_SLED5_N","ALERT_SLED6_N",
+                       "","",
+       /*N0-N7*/       "LED_POSTCODE_0","LED_POSTCODE_1",
+                       "LED_POSTCODE_2","LED_POSTCODE_3",
+                       "LED_POSTCODE_4","LED_POSTCODE_5",
+                       "LED_POSTCODE_6","LED_POSTCODE_7",
        /*O0-O7*/       "","","","",
                        "","BOARD_ID0","BOARD_ID1","BOARD_ID2",
        /*P0-P7*/       "","","","","","","","BMC_HEARTBEAT",
        /*Q0-Q7*/       "","","","","","","","",
        /*R0-R7*/       "","","","","","","","",
        /*S0-S7*/       "","","","BAT_DETECT",
-                       "BMC_BT_WP0","BMC_BT_WP1","","",
+                       "BMC_BT_WP0_N","BMC_BT_WP1_N","","FUSB302_SLED4_INT_N",
        /*T0-T7*/       "","","","","","","","",
        /*U0-U7*/       "","","","","","","","",
-       /*V0-V7*/       "PWRGD_CNS_PSU","RST_BMC_MVL","","PSU_PRSNT",
+       /*V0-V7*/       "PWRGD_CNS_PSU","RST_BMC_MVL_N",
+                       "P12V_AUX_ALERT1_N","PSU_PRSNT",
                        "USB2_SEL0_A","USB2_SEL1_A",
                        "USB2_SEL0_B","USB2_SEL1_B",
-       /*W0-W7*/       "RST_FRONT_IOEXP","","","","","","","",
+       /*W0-W7*/       "RST_FRONT_IOEXP_N","","","","","","","",
        /*X0-X7*/       "","","","","","","","",
-       /*Y0-Y7*/       "BMC_SELF_HW_RST","BSM_PRSNT","BSM_FLASH_LATCH","",
+       /*Y0-Y7*/       "BMC_SELF_HW_RST","BSM_PRSNT_N",
+                       "BSM_FLASH_LATCH_N","FUSB302_SLED5_INT_N",
                        "","","","",
        /*Z0-Z7*/       "","","","","","","","";
 };
                &pinctrl_adc14_default &pinctrl_adc15_default>;
 };
 
+&mdio0 {
+       status = "okay";
+       /* TODO: Add Marvell 88E6191X */
+};
+
 &mdio3 {
        status = "okay";
        /* TODO: Add Marvell 88X3310 */
 };
+
+&ehci0 {
+       status = "okay";
+};
index 578f9e2..382da79 100644 (file)
        /*P0-P7*/       "","","","","led-pcieslot-power","","","",
        /*Q0-Q7*/       "","","regulator-standby-faulted","","","","","",
        /*R0-R7*/       "bmc-tpm-reset","power-chassis-control","power-chassis-good","","","I2C_FLASH_MICRO_N","","",
-       /*S0-S7*/       "","","","","","","","",
+       /*S0-S7*/       "","","","","power-ffs-sync-history","","","",
        /*T0-T7*/       "","","","","","","","",
        /*U0-U7*/       "","","","","","","","",
        /*V0-V7*/       "","BMC_3RESTART_ATTEMPT_P","","","","","","",
index 528b49e..7213434 100644 (file)
        /*Q0-Q7*/       "cfam-reset","","regulator-standby-faulted","","","","","",
        /*R0-R7*/       "bmc-tpm-reset","power-chassis-control","power-chassis-good","","","","","",
        /*S0-S7*/       "presence-ps0","presence-ps1","presence-ps2","presence-ps3",
-                               "","","","",
+       "power-ffs-sync-history","","","",
        /*T0-T7*/       "","","","","","","","",
        /*U0-U7*/       "","","","","","","","",
        /*V0-V7*/       "","","","","","","","",
diff --git a/arch/arm/boot/dts/aspeed-bmc-nuvia-dc-scm.dts b/arch/arm/boot/dts/aspeed-bmc-nuvia-dc-scm.dts
new file mode 100644 (file)
index 0000000..f4a97cf
--- /dev/null
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+
+/dts-v1/;
+
+#include "aspeed-g6.dtsi"
+
+/ {
+       model = "Nuvia DC-SCM BMC";
+       compatible = "nuvia,dc-scm-bmc", "aspeed,ast2600";
+
+       aliases {
+               serial4 = &uart5;
+       };
+
+       chosen {
+               stdout-path = &uart5;
+               bootargs = "console=ttyS4,115200n8";
+       };
+
+       memory@80000000 {
+               device_type = "memory";
+               reg = <0x80000000 0x40000000>;
+       };
+};
+
+&mdio3 {
+       status = "okay";
+
+       ethphy3: ethernet-phy@1 {
+               compatible = "ethernet-phy-ieee802.3-c22";
+               reg = <1>;
+       };
+};
+
+&mac2 {
+       status = "okay";
+
+       /* Bootloader sets up the MAC to insert delay */
+       phy-mode = "rgmii";
+       phy-handle = <&ethphy3>;
+
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_rgmii3_default>;
+};
+
+&mac3 {
+       status = "okay";
+
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_rmii4_default>;
+
+       use-ncsi;
+};
+
+&rtc {
+       status = "okay";
+};
+
+&fmc {
+       status = "okay";
+
+       flash@0 {
+               status = "okay";
+               m25p,fast-read;
+               label = "bmc";
+               spi-max-frequency = <133000000>;
+#include "openbmc-flash-layout-64.dtsi"
+       };
+
+       flash@1 {
+               status = "okay";
+               m25p,fast-read;
+               label = "alt-bmc";
+               spi-max-frequency = <133000000>;
+#include "openbmc-flash-layout-64-alt.dtsi"
+       };
+};
+
+&spi1 {
+       status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_spi1_default>;
+
+       flash@0 {
+               status = "okay";
+               m25p,fast-read;
+               label = "bios";
+               spi-max-frequency = <133000000>;
+       };
+};
+
+&gpio0 {
+       gpio-line-names =
+       /*A0-A7*/       "","","","","","","","",
+       /*B0-B7*/       "BMC_FLASH_MUX_SEL","","","","","","","",
+       /*C0-C7*/       "","","","","","","","",
+       /*D0-D7*/       "","","","","","","","",
+       /*E0-E7*/       "","","","","","","","",
+       /*F0-F7*/       "","","","","","","","",
+       /*G0-G7*/       "","","","","","","","",
+       /*H0-H7*/       "","","","","","","","",
+       /*I0-I7*/       "","","","","","","","",
+       /*J0-J7*/       "","","","","","","","",
+       /*K0-K7*/       "","","","","","","","",
+       /*L0-L7*/       "","","","","","","","",
+       /*M0-M7*/       "","","","","","","","",
+       /*N0-N7*/       "BMC_FWSPI_RST_N","","GPIO_1_BMC_3V3","","","","","",
+       /*O0-O7*/       "JTAG_MUX_A","JTAG_MUX_B","","","","","","",
+       /*P0-P7*/       "","","","","","","","",
+       /*Q0-Q7*/       "","","","","","","","",
+       /*R0-R7*/       "","","","","","","","",
+       /*S0-S7*/       "","","","","","","","",
+       /*T0-T7*/       "","","","","","","","",
+       /*U0-U7*/       "","","","","","","","",
+       /*V0-V7*/       "","","","SCMFPGA_SPARE_GPIO1_3V3",
+                       "SCMFPGA_SPARE_GPIO2_3V3","SCMFPGA_SPARE_GPIO3_3V3",
+                       "SCMFPGA_SPARE_GPIO4_3V3","SCMFPGA_SPARE_GPIO5_3V3",
+       /*W0-W7*/       "","","","","","","","",
+       /*X0-X7*/       "","","","","","","","",
+       /*Y0-Y7*/       "","","","","","","","",
+       /*Z0-Z7*/       "","","","","","","","",
+       /*AA0-AA7*/     "","","","","","","","",
+       /*AB0-AB7*/     "","","","","","","","",
+       /*AC0-AC7*/     "","","","","","","","";
+};
+
+&gpio1 {
+       gpio-line-names =
+       /*A0-A7*/       "GPI_1_BMC_1V8","","","","","",
+                       "SCMFPGA_SPARE_GPIO1_1V8","SCMFPGA_SPARE_GPIO2_1V8",
+       /*B0-B7*/       "SCMFPGA_SPARE_GPIO3_1V8","SCMFPGA_SPARE_GPIO4_1V8",
+                       "SCMFPGA_SPARE_GPIO5_1V8","","","","","",
+       /*C0-C7*/       "","","","","","","","",
+       /*D0-D7*/       "","BMC_SPI1_RST_N","BIOS_FLASH_MUX_SEL","",
+                       "","TPM2_PIRQ_N","TPM2_RST_N","",
+       /*E0-E7*/       "","","","","","","","";
+};
+
+&i2c2 {
+       status = "okay";
+};
+
+&i2c4 {
+       status = "okay";
+};
+
+&i2c5 {
+       status = "okay";
+};
+
+&i2c6 {
+       status = "okay";
+};
+
+&i2c7 {
+       status = "okay";
+};
+
+&i2c8 {
+       status = "okay";
+};
+
+&i2c9 {
+       status = "okay";
+};
+
+&i2c10 {
+       status = "okay";
+};
+
+&i2c12 {
+       status = "okay";
+};
+
+&i2c13 {
+       status = "okay";
+};
+
+&i2c14 {
+       status = "okay";
+};
+
+&i2c15 {
+       status = "okay";
+};
+
+&vhub {
+       status = "okay";
+};
index fa8b581..530491a 100644 (file)
@@ -54,8 +54,7 @@
                ranges;
 
                fmc: spi@1e620000 {
-                       reg = < 0x1e620000 0x94
-                               0x20000000 0x10000000 >;
+                       reg = <0x1e620000 0x94>, <0x20000000 0x10000000>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "aspeed,ast2400-fmc";
                        flash@0 {
                                reg = < 0 >;
                                compatible = "jedec,spi-nor";
+                               spi-rx-bus-width = <2>;
                                spi-max-frequency = <50000000>;
                                status = "disabled";
                        };
                        flash@1 {
                                reg = < 1 >;
                                compatible = "jedec,spi-nor";
+                               spi-rx-bus-width = <2>;
+                               spi-max-frequency = <50000000>;
                                status = "disabled";
                        };
                        flash@2 {
                                reg = < 2 >;
                                compatible = "jedec,spi-nor";
+                               spi-rx-bus-width = <2>;
+                               spi-max-frequency = <50000000>;
                                status = "disabled";
                        };
                        flash@3 {
                                reg = < 3 >;
                                compatible = "jedec,spi-nor";
+                               spi-rx-bus-width = <2>;
+                               spi-max-frequency = <50000000>;
                                status = "disabled";
                        };
                        flash@4 {
                                reg = < 4 >;
                                compatible = "jedec,spi-nor";
+                               spi-rx-bus-width = <2>;
+                               spi-max-frequency = <50000000>;
                                status = "disabled";
                        };
                };
 
                spi: spi@1e630000 {
-                       reg = < 0x1e630000 0x18
-                               0x30000000 0x10000000 >;
+                       reg = <0x1e630000 0x18>, <0x30000000 0x10000000>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "aspeed,ast2400-spi";
                                reg = < 0 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                };
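The spi-rx-bus-width = <2> lines sprinkled through these flash nodes advertise dual-I/O reads to the SPI layer, which turns the property into a mode bit on the child device. A simplified sketch of that translation (modelled on, but not copied from, the core's device-tree parsing):

/* Simplified sketch of honouring spi-rx-bus-width from a child DT node. */
#include <linux/of.h>
#include <linux/spi/spi.h>

static int demo_parse_rx_width(struct spi_device *spi, struct device_node *nc)
{
	u32 value;

	if (of_property_read_u32(nc, "spi-rx-bus-width", &value))
		return 0;		/* property absent: single-bit I/O */

	switch (value) {
	case 1:
		break;			/* default, nothing to set */
	case 2:
		spi->mode |= SPI_RX_DUAL;
		break;
	case 4:
		spi->mode |= SPI_RX_QUAD;
		break;
	default:
		return -EINVAL;		/* unsupported bus width */
	}
	return 0;
}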
index 4147b39..c89092c 100644 (file)
@@ -55,8 +55,7 @@
                ranges;
 
                fmc: spi@1e620000 {
-                       reg = < 0x1e620000 0xc4
-                               0x20000000 0x10000000 >;
+                       reg = <0x1e620000 0xc4>, <0x20000000 0x10000000>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "aspeed,ast2500-fmc";
                                reg = < 0 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                        flash@1 {
                                reg = < 1 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                        flash@2 {
                                reg = < 2 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                };
 
                spi1: spi@1e630000 {
-                       reg = < 0x1e630000 0xc4
-                               0x30000000 0x08000000 >;
+                       reg = <0x1e630000 0xc4>, <0x30000000 0x08000000>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "aspeed,ast2500-spi";
                                reg = < 0 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                        flash@1 {
                                reg = < 1 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                };
 
                spi2: spi@1e631000 {
-                       reg = < 0x1e631000 0xc4
-                               0x38000000 0x08000000 >;
+                       reg = <0x1e631000 0xc4>, <0x38000000 0x08000000>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "aspeed,ast2500-spi";
                                reg = < 0 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                        flash@1 {
                                reg = < 1 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                };
index 3c10116..6660564 100644 (file)
@@ -95,8 +95,7 @@
                        };
 
                fmc: spi@1e620000 {
-                       reg = < 0x1e620000 0xc4
-                               0x20000000 0x10000000 >;
+                       reg = <0x1e620000 0xc4>, <0x20000000 0x10000000>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "aspeed,ast2600-fmc";
                                reg = < 0 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                        flash@1 {
                                reg = < 1 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                        flash@2 {
                                reg = < 2 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                };
 
                spi1: spi@1e630000 {
-                       reg = < 0x1e630000 0xc4
-                               0x30000000 0x10000000 >;
+                       reg = <0x1e630000 0xc4>, <0x30000000 0x10000000>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "aspeed,ast2600-spi";
                                reg = < 0 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                        flash@1 {
                                reg = < 1 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                };
 
                spi2: spi@1e631000 {
-                       reg = < 0x1e631000 0xc4
-                               0x50000000 0x10000000 >;
+                       reg = <0x1e631000 0xc4>, <0x50000000 0x10000000>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "aspeed,ast2600-spi";
                                reg = < 0 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                        flash@1 {
                                reg = < 1 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                        flash@2 {
                                reg = < 2 >;
                                compatible = "jedec,spi-nor";
                                spi-max-frequency = <50000000>;
+                               spi-rx-bus-width = <2>;
                                status = "disabled";
                        };
                };
                        status = "disabled";
                };
 
+               udc: usb@1e6a2000 {
+                       compatible = "aspeed,ast2600-udc";
+                       reg = <0x1e6a2000 0x300>;
+                       interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&syscon ASPEED_CLK_GATE_USBPORT2CLK>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&pinctrl_usb2bd_default>;
+                       status = "disabled";
+               };
+
                apb {
                        compatible = "simple-bus";
                        #address-cells = <1>;
                                quality = <100>;
                        };
 
+                       gfx: display@1e6e6000 {
+                               compatible = "aspeed,ast2600-gfx", "syscon";
+                               reg = <0x1e6e6000 0x1000>;
+                               reg-io-width = <4>;
+                               clocks = <&syscon ASPEED_CLK_GATE_D1CLK>;
+                               resets = <&syscon ASPEED_RESET_GRAPHICS>;
+                               syscon = <&syscon>;
+                               status = "disabled";
+                               interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+                       };
+
                        xdma: xdma@1e6e7000 {
                                compatible = "aspeed,ast2600-xdma";
                                reg = <0x1e6e7000 0x100>;
index d83f76a..1035446 100644 (file)
@@ -14,6 +14,7 @@
 #include <dt-bindings/mfd/atmel-flexcom.h>
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/pinctrl/at91.h>
+#include <dt-bindings/sound/microchip,pdmc.h>
 
 / {
        model = "Microchip SAMA7G5-EK";
                     &pinctrl_gmac1_mdio_default
                     &pinctrl_gmac1_phy_irq>;
        phy-mode = "rmii";
-       status = "okay";
+       status = "okay"; /* Conflict with pdmc0. */
 
        ethernet-phy@0 {
                reg = <0x0>;
        pinctrl-0 = <&pinctrl_i2s0_default>;
 };
 
+&pdmc0 {
+       #sound-dai-cells = <0>;
+       microchip,mic-pos = <MCHP_PDMC_DS0 MCHP_PDMC_CLK_NEGATIVE>, /* MIC 1 */
+                           <MCHP_PDMC_DS1 MCHP_PDMC_CLK_NEGATIVE>, /* MIC 2 */
+                           <MCHP_PDMC_DS0 MCHP_PDMC_CLK_POSITIVE>, /* MIC 3 */
+                           <MCHP_PDMC_DS1 MCHP_PDMC_CLK_POSITIVE>; /* MIC 4 */
+       status = "disabled"; /* Conflict with gmac1. */
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_pdmc0_default>;
+};
+
 &pioA {
 
        pinctrl_can0_default: can0_default {
                bias-disable;
        };
 
+       pinctrl_pdmc0_default: pdmc0_default {
+               pinmux = <PIN_PD23__PDMC0_DS0>,
+                        <PIN_PD24__PDMC0_DS1>,
+                        <PIN_PD22__PDMC0_CLK>;
+               bias-disable;
+       };
+
        pinctrl_qspi: qspi {
                pinmux = <PIN_PB12__QSPI0_IO0>,
                         <PIN_PB11__QSPI0_IO1>,
index 8f11c0b..6fb4fe4 100644 (file)
                                status = "okay";
                        };
 
+                       rtc@fffffd20 {
+                               atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+                       };
+
                        watchdog@fffffd40 {
                                status = "okay";
                        };
index 42e7340..e732565 100644 (file)
                                };
                        };
 
+                       rtc@fffffd20 {
+                               atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+                       };
+
                        watchdog@fffffd40 {
                                status = "okay";
                        };
index d74b8d9..ddaadfe 100644 (file)
                                status = "okay";
                        };
 
+                       rtc@fffffd20 {
+                               atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+                       };
+
                        rtc@fffffe00 {
                                status = "okay";
                        };
index c3942b4..0386376 100644 (file)
                                            "scheduler", "queuemgr";
                                interrupts = <58>;
                                #dma-cells = <2>;
+                               /* For backwards compatibility: */
                                #dma-channels = <4>;
+                               dma-channels = <4>;
                                power-domains = <&psc1 1>;
                                status = "okay";
                        };
diff --git a/arch/arm/boot/dts/hpe-bmc-dl360gen10.dts b/arch/arm/boot/dts/hpe-bmc-dl360gen10.dts
new file mode 100644 (file)
index 0000000..3a7382c
--- /dev/null
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree file for HPE DL360Gen10
+ */
+
+/include/ "hpe-gxp.dtsi"
+
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "hpe,gxp-dl360gen10", "hpe,gxp";
+       model = "Hewlett Packard Enterprise ProLiant dl360 Gen10";
+
+       aliases {
+               serial0 = &uartc;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+
+       memory@40000000 {
+               device_type = "memory";
+               reg = <0x40000000 0x20000000>;
+       };
+};
diff --git a/arch/arm/boot/dts/hpe-gxp.dtsi b/arch/arm/boot/dts/hpe-gxp.dtsi
new file mode 100644 (file)
index 0000000..cf735b3
--- /dev/null
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree file for HPE GXP
+ */
+
+/dts-v1/;
+/ {
+       model = "Hewlett Packard Enterprise GXP BMC";
+       compatible = "hpe,gxp";
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       compatible = "arm,cortex-a9";
+                       reg = <0>;
+                       device_type = "cpu";
+                       next-level-cache = <&L2>;
+               };
+       };
+
+       clocks {
+               pll: clock-0 {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <1600000000>;
+               };
+
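+               /* iopclk = pll / 4 = 400 MHz */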
+               iopclk: clock-1 {
+                       compatible = "fixed-factor-clock";
+                       #clock-cells = <0>;
+                       clock-div = <4>;
+                       clock-mult = <1>;
+                       clocks = <&pll>;
+               };
+       };
+
+       axi {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+               dma-ranges;
+
+               L2: cache-controller@b0040000 {
+                       compatible = "arm,pl310-cache";
+                       reg = <0xb0040000 0x1000>;
+                       cache-unified;
+                       cache-level = <2>;
+               };
+
+               ahb@c0000000 {
+                       compatible = "simple-bus";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       ranges = <0x0 0xc0000000 0x30000000>;
+                       dma-ranges;
+
+                       vic0: interrupt-controller@eff0000 {
+                               compatible = "arm,pl192-vic";
+                               reg = <0xeff0000 0x1000>;
+                               interrupt-controller;
+                               #interrupt-cells = <1>;
+                       };
+
+                       vic1: interrupt-controller@80f00000 {
+                               compatible = "arm,pl192-vic";
+                               reg = <0x80f00000 0x1000>;
+                               interrupt-controller;
+                               #interrupt-cells = <1>;
+                       };
+
+                       uarta: serial@e0 {
+                               compatible = "ns16550a";
+                               reg = <0xe0 0x8>;
+                               interrupts = <17>;
+                               interrupt-parent = <&vic0>;
+                               clock-frequency = <1846153>;
+                               reg-shift = <0>;
+                       };
+
+                       uartb: serial@e8 {
+                               compatible = "ns16550a";
+                               reg = <0xe8 0x8>;
+                               interrupts = <18>;
+                               interrupt-parent = <&vic0>;
+                               clock-frequency = <1846153>;
+                               reg-shift = <0>;
+                       };
+
+                       uartc: serial@f0 {
+                               compatible = "ns16550a";
+                               reg = <0xf0 0x8>;
+                               interrupts = <19>;
+                               interrupt-parent = <&vic0>;
+                               clock-frequency = <1846153>;
+                               reg-shift = <0>;
+                       };
+
+                       usb0: usb@efe0000 {
+                               compatible = "hpe,gxp-ehci", "generic-ehci";
+                               reg = <0xefe0000 0x100>;
+                               interrupts = <7>;
+                               interrupt-parent = <&vic0>;
+                       };
+
+                       st: timer@80 {
+                               compatible = "hpe,gxp-timer";
+                               reg = <0x80 0x16>;
+                               interrupts = <0>;
+                               interrupt-parent = <&vic0>;
+                               clocks = <&iopclk>;
+                               clock-names = "iop";
+                       };
+
+                       usb1: usb@efe0100 {
+                               compatible = "hpe,gxp-ohci", "generic-ohci";
+                               reg = <0xefe0100 0x110>;
+                               interrupts = <6>;
+                               interrupt-parent = <&vic0>;
+                       };
+               };
+       };
+};
index 46984d4..987d792 100644 (file)
                                compatible = "marvell,pdma-1.0";
                                reg = <0xd4000000 0x10000>;
                                interrupts = <48>;
+                               /* For backwards compatibility: */
                                #dma-channels = <16>;
+                               dma-channels = <16>;
                                status = "disabled";
                        };
 
index a248bf0..5f8300e 100644 (file)
                        compatible = "marvell,pdma-1.0";
                        reg = <0x40000000 0x10000>;
                        interrupts = <25>;
-                       #dma-channels = <16>;
                        #dma-cells = <2>;
+                       /* For backwards compatibility: */
+                       #dma-channels = <16>;
+                       dma-channels = <16>;
                        #dma-requests = <40>;
+                       dma-requests = <40>;
                        status = "okay";
                };
 
index ccbecad..a2cbfb3 100644 (file)
                        compatible = "marvell,pdma-1.0";
                        reg = <0x40000000 0x10000>;
                        interrupts = <25>;
-                       #dma-channels = <32>;
                        #dma-cells = <2>;
+                       /* For backwards compatibility: */
+                       #dma-channels = <32>;
+                       dma-channels = <32>;
                        #dma-requests = <75>;
+                       dma-requests = <75>;
                        status = "okay";
                };
 
index d196748..f9c216f 100644 (file)
                        compatible = "marvell,pdma-1.0";
                        reg = <0x40000000 0x10000>;
                        interrupts = <25>;
-                       #dma-channels = <32>;
                        #dma-cells = <2>;
+                       /* For backwards compatibility: */
+                       #dma-channels = <32>;
+                       dma-channels = <32>;
                        #dma-requests = <100>;
+                       dma-requests = <100>;
                        status = "okay";
                };
 
index 9d5e934..c5da723 100644 (file)
                        clocks = <&gcc GCC_USB3_MASTER_CLK>,
                                 <&gcc GCC_USB3_SLEEP_CLK>,
                                 <&gcc GCC_USB3_MOCK_UTMI_CLK>;
-                       clock-names = "master", "sleep", "mock_utmi";
+                       clock-names = "core", "sleep", "mock_utmi";
                        ranges;
                        status = "disabled";
 
index 1233907..1c2b208 100644 (file)
                        clocks = <&gcc GCC_USB30_SLV_AHB_CLK>,
                                 <&gcc GCC_USB30_MASTER_CLK>,
                                 <&gcc GCC_USB30_MSTR_AXI_CLK>,
-                                <&gcc GCC_USB30_MOCK_UTMI_CLK>,
-                                <&gcc GCC_USB30_SLEEP_CLK>;
-                       clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                                     "sleep";
+                                <&gcc GCC_USB30_SLEEP_CLK>,
+                                <&gcc GCC_USB30_MOCK_UTMI_CLK>;
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi";
 
                        assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_MASTER_CLK>;
index 242ce42..9b0f049 100644 (file)
        cru: clock-controller@20000000 {
                compatible = "rockchip,rk3036-cru";
                reg = <0x20000000 0x1000>;
+               clocks = <&xin24m>;
+               clock-names = "xin24m";
                rockchip,grf = <&grf>;
                #clock-cells = <1>;
                #reset-cells = <1>;
index c25b969..de9915d 100644 (file)
        cru: clock-controller@20000000 {
                compatible = "rockchip,rk3066a-cru";
                reg = <0x20000000 0x1000>;
+               clocks = <&xin24m>;
+               clock-names = "xin24m";
                rockchip,grf = <&grf>;
-
                #clock-cells = <1>;
                #reset-cells = <1>;
                assigned-clocks = <&cru PLL_CPLL>, <&cru PLL_GPLL>,
index a94321e..cdd4a0b 100644 (file)
        cru: clock-controller@20000000 {
                compatible = "rockchip,rk3188-cru";
                reg = <0x20000000 0x1000>;
+               clocks = <&xin24m>;
+               clock-names = "xin24m";
                rockchip,grf = <&grf>;
-
                #clock-cells = <1>;
                #reset-cells = <1>;
        };
index 6513ffc..ffc16d6 100644 (file)
        cru: clock-controller@110e0000 {
                compatible = "rockchip,rk3228-cru";
                reg = <0x110e0000 0x1000>;
+               clocks = <&xin24m>;
+               clock-names = "xin24m";
                rockchip,grf = <&grf>;
                #clock-cells = <1>;
                #reset-cells = <1>;
index 26b9bbe..487b0e0 100644 (file)
        cru: clock-controller@ff760000 {
                compatible = "rockchip,rk3288-cru";
                reg = <0x0 0xff760000 0x0 0x1000>;
+               clocks = <&xin24m>;
+               clock-names = "xin24m";
                rockchip,grf = <&grf>;
                #clock-cells = <1>;
                #reset-cells = <1>;
index 4482549..c158a7e 100644 (file)
                #clock-cells = <0>;
        };
 
-       amba: bus {
-               compatible = "simple-bus";
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
-
-               pdma: pdma@102a0000 {
-                       compatible = "arm,pl330", "arm,primecell";
-                       reg = <0x102a0000 0x4000>;
-                       interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
-                       #dma-cells = <1>;
-                       arm,pl330-broken-no-flushp;
-                       arm,pl330-periph-burst;
-                       clocks = <&cru ACLK_DMAC>;
-                       clock-names = "apb_pclk";
-               };
-       };
-
        bus_intmem: sram@10080000 {
                compatible = "mmio-sram";
                reg = <0x10080000 0x2000>;
                status = "disabled";
        };
 
+       pdma: dma-controller@102a0000 {
+               compatible = "arm,pl330", "arm,primecell";
+               reg = <0x102a0000 0x4000>;
+               interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+               #dma-cells = <1>;
+               arm,pl330-broken-no-flushp;
+               arm,pl330-periph-burst;
+               clocks = <&cru ACLK_DMAC>;
+               clock-names = "apb_pclk";
+       };
+
        grf: syscon@10300000 {
                compatible = "rockchip,rv1108-grf", "syscon", "simple-mfd";
                reg = <0x10300000 0x1000>;
        cru: clock-controller@20200000 {
                compatible = "rockchip,rv1108-cru";
                reg = <0x20200000 0x1000>;
+               clocks = <&xin24m>;
+               clock-names = "xin24m";
                rockchip,grf = <&grf>;
                #clock-cells = <1>;
                #reset-cells = <1>;
index 998629a..c328b67 100644 (file)
                                status = "disabled";
                        };
 
-                       rtt: rtt@fffffe20 {
+                       rtt: rtc@fffffe20 {
                                compatible = "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
                                reg = <0xfffffe20 0x20>;
                                interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
index b632631..a37e3a8 100644 (file)
                        status = "disabled";
                };
 
-               rtt: rtt@e001d020 {
+               rtt: rtc@e001d020 {
                        compatible = "microchip,sama7g5-rtt", "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
                        reg = <0xe001d020 0x30>;
                        interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
 
+               pdmc0: sound@e1608000 {
+                       compatible = "microchip,sama7g5-pdmc";
+                       reg = <0xe1608000 0x1000>;
+                       interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+                       #sound-dai-cells = <0>;
+                       dmas = <&dma0 AT91_XDMAC_DT_PERID(37)>;
+                       dma-names = "rx";
+                       clocks = <&pmc PMC_TYPE_PERIPHERAL 68>, <&pmc PMC_TYPE_GCK 68>;
+                       clock-names = "pclk", "gclk";
+                       status = "disabled";
+               };
+
+               pdmc1: sound@e160c000 {
+                       compatible = "microchip,sama7g5-pdmc";
+                       reg = <0xe160c000 0x1000>;
+                       interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+                       #sound-dai-cells = <0>;
+                       dmas = <&dma0 AT91_XDMAC_DT_PERID(38)>;
+                       dma-names = "rx";
+                       clocks = <&pmc PMC_TYPE_PERIPHERAL 69>, <&pmc PMC_TYPE_GCK 69>;
+                       clock-names = "pclk", "gclk";
+                       status = "disabled";
+               };
+
                spdifrx: spdifrx@e1614000 {
                        #sound-dai-cells = <0>;
                        compatible = "microchip,sama7g5-spdifrx";
index 2a74552..11ccdc6 100644 (file)
@@ -9,7 +9,7 @@
 &qspi {
        status = "okay";
 
-       flash0: n25q00@0 {
+       flash0: flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "micron,mt25qu02g", "jedec,spi-nor";
index 253ef13..b224120 100644 (file)
 &qspi {
        status = "okay";
 
-       flash0: n25q00@0 {
+       flash0: flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "micron,mt25qu02g", "jedec,spi-nor";
index b0003f3..2564671 100644 (file)
 &qspi {
        status = "okay";
 
-       flash0: n25q512a@0 {
+       flash0: flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "micron,n25q512a", "jedec,spi-nor";
index 25874e1..f24f17c 100644 (file)
 &qspi {
        status = "okay";
 
-       n25q128@0 {
+       flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "micron,n25q128", "jedec,spi-nor";
                cdns,tslch-ns = <4>;
        };
 
-       n25q00@1 {
+       flash@1 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "micron,mt25qu02g", "jedec,spi-nor";
index 24d21ba..da30a4d 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/spinlock.h>
 #include <linux/io.h>
 
-#include <mach/hardware.h>
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
 
index 5367f03..2343e2b 100644 (file)
 #include <linux/clk.h>
 #include <linux/io.h>
 
-#include <mach/hardware.h>
 #include <asm/mach/irq.h>
 #include <asm/mach-types.h>
 #include <linux/sizes.h>
 
 #include <asm/hardware/sa1111.h>
 
+#ifdef CONFIG_ARCH_SA1100
+#include <mach/hardware.h>
+#endif
+
 /* SA1111 IRQs */
 #define IRQ_GPAIN0             (0)
 #define IRQ_GPAIN1             (1)
index 4e49d6c..9252ce0 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_ARCH_GUMSTIX=y
 CONFIG_PCCARD=y
index 45769d0..bb0fcd8 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_GPIO_PCA953X=y
 CONFIG_MACH_CM_X300=y
index 52bad9a..b29898f 100644 (file)
@@ -16,6 +16,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_MACH_COLIBRI=y
 CONFIG_PREEMPT=y
index 26e5a67..f9d1102 100644 (file)
@@ -1,6 +1,7 @@
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_MACH_COLIBRI300=y
 CONFIG_AEABI=y
index 15b749f..96c677c 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_PXA_SHARPSL=y
 CONFIG_MACH_POODLE=y
index 046f4dc..2146adc 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_ARCH_PXA_ESERIES=y
 # CONFIG_ARM_THUMB is not set
index 0788a89..5d000c8 100644 (file)
@@ -14,6 +14,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_PXA_EZX=y
 CONFIG_NO_HZ=y
index f5a338f..a67d602 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_MACH_H5000=y
 CONFIG_AEABI=y
index 3a4d0e6..5c0a671 100644 (file)
@@ -2,6 +2,7 @@ CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SLAB=y
 CONFIG_MODULES=y
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_MACH_LOGICPD_PXA270=y
 # CONFIG_ARM_THUMB is not set
index 4ce2da2..cf49dc1 100644 (file)
@@ -1,6 +1,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MODULES=y
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_ARCH_LUBBOCK=y
 # CONFIG_ARM_THUMB is not set
index abde1fb..13da808 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_MACH_H4700=y
 CONFIG_MACH_MAGICIAN=y
index 26499b6..03b4c61 100644 (file)
@@ -1,6 +1,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MODULES=y
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_MACH_MAINSTONE=y
 # CONFIG_ARM_THUMB is not set
index 5479181..ce9826b 100644 (file)
@@ -17,6 +17,7 @@ CONFIG_SOC_SAMA5D2=y
 CONFIG_SOC_SAMA5D3=y
 CONFIG_SOC_SAMA5D4=y
 CONFIG_SOC_SAMA7G5=y
+CONFIG_SOC_LAN966=y
 CONFIG_ARCH_BCM=y
 CONFIG_ARCH_BCM_CYGNUS=y
 CONFIG_ARCH_BCM_HR2=y
@@ -43,6 +44,8 @@ CONFIG_ARCH_HI3xxx=y
 CONFIG_ARCH_HIP01=y
 CONFIG_ARCH_HIP04=y
 CONFIG_ARCH_HIX5HD2=y
+CONFIG_ARCH_HPE=y
+CONFIG_ARCH_HPE_GXP=y
 CONFIG_ARCH_MXC=y
 CONFIG_SOC_IMX50=y
 CONFIG_SOC_IMX51=y
@@ -277,6 +280,7 @@ CONFIG_MV643XX_ETH=y
 CONFIG_MVNETA=y
 CONFIG_PXA168_ETH=m
 CONFIG_KS8851=y
+CONFIG_LAN966X_SWITCH=m
 CONFIG_R8169=y
 CONFIG_SH_ETH=y
 CONFIG_SMSC911X=y
@@ -287,6 +291,7 @@ CONFIG_TI_CPSW=y
 CONFIG_TI_CPSW_SWITCHDEV=y
 CONFIG_TI_CPTS=y
 CONFIG_XILINX_EMACLITE=y
+CONFIG_SFP=m
 CONFIG_BROADCOM_PHY=y
 CONFIG_ICPLUS_PHY=y
 CONFIG_MARVELL_PHY=y
@@ -294,6 +299,7 @@ CONFIG_AT803X_PHY=y
 CONFIG_ROCKCHIP_PHY=y
 CONFIG_DP83867_PHY=y
 CONFIG_USB_BRCMSTB=m
+CONFIG_MDIO_MSCC_MIIM=m
 CONFIG_USB_PEGASUS=y
 CONFIG_USB_RTL8152=m
 CONFIG_USB_LAN78XX=m
@@ -430,6 +436,7 @@ CONFIG_I2C_CROS_EC_TUNNEL=m
 CONFIG_I2C_SLAVE_EEPROM=y
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=m
+CONFIG_SPI_ATMEL_QUADSPI=m
 CONFIG_SPI_BCM2835=y
 CONFIG_SPI_BCM2835AUX=y
 CONFIG_SPI_CADENCE=y
@@ -459,6 +466,8 @@ CONFIG_SPMI=y
 CONFIG_PTP_1588_CLOCK=y
 CONFIG_PINCTRL_AS3722=y
 CONFIG_PINCTRL_STMFX=y
+CONFIG_PINCTRL_MICROCHIP_SGPIO=y
+CONFIG_PINCTRL_OCELOT=y
 CONFIG_PINCTRL_PALMAS=y
 CONFIG_PINCTRL_OWL=y
 CONFIG_PINCTRL_S500=y
@@ -517,6 +526,7 @@ CONFIG_CHARGER_TPS65090=y
 CONFIG_SENSORS_ARM_SCMI=y
 CONFIG_SENSORS_ASPEED=m
 CONFIG_SENSORS_IIO_HWMON=y
+CONFIG_SENSORS_LAN966X=m
 CONFIG_SENSORS_LM90=y
 CONFIG_SENSORS_LM95245=y
 CONFIG_SENSORS_NTC_THERMISTOR=m
@@ -563,6 +573,7 @@ CONFIG_BCM47XX_WDT=y
 CONFIG_BCM2835_WDT=y
 CONFIG_BCM_KONA_WDT=y
 CONFIG_BCM7038_WDT=m
+CONFIG_GXP_WATCHDOG=y
 CONFIG_BCMA_HOST_SOC=y
 CONFIG_BCMA_DRIVER_GMAC_CMN=y
 CONFIG_BCMA_DRIVER_GPIO=y
@@ -767,6 +778,8 @@ CONFIG_SND_ATMEL_SOC_WM8904=m
 CONFIG_SND_ATMEL_SOC_PDMIC=m
 CONFIG_SND_ATMEL_SOC_I2S=m
 CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_IMX_SOC=m
+CONFIG_SND_SOC_FSL_ASOC_CARD=m
 CONFIG_SND_SOC_FSL_SAI=m
 CONFIG_SND_PXA_SOC_SSP=m
 CONFIG_SND_MMP_SOC_SSPA=m
@@ -1018,6 +1031,7 @@ CONFIG_CROS_EC_SPI=m
 CONFIG_COMMON_CLK_MAX77686=y
 CONFIG_COMMON_CLK_RK808=m
 CONFIG_COMMON_CLK_SCMI=y
+CONFIG_COMMON_CLK_LAN966X=y
 CONFIG_COMMON_CLK_S2MPS11=m
 CONFIG_CLK_RASPBERRYPI=y
 CONFIG_COMMON_CLK_QCOM=y
@@ -1145,6 +1159,7 @@ CONFIG_PWM_SUN4I=y
 CONFIG_PWM_TEGRA=y
 CONFIG_PWM_VT8500=y
 CONFIG_KEYSTONE_IRQ=y
+CONFIG_RESET_MCHP_SPARX5=y
 CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_SUN9I_USB=y
 CONFIG_PHY_HIX5HD2_SATA=y
@@ -1152,6 +1167,7 @@ CONFIG_PHY_BERLIN_SATA=y
 CONFIG_PHY_BERLIN_USB=y
 CONFIG_PHY_BRCM_USB=m
 CONFIG_PHY_MMP3_USB=m
+CONFIG_PHY_LAN966X_SERDES=m
 CONFIG_PHY_CPCAP_USB=m
 CONFIG_PHY_QCOM_APQ8064_SATA=m
 CONFIG_PHY_QCOM_USB_HS=y
index 3148567..14c17a2 100644 (file)
@@ -17,6 +17,9 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_ARCH_MULTI_V4T=y
+CONFIG_ARCH_MULTI_V5=y
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_OMAP=y
 CONFIG_ARCH_OMAP1=y
 CONFIG_OMAP_RESET_CLOCKS=y
index b47c8ab..e6acb1d 100644 (file)
@@ -7,6 +7,7 @@ CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_ARCH_PXA_PALM=y
 # CONFIG_MACH_PALMTX is not set
index e97a158..106d5be 100644 (file)
@@ -13,6 +13,7 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_MACH_PCM027=y
 CONFIG_MACH_PCM990_BASEBOARD=y
index 4a383af..5663245 100644 (file)
@@ -1,6 +1,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MODULES=y
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_ARCH_PXA_IDP=y
 # CONFIG_ARM_THUMB is not set
index f0c3401..228d427 100644 (file)
@@ -6,6 +6,7 @@ CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_MACH_LITTLETON=y
 CONFIG_MACH_TAVOREVB=y
index 29b1f19..1db70df 100644 (file)
@@ -23,6 +23,7 @@ CONFIG_MODULE_SRCVERSION_ALL=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_LDM_PARTITION=y
 CONFIG_CMDLINE_PARTITION=y
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_ARCH_LUBBOCK=y
 CONFIG_MACH_MAINSTONE=y
index f42c7a5..43d079e 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_PXA_SHARPSL=y
 CONFIG_MACH_AKITA=y
index d66f0c2..baeba46 100644 (file)
@@ -14,6 +14,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_TRIZEPS_PXA=y
 CONFIG_MACH_TRIZEPS4=y
index c28539b..7c10297 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_ARCH_VIPER=y
 CONFIG_IWMMXT=y
index 4d8e7f2..3752672 100644 (file)
@@ -19,6 +19,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_BLOCK is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_MACH_XCEP=y
 CONFIG_IWMMXT=y
index 25bb699..03a12fb 100644 (file)
@@ -4,6 +4,7 @@ CONFIG_LOG_BUF_SHIFT=13
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_PXA=y
 CONFIG_MACH_ARCOM_ZEUS=y
 CONFIG_PCCARD=m
index 2e70db6..d8c6f8a 100644 (file)
@@ -13,8 +13,6 @@
 #ifndef _ASM_ARCH_SA1111
 #define _ASM_ARCH_SA1111
 
-#include <mach/bitfield.h>
-
 /*
  * Don't ask the (SAC) DMA engines to move less than this amount.
  */
index 2a0739a..eba7cbc 100644 (file)
@@ -174,7 +174,7 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
 #define PCI_IO_VIRT_BASE       0xfee00000
 #define PCI_IOBASE             ((void __iomem *)PCI_IO_VIRT_BASE)
 
-#if defined(CONFIG_PCI)
+#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
 void pci_ioremap_set_mem_type(int mem_type);
 #else
 static inline void pci_ioremap_set_mem_type(int mem_type) {}
@@ -200,32 +200,13 @@ void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size);
  */
 #ifdef CONFIG_NEED_MACH_IO_H
 #include <mach/io.h>
-#elif defined(CONFIG_PCI)
-#define IO_SPACE_LIMIT ((resource_size_t)0xfffff)
-#define __io(a)                __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
 #else
-#define __io(a)                __typesafe_io((a) & IO_SPACE_LIMIT)
-#endif
-
-/*
- * This is the limit of PC card/PCI/ISA IO space, which is by default
- * 64K if we have PC card, PCI or ISA support.  Otherwise, default to
- * zero to prevent ISA/PCI drivers claiming IO space (and potentially
- * oopsing.)
- *
- * Only set this larger if you really need inb() et.al. to operate over
- * a larger address space.  Note that SOC_COMMON ioremaps each sockets
- * IO space area, and so inb() et.al. must be defined to operate as per
- * readb() et.al. on such platforms.
- */
-#ifndef IO_SPACE_LIMIT
-#if defined(CONFIG_PCMCIA_SOC_COMMON) || defined(CONFIG_PCMCIA_SOC_COMMON_MODULE)
-#define IO_SPACE_LIMIT ((resource_size_t)0xffffffff)
-#elif defined(CONFIG_PCI) || defined(CONFIG_ISA) || defined(CONFIG_PCCARD)
-#define IO_SPACE_LIMIT ((resource_size_t)0xffff)
+#if IS_ENABLED(CONFIG_PCMCIA) || defined(CONFIG_PCI)
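+/* 1 MiB of PCI/PCMCIA I/O space, mapped at PCI_IO_VIRT_BASE by __io() below */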
+#define IO_SPACE_LIMIT ((resource_size_t)0xfffff)
 #else
 #define IO_SPACE_LIMIT ((resource_size_t)0)
 #endif
+#define __io(a)                __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
 #endif
 
 /*
index 0617af1..3d9cace 100644 (file)
@@ -238,9 +238,11 @@ void release_thread(struct task_struct *dead_task)
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
-int copy_thread(unsigned long clone_flags, unsigned long stack_start,
-               unsigned long stk_sz, struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long stack_start = args->stack;
+       unsigned long tls = args->tls;
        struct thread_info *thread = task_thread_info(p);
        struct pt_regs *childregs = task_pt_regs(p);
 
@@ -256,15 +258,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
        thread->cpu_domain = get_domain();
 #endif
 
-       if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
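+       /* args->fn is only set for kernel threads and io workers */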
+       if (likely(!args->fn)) {
                *childregs = *current_pt_regs();
                childregs->ARM_r0 = 0;
                if (stack_start)
                        childregs->ARM_sp = stack_start;
        } else {
                memset(childregs, 0, sizeof(struct pt_regs));
-               thread->cpu_context.r4 = stk_sz;
-               thread->cpu_context.r5 = stack_start;
+               thread->cpu_context.r4 = (unsigned long)args->fn_arg;
+               thread->cpu_context.r5 = (unsigned long)args->fn;
                childregs->ARM_cpsr = SVC_MODE;
        }
        thread->cpu_context.pc = (unsigned long)ret_from_fork;
index 3044fcb..2cb9434 100644 (file)
@@ -116,9 +116,7 @@ void machine_power_off(void)
 {
        local_irq_disable();
        smp_send_stop();
-
-       if (pm_power_off)
-               pm_power_off();
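+       /* runs the registered sys-off handlers, including the legacy pm_power_off() hook */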
+       do_kernel_power_off();
 }
 
 /*
index 3d7e669..3dd9e71 100644 (file)
@@ -219,7 +219,7 @@ config SOC_SAMA5
        select SRAM if PM
 
 config ATMEL_PM
-       bool "Atmel PM support"
+       bool
 
 config ATMEL_SECURE_PM
        bool "Atmel Secure PM support"
index 4fa6ea5..85a496d 100644 (file)
@@ -345,9 +345,10 @@ static struct clk_hw *clk_hw_register_ddiv(const char *name,
        psc->hw.init = &init;
 
        clk = clk_register(NULL, &psc->hw);
-       if (IS_ERR(clk))
+       if (IS_ERR(clk)) {
                kfree(psc);
-
+               return ERR_CAST(clk);
+       }
        return &psc->hw;
 }
 
@@ -452,9 +453,10 @@ static struct clk_hw *clk_hw_register_div(const char *name,
        psc->hw.init = &init;
 
        clk = clk_register(NULL, &psc->hw);
-       if (IS_ERR(clk))
+       if (IS_ERR(clk)) {
                kfree(psc);
-
+               return ERR_CAST(clk);
+       }
        return &psc->hw;
 }
 
index e70bac0..d3de728 100644 (file)
@@ -150,7 +150,7 @@ static struct platform_device ts72xx_nand_flash = {
        .num_resources          = ARRAY_SIZE(ts72xx_nand_resource),
 };
 
-void __init ts72xx_register_flash(struct mtd_partition *parts, int n,
+static void __init ts72xx_register_flash(struct mtd_partition *parts, int n,
                                  resource_size_t start)
 {
        /*
diff --git a/arch/arm/mach-hpe/Kconfig b/arch/arm/mach-hpe/Kconfig
new file mode 100644 (file)
index 0000000..3372bbf
--- /dev/null
@@ -0,0 +1,23 @@
+menuconfig ARCH_HPE
+       bool "HPE SoC support"
+       depends on ARCH_MULTI_V7
+       help
+         This enables support for HPE ARM-based BMC chips.
+
+if ARCH_HPE
+
+config ARCH_HPE_GXP
+       bool "HPE GXP SoC"
+       depends on ARCH_MULTI_V7
+       select ARM_VIC
+       select GENERIC_IRQ_CHIP
+       select CLKSRC_MMIO
+       help
+         HPE GXP is the name of the HPE SoC used to implement many BMC
+         features at HPE. It is based on the ARMv7 architecture with a
+         Cortex-A9 core and attaches its memory controller over an AXI bus.
+         It has multiple SPI interfaces to connect boot flash and BIOS
+         flash, uses a 10/100/1000 MAC for network connectivity, and
+         provides multiple I2C engines for connectivity with the host
+         infrastructure.
+
+endif
diff --git a/arch/arm/mach-hpe/Makefile b/arch/arm/mach-hpe/Makefile
new file mode 100644 (file)
index 0000000..8b0a912
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_ARCH_HPE_GXP) += gxp.o
diff --git a/arch/arm/mach-hpe/gxp.c b/arch/arm/mach-hpe/gxp.c
new file mode 100644 (file)
index 0000000..ef33413
--- /dev/null
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */
+
+#include <linux/of_platform.h>
+#include <asm/mach/arch.h>
+
+static const char * const gxp_board_dt_compat[] = {
+       "hpe,gxp",
+       NULL,
+};
+
+DT_MACHINE_START(GXP_DT, "HPE GXP")
+       .dt_compat      = gxp_board_dt_compat,
+       .l2c_aux_val = 0,
+       .l2c_aux_mask = ~0,
+MACHINE_END
index 9642e66..333229c 100644 (file)
@@ -39,16 +39,8 @@ config MACH_AVENGERS_LITE
          Say 'Y' here if you want to support the Marvell PXA168-based
          Avengers Lite Development Board.
 
-config MACH_TAVOREVB
-       bool "Marvell's PXA910 TavorEVB Development Board"
-       depends on ARCH_MULTI_V5
-       select CPU_PXA910
-       help
-         Say 'Y' here if you want to support the Marvell PXA910-based
-         TavorEVB Development Board.
-
 config MACH_TTC_DKB
-       bool "Marvell's PXA910 TavorEVB Development Board"
+       bool "Marvell's PXA910 TavorEVB/TTC_DKB Development Board"
        depends on ARCH_MULTI_V5
        select CPU_PXA910
        help
index e3758f7..539d750 100644 (file)
@@ -2,8 +2,6 @@
 #
 # Makefile for Marvell's PXA168 processors line
 #
-ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-pxa/include
-
 obj-y                          += common.o devices.o time.o
 
 # SoC support
@@ -24,7 +22,6 @@ endif
 obj-$(CONFIG_MACH_ASPENITE)    += aspenite.o
 obj-$(CONFIG_MACH_ZYLONITE2)   += aspenite.o
 obj-$(CONFIG_MACH_AVENGERS_LITE)+= avengers_lite.o
-obj-$(CONFIG_MACH_TAVOREVB)    += tavorevb.o
 obj-$(CONFIG_MACH_TTC_DKB)     += ttc_dkb.o
 obj-$(CONFIG_MACH_BROWNSTONE)  += brownstone.o
 obj-$(CONFIG_MACH_FLINT)       += flint.o
index 18bee66..79f4a2a 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/soc/mmp/cputype.h>
 #include "regs-usb.h"
 
-int __init pxa_register_device(struct pxa_device_desc *desc,
+int __init mmp_register_device(struct mmp_device_desc *desc,
                                void *data, size_t size)
 {
        struct platform_device *pdev;
index 4df596c..d4920eb 100644 (file)
@@ -7,7 +7,7 @@
 #define MAX_RESOURCE_DMA       2
 
 /* structure for describing the on-chip devices */
-struct pxa_device_desc {
+struct mmp_device_desc {
        const char      *dev_name;
        const char      *drv_name;
        int             id;
@@ -18,7 +18,7 @@ struct pxa_device_desc {
 };
 
 #define PXA168_DEVICE(_name, _drv, _id, _irq, _start, _size, _dma...)  \
-struct pxa_device_desc pxa168_device_##_name __initdata = {            \
+struct mmp_device_desc pxa168_device_##_name __initdata = {            \
        .dev_name       = "pxa168-" #_name,                             \
        .drv_name       = _drv,                                         \
        .id             = _id,                                          \
@@ -29,7 +29,7 @@ struct pxa_device_desc pxa168_device_##_name __initdata = {           \
 };
 
 #define PXA910_DEVICE(_name, _drv, _id, _irq, _start, _size, _dma...)  \
-struct pxa_device_desc pxa910_device_##_name __initdata = {            \
+struct mmp_device_desc pxa910_device_##_name __initdata = {            \
        .dev_name       = "pxa910-" #_name,                             \
        .drv_name       = _drv,                                         \
        .id             = _id,                                          \
@@ -40,7 +40,7 @@ struct pxa_device_desc pxa910_device_##_name __initdata = {           \
 };
 
 #define MMP2_DEVICE(_name, _drv, _id, _irq, _start, _size, _dma...)    \
-struct pxa_device_desc mmp2_device_##_name __initdata = {              \
+struct mmp_device_desc mmp2_device_##_name __initdata = {              \
        .dev_name       = "mmp2-" #_name,                               \
        .drv_name       = _drv,                                         \
        .id             = _id,                                          \
@@ -50,7 +50,7 @@ struct pxa_device_desc mmp2_device_##_name __initdata = {             \
        .dma            = { _dma },                                     \
 }
 
-extern int pxa_register_device(struct pxa_device_desc *, void *, size_t);
+extern int mmp_register_device(struct mmp_device_desc *, void *, size_t);
 extern int pxa_usb_phy_init(void __iomem *phy_reg);
 extern void pxa_usb_phy_deinit(void __iomem *phy_reg);
 
index 75a4acb..6f30579 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef __ASM_MACH_MFP_H
 #define __ASM_MACH_MFP_H
 
-#include <plat/mfp.h>
+#include <linux/soc/pxa/mfp.h>
 
 /*
  * NOTE: the MFPR register bit definitions on PXA168 processor lines are a
index adafc4f..3ebc1bb 100644 (file)
@@ -15,28 +15,28 @@ extern void mmp2_clear_pmic_int(void);
 
 #include "devices.h"
 
-extern struct pxa_device_desc mmp2_device_uart1;
-extern struct pxa_device_desc mmp2_device_uart2;
-extern struct pxa_device_desc mmp2_device_uart3;
-extern struct pxa_device_desc mmp2_device_uart4;
-extern struct pxa_device_desc mmp2_device_twsi1;
-extern struct pxa_device_desc mmp2_device_twsi2;
-extern struct pxa_device_desc mmp2_device_twsi3;
-extern struct pxa_device_desc mmp2_device_twsi4;
-extern struct pxa_device_desc mmp2_device_twsi5;
-extern struct pxa_device_desc mmp2_device_twsi6;
-extern struct pxa_device_desc mmp2_device_sdh0;
-extern struct pxa_device_desc mmp2_device_sdh1;
-extern struct pxa_device_desc mmp2_device_sdh2;
-extern struct pxa_device_desc mmp2_device_sdh3;
-extern struct pxa_device_desc mmp2_device_asram;
-extern struct pxa_device_desc mmp2_device_isram;
+extern struct mmp_device_desc mmp2_device_uart1;
+extern struct mmp_device_desc mmp2_device_uart2;
+extern struct mmp_device_desc mmp2_device_uart3;
+extern struct mmp_device_desc mmp2_device_uart4;
+extern struct mmp_device_desc mmp2_device_twsi1;
+extern struct mmp_device_desc mmp2_device_twsi2;
+extern struct mmp_device_desc mmp2_device_twsi3;
+extern struct mmp_device_desc mmp2_device_twsi4;
+extern struct mmp_device_desc mmp2_device_twsi5;
+extern struct mmp_device_desc mmp2_device_twsi6;
+extern struct mmp_device_desc mmp2_device_sdh0;
+extern struct mmp_device_desc mmp2_device_sdh1;
+extern struct mmp_device_desc mmp2_device_sdh2;
+extern struct mmp_device_desc mmp2_device_sdh3;
+extern struct mmp_device_desc mmp2_device_asram;
+extern struct mmp_device_desc mmp2_device_isram;
 
 extern struct platform_device mmp2_device_gpio;
 
 static inline int mmp2_add_uart(int id)
 {
-       struct pxa_device_desc *d = NULL;
+       struct mmp_device_desc *d = NULL;
 
        switch (id) {
        case 1: d = &mmp2_device_uart1; break;
@@ -47,13 +47,13 @@ static inline int mmp2_add_uart(int id)
                return -EINVAL;
        }
 
-       return pxa_register_device(d, NULL, 0);
+       return mmp_register_device(d, NULL, 0);
 }
 
 static inline int mmp2_add_twsi(int id, struct i2c_pxa_platform_data *data,
                                  struct i2c_board_info *info, unsigned size)
 {
-       struct pxa_device_desc *d = NULL;
+       struct mmp_device_desc *d = NULL;
        int ret;
 
        switch (id) {
@@ -71,12 +71,12 @@ static inline int mmp2_add_twsi(int id, struct i2c_pxa_platform_data *data,
        if (ret)
                return ret;
 
-       return pxa_register_device(d, data, sizeof(*data));
+       return mmp_register_device(d, data, sizeof(*data));
 }
 
 static inline int mmp2_add_sdhost(int id, struct sdhci_pxa_platdata *data)
 {
-       struct pxa_device_desc *d = NULL;
+       struct mmp_device_desc *d = NULL;
 
        switch (id) {
        case 0: d = &mmp2_device_sdh0; break;
@@ -87,17 +87,17 @@ static inline int mmp2_add_sdhost(int id, struct sdhci_pxa_platdata *data)
                return -EINVAL;
        }
 
-       return pxa_register_device(d, data, sizeof(*data));
+       return mmp_register_device(d, data, sizeof(*data));
 }
 
 static inline int mmp2_add_asram(struct sram_platdata *data)
 {
-       return pxa_register_device(&mmp2_device_asram, data, sizeof(*data));
+       return mmp_register_device(&mmp2_device_asram, data, sizeof(*data));
 }
 
 static inline int mmp2_add_isram(struct sram_platdata *data)
 {
-       return pxa_register_device(&mmp2_device_isram, data, sizeof(*data));
+       return mmp_register_device(&mmp2_device_isram, data, sizeof(*data));
 }
 
 #endif /* __ASM_MACH_MMP2_H */
index dff651b..34f907c 100644 (file)
@@ -21,24 +21,24 @@ extern void pxa168_clear_keypad_wakeup(void);
 
 #include "devices.h"
 
-extern struct pxa_device_desc pxa168_device_uart1;
-extern struct pxa_device_desc pxa168_device_uart2;
-extern struct pxa_device_desc pxa168_device_uart3;
-extern struct pxa_device_desc pxa168_device_twsi0;
-extern struct pxa_device_desc pxa168_device_twsi1;
-extern struct pxa_device_desc pxa168_device_pwm1;
-extern struct pxa_device_desc pxa168_device_pwm2;
-extern struct pxa_device_desc pxa168_device_pwm3;
-extern struct pxa_device_desc pxa168_device_pwm4;
-extern struct pxa_device_desc pxa168_device_ssp1;
-extern struct pxa_device_desc pxa168_device_ssp2;
-extern struct pxa_device_desc pxa168_device_ssp3;
-extern struct pxa_device_desc pxa168_device_ssp4;
-extern struct pxa_device_desc pxa168_device_ssp5;
-extern struct pxa_device_desc pxa168_device_nand;
-extern struct pxa_device_desc pxa168_device_fb;
-extern struct pxa_device_desc pxa168_device_keypad;
-extern struct pxa_device_desc pxa168_device_eth;
+extern struct mmp_device_desc pxa168_device_uart1;
+extern struct mmp_device_desc pxa168_device_uart2;
+extern struct mmp_device_desc pxa168_device_uart3;
+extern struct mmp_device_desc pxa168_device_twsi0;
+extern struct mmp_device_desc pxa168_device_twsi1;
+extern struct mmp_device_desc pxa168_device_pwm1;
+extern struct mmp_device_desc pxa168_device_pwm2;
+extern struct mmp_device_desc pxa168_device_pwm3;
+extern struct mmp_device_desc pxa168_device_pwm4;
+extern struct mmp_device_desc pxa168_device_ssp1;
+extern struct mmp_device_desc pxa168_device_ssp2;
+extern struct mmp_device_desc pxa168_device_ssp3;
+extern struct mmp_device_desc pxa168_device_ssp4;
+extern struct mmp_device_desc pxa168_device_ssp5;
+extern struct mmp_device_desc pxa168_device_nand;
+extern struct mmp_device_desc pxa168_device_fb;
+extern struct mmp_device_desc pxa168_device_keypad;
+extern struct mmp_device_desc pxa168_device_eth;
 
 /* pdata can be NULL */
 extern int __init pxa168_add_usb_host(struct mv_usb_platform_data *pdata);
@@ -48,7 +48,7 @@ extern struct platform_device pxa168_device_gpio;
 
 static inline int pxa168_add_uart(int id)
 {
-       struct pxa_device_desc *d = NULL;
+       struct mmp_device_desc *d = NULL;
 
        switch (id) {
        case 1: d = &pxa168_device_uart1; break;
@@ -59,13 +59,13 @@ static inline int pxa168_add_uart(int id)
        if (d == NULL)
                return -EINVAL;
 
-       return pxa_register_device(d, NULL, 0);
+       return mmp_register_device(d, NULL, 0);
 }
 
 static inline int pxa168_add_twsi(int id, struct i2c_pxa_platform_data *data,
                                  struct i2c_board_info *info, unsigned size)
 {
-       struct pxa_device_desc *d = NULL;
+       struct mmp_device_desc *d = NULL;
        int ret;
 
        switch (id) {
@@ -79,12 +79,12 @@ static inline int pxa168_add_twsi(int id, struct i2c_pxa_platform_data *data,
        if (ret)
                return ret;
 
-       return pxa_register_device(d, data, sizeof(*data));
+       return mmp_register_device(d, data, sizeof(*data));
 }
 
 static inline int pxa168_add_pwm(int id)
 {
-       struct pxa_device_desc *d = NULL;
+       struct mmp_device_desc *d = NULL;
 
        switch (id) {
        case 1: d = &pxa168_device_pwm1; break;
@@ -95,12 +95,12 @@ static inline int pxa168_add_pwm(int id)
                return -EINVAL;
        }
 
-       return pxa_register_device(d, NULL, 0);
+       return mmp_register_device(d, NULL, 0);
 }
 
 static inline int pxa168_add_ssp(int id)
 {
-       struct pxa_device_desc *d = NULL;
+       struct mmp_device_desc *d = NULL;
 
        switch (id) {
        case 1: d = &pxa168_device_ssp1; break;
@@ -111,17 +111,17 @@ static inline int pxa168_add_ssp(int id)
        default:
                return -EINVAL;
        }
-       return pxa_register_device(d, NULL, 0);
+       return mmp_register_device(d, NULL, 0);
 }
 
 static inline int pxa168_add_nand(struct pxa3xx_nand_platform_data *info)
 {
-       return pxa_register_device(&pxa168_device_nand, info, sizeof(*info));
+       return mmp_register_device(&pxa168_device_nand, info, sizeof(*info));
 }
 
 static inline int pxa168_add_fb(struct pxa168fb_mach_info *mi)
 {
-       return pxa_register_device(&pxa168_device_fb, mi, sizeof(*mi));
+       return mmp_register_device(&pxa168_device_fb, mi, sizeof(*mi));
 }
 
 static inline int pxa168_add_keypad(struct pxa27x_keypad_platform_data *data)
@@ -129,11 +129,11 @@ static inline int pxa168_add_keypad(struct pxa27x_keypad_platform_data *data)
        if (cpu_is_pxa168())
                data->clear_wakeup_event = pxa168_clear_keypad_wakeup;
 
-       return pxa_register_device(&pxa168_device_keypad, data, sizeof(*data));
+       return mmp_register_device(&pxa168_device_keypad, data, sizeof(*data));
 }
 
 static inline int pxa168_add_eth(struct pxa168_eth_platform_data *data)
 {
-       return pxa_register_device(&pxa168_device_eth, data, sizeof(*data));
+       return mmp_register_device(&pxa168_device_eth, data, sizeof(*data));
 }
 #endif /* __ASM_MACH_PXA168_H */
index 2dfe38e..6ace5a8 100644 (file)
@@ -13,28 +13,28 @@ extern void __init pxa910_init_irq(void);
 
 #include "devices.h"
 
-extern struct pxa_device_desc pxa910_device_uart1;
-extern struct pxa_device_desc pxa910_device_uart2;
-extern struct pxa_device_desc pxa910_device_twsi0;
-extern struct pxa_device_desc pxa910_device_twsi1;
-extern struct pxa_device_desc pxa910_device_pwm1;
-extern struct pxa_device_desc pxa910_device_pwm2;
-extern struct pxa_device_desc pxa910_device_pwm3;
-extern struct pxa_device_desc pxa910_device_pwm4;
-extern struct pxa_device_desc pxa910_device_nand;
+extern struct mmp_device_desc pxa910_device_uart1;
+extern struct mmp_device_desc pxa910_device_uart2;
+extern struct mmp_device_desc pxa910_device_twsi0;
+extern struct mmp_device_desc pxa910_device_twsi1;
+extern struct mmp_device_desc pxa910_device_pwm1;
+extern struct mmp_device_desc pxa910_device_pwm2;
+extern struct mmp_device_desc pxa910_device_pwm3;
+extern struct mmp_device_desc pxa910_device_pwm4;
+extern struct mmp_device_desc pxa910_device_nand;
 extern struct platform_device pxa168_device_usb_phy;
 extern struct platform_device pxa168_device_u2o;
 extern struct platform_device pxa168_device_u2ootg;
 extern struct platform_device pxa168_device_u2oehci;
-extern struct pxa_device_desc pxa910_device_disp;
-extern struct pxa_device_desc pxa910_device_fb;
-extern struct pxa_device_desc pxa910_device_panel;
+extern struct mmp_device_desc pxa910_device_disp;
+extern struct mmp_device_desc pxa910_device_fb;
+extern struct mmp_device_desc pxa910_device_panel;
 extern struct platform_device pxa910_device_gpio;
 extern struct platform_device pxa910_device_rtc;
 
 static inline int pxa910_add_uart(int id)
 {
-       struct pxa_device_desc *d = NULL;
+       struct mmp_device_desc *d = NULL;
 
        switch (id) {
        case 1: d = &pxa910_device_uart1; break;
@@ -44,13 +44,13 @@ static inline int pxa910_add_uart(int id)
        if (d == NULL)
                return -EINVAL;
 
-       return pxa_register_device(d, NULL, 0);
+       return mmp_register_device(d, NULL, 0);
 }
 
 static inline int pxa910_add_twsi(int id, struct i2c_pxa_platform_data *data,
                                  struct i2c_board_info *info, unsigned size)
 {
-       struct pxa_device_desc *d = NULL;
+       struct mmp_device_desc *d = NULL;
        int ret;
 
        switch (id) {
@@ -64,12 +64,12 @@ static inline int pxa910_add_twsi(int id, struct i2c_pxa_platform_data *data,
        if (ret)
                return ret;
 
-       return pxa_register_device(d, data, sizeof(*data));
+       return mmp_register_device(d, data, sizeof(*data));
 }
 
 static inline int pxa910_add_pwm(int id)
 {
-       struct pxa_device_desc *d = NULL;
+       struct mmp_device_desc *d = NULL;
 
        switch (id) {
        case 1: d = &pxa910_device_pwm1; break;
@@ -80,11 +80,11 @@ static inline int pxa910_add_pwm(int id)
                return -EINVAL;
        }
 
-       return pxa_register_device(d, NULL, 0);
+       return mmp_register_device(d, NULL, 0);
 }
 
 static inline int pxa910_add_nand(struct pxa3xx_nand_platform_data *info)
 {
-       return pxa_register_device(&pxa910_device_nand, info, sizeof(*info));
+       return mmp_register_device(&pxa910_device_nand, info, sizeof(*info));
 }
 #endif /* __ASM_MACH_PXA910_H */
diff --git a/arch/arm/mach-mmp/tavorevb.c b/arch/arm/mach-mmp/tavorevb.c
deleted file mode 100644 (file)
index 3261d23..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *  linux/arch/arm/mach-mmp/tavorevb.c
- *
- *  Support for the Marvell PXA910-based TavorEVB Development Platform.
- */
-#include <linux/gpio.h>
-#include <linux/gpio-pxa.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/smc91x.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include "addr-map.h"
-#include "mfp-pxa910.h"
-#include "pxa910.h"
-#include "irqs.h"
-
-#include "common.h"
-
-static unsigned long tavorevb_pin_config[] __initdata = {
-       /* UART2 */
-       GPIO47_UART2_RXD,
-       GPIO48_UART2_TXD,
-
-       /* SMC */
-       SM_nCS0_nCS0,
-       SM_ADV_SM_ADV,
-       SM_SCLK_SM_SCLK,
-       SM_SCLK_SM_SCLK,
-       SM_BE0_SM_BE0,
-       SM_BE1_SM_BE1,
-
-       /* DFI */
-       DF_IO0_ND_IO0,
-       DF_IO1_ND_IO1,
-       DF_IO2_ND_IO2,
-       DF_IO3_ND_IO3,
-       DF_IO4_ND_IO4,
-       DF_IO5_ND_IO5,
-       DF_IO6_ND_IO6,
-       DF_IO7_ND_IO7,
-       DF_IO8_ND_IO8,
-       DF_IO9_ND_IO9,
-       DF_IO10_ND_IO10,
-       DF_IO11_ND_IO11,
-       DF_IO12_ND_IO12,
-       DF_IO13_ND_IO13,
-       DF_IO14_ND_IO14,
-       DF_IO15_ND_IO15,
-       DF_nCS0_SM_nCS2_nCS0,
-       DF_ALE_SM_WEn_ND_ALE,
-       DF_CLE_SM_OEn_ND_CLE,
-       DF_WEn_DF_WEn,
-       DF_REn_DF_REn,
-       DF_RDY0_DF_RDY0,
-};
-
-static struct pxa_gpio_platform_data pxa910_gpio_pdata = {
-       .irq_base       = MMP_GPIO_TO_IRQ(0),
-};
-
-static struct smc91x_platdata tavorevb_smc91x_info = {
-       .flags  = SMC91X_USE_16BIT | SMC91X_NOWAIT,
-};
-
-static struct resource smc91x_resources[] = {
-       [0] = {
-               .start  = SMC_CS1_PHYS_BASE + 0x300,
-               .end    = SMC_CS1_PHYS_BASE + 0xfffff,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = MMP_GPIO_TO_IRQ(80),
-               .end    = MMP_GPIO_TO_IRQ(80),
-               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
-       }
-};
-
-static struct platform_device smc91x_device = {
-       .name           = "smc91x",
-       .id             = 0,
-       .dev            = {
-               .platform_data = &tavorevb_smc91x_info,
-       },
-       .num_resources  = ARRAY_SIZE(smc91x_resources),
-       .resource       = smc91x_resources,
-};
-
-static void __init tavorevb_init(void)
-{
-       mfp_config(ARRAY_AND_SIZE(tavorevb_pin_config));
-
-       /* on-chip devices */
-       pxa910_add_uart(1);
-       platform_device_add_data(&pxa910_device_gpio, &pxa910_gpio_pdata,
-                                sizeof(struct pxa_gpio_platform_data));
-       platform_device_register(&pxa910_device_gpio);
-
-       /* off-chip devices */
-       platform_device_register(&smc91x_device);
-}
-
-MACHINE_START(TAVOREVB, "PXA910 Evaluation Board (aka TavorEVB)")
-       .map_io         = mmp_map_io,
-       .nr_irqs        = MMP_NR_IRQS,
-       .init_irq       = pxa910_init_irq,
-       .init_time      = pxa910_timer_init,
-       .init_machine   = tavorevb_init,
-       .restart        = mmp_restart,
-MACHINE_END
index 4f24076..345b2e6 100644 (file)
@@ -253,12 +253,12 @@ static struct spi_board_info spi_board_info[] __initdata = {
 
 static void __init add_disp(void)
 {
-       pxa_register_device(&pxa910_device_disp,
+       mmp_register_device(&pxa910_device_disp,
                &dkb_disp_info, sizeof(dkb_disp_info));
        spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
-       pxa_register_device(&pxa910_device_fb,
+       mmp_register_device(&pxa910_device_fb,
                &dkb_fb_info, sizeof(dkb_fb_info));
-       pxa_register_device(&pxa910_device_panel,
+       mmp_register_device(&pxa910_device_panel,
                &dkb_tpo_panel_info, sizeof(dkb_tpo_panel_info));
 }
 #endif
index d4b0cd9..0ac0567 100644 (file)
@@ -1,4 +1,16 @@
 # SPDX-License-Identifier: GPL-2.0-only
+menuconfig ARCH_OMAP1
+       bool "TI OMAP1"
+       depends on ARCH_MULTI_V4T || ARCH_MULTI_V5
+       depends on CPU_LITTLE_ENDIAN
+       select ARCH_HAS_HOLES_MEMORYMODEL
+       select ARCH_OMAP
+       select CLKSRC_MMIO
+       select FORCE_PCI if PCCARD
+       select GPIOLIB
+       help
+         Support for older TI OMAP1 (omap7xx, omap15xx or omap16xx)
+
 if ARCH_OMAP1
 
 menu "TI OMAP1 specific features"
@@ -6,23 +18,27 @@ menu "TI OMAP1 specific features"
 comment "OMAP Core Type"
 
 config ARCH_OMAP730
+       depends on ARCH_MULTI_V5
        bool "OMAP730 Based System"
        select ARCH_OMAP_OTG
        select CPU_ARM926T
        select OMAP_MPU_TIMER
 
 config ARCH_OMAP850
+       depends on ARCH_MULTI_V5
        bool "OMAP850 Based System"
        select ARCH_OMAP_OTG
        select CPU_ARM926T
 
 config ARCH_OMAP15XX
+       depends on ARCH_MULTI_V4T
        default y
        bool "OMAP15xx Based System"
        select CPU_ARM925T
        select OMAP_MPU_TIMER
 
 config ARCH_OMAP16XX
+       depends on ARCH_MULTI_V5
        bool "OMAP16xx Based System"
        select ARCH_OMAP_OTG
        select CPU_ARM926T
index e5bd4d3..83381e2 100644 (file)
@@ -16,7 +16,9 @@
 #include <linux/io.h>
 #include <linux/clk.h>
 #include <linux/clkdev.h>
+#include <linux/clk-provider.h>
 #include <linux/soc/ti/omap1-io.h>
+#include <linux/spinlock.h>
 
 #include <asm/mach-types.h>
 
 #include "sram.h"
 
 __u32 arm_idlect1_mask;
-struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;
+/* provide direct internal access (not via clk API) to some clocks */
+struct omap1_clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;
 
-static LIST_HEAD(clocks);
-static DEFINE_MUTEX(clocks_mutex);
-static DEFINE_SPINLOCK(clockfw_lock);
+/* protect registers shared among clk_enable/disable() and clk_set_rate() operations */
+static DEFINE_SPINLOCK(arm_ckctl_lock);
+static DEFINE_SPINLOCK(arm_idlect2_lock);
+static DEFINE_SPINLOCK(mod_conf_ctrl_0_lock);
+static DEFINE_SPINLOCK(mod_conf_ctrl_1_lock);
+static DEFINE_SPINLOCK(swd_clk_div_ctrl_sel_lock);
 
 /*
  * Omap1 specific clock functions
  */
 
-unsigned long omap1_uart_recalc(struct clk *clk)
+unsigned long omap1_uart_recalc(struct omap1_clk *clk, unsigned long p_rate)
 {
        unsigned int val = __raw_readl(clk->enable_reg);
        return val & 1 << clk->enable_bit ? 48000000 : 12000000;
 }
 
-unsigned long omap1_sossi_recalc(struct clk *clk)
+unsigned long omap1_sossi_recalc(struct omap1_clk *clk, unsigned long p_rate)
 {
        u32 div = omap_readl(MOD_CONF_CTRL_1);
 
        div = (div >> 17) & 0x7;
        div++;
 
-       return clk->parent->rate / div;
+       return p_rate / div;
 }
 
-static void omap1_clk_allow_idle(struct clk *clk)
+static void omap1_clk_allow_idle(struct omap1_clk *clk)
 {
        struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;
 
@@ -65,7 +71,7 @@ static void omap1_clk_allow_idle(struct clk *clk)
                arm_idlect1_mask |= 1 << iclk->idlect_shift;
 }
 
-static void omap1_clk_deny_idle(struct clk *clk)
+static void omap1_clk_deny_idle(struct omap1_clk *clk)
 {
        struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;
 
@@ -129,7 +135,7 @@ static __u16 verify_ckctl_value(__u16 newval)
        return newval;
 }
 
-static int calc_dsor_exp(struct clk *clk, unsigned long rate)
+static int calc_dsor_exp(unsigned long rate, unsigned long realrate)
 {
        /* Note: If target frequency is too low, this function will return 4,
         * which is invalid value. Caller must check for this value and act
@@ -142,15 +148,11 @@ static int calc_dsor_exp(struct clk *clk, unsigned long rate)
         * DSP_CK >= TC_CK
         * DSPMMU_CK >= TC_CK
         */
-       unsigned long realrate;
-       struct clk * parent;
        unsigned  dsor_exp;
 
-       parent = clk->parent;
-       if (unlikely(parent == NULL))
+       if (unlikely(realrate == 0))
                return -EIO;
 
-       realrate = parent->rate;
        for (dsor_exp=0; dsor_exp<4; dsor_exp++) {
                if (realrate <= rate)
                        break;
@@ -161,16 +163,50 @@ static int calc_dsor_exp(struct clk *clk, unsigned long rate)
        return dsor_exp;
 }
 
-unsigned long omap1_ckctl_recalc(struct clk *clk)
+unsigned long omap1_ckctl_recalc(struct omap1_clk *clk, unsigned long p_rate)
 {
        /* Calculate divisor encoded as 2-bit exponent */
        int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));
 
-       return clk->parent->rate / dsor;
+       /* update locally maintained rate, required by arm_ck for omap1_show_rates() */
+       clk->rate = p_rate / dsor;
+       return clk->rate;
 }
 
-unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
+static int omap1_clk_is_enabled(struct clk_hw *hw)
 {
+       struct omap1_clk *clk = to_omap1_clk(hw);
+       bool api_ck_was_enabled = true;
+       __u32 regval32;
+       int ret;
+
+       if (!clk->ops)  /* no gate -- always enabled */
+               return 1;
+
+       if (clk->ops == &clkops_dspck) {
+               api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
+               if (!api_ck_was_enabled)
+                       if (api_ck_p->ops->enable(api_ck_p) < 0)
+                               return 0;
+       }
+
+       if (clk->flags & ENABLE_REG_32BIT)
+               regval32 = __raw_readl(clk->enable_reg);
+       else
+               regval32 = __raw_readw(clk->enable_reg);
+
+       ret = regval32 & (1 << clk->enable_bit);
+
+       if (!api_ck_was_enabled)
+               api_ck_p->ops->disable(api_ck_p);
+
+       return ret;
+}
+
+
+unsigned long omap1_ckctl_recalc_dsp_domain(struct omap1_clk *clk, unsigned long p_rate)
+{
+       bool api_ck_was_enabled;
        int dsor;
 
        /* Calculate divisor encoded as 2-bit exponent
@@ -180,15 +216,18 @@ unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
         * Note that DSP_CKCTL virt addr = phys addr, so
         * we must use __raw_readw() instead of omap_readw().
         */
-       omap1_clk_enable(api_ck_p);
+       api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
+       if (!api_ck_was_enabled)
+               api_ck_p->ops->enable(api_ck_p);
        dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
-       omap1_clk_disable(api_ck_p);
+       if (!api_ck_was_enabled)
+               api_ck_p->ops->disable(api_ck_p);
 
-       return clk->parent->rate / dsor;
+       return p_rate / dsor;
 }
 
 /* MPU virtual clock functions */
-int omap1_select_table_rate(struct clk *clk, unsigned long rate)
+int omap1_select_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
 {
        /* Find the highest supported frequency <= rate and switch to it */
        struct mpu_rate * ptr;
@@ -223,12 +262,12 @@ int omap1_select_table_rate(struct clk *clk, unsigned long rate)
        return 0;
 }
 
-int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
+int omap1_clk_set_rate_dsp_domain(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
 {
        int dsor_exp;
        u16 regval;
 
-       dsor_exp = calc_dsor_exp(clk, rate);
+       dsor_exp = calc_dsor_exp(rate, p_rate);
        if (dsor_exp > 3)
                dsor_exp = -EINVAL;
        if (dsor_exp < 0)
@@ -238,42 +277,51 @@ int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
        regval &= ~(3 << clk->rate_offset);
        regval |= dsor_exp << clk->rate_offset;
        __raw_writew(regval, DSP_CKCTL);
-       clk->rate = clk->parent->rate / (1 << dsor_exp);
+       clk->rate = p_rate / (1 << dsor_exp);
 
        return 0;
 }
 
-long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
+long omap1_clk_round_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate,
+                                   unsigned long *p_rate)
 {
-       int dsor_exp = calc_dsor_exp(clk, rate);
+       int dsor_exp = calc_dsor_exp(rate, *p_rate);
+
        if (dsor_exp < 0)
                return dsor_exp;
        if (dsor_exp > 3)
                dsor_exp = 3;
-       return clk->parent->rate / (1 << dsor_exp);
+       return *p_rate / (1 << dsor_exp);
 }
 
-int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
+int omap1_clk_set_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
 {
+       unsigned long flags;
        int dsor_exp;
        u16 regval;
 
-       dsor_exp = calc_dsor_exp(clk, rate);
+       dsor_exp = calc_dsor_exp(rate, p_rate);
        if (dsor_exp > 3)
                dsor_exp = -EINVAL;
        if (dsor_exp < 0)
                return dsor_exp;
 
+       /* protect ARM_CKCTL register from concurrent access via clk_enable/disable() */
+       spin_lock_irqsave(&arm_ckctl_lock, flags);
+
        regval = omap_readw(ARM_CKCTL);
        regval &= ~(3 << clk->rate_offset);
        regval |= dsor_exp << clk->rate_offset;
        regval = verify_ckctl_value(regval);
        omap_writew(regval, ARM_CKCTL);
-       clk->rate = clk->parent->rate / (1 << dsor_exp);
+       clk->rate = p_rate / (1 << dsor_exp);
+
+       spin_unlock_irqrestore(&arm_ckctl_lock, flags);
+
        return 0;
 }
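
The change above, together with the gate paths further down, reduces to one
pattern: a read-modify-write of a clock register shared between clk_set_rate()
and clk_enable()/clk_disable(), serialized by a per-register spinlock.  A
minimal sketch of that pattern (demo_lock and demo_rmw_u16 are illustrative
names, not part of the patch):

    #include <linux/io.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    /* serialize updates to a register shared by rate and gate operations */
    static void demo_rmw_u16(void __iomem *reg, u16 clear, u16 set)
    {
            unsigned long flags;
            u16 val;

            spin_lock_irqsave(&demo_lock, flags);
            val = readw(reg);
            val &= ~clear;
            val |= set;
            writew(val, reg);
            spin_unlock_irqrestore(&demo_lock, flags);
    }
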
 
-long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
+long omap1_round_to_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
 {
        /* Find the highest supported frequency <= rate */
        struct mpu_rate * ptr;
@@ -324,26 +372,40 @@ static unsigned calc_ext_dsor(unsigned long rate)
 }
 
 /* XXX Only needed on 1510 */
-int omap1_set_uart_rate(struct clk *clk, unsigned long rate)
+long omap1_round_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
 {
+       return rate > 24000000 ? 48000000 : 12000000;
+}
+
+int omap1_set_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
+{
+       unsigned long flags;
        unsigned int val;
 
-       val = __raw_readl(clk->enable_reg);
        if (rate == 12000000)
-               val &= ~(1 << clk->enable_bit);
+               val = 0;
        else if (rate == 48000000)
-               val |= (1 << clk->enable_bit);
+               val = 1 << clk->enable_bit;
        else
                return -EINVAL;
+
+       /* protect MOD_CONF_CTRL_0 register from concurrent access via clk_enable/disable() */
+       spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
+
+       val |= __raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit);
        __raw_writel(val, clk->enable_reg);
+
+       spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
+
        clk->rate = rate;
 
        return 0;
 }
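
With the round_rate/set_rate pair above, a driver can switch the UART
functional clock between its two supported rates through the generic clk API.
A hedged consumer-side sketch (the device pointer and the "uart1_ck" con_id are
placeholders, standing in for whatever the board's clkdev entries provide):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int demo_uart_clk_to_48mhz(struct device *dev)
    {
            struct clk *ck = clk_get(dev, "uart1_ck");      /* placeholder con_id */
            int ret;

            if (IS_ERR(ck))
                    return PTR_ERR(ck);

            /* requests above 24 MHz round to 48 MHz, anything else to 12 MHz */
            ret = clk_set_rate(ck, clk_round_rate(ck, 48000000));
            clk_put(ck);
            return ret;
    }
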
 
 /* External clock (MCLK & BCLK) functions */
-int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
+int omap1_set_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
 {
+       unsigned long flags;
        unsigned dsor;
        __u16 ratio_bits;
 
@@ -354,25 +416,53 @@ int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
        else
                ratio_bits = (dsor - 2) << 2;
 
+       /* protect SWD_CLK_DIV_CTRL_SEL register from concurrent access via clk_enable/disable() */
+       spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);
+
        ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
        __raw_writew(ratio_bits, clk->enable_reg);
 
+       spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);
+
        return 0;
 }
 
-int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
+static int calc_div_sossi(unsigned long rate, unsigned long p_rate)
 {
-       u32 l;
        int div;
-       unsigned long p_rate;
 
-       p_rate = clk->parent->rate;
        /* Round towards slower frequency */
        div = (p_rate + rate - 1) / rate;
-       div--;
+
+       return --div;
+}
+
+long omap1_round_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
+{
+       int div;
+
+       div = calc_div_sossi(rate, *p_rate);
+       if (div < 0)
+               div = 0;
+       else if (div > 7)
+               div = 7;
+
+       return *p_rate / (div + 1);
+}
+
+int omap1_set_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
+{
+       unsigned long flags;
+       u32 l;
+       int div;
+
+       div = calc_div_sossi(rate, p_rate);
        if (div < 0 || div > 7)
                return -EINVAL;
 
+       /* protect MOD_CONF_CTRL_1 register from concurrent access via clk_enable/disable() */
+       spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
+
        l = omap_readl(MOD_CONF_CTRL_1);
        l &= ~(7 << 17);
        l |= div << 17;
@@ -380,15 +470,17 @@ int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
 
        clk->rate = p_rate / (div + 1);
 
+       spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
+
        return 0;
 }
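
A standalone arithmetic check of the SoSSI divisor logic above: the ceiling
division rounds towards the slower frequency, and the rounded result is clamped
to the 3-bit divider field (illustrative only):

    #include <stdio.h>

    int main(void)
    {
            unsigned long p_rate = 96000000, rate = 13000000;
            int div = (p_rate + rate - 1) / rate - 1;       /* ceil(96/13) - 1 = 7 */

            if (div < 0)
                    div = 0;
            else if (div > 7)
                    div = 7;

            /* 96 MHz / (7 + 1) = 12 MHz, the nearest rate not above the request */
            printf("rounded rate = %lu\n", p_rate / (div + 1));
            return 0;
    }
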
 
-long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate)
+long omap1_round_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
 {
        return 96000000 / calc_ext_dsor(rate);
 }
 
-void omap1_init_ext_clk(struct clk *clk)
+int omap1_init_ext_clk(struct omap1_clk *clk)
 {
        unsigned dsor;
        __u16 ratio_bits;
@@ -404,59 +496,59 @@ void omap1_init_ext_clk(struct clk *clk)
                dsor = ratio_bits + 2;
 
        clk-> rate = 96000000 / dsor;
+
+       return 0;
 }
 
-int omap1_clk_enable(struct clk *clk)
+static int omap1_clk_enable(struct clk_hw *hw)
 {
+       struct omap1_clk *clk = to_omap1_clk(hw), *parent = to_omap1_clk(clk_hw_get_parent(hw));
        int ret = 0;
 
-       if (clk->usecount++ == 0) {
-               if (clk->parent) {
-                       ret = omap1_clk_enable(clk->parent);
-                       if (ret)
-                               goto err;
-
-                       if (clk->flags & CLOCK_NO_IDLE_PARENT)
-                               omap1_clk_deny_idle(clk->parent);
-               }
+       if (parent && clk->flags & CLOCK_NO_IDLE_PARENT)
+               omap1_clk_deny_idle(parent);
 
+       if (clk->ops && !(WARN_ON(!clk->ops->enable)))
                ret = clk->ops->enable(clk);
-               if (ret) {
-                       if (clk->parent)
-                               omap1_clk_disable(clk->parent);
-                       goto err;
-               }
-       }
-       return ret;
 
-err:
-       clk->usecount--;
        return ret;
 }
 
-void omap1_clk_disable(struct clk *clk)
+static void omap1_clk_disable(struct clk_hw *hw)
 {
-       if (clk->usecount > 0 && !(--clk->usecount)) {
+       struct omap1_clk *clk = to_omap1_clk(hw), *parent = to_omap1_clk(clk_hw_get_parent(hw));
+
+       if (clk->ops && !(WARN_ON(!clk->ops->disable)))
                clk->ops->disable(clk);
-               if (likely(clk->parent)) {
-                       omap1_clk_disable(clk->parent);
-                       if (clk->flags & CLOCK_NO_IDLE_PARENT)
-                               omap1_clk_allow_idle(clk->parent);
-               }
-       }
+
+       if (likely(parent) && clk->flags & CLOCK_NO_IDLE_PARENT)
+               omap1_clk_allow_idle(parent);
 }
 
-static int omap1_clk_enable_generic(struct clk *clk)
+static int omap1_clk_enable_generic(struct omap1_clk *clk)
 {
+       unsigned long flags;
        __u16 regval16;
        __u32 regval32;
 
        if (unlikely(clk->enable_reg == NULL)) {
                printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
-                      clk->name);
+                      clk_hw_get_name(&clk->hw));
                return -EINVAL;
        }
 
+       /* protect clk->enable_reg from concurrent access via clk_set_rate() */
+       if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
+               spin_lock_irqsave(&arm_ckctl_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
+               spin_lock_irqsave(&arm_idlect2_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
+               spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
+               spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
+               spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);
+
        if (clk->flags & ENABLE_REG_32BIT) {
                regval32 = __raw_readl(clk->enable_reg);
                regval32 |= (1 << clk->enable_bit);
@@ -467,17 +559,41 @@ static int omap1_clk_enable_generic(struct clk *clk)
                __raw_writew(regval16, clk->enable_reg);
        }
 
+       if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
+               spin_unlock_irqrestore(&arm_ckctl_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
+               spin_unlock_irqrestore(&arm_idlect2_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
+               spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
+               spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
+               spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);
+
        return 0;
 }
 
-static void omap1_clk_disable_generic(struct clk *clk)
+static void omap1_clk_disable_generic(struct omap1_clk *clk)
 {
+       unsigned long flags;
        __u16 regval16;
        __u32 regval32;
 
        if (clk->enable_reg == NULL)
                return;
 
+       /* protect clk->enable_reg from concurrent access via clk_set_rate() */
+       if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
+               spin_lock_irqsave(&arm_ckctl_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
+               spin_lock_irqsave(&arm_idlect2_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
+               spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
+               spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
+               spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);
+
        if (clk->flags & ENABLE_REG_32BIT) {
                regval32 = __raw_readl(clk->enable_reg);
                regval32 &= ~(1 << clk->enable_bit);
@@ -487,6 +603,17 @@ static void omap1_clk_disable_generic(struct clk *clk)
                regval16 &= ~(1 << clk->enable_bit);
                __raw_writew(regval16, clk->enable_reg);
        }
+
+       if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
+               spin_unlock_irqrestore(&arm_ckctl_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
+               spin_unlock_irqrestore(&arm_idlect2_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
+               spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
+               spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
+       else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
+               spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);
 }
 
 const struct clkops clkops_generic = {
@@ -494,25 +621,38 @@ const struct clkops clkops_generic = {
        .disable        = omap1_clk_disable_generic,
 };
 
-static int omap1_clk_enable_dsp_domain(struct clk *clk)
+static int omap1_clk_enable_dsp_domain(struct omap1_clk *clk)
 {
-       int retval;
+       bool api_ck_was_enabled;
+       int retval = 0;
+
+       api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
+       if (!api_ck_was_enabled)
+               retval = api_ck_p->ops->enable(api_ck_p);
 
-       retval = omap1_clk_enable(api_ck_p);
        if (!retval) {
                retval = omap1_clk_enable_generic(clk);
-               omap1_clk_disable(api_ck_p);
+
+               if (!api_ck_was_enabled)
+                       api_ck_p->ops->disable(api_ck_p);
        }
 
        return retval;
 }
 
-static void omap1_clk_disable_dsp_domain(struct clk *clk)
+static void omap1_clk_disable_dsp_domain(struct omap1_clk *clk)
 {
-       if (omap1_clk_enable(api_ck_p) == 0) {
-               omap1_clk_disable_generic(clk);
-               omap1_clk_disable(api_ck_p);
-       }
+       bool api_ck_was_enabled;
+
+       api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
+       if (!api_ck_was_enabled)
+               if (api_ck_p->ops->enable(api_ck_p) < 0)
+                       return;
+
+       omap1_clk_disable_generic(clk);
+
+       if (!api_ck_was_enabled)
+               api_ck_p->ops->disable(api_ck_p);
 }
 
 const struct clkops clkops_dspck = {
@@ -521,7 +661,7 @@ const struct clkops clkops_dspck = {
 };
 
 /* XXX SYSC register handling does not belong in the clock framework */
-static int omap1_clk_enable_uart_functional_16xx(struct clk *clk)
+static int omap1_clk_enable_uart_functional_16xx(struct omap1_clk *clk)
 {
        int ret;
        struct uart_clk *uclk;
@@ -538,7 +678,7 @@ static int omap1_clk_enable_uart_functional_16xx(struct clk *clk)
 }
 
 /* XXX SYSC register handling does not belong in the clock framework */
-static void omap1_clk_disable_uart_functional_16xx(struct clk *clk)
+static void omap1_clk_disable_uart_functional_16xx(struct omap1_clk *clk)
 {
        struct uart_clk *uclk;
 
@@ -555,20 +695,33 @@ const struct clkops clkops_uart_16xx = {
        .disable        = omap1_clk_disable_uart_functional_16xx,
 };
 
-long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
+static unsigned long omap1_clk_recalc_rate(struct clk_hw *hw, unsigned long p_rate)
 {
-       if (clk->round_rate != NULL)
-               return clk->round_rate(clk, rate);
+       struct omap1_clk *clk = to_omap1_clk(hw);
+
+       if (clk->recalc)
+               return clk->recalc(clk, p_rate);
 
        return clk->rate;
 }
 
-int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
+static long omap1_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *p_rate)
+{
+       struct omap1_clk *clk = to_omap1_clk(hw);
+
+       if (clk->round_rate != NULL)
+               return clk->round_rate(clk, rate, p_rate);
+
+       return omap1_clk_recalc_rate(hw, *p_rate);
+}
+
+static int omap1_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long p_rate)
 {
+       struct omap1_clk *clk = to_omap1_clk(hw);
        int  ret = -EINVAL;
 
        if (clk->set_rate)
-               ret = clk->set_rate(clk, rate);
+               ret = clk->set_rate(clk, rate, p_rate);
        return ret;
 }
 
@@ -576,243 +729,105 @@ int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
  * Omap1 clock reset and init functions
  */
 
+static int omap1_clk_init_op(struct clk_hw *hw)
+{
+       struct omap1_clk *clk = to_omap1_clk(hw);
+
+       if (clk->init)
+               return clk->init(clk);
+
+       return 0;
+}
+
 #ifdef CONFIG_OMAP_RESET_CLOCKS
 
-void omap1_clk_disable_unused(struct clk *clk)
+static void omap1_clk_disable_unused(struct clk_hw *hw)
 {
-       __u32 regval32;
+       struct omap1_clk *clk = to_omap1_clk(hw);
+       const char *name = clk_hw_get_name(hw);
 
        /* Clocks in the DSP domain need api_ck. Just assume bootloader
         * has not enabled any DSP clocks */
        if (clk->enable_reg == DSP_IDLECT2) {
-               pr_info("Skipping reset check for DSP domain clock \"%s\"\n",
-                       clk->name);
+               pr_info("Skipping reset check for DSP domain clock \"%s\"\n", name);
                return;
        }
 
-       /* Is the clock already disabled? */
-       if (clk->flags & ENABLE_REG_32BIT)
-               regval32 = __raw_readl(clk->enable_reg);
-       else
-               regval32 = __raw_readw(clk->enable_reg);
-
-       if ((regval32 & (1 << clk->enable_bit)) == 0)
-               return;
-
-       printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name);
-       clk->ops->disable(clk);
+       pr_info("Disabling unused clock \"%s\"... ", name);
+       omap1_clk_disable(hw);
        printk(" done\n");
 }
 
 #endif
 
+const struct clk_ops omap1_clk_gate_ops = {
+       .enable         = omap1_clk_enable,
+       .disable        = omap1_clk_disable,
+       .is_enabled     = omap1_clk_is_enabled,
+#ifdef CONFIG_OMAP_RESET_CLOCKS
+       .disable_unused = omap1_clk_disable_unused,
+#endif
+};
 
-int clk_enable(struct clk *clk)
-{
-       unsigned long flags;
-       int ret;
-
-       if (IS_ERR_OR_NULL(clk))
-               return -EINVAL;
-
-       spin_lock_irqsave(&clockfw_lock, flags);
-       ret = omap1_clk_enable(clk);
-       spin_unlock_irqrestore(&clockfw_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-       unsigned long flags;
-
-       if (IS_ERR_OR_NULL(clk))
-               return;
-
-       spin_lock_irqsave(&clockfw_lock, flags);
-       if (clk->usecount == 0) {
-               pr_err("Trying disable clock %s with 0 usecount\n",
-                      clk->name);
-               WARN_ON(1);
-               goto out;
-       }
-
-       omap1_clk_disable(clk);
-
-out:
-       spin_unlock_irqrestore(&clockfw_lock, flags);
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
-       unsigned long flags;
-       unsigned long ret;
-
-       if (IS_ERR_OR_NULL(clk))
-               return 0;
-
-       spin_lock_irqsave(&clockfw_lock, flags);
-       ret = clk->rate;
-       spin_unlock_irqrestore(&clockfw_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-/*
- * Optional clock functions defined in include/linux/clk.h
- */
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
-       unsigned long flags;
-       long ret;
-
-       if (IS_ERR_OR_NULL(clk))
-               return 0;
-
-       spin_lock_irqsave(&clockfw_lock, flags);
-       ret = omap1_clk_round_rate(clk, rate);
-       spin_unlock_irqrestore(&clockfw_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
-       unsigned long flags;
-       int ret = -EINVAL;
-
-       if (IS_ERR_OR_NULL(clk))
-               return ret;
-
-       spin_lock_irqsave(&clockfw_lock, flags);
-       ret = omap1_clk_set_rate(clk, rate);
-       if (ret == 0)
-               propagate_rate(clk);
-       spin_unlock_irqrestore(&clockfw_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
-       WARN_ONCE(1, "clk_set_parent() not implemented for OMAP1\n");
-
-       return -EINVAL;
-}
-EXPORT_SYMBOL(clk_set_parent);
+const struct clk_ops omap1_clk_rate_ops = {
+       .recalc_rate    = omap1_clk_recalc_rate,
+       .round_rate     = omap1_clk_round_rate,
+       .set_rate       = omap1_clk_set_rate,
+       .init           = omap1_clk_init_op,
+};
 
-struct clk *clk_get_parent(struct clk *clk)
-{
-       return clk->parent;
-}
-EXPORT_SYMBOL(clk_get_parent);
+const struct clk_ops omap1_clk_full_ops = {
+       .enable         = omap1_clk_enable,
+       .disable        = omap1_clk_disable,
+       .is_enabled     = omap1_clk_is_enabled,
+#ifdef CONFIG_OMAP_RESET_CLOCKS
+       .disable_unused = omap1_clk_disable_unused,
+#endif
+       .recalc_rate    = omap1_clk_recalc_rate,
+       .round_rate     = omap1_clk_round_rate,
+       .set_rate       = omap1_clk_set_rate,
+       .init           = omap1_clk_init_op,
+};
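
These ops tables are what hand the omap1 clock descriptors over to the common
clock framework.  The registration path itself is outside this hunk; a hedged
sketch of what it is expected to look like for one statically initialized clock
(demo_register_one() is illustrative):

    #include <linux/clk-provider.h>
    #include <linux/clkdev.h>

    #include "clock.h"      /* struct omap1_clk, as reworked by this patch */

    static int __init demo_register_one(struct omap1_clk *c, const char *con_id,
                                        const char *dev_id)
    {
            int ret;

            /* hand the clk_hw (set up via CLK_HW_INIT*) to the CCF core */
            ret = clk_hw_register(NULL, &c->hw);
            if (ret)
                    return ret;

            /* keep legacy clk_get(dev, con_id) users working via clkdev */
            return clk_hw_register_clkdev(&c->hw, con_id, dev_id);
    }
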
 
 /*
  * OMAP specific clock functions shared between omap1 and omap2
  */
 
 /* Used for clocks that always have same value as the parent clock */
-unsigned long followparent_recalc(struct clk *clk)
+unsigned long followparent_recalc(struct omap1_clk *clk, unsigned long p_rate)
 {
-       return clk->parent->rate;
+       return p_rate;
 }
 
 /*
  * Used for clocks that have the same value as the parent clock,
  * divided by some factor
  */
-unsigned long omap_fixed_divisor_recalc(struct clk *clk)
+unsigned long omap_fixed_divisor_recalc(struct omap1_clk *clk, unsigned long p_rate)
 {
        WARN_ON(!clk->fixed_div);
 
-       return clk->parent->rate / clk->fixed_div;
+       return p_rate / clk->fixed_div;
 }
 
 /* Propagate rate to children */
-void propagate_rate(struct clk *tclk)
+void propagate_rate(struct omap1_clk *tclk)
 {
        struct clk *clkp;
 
-       list_for_each_entry(clkp, &tclk->children, sibling) {
-               if (clkp->recalc)
-                       clkp->rate = clkp->recalc(clkp);
-               propagate_rate(clkp);
-       }
-}
-
-static LIST_HEAD(root_clks);
-
-/**
- * clk_preinit - initialize any fields in the struct clk before clk init
- * @clk: struct clk * to initialize
- *
- * Initialize any struct clk fields needed before normal clk initialization
- * can run.  No return value.
- */
-void clk_preinit(struct clk *clk)
-{
-       INIT_LIST_HEAD(&clk->children);
-}
-
-int clk_register(struct clk *clk)
-{
-       if (IS_ERR_OR_NULL(clk))
-               return -EINVAL;
-
-       /*
-        * trap out already registered clocks
-        */
-       if (clk->node.next || clk->node.prev)
-               return 0;
-
-       mutex_lock(&clocks_mutex);
-       if (clk->parent)
-               list_add(&clk->sibling, &clk->parent->children);
-       else
-               list_add(&clk->sibling, &root_clks);
-
-       list_add(&clk->node, &clocks);
-       if (clk->init)
-               clk->init(clk);
-       mutex_unlock(&clocks_mutex);
-
-       return 0;
-}
-EXPORT_SYMBOL(clk_register);
-
-void clk_unregister(struct clk *clk)
-{
-       if (IS_ERR_OR_NULL(clk))
+       /* depend on the CCF's ability to recalculate new rates across the whole clock subtree */
+       if (WARN_ON(!(clk_hw_get_flags(&tclk->hw) & CLK_GET_RATE_NOCACHE)))
                return;
 
-       mutex_lock(&clocks_mutex);
-       list_del(&clk->sibling);
-       list_del(&clk->node);
-       mutex_unlock(&clocks_mutex);
-}
-EXPORT_SYMBOL(clk_unregister);
-
-/*
- * Low level helpers
- */
-static int clkll_enable_null(struct clk *clk)
-{
-       return 0;
-}
+       clkp = clk_get_sys(NULL, clk_hw_get_name(&tclk->hw));
+       if (WARN_ON(!clkp))
+               return;
 
-static void clkll_disable_null(struct clk *clk)
-{
+       clk_get_rate(clkp);
+       clk_put(clkp);
 }
 
-const struct clkops clkops_null = {
-       .enable         = clkll_enable_null,
-       .disable        = clkll_disable_null,
+const struct clk_ops omap1_clk_null_ops = {
 };
 
 /*
@@ -820,114 +835,6 @@ const struct clkops clkops_null = {
  *
  * Used for clock aliases that are needed on some OMAPs, but not others
  */
-struct clk dummy_ck = {
-       .name   = "dummy",
-       .ops    = &clkops_null,
+struct omap1_clk dummy_ck __refdata = {
+       .hw.init        = CLK_HW_INIT_NO_PARENT("dummy", &omap1_clk_null_ops, 0),
 };
-
-/*
- *
- */
-
-#ifdef CONFIG_OMAP_RESET_CLOCKS
-/*
- * Disable any unused clocks left on by the bootloader
- */
-static int __init clk_disable_unused(void)
-{
-       struct clk *ck;
-       unsigned long flags;
-
-       pr_info("clock: disabling unused clocks to save power\n");
-
-       spin_lock_irqsave(&clockfw_lock, flags);
-       list_for_each_entry(ck, &clocks, node) {
-               if (ck->ops == &clkops_null)
-                       continue;
-
-               if (ck->usecount > 0 || !ck->enable_reg)
-                       continue;
-
-               omap1_clk_disable_unused(ck);
-       }
-       spin_unlock_irqrestore(&clockfw_lock, flags);
-
-       return 0;
-}
-late_initcall(clk_disable_unused);
-#endif
-
-#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
-/*
- *     debugfs support to trace clock tree hierarchy and attributes
- */
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-static struct dentry *clk_debugfs_root;
-
-static int debug_clock_show(struct seq_file *s, void *unused)
-{
-       struct clk *c;
-       struct clk *pa;
-
-       mutex_lock(&clocks_mutex);
-       seq_printf(s, "%-30s %-30s %-10s %s\n",
-                  "clock-name", "parent-name", "rate", "use-count");
-
-       list_for_each_entry(c, &clocks, node) {
-               pa = c->parent;
-               seq_printf(s, "%-30s %-30s %-10lu %d\n",
-                          c->name, pa ? pa->name : "none", c->rate,
-                          c->usecount);
-       }
-       mutex_unlock(&clocks_mutex);
-
-       return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(debug_clock);
-
-static void clk_debugfs_register_one(struct clk *c)
-{
-       struct dentry *d;
-       struct clk *pa = c->parent;
-
-       d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
-       c->dent = d;
-
-       debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount);
-       debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate);
-       debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags);
-}
-
-static void clk_debugfs_register(struct clk *c)
-{
-       struct clk *pa = c->parent;
-
-       if (pa && !pa->dent)
-               clk_debugfs_register(pa);
-
-       if (!c->dent)
-               clk_debugfs_register_one(c);
-}
-
-static int __init clk_debugfs_init(void)
-{
-       struct clk *c;
-       struct dentry *d;
-
-       d = debugfs_create_dir("clock", NULL);
-       clk_debugfs_root = d;
-
-       list_for_each_entry(c, &clocks, node)
-               clk_debugfs_register(c);
-
-       debugfs_create_file("summary", S_IRUGO, d, NULL, &debug_clock_fops);
-
-       return 0;
-}
-late_initcall(clk_debugfs_init);
-
-#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
index 8025e4a..16cfb2e 100644 (file)
 #define __ARCH_ARM_MACH_OMAP1_CLOCK_H
 
 #include <linux/clk.h>
-#include <linux/list.h>
-
 #include <linux/clkdev.h>
+#include <linux/clk-provider.h>
 
 struct module;
-struct clk;
+struct omap1_clk;
 
 struct omap_clk {
        u16                             cpu;
@@ -29,7 +28,7 @@ struct omap_clk {
                .lk = {                 \
                        .dev_id = dev,  \
                        .con_id = con,  \
-                       .clk = ck,      \
+                       .clk_hw = ck,   \
                },                      \
        }
 
@@ -40,10 +39,6 @@ struct omap_clk {
 #define CK_16XX                (1 << 3)        /* 16xx, 17xx, 5912 */
 #define CK_1710                (1 << 4)        /* 1710 extra for rate selection */
 
-
-/* Temporary, needed during the common clock framework conversion */
-#define __clk_get_name(clk)    (clk->name)
-
 /**
  * struct clkops - some clock function pointers
  * @enable: fn ptr that enables the current clock in hardware
@@ -51,8 +46,8 @@ struct omap_clk {
  * @allow_idle: fn ptr that enables autoidle for the current clock in hardware
  */
 struct clkops {
-       int                     (*enable)(struct clk *);
-       void                    (*disable)(struct clk *);
+       int                     (*enable)(struct omap1_clk *clk);
+       void                    (*disable)(struct omap1_clk *clk);
 };
 
 /*
@@ -65,13 +60,9 @@ struct clkops {
 #define CLOCK_NO_IDLE_PARENT   (1 << 2)
 
 /**
- * struct clk - OMAP struct clk
- * @node: list_head connecting this clock into the full clock list
+ * struct omap1_clk - OMAP1 struct clk
+ * @hw: struct clk_hw for common clock framework integration
  * @ops: struct clkops * for this clock
- * @name: the name of the clock in the hardware (used in hwmod data and debug)
- * @parent: pointer to this clock's parent struct clk
- * @children: list_head connecting to the child clks' @sibling list_heads
- * @sibling: list_head connecting this clk to its parent clk's @children
  * @rate: current clock rate
  * @enable_reg: register to write to enable the clock (see @enable_bit)
  * @recalc: fn ptr that returns the clock's current rate
@@ -79,102 +70,65 @@ struct clkops {
  * @round_rate: fn ptr that can round the clock's current rate
  * @init: fn ptr to do clock-specific initialization
  * @enable_bit: bitshift to write to enable/disable the clock (see @enable_reg)
- * @usecount: number of users that have requested this clock to be enabled
  * @fixed_div: when > 0, this clock's rate is its parent's rate / @fixed_div
  * @flags: see "struct clk.flags possibilities" above
  * @rate_offset: bitshift for rate selection bitfield (OMAP1 only)
- *
- * XXX @rate_offset should probably be removed and OMAP1
- * clock code converted to use clksel.
- *
- * XXX @usecount is poorly named.  It should be "enable_count" or
- * something similar.  "users" in the description refers to kernel
- * code (core code or drivers) that have called clk_enable() and not
- * yet called clk_disable(); the usecount of parent clocks is also
- * incremented by the clock code when clk_enable() is called on child
- * clocks and decremented by the clock code when clk_disable() is
- * called on child clocks.
- *
- * XXX @usecount, @children, @sibling should be marked for
- * internal use only.
- *
- * @children and @sibling are used to optimize parent-to-child clock
- * tree traversals.  (child-to-parent traversals use @parent.)
- *
- * XXX The notion of the clock's current rate probably needs to be
- * separated from the clock's target rate.
  */
-struct clk {
-       struct list_head        node;
+struct omap1_clk {
+       struct clk_hw           hw;
        const struct clkops     *ops;
-       const char              *name;
-       struct clk              *parent;
-       struct list_head        children;
-       struct list_head        sibling;        /* node for children */
        unsigned long           rate;
        void __iomem            *enable_reg;
-       unsigned long           (*recalc)(struct clk *);
-       int                     (*set_rate)(struct clk *, unsigned long);
-       long                    (*round_rate)(struct clk *, unsigned long);
-       void                    (*init)(struct clk *);
+       unsigned long           (*recalc)(struct omap1_clk *clk, unsigned long rate);
+       int                     (*set_rate)(struct omap1_clk *clk, unsigned long rate,
+                                           unsigned long p_rate);
+       long                    (*round_rate)(struct omap1_clk *clk, unsigned long rate,
+                                             unsigned long *p_rate);
+       int                     (*init)(struct omap1_clk *clk);
        u8                      enable_bit;
-       s8                      usecount;
        u8                      fixed_div;
        u8                      flags;
        u8                      rate_offset;
-#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
-       struct dentry           *dent;  /* For visible tree hierarchy */
-#endif
 };
+#define to_omap1_clk(_hw)      container_of(_hw, struct omap1_clk, hw)
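
to_omap1_clk() is the usual container_of() idiom: each clk_ops callback receives
a struct clk_hw pointer and recovers the enclosing omap1 descriptor from it.  A
standalone userspace illustration of the idiom (the types and the simplified
container_of() here are made up for the demo):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct hw_handle { int dummy; };

    struct wrapper {
            unsigned long rate;
            struct hw_handle hw;
    };

    int main(void)
    {
            struct wrapper w = { .rate = 48000000 };
            struct hw_handle *h = &w.hw;    /* what a callback would receive */

            /* recover the enclosing wrapper from the member pointer */
            printf("rate = %lu\n", container_of(h, struct wrapper, hw)->rate);
            return 0;
    }
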
 
-extern void clk_preinit(struct clk *clk);
-extern int clk_register(struct clk *clk);
-extern void clk_unregister(struct clk *clk);
-extern void propagate_rate(struct clk *clk);
-extern unsigned long followparent_recalc(struct clk *clk);
-unsigned long omap_fixed_divisor_recalc(struct clk *clk);
+void propagate_rate(struct omap1_clk *clk);
+unsigned long followparent_recalc(struct omap1_clk *clk, unsigned long p_rate);
+unsigned long omap_fixed_divisor_recalc(struct omap1_clk *clk, unsigned long p_rate);
 
-extern const struct clkops clkops_null;
-
-extern struct clk dummy_ck;
+extern struct omap1_clk dummy_ck;
 
 int omap1_clk_init(void);
 void omap1_clk_late_init(void);
-extern int omap1_clk_enable(struct clk *clk);
-extern void omap1_clk_disable(struct clk *clk);
-extern long omap1_clk_round_rate(struct clk *clk, unsigned long rate);
-extern int omap1_clk_set_rate(struct clk *clk, unsigned long rate);
-extern unsigned long omap1_ckctl_recalc(struct clk *clk);
-extern int omap1_set_sossi_rate(struct clk *clk, unsigned long rate);
-extern unsigned long omap1_sossi_recalc(struct clk *clk);
-extern unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk);
-extern int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate);
-extern int omap1_set_uart_rate(struct clk *clk, unsigned long rate);
-extern unsigned long omap1_uart_recalc(struct clk *clk);
-extern int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate);
-extern long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate);
-extern void omap1_init_ext_clk(struct clk *clk);
-extern int omap1_select_table_rate(struct clk *clk, unsigned long rate);
-extern long omap1_round_to_table_rate(struct clk *clk, unsigned long rate);
-extern int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate);
-extern long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate);
-
-#ifdef CONFIG_OMAP_RESET_CLOCKS
-extern void omap1_clk_disable_unused(struct clk *clk);
-#else
-#define omap1_clk_disable_unused       NULL
-#endif
+unsigned long omap1_ckctl_recalc(struct omap1_clk *clk, unsigned long p_rate);
+long omap1_round_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate);
+int omap1_set_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate);
+unsigned long omap1_sossi_recalc(struct omap1_clk *clk, unsigned long p_rate);
+unsigned long omap1_ckctl_recalc_dsp_domain(struct omap1_clk *clk, unsigned long p_rate);
+int omap1_clk_set_rate_dsp_domain(struct omap1_clk *clk, unsigned long rate,
+                                 unsigned long p_rate);
+long omap1_round_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate);
+int omap1_set_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate);
+unsigned long omap1_uart_recalc(struct omap1_clk *clk, unsigned long p_rate);
+int omap1_set_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate);
+long omap1_round_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate);
+int omap1_init_ext_clk(struct omap1_clk *clk);
+int omap1_select_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate);
+long omap1_round_to_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate);
+int omap1_clk_set_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate);
+long omap1_clk_round_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate,
+                                   unsigned long *p_rate);
 
 struct uart_clk {
-       struct clk      clk;
-       unsigned long   sysc_addr;
+       struct omap1_clk        clk;
+       unsigned long           sysc_addr;
 };
 
 /* Provide a method for preventing idling some ARM IDLECT clocks */
 struct arm_idlect1_clk {
-       struct clk      clk;
-       unsigned long   no_idle_count;
-       __u8            idlect_shift;
+       struct omap1_clk        clk;
+       unsigned long           no_idle_count;
+       __u8                    idlect_shift;
 };
 
 /* ARM_CKCTL bit shifts */
@@ -224,7 +178,7 @@ struct arm_idlect1_clk {
 #define SOFT_REQ_REG2          0xfffe0880
 
 extern __u32 arm_idlect1_mask;
-extern struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;
+extern struct omap1_clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;
 
 extern const struct clkops clkops_dspck;
 extern const struct clkops clkops_uart_16xx;
@@ -233,4 +187,9 @@ extern const struct clkops clkops_generic;
 /* used for passing SoC type to omap1_{select,round_to}_table_rate() */
 extern u32 cpu_mask;
 
+extern const struct clk_ops omap1_clk_null_ops;
+extern const struct clk_ops omap1_clk_gate_ops;
+extern const struct clk_ops omap1_clk_rate_ops;
+extern const struct clk_ops omap1_clk_full_ops;
+
 #endif
index 165b6a7..96d846c 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
 #include <linux/cpufreq.h>
 #include <linux/delay.h>
 #include <linux/soc/ti/omap1-io.h>
  * Omap1 clocks
  */
 
-static struct clk ck_ref = {
-       .name           = "ck_ref",
-       .ops            = &clkops_null,
+static struct omap1_clk ck_ref = {
+       .hw.init        = CLK_HW_INIT_NO_PARENT("ck_ref", &omap1_clk_rate_ops, 0),
        .rate           = 12000000,
 };
 
-static struct clk ck_dpll1 = {
-       .name           = "ck_dpll1",
-       .ops            = &clkops_null,
-       .parent         = &ck_ref,
+static struct omap1_clk ck_dpll1 = {
+       .hw.init        = CLK_HW_INIT("ck_dpll1", "ck_ref", &omap1_clk_rate_ops,
+                                     /*
+                                      * force recursive refresh of rates of the clock
+                                      * and its children when clk_get_rate() is called
+                                      */
+                                     CLK_GET_RATE_NOCACHE),
 };
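
Because ck_dpll1 carries CLK_GET_RATE_NOCACHE, a plain clk_get_rate() call
refreshes the rates of the clock and its children instead of returning a cached
value, which is what the reworked propagate_rate() in clock.c relies on.  A
hedged consumer-side sketch, assuming a clkdev entry whose con_id matches the
clock name, as propagate_rate() itself does:

    #include <linux/clk.h>
    #include <linux/err.h>

    static unsigned long demo_refresh_dpll1_rate(void)
    {
            struct clk *ck = clk_get_sys(NULL, "ck_dpll1");
            unsigned long rate;

            if (IS_ERR(ck))
                    return 0;

            rate = clk_get_rate(ck);        /* recalculated, not served from cache */
            clk_put(ck);
            return rate;
    }
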
 
 /*
@@ -89,32 +93,28 @@ static struct clk ck_dpll1 = {
  */
 static struct arm_idlect1_clk ck_dpll1out = {
        .clk = {
-               .name           = "ck_dpll1out",
+               .hw.init        = CLK_HW_INIT("ck_dpll1out", "ck_dpll1", &omap1_clk_gate_ops, 0),
                .ops            = &clkops_generic,
-               .parent         = &ck_dpll1,
                .flags          = CLOCK_IDLE_CONTROL | ENABLE_REG_32BIT,
                .enable_reg     = OMAP1_IO_ADDRESS(ARM_IDLECT2),
                .enable_bit     = EN_CKOUT_ARM,
-               .recalc         = &followparent_recalc,
        },
        .idlect_shift   = IDL_CLKOUT_ARM_SHIFT,
 };
 
-static struct clk sossi_ck = {
-       .name           = "ck_sossi",
+static struct omap1_clk sossi_ck = {
+       .hw.init        = CLK_HW_INIT("ck_sossi", "ck_dpll1out", &omap1_clk_full_ops, 0),
        .ops            = &clkops_generic,
-       .parent         = &ck_dpll1out.clk,
        .flags          = CLOCK_NO_IDLE_PARENT | ENABLE_REG_32BIT,
        .enable_reg     = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1),
        .enable_bit     = CONF_MOD_SOSSI_CLK_EN_R,
        .recalc         = &omap1_sossi_recalc,
+       .round_rate     = &omap1_round_sossi_rate,
        .set_rate       = &omap1_set_sossi_rate,
 };
 
-static struct clk arm_ck = {
-       .name           = "arm_ck",
-       .ops            = &clkops_null,
-       .parent         = &ck_dpll1,
+static struct omap1_clk arm_ck = {
+       .hw.init        = CLK_HW_INIT("arm_ck", "ck_dpll1", &omap1_clk_rate_ops, 0),
        .rate_offset    = CKCTL_ARMDIV_OFFSET,
        .recalc         = &omap1_ckctl_recalc,
        .round_rate     = omap1_clk_round_rate_ckctl_arm,
@@ -123,9 +123,9 @@ static struct clk arm_ck = {
 
 static struct arm_idlect1_clk armper_ck = {
        .clk = {
-               .name           = "armper_ck",
+               .hw.init        = CLK_HW_INIT("armper_ck", "ck_dpll1", &omap1_clk_full_ops,
+                                             CLK_IS_CRITICAL),
                .ops            = &clkops_generic,
-               .parent         = &ck_dpll1,
                .flags          = CLOCK_IDLE_CONTROL,
                .enable_reg     = OMAP1_IO_ADDRESS(ARM_IDLECT2),
                .enable_bit     = EN_PERCK,
@@ -141,46 +141,41 @@ static struct arm_idlect1_clk armper_ck = {
  * FIXME: This clock seems to be necessary but no-one has asked for its
  * activation.  [ GPIO code for 1510 ]
  */
-static struct clk arm_gpio_ck = {
-       .name           = "ick",
+static struct omap1_clk arm_gpio_ck = {
+       .hw.init        = CLK_HW_INIT("ick", "ck_dpll1", &omap1_clk_gate_ops, CLK_IS_CRITICAL),
        .ops            = &clkops_generic,
-       .parent         = &ck_dpll1,
        .enable_reg     = OMAP1_IO_ADDRESS(ARM_IDLECT2),
        .enable_bit     = EN_GPIOCK,
-       .recalc         = &followparent_recalc,
 };
 
 static struct arm_idlect1_clk armxor_ck = {
        .clk = {
-               .name           = "armxor_ck",
+               .hw.init        = CLK_HW_INIT("armxor_ck", "ck_ref", &omap1_clk_gate_ops,
+                                             CLK_IS_CRITICAL),
                .ops            = &clkops_generic,
-               .parent         = &ck_ref,
                .flags          = CLOCK_IDLE_CONTROL,
                .enable_reg     = OMAP1_IO_ADDRESS(ARM_IDLECT2),
                .enable_bit     = EN_XORPCK,
-               .recalc         = &followparent_recalc,
        },
        .idlect_shift   = IDLXORP_ARM_SHIFT,
 };
 
 static struct arm_idlect1_clk armtim_ck = {
        .clk = {
-               .name           = "armtim_ck",
+               .hw.init        = CLK_HW_INIT("armtim_ck", "ck_ref", &omap1_clk_gate_ops,
+                                             CLK_IS_CRITICAL),
                .ops            = &clkops_generic,
-               .parent         = &ck_ref,
                .flags          = CLOCK_IDLE_CONTROL,
                .enable_reg     = OMAP1_IO_ADDRESS(ARM_IDLECT2),
                .enable_bit     = EN_TIMCK,
-               .recalc         = &followparent_recalc,
        },
        .idlect_shift   = IDLTIM_ARM_SHIFT,
 };
 
 static struct arm_idlect1_clk armwdt_ck = {
        .clk = {
-               .name           = "armwdt_ck",
+               .hw.init        = CLK_HW_INIT("armwdt_ck", "ck_ref", &omap1_clk_full_ops, 0),
                .ops            = &clkops_generic,
-               .parent         = &ck_ref,
                .flags          = CLOCK_IDLE_CONTROL,
                .enable_reg     = OMAP1_IO_ADDRESS(ARM_IDLECT2),
                .enable_bit     = EN_WDTCK,
@@ -190,11 +185,8 @@ static struct arm_idlect1_clk armwdt_ck = {
        .idlect_shift   = IDLWDT_ARM_SHIFT,
 };
 
-static struct clk arminth_ck16xx = {
-       .name           = "arminth_ck",
-       .ops            = &clkops_null,
-       .parent         = &arm_ck,
-       .recalc         = &followparent_recalc,
+static struct omap1_clk arminth_ck16xx = {
+       .hw.init        = CLK_HW_INIT("arminth_ck", "arm_ck", &omap1_clk_null_ops, 0),
        /* Note: On 16xx the frequency can be divided by 2 by programming
         * ARM_CKCTL:ARM_INTHCK_SEL(14) to 1
         *
@@ -202,10 +194,9 @@ static struct clk arminth_ck16xx = {
         */
 };
 
-static struct clk dsp_ck = {
-       .name           = "dsp_ck",
+static struct omap1_clk dsp_ck = {
+       .hw.init        = CLK_HW_INIT("dsp_ck", "ck_dpll1", &omap1_clk_full_ops, 0),
        .ops            = &clkops_generic,
-       .parent         = &ck_dpll1,
        .enable_reg     = OMAP1_IO_ADDRESS(ARM_CKCTL),
        .enable_bit     = EN_DSPCK,
        .rate_offset    = CKCTL_DSPDIV_OFFSET,
@@ -214,20 +205,17 @@ static struct clk dsp_ck = {
        .set_rate       = omap1_clk_set_rate_ckctl_arm,
 };
 
-static struct clk dspmmu_ck = {
-       .name           = "dspmmu_ck",
-       .ops            = &clkops_null,
-       .parent         = &ck_dpll1,
+static struct omap1_clk dspmmu_ck = {
+       .hw.init        = CLK_HW_INIT("dspmmu_ck", "ck_dpll1", &omap1_clk_rate_ops, 0),
        .rate_offset    = CKCTL_DSPMMUDIV_OFFSET,
        .recalc         = &omap1_ckctl_recalc,
        .round_rate     = omap1_clk_round_rate_ckctl_arm,
        .set_rate       = omap1_clk_set_rate_ckctl_arm,
 };
 
-static struct clk dspper_ck = {
-       .name           = "dspper_ck",
+static struct omap1_clk dspper_ck = {
+       .hw.init        = CLK_HW_INIT("dspper_ck", "ck_dpll1", &omap1_clk_full_ops, 0),
        .ops            = &clkops_dspck,
-       .parent         = &ck_dpll1,
        .enable_reg     = DSP_IDLECT2,
        .enable_bit     = EN_PERCK,
        .rate_offset    = CKCTL_PERDIV_OFFSET,
@@ -236,29 +224,23 @@ static struct clk dspper_ck = {
        .set_rate       = &omap1_clk_set_rate_dsp_domain,
 };
 
-static struct clk dspxor_ck = {
-       .name           = "dspxor_ck",
+static struct omap1_clk dspxor_ck = {
+       .hw.init        = CLK_HW_INIT("dspxor_ck", "ck_ref", &omap1_clk_gate_ops, 0),
        .ops            = &clkops_dspck,
-       .parent         = &ck_ref,
        .enable_reg     = DSP_IDLECT2,
        .enable_bit     = EN_XORPCK,
-       .recalc         = &followparent_recalc,
 };
 
-static struct clk dsptim_ck = {
-       .name           = "dsptim_ck",
+static struct omap1_clk dsptim_ck = {
+       .hw.init        = CLK_HW_INIT("dsptim_ck", "ck_ref", &omap1_clk_gate_ops, 0),
        .ops            = &clkops_dspck,
-       .parent         = &ck_ref,
        .enable_reg     = DSP_IDLECT2,
        .enable_bit     = EN_DSPTIMCK,
-       .recalc         = &followparent_recalc,
 };
 
 static struct arm_idlect1_clk tc_ck = {
        .clk = {
-               .name           = "tc_ck",
-               .ops            = &clkops_null,
-               .parent         = &ck_dpll1,
+               .hw.init        = CLK_HW_INIT("tc_ck", "ck_dpll1", &omap1_clk_rate_ops, 0),
                .flags          = CLOCK_IDLE_CONTROL,
                .rate_offset    = CKCTL_TCDIV_OFFSET,
                .recalc         = &omap1_ckctl_recalc,
@@ -268,116 +250,88 @@ static struct arm_idlect1_clk tc_ck = {
        .idlect_shift   = IDLIF_ARM_SHIFT,
 };
 
-static struct clk arminth_ck1510 = {
-       .name           = "arminth_ck",
-       .ops            = &clkops_null,
-       .parent         = &tc_ck.clk,
-       .recalc         = &followparent_recalc,
+static struct omap1_clk arminth_ck1510 = {
+       .hw.init        = CLK_HW_INIT("arminth_ck", "tc_ck", &omap1_clk_null_ops, 0),
        /* Note: On 1510 the frequency follows TC_CK
         *
         * 16xx version is in MPU clocks.
         */
 };
 
-static struct clk tipb_ck = {
+static struct omap1_clk tipb_ck = {
        /* No-idle controlled by "tc_ck" */
-       .name           = "tipb_ck",
-       .ops            = &clkops_null,
-       .parent         = &tc_ck.clk,
-       .recalc         = &followparent_recalc,
+       .hw.init        = CLK_HW_INIT("tipb_ck", "tc_ck", &omap1_clk_null_ops, 0),
 };
 
-static struct clk l3_ocpi_ck = {
+static struct omap1_clk l3_ocpi_ck = {
        /* No-idle controlled by "tc_ck" */
-       .name           = "l3_ocpi_ck",
+       .hw.init        = CLK_HW_INIT("l3_ocpi_ck", "tc_ck", &omap1_clk_gate_ops, 0),
        .ops            = &clkops_generic,
-       .parent         = &tc_ck.clk,
        .enable_reg     = OMAP1_IO_ADDRESS(ARM_IDLECT3),
        .enable_bit     = EN_OCPI_CK,
-       .recalc         = &followparent_recalc,
 };
 
-static struct clk tc1_ck = {
-       .name           = "tc1_ck",
+static struct omap1_clk tc1_ck = {
+       .hw.init        = CLK_HW_INIT("tc1_ck", "tc_ck", &omap1_clk_gate_ops, 0),
        .ops            = &clkops_generic,
-       .parent         = &tc_ck.clk,
        .enable_reg     = OMAP1_IO_ADDRESS(ARM_IDLECT3),
        .enable_bit     = EN_TC1_CK,
-       .recalc         = &followparent_recalc,
 };
 
 /*
  * FIXME: This clock seems to be necessary but no-one has asked for its
  * activation.  [ pm.c (SRAM), CCP, Camera ]
  */
-static struct clk tc2_ck = {
-       .name           = "tc2_ck",
+
+static struct omap1_clk tc2_ck = {
+       .hw.init        = CLK_HW_INIT("tc2_ck", "tc_ck", &omap1_clk_gate_ops, CLK_IS_CRITICAL),
        .ops            = &clkops_generic,
-       .parent         = &tc_ck.clk,
        .enable_reg     = OMAP1_IO_ADDRESS(ARM_IDLECT3),
        .enable_bit     = EN_TC2_CK,
-       .recalc         = &followparent_recalc,
 };
 
-static struct clk dma_ck = {
+static struct omap1_clk dma_ck = {
        /* No-idle controlled by "tc_ck" */
-       .name           = "dma_ck",
-       .ops            = &clkops_null,
-       .parent         = &tc_ck.clk,
-       .recalc         = &followparent_recalc,
+       .hw.init        = CLK_HW_INIT("dma_ck", "tc_ck", &omap1_clk_null_ops, 0),
 };
 
-static struct clk dma_lcdfree_ck = {
-       .name           = "dma_lcdfree_ck",
-       .ops            = &clkops_null,
-       .parent         = &tc_ck.clk,
-       .recalc         = &followparent_recalc,
+static struct omap1_clk dma_lcdfree_ck = {
+       .hw.init        = CLK_HW_INIT("dma_lcdfree_ck", "tc_ck", &omap1_clk_null_ops, 0),
 };
 
 static struct arm_idlect1_clk api_ck = {
        .clk = {
-               .name           = "api_ck",
+               .hw.init        = CLK_HW_INIT("api_ck", "tc_ck", &omap1_clk_gate_ops, 0),
                .ops            = &clkops_generic,
-               .parent         = &tc_ck.clk,
                .flags          = CLOCK_IDLE_CONTROL,
                .enable_reg     = OMAP1_IO_ADDRESS(ARM_IDLECT2),
                .enable_bit     = EN_APICK,
-               .recalc         = &followparent_recalc,
        },
        .idlect_shift   = IDLAPI_ARM_SHIFT,
 };
 
 static struct arm_idlect1_clk lb_ck = {
        .clk = {
-               .name           = "lb_ck",
+               .hw.init        = CLK_HW_INIT("lb_ck", "tc_ck", &omap1_clk_gate_ops, 0),
                .ops            = &clkops_generic,
-               .parent         = &tc_ck.clk,
                .flags          = CLOCK_IDLE_CONTROL,
                .enable_reg     = OMAP1_IO_ADDRESS(ARM_IDLECT2),
                .enable_bit     = EN_LBCK,
-               .recalc         = &followparent_recalc,
        },
        .idlect_shift   = IDLLB_ARM_SHIFT,
 };
 
-static struct clk rhea1_ck = {
-       .name           = "rhea1_ck",
-       .ops            = &clkops_null,
-       .parent         = &tc_ck.clk,
-       .recalc         = &followparent_recalc,
+static struct omap1_clk rhea1_ck = {
+       .hw.init        = CLK_HW_INIT("rhea1_ck", "tc_ck", &omap1_clk_null_ops, 0),
 };
 
-static struct clk rhea2_ck = {
-       .name           = "rhea2_ck",
-       .ops            = &clkops_null,
-       .parent         = &tc_ck.clk,
-       .recalc         = &followparent_recalc,
+static struct omap1_clk rhea2_ck = {
+       .hw.init        = CLK_HW_INIT("rhea2_ck", "tc_ck", &omap1_clk_null_ops, 0),
 };
 
-static struct clk lcd_ck_16xx = {
-       .name           = "lcd_ck",
+static struct omap1_clk lcd_ck_16xx = {
+       .hw.init        = CLK_HW_INIT("lcd_ck", "ck_dpll1", &omap1_clk_full_ops, 0),
        .ops            = &clkops_generic,
-       .parent         = &ck_dpll1,
        .enable_reg     = OMAP1_IO_ADDRESS(ARM_IDLECT2),
        .enable_bit     = EN_LCDCK,
        .rate_offset    = CKCTL_LCDDIV_OFFSET,
@@ -388,9 +342,8 @@ static struct clk lcd_ck_16xx = {
 
 static struct arm_idlect1_clk lcd_ck_1510 = {
        .clk = {
-               .name           = "lcd_ck",
+               .hw.init        = CLK_HW_INIT("lcd_ck", "ck_dpll1", &omap1_clk_full_ops, 0),
                .ops            = &clkops_generic,
-               .parent         = &ck_dpll1,
                .flags          = CLOCK_IDLE_CONTROL,
                .enable_reg     = OMAP1_IO_ADDRESS(ARM_IDLECT2),
                .enable_bit     = EN_LCDCK,
@@ -402,37 +355,35 @@ static struct arm_idlect1_clk lcd_ck_1510 = {
        .idlect_shift   = OMAP1510_IDLLCD_ARM_SHIFT,
 };
 
+
 /*
  * XXX The enable_bit here is misused - it simply switches between 12MHz
- * and 48MHz.  Reimplement with clksel.
+ * and 48MHz.  Reimplement with clk_mux.
  *
  * XXX does this need SYSC register handling?
  */
-static struct clk uart1_1510 = {
-       .name           = "uart1_ck",
-       .ops            = &clkops_null,
+static struct omap1_clk uart1_1510 = {
        /* Direct from ULPD, no real parent */
-       .parent         = &armper_ck.clk,
-       .rate           = 12000000,
+       .hw.init        = CLK_HW_INIT("uart1_ck", "armper_ck", &omap1_clk_full_ops, 0),
        .flags          = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
        .enable_reg     = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
        .enable_bit     = CONF_MOD_UART1_CLK_MODE_R,
+       .round_rate     = &omap1_round_uart_rate,
        .set_rate       = &omap1_set_uart_rate,
        .recalc         = &omap1_uart_recalc,
 };
 
 /*
  * XXX The enable_bit here is misused - it simply switches between 12MHz
- * and 48MHz.  Reimplement with clksel.
+ * and 48MHz.  Reimplement with clk_mux.
  *
  * XXX SYSC register handling does not belong in the clock framework
  */
 static struct uart_clk uart1_16xx = {
        .clk    = {
-               .name           = "uart1_ck",
                .ops            = &clkops_uart_16xx,
                /* Direct from ULPD, no real parent */
-               .parent         = &armper_ck.clk,
+               .hw.init        = CLK_HW_INIT("uart1_ck", "armper_ck", &omap1_clk_full_ops, 0),
                .rate           = 48000000,
                .flags          = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
                .enable_reg     = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
@@ -443,54 +394,49 @@ static struct uart_clk uart1_16xx = {
 
 /*
  * XXX The enable_bit here is misused - it simply switches between 12MHz
- * and 48MHz.  Reimplement with clksel.
+ * and 48MHz.  Reimplement with clk_mux.
  *
  * XXX does this need SYSC register handling?
  */
-static struct clk uart2_ck = {
-       .name           = "uart2_ck",
-       .ops            = &clkops_null,
+static struct omap1_clk uart2_ck = {
        /* Direct from ULPD, no real parent */
-       .parent         = &armper_ck.clk,
-       .rate           = 12000000,
+       .hw.init        = CLK_HW_INIT("uart2_ck", "armper_ck", &omap1_clk_full_ops, 0),
        .flags          = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
        .enable_reg     = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
        .enable_bit     = CONF_MOD_UART2_CLK_MODE_R,
+       .round_rate     = &omap1_round_uart_rate,
        .set_rate       = &omap1_set_uart_rate,
        .recalc         = &omap1_uart_recalc,
 };
 
 /*
  * XXX The enable_bit here is misused - it simply switches between 12MHz
- * and 48MHz.  Reimplement with clksel.
+ * and 48MHz.  Reimplement with clk_mux.
  *
  * XXX does this need SYSC register handling?
  */
-static struct clk uart3_1510 = {
-       .name           = "uart3_ck",
-       .ops            = &clkops_null,
+static struct omap1_clk uart3_1510 = {
        /* Direct from ULPD, no real parent */
-       .parent         = &armper_ck.clk,
-       .rate           = 12000000,
+       .hw.init        = CLK_HW_INIT("uart3_ck", "armper_ck", &omap1_clk_full_ops, 0),
        .flags          = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
        .enable_reg     = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
        .enable_bit     = CONF_MOD_UART3_CLK_MODE_R,
+       .round_rate     = &omap1_round_uart_rate,
        .set_rate       = &omap1_set_uart_rate,
        .recalc         = &omap1_uart_recalc,
 };
 
 /*
  * XXX The enable_bit here is misused - it simply switches between 12MHz
- * and 48MHz.  Reimplement with clksel.
+ * and 48MHz.  Reimplement with clk_mux.
  *
  * XXX SYSC register handling does not belong in the clock framework
  */
 static struct uart_clk uart3_16xx = {
        .clk    = {
-               .name           = "uart3_ck",
                .ops            = &clkops_uart_16xx,
                /* Direct from ULPD, no real parent */
-               .parent         = &armper_ck.clk,
+               .hw.init        = CLK_HW_INIT("uart3_ck", "armper_ck", &omap1_clk_full_ops, 0),
                .rate           = 48000000,
                .flags          = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
                .enable_reg     = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
@@ -499,30 +445,30 @@ static struct uart_clk uart3_16xx = {
        .sysc_addr      = 0xfffb9854,
 };
 
-static struct clk usb_clko = { /* 6 MHz output on W4_USB_CLKO */
-       .name           = "usb_clko",
+static struct omap1_clk usb_clko = {   /* 6 MHz output on W4_USB_CLKO */
        .ops            = &clkops_generic,
        /* Direct from ULPD, no parent */
+       .hw.init        = CLK_HW_INIT_NO_PARENT("usb_clko", &omap1_clk_full_ops, 0),
        .rate           = 6000000,
        .flags          = ENABLE_REG_32BIT,
        .enable_reg     = OMAP1_IO_ADDRESS(ULPD_CLOCK_CTRL),
        .enable_bit     = USB_MCLK_EN_BIT,
 };
 
-static struct clk usb_hhc_ck1510 = {
-       .name           = "usb_hhc_ck",
+static struct omap1_clk usb_hhc_ck1510 = {
        .ops            = &clkops_generic,
        /* Direct from ULPD, no parent */
+       .hw.init        = CLK_HW_INIT_NO_PARENT("usb_hhc_ck", &omap1_clk_full_ops, 0),
        .rate           = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */
        .flags          = ENABLE_REG_32BIT,
        .enable_reg     = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
        .enable_bit     = USB_HOST_HHC_UHOST_EN,
 };
 
-static struct clk usb_hhc_ck16xx = {
-       .name           = "usb_hhc_ck",
+static struct omap1_clk usb_hhc_ck16xx = {
        .ops            = &clkops_generic,
        /* Direct from ULPD, no parent */
+       .hw.init        = CLK_HW_INIT_NO_PARENT("usb_hhc_ck", &omap1_clk_full_ops, 0),
        .rate           = 48000000,
        /* OTG_SYSCON_2.OTG_PADEN == 0 (not 1510-compatible) */
        .flags          = ENABLE_REG_32BIT,
@@ -530,46 +476,46 @@ static struct clk usb_hhc_ck16xx = {
        .enable_bit     = OTG_SYSCON_2_UHOST_EN_SHIFT
 };
 
-static struct clk usb_dc_ck = {
-       .name           = "usb_dc_ck",
+static struct omap1_clk usb_dc_ck = {
        .ops            = &clkops_generic,
        /* Direct from ULPD, no parent */
+       .hw.init        = CLK_HW_INIT_NO_PARENT("usb_dc_ck", &omap1_clk_full_ops, 0),
        .rate           = 48000000,
        .enable_reg     = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
        .enable_bit     = SOFT_USB_OTG_DPLL_REQ_SHIFT,
 };
 
-static struct clk uart1_7xx = {
-       .name           = "uart1_ck",
+static struct omap1_clk uart1_7xx = {
        .ops            = &clkops_generic,
        /* Direct from ULPD, no parent */
+       .hw.init        = CLK_HW_INIT_NO_PARENT("uart1_ck", &omap1_clk_full_ops, 0),
        .rate           = 12000000,
        .enable_reg     = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
        .enable_bit     = 9,
 };
 
-static struct clk uart2_7xx = {
-       .name           = "uart2_ck",
+static struct omap1_clk uart2_7xx = {
        .ops            = &clkops_generic,
        /* Direct from ULPD, no parent */
+       .hw.init        = CLK_HW_INIT_NO_PARENT("uart2_ck", &omap1_clk_full_ops, 0),
        .rate           = 12000000,
        .enable_reg     = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
        .enable_bit     = 11,
 };
 
-static struct clk mclk_1510 = {
-       .name           = "mclk",
+static struct omap1_clk mclk_1510 = {
        .ops            = &clkops_generic,
        /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+       .hw.init        = CLK_HW_INIT_NO_PARENT("mclk", &omap1_clk_full_ops, 0),
        .rate           = 12000000,
        .enable_reg     = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
        .enable_bit     = SOFT_COM_MCKO_REQ_SHIFT,
 };
 
-static struct clk mclk_16xx = {
-       .name           = "mclk",
+static struct omap1_clk mclk_16xx = {
        .ops            = &clkops_generic,
        /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+       .hw.init        = CLK_HW_INIT_NO_PARENT("mclk", &omap1_clk_full_ops, 0),
        .enable_reg     = OMAP1_IO_ADDRESS(COM_CLK_DIV_CTRL_SEL),
        .enable_bit     = COM_ULPD_PLL_CLK_REQ,
        .set_rate       = &omap1_set_ext_clk_rate,
@@ -577,17 +523,16 @@ static struct clk mclk_16xx = {
        .init           = &omap1_init_ext_clk,
 };
 
-static struct clk bclk_1510 = {
-       .name           = "bclk",
-       .ops            = &clkops_generic,
+static struct omap1_clk bclk_1510 = {
        /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+       .hw.init        = CLK_HW_INIT_NO_PARENT("bclk", &omap1_clk_rate_ops, 0),
        .rate           = 12000000,
 };
 
-static struct clk bclk_16xx = {
-       .name           = "bclk",
+static struct omap1_clk bclk_16xx = {
        .ops            = &clkops_generic,
        /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+       .hw.init        = CLK_HW_INIT_NO_PARENT("bclk", &omap1_clk_full_ops, 0),
        .enable_reg     = OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL),
        .enable_bit     = SWD_ULPD_PLL_CLK_REQ,
        .set_rate       = &omap1_set_ext_clk_rate,
@@ -595,11 +540,10 @@ static struct clk bclk_16xx = {
        .init           = &omap1_init_ext_clk,
 };
 
-static struct clk mmc1_ck = {
-       .name           = "mmc1_ck",
+static struct omap1_clk mmc1_ck = {
        .ops            = &clkops_generic,
        /* Functional clock is direct from ULPD, interface clock is ARMPER */
-       .parent         = &armper_ck.clk,
+       .hw.init        = CLK_HW_INIT("mmc1_ck", "armper_ck", &omap1_clk_full_ops, 0),
        .rate           = 48000000,
        .flags          = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
        .enable_reg     = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
@@ -610,32 +554,29 @@ static struct clk mmc1_ck = {
  * XXX MOD_CONF_CTRL_0 bit 20 is defined in the 1510 TRM as
  * CONF_MOD_MCBSP3_AUXON ??
  */
-static struct clk mmc2_ck = {
-       .name           = "mmc2_ck",
+static struct omap1_clk mmc2_ck = {
        .ops            = &clkops_generic,
        /* Functional clock is direct from ULPD, interface clock is ARMPER */
-       .parent         = &armper_ck.clk,
+       .hw.init        = CLK_HW_INIT("mmc2_ck", "armper_ck", &omap1_clk_full_ops, 0),
        .rate           = 48000000,
        .flags          = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
        .enable_reg     = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
        .enable_bit     = 20,
 };
 
-static struct clk mmc3_ck = {
-       .name           = "mmc3_ck",
+static struct omap1_clk mmc3_ck = {
        .ops            = &clkops_generic,
        /* Functional clock is direct from ULPD, interface clock is ARMPER */
-       .parent         = &armper_ck.clk,
+       .hw.init        = CLK_HW_INIT("mmc3_ck", "armper_ck", &omap1_clk_full_ops, 0),
        .rate           = 48000000,
        .flags          = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
        .enable_reg     = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
        .enable_bit     = SOFT_MMC_DPLL_REQ_SHIFT,
 };
 
-static struct clk virtual_ck_mpu = {
-       .name           = "mpu",
-       .ops            = &clkops_null,
-       .parent         = &arm_ck, /* Is smarter alias for */
+static struct omap1_clk virtual_ck_mpu = {
+       /* Is smarter alias for arm_ck */
+       .hw.init        = CLK_HW_INIT("mpu", "arm_ck", &omap1_clk_rate_ops, 0),
        .recalc         = &followparent_recalc,
        .set_rate       = &omap1_select_table_rate,
        .round_rate     = &omap1_round_to_table_rate,
@@ -643,20 +584,14 @@ static struct clk virtual_ck_mpu = {
 
 /* virtual functional clock domain for I2C. Just for making sure that ARMXOR_CK
 remains active during MPU idle whenever this is enabled */
-static struct clk i2c_fck = {
-       .name           = "i2c_fck",
-       .ops            = &clkops_null,
+static struct omap1_clk i2c_fck = {
+       .hw.init        = CLK_HW_INIT("i2c_fck", "armxor_ck", &omap1_clk_gate_ops, 0),
        .flags          = CLOCK_NO_IDLE_PARENT,
-       .parent         = &armxor_ck.clk,
-       .recalc         = &followparent_recalc,
 };
 
-static struct clk i2c_ick = {
-       .name           = "i2c_ick",
-       .ops            = &clkops_null,
+static struct omap1_clk i2c_ick = {
+       .hw.init        = CLK_HW_INIT("i2c_ick", "armper_ck", &omap1_clk_gate_ops, 0),
        .flags          = CLOCK_NO_IDLE_PARENT,
-       .parent         = &armper_ck.clk,
-       .recalc         = &followparent_recalc,
 };
 
 /*
@@ -665,81 +600,81 @@ static struct clk i2c_ick = {
 
 static struct omap_clk omap_clks[] = {
        /* non-ULPD clocks */
-       CLK(NULL,       "ck_ref",       &ck_ref,        CK_16XX | CK_1510 | CK_310 | CK_7XX),
-       CLK(NULL,       "ck_dpll1",     &ck_dpll1,      CK_16XX | CK_1510 | CK_310 | CK_7XX),
+       CLK(NULL,       "ck_ref",       &ck_ref.hw,     CK_16XX | CK_1510 | CK_310 | CK_7XX),
+       CLK(NULL,       "ck_dpll1",     &ck_dpll1.hw,   CK_16XX | CK_1510 | CK_310 | CK_7XX),
        /* CK_GEN1 clocks */
-       CLK(NULL,       "ck_dpll1out",  &ck_dpll1out.clk, CK_16XX),
-       CLK(NULL,       "ck_sossi",     &sossi_ck,      CK_16XX),
-       CLK(NULL,       "arm_ck",       &arm_ck,        CK_16XX | CK_1510 | CK_310),
-       CLK(NULL,       "armper_ck",    &armper_ck.clk, CK_16XX | CK_1510 | CK_310),
-       CLK("omap_gpio.0", "ick",       &arm_gpio_ck,   CK_1510 | CK_310),
-       CLK(NULL,       "armxor_ck",    &armxor_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
-       CLK(NULL,       "armtim_ck",    &armtim_ck.clk, CK_16XX | CK_1510 | CK_310),
-       CLK("omap_wdt", "fck",          &armwdt_ck.clk, CK_16XX | CK_1510 | CK_310),
-       CLK("omap_wdt", "ick",          &armper_ck.clk, CK_16XX),
-       CLK("omap_wdt", "ick",          &dummy_ck,      CK_1510 | CK_310),
-       CLK(NULL,       "arminth_ck",   &arminth_ck1510, CK_1510 | CK_310),
-       CLK(NULL,       "arminth_ck",   &arminth_ck16xx, CK_16XX),
+       CLK(NULL,       "ck_dpll1out",  &ck_dpll1out.clk.hw, CK_16XX),
+       CLK(NULL,       "ck_sossi",     &sossi_ck.hw,   CK_16XX),
+       CLK(NULL,       "arm_ck",       &arm_ck.hw,     CK_16XX | CK_1510 | CK_310),
+       CLK(NULL,       "armper_ck",    &armper_ck.clk.hw, CK_16XX | CK_1510 | CK_310),
+       CLK("omap_gpio.0", "ick",       &arm_gpio_ck.hw, CK_1510 | CK_310),
+       CLK(NULL,       "armxor_ck",    &armxor_ck.clk.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
+       CLK(NULL,       "armtim_ck",    &armtim_ck.clk.hw, CK_16XX | CK_1510 | CK_310),
+       CLK("omap_wdt", "fck",          &armwdt_ck.clk.hw, CK_16XX | CK_1510 | CK_310),
+       CLK("omap_wdt", "ick",          &armper_ck.clk.hw, CK_16XX),
+       CLK("omap_wdt", "ick",          &dummy_ck.hw,   CK_1510 | CK_310),
+       CLK(NULL,       "arminth_ck",   &arminth_ck1510.hw, CK_1510 | CK_310),
+       CLK(NULL,       "arminth_ck",   &arminth_ck16xx.hw, CK_16XX),
        /* CK_GEN2 clocks */
-       CLK(NULL,       "dsp_ck",       &dsp_ck,        CK_16XX | CK_1510 | CK_310),
-       CLK(NULL,       "dspmmu_ck",    &dspmmu_ck,     CK_16XX | CK_1510 | CK_310),
-       CLK(NULL,       "dspper_ck",    &dspper_ck,     CK_16XX | CK_1510 | CK_310),
-       CLK(NULL,       "dspxor_ck",    &dspxor_ck,     CK_16XX | CK_1510 | CK_310),
-       CLK(NULL,       "dsptim_ck",    &dsptim_ck,     CK_16XX | CK_1510 | CK_310),
+       CLK(NULL,       "dsp_ck",       &dsp_ck.hw,     CK_16XX | CK_1510 | CK_310),
+       CLK(NULL,       "dspmmu_ck",    &dspmmu_ck.hw,  CK_16XX | CK_1510 | CK_310),
+       CLK(NULL,       "dspper_ck",    &dspper_ck.hw,  CK_16XX | CK_1510 | CK_310),
+       CLK(NULL,       "dspxor_ck",    &dspxor_ck.hw,  CK_16XX | CK_1510 | CK_310),
+       CLK(NULL,       "dsptim_ck",    &dsptim_ck.hw,  CK_16XX | CK_1510 | CK_310),
        /* CK_GEN3 clocks */
-       CLK(NULL,       "tc_ck",        &tc_ck.clk,     CK_16XX | CK_1510 | CK_310 | CK_7XX),
-       CLK(NULL,       "tipb_ck",      &tipb_ck,       CK_1510 | CK_310),
-       CLK(NULL,       "l3_ocpi_ck",   &l3_ocpi_ck,    CK_16XX | CK_7XX),
-       CLK(NULL,       "tc1_ck",       &tc1_ck,        CK_16XX),
-       CLK(NULL,       "tc2_ck",       &tc2_ck,        CK_16XX),
-       CLK(NULL,       "dma_ck",       &dma_ck,        CK_16XX | CK_1510 | CK_310),
-       CLK(NULL,       "dma_lcdfree_ck", &dma_lcdfree_ck, CK_16XX),
-       CLK(NULL,       "api_ck",       &api_ck.clk,    CK_16XX | CK_1510 | CK_310 | CK_7XX),
-       CLK(NULL,       "lb_ck",        &lb_ck.clk,     CK_1510 | CK_310),
-       CLK(NULL,       "rhea1_ck",     &rhea1_ck,      CK_16XX),
-       CLK(NULL,       "rhea2_ck",     &rhea2_ck,      CK_16XX),
-       CLK(NULL,       "lcd_ck",       &lcd_ck_16xx,   CK_16XX | CK_7XX),
-       CLK(NULL,       "lcd_ck",       &lcd_ck_1510.clk, CK_1510 | CK_310),
+       CLK(NULL,       "tc_ck",        &tc_ck.clk.hw,  CK_16XX | CK_1510 | CK_310 | CK_7XX),
+       CLK(NULL,       "tipb_ck",      &tipb_ck.hw,    CK_1510 | CK_310),
+       CLK(NULL,       "l3_ocpi_ck",   &l3_ocpi_ck.hw, CK_16XX | CK_7XX),
+       CLK(NULL,       "tc1_ck",       &tc1_ck.hw,     CK_16XX),
+       CLK(NULL,       "tc2_ck",       &tc2_ck.hw,     CK_16XX),
+       CLK(NULL,       "dma_ck",       &dma_ck.hw,     CK_16XX | CK_1510 | CK_310),
+       CLK(NULL,       "dma_lcdfree_ck", &dma_lcdfree_ck.hw, CK_16XX),
+       CLK(NULL,       "api_ck",       &api_ck.clk.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
+       CLK(NULL,       "lb_ck",        &lb_ck.clk.hw,  CK_1510 | CK_310),
+       CLK(NULL,       "rhea1_ck",     &rhea1_ck.hw,   CK_16XX),
+       CLK(NULL,       "rhea2_ck",     &rhea2_ck.hw,   CK_16XX),
+       CLK(NULL,       "lcd_ck",       &lcd_ck_16xx.hw, CK_16XX | CK_7XX),
+       CLK(NULL,       "lcd_ck",       &lcd_ck_1510.clk.hw, CK_1510 | CK_310),
        /* ULPD clocks */
-       CLK(NULL,       "uart1_ck",     &uart1_1510,    CK_1510 | CK_310),
-       CLK(NULL,       "uart1_ck",     &uart1_16xx.clk, CK_16XX),
-       CLK(NULL,       "uart1_ck",     &uart1_7xx,     CK_7XX),
-       CLK(NULL,       "uart2_ck",     &uart2_ck,      CK_16XX | CK_1510 | CK_310),
-       CLK(NULL,       "uart2_ck",     &uart2_7xx,     CK_7XX),
-       CLK(NULL,       "uart3_ck",     &uart3_1510,    CK_1510 | CK_310),
-       CLK(NULL,       "uart3_ck",     &uart3_16xx.clk, CK_16XX),
-       CLK(NULL,       "usb_clko",     &usb_clko,      CK_16XX | CK_1510 | CK_310),
-       CLK(NULL,       "usb_hhc_ck",   &usb_hhc_ck1510, CK_1510 | CK_310),
-       CLK(NULL,       "usb_hhc_ck",   &usb_hhc_ck16xx, CK_16XX),
-       CLK(NULL,       "usb_dc_ck",    &usb_dc_ck,     CK_16XX | CK_7XX),
-       CLK(NULL,       "mclk",         &mclk_1510,     CK_1510 | CK_310),
-       CLK(NULL,       "mclk",         &mclk_16xx,     CK_16XX),
-       CLK(NULL,       "bclk",         &bclk_1510,     CK_1510 | CK_310),
-       CLK(NULL,       "bclk",         &bclk_16xx,     CK_16XX),
-       CLK("mmci-omap.0", "fck",       &mmc1_ck,       CK_16XX | CK_1510 | CK_310),
-       CLK("mmci-omap.0", "fck",       &mmc3_ck,       CK_7XX),
-       CLK("mmci-omap.0", "ick",       &armper_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
-       CLK("mmci-omap.1", "fck",       &mmc2_ck,       CK_16XX),
-       CLK("mmci-omap.1", "ick",       &armper_ck.clk, CK_16XX),
+       CLK(NULL,       "uart1_ck",     &uart1_1510.hw, CK_1510 | CK_310),
+       CLK(NULL,       "uart1_ck",     &uart1_16xx.clk.hw, CK_16XX),
+       CLK(NULL,       "uart1_ck",     &uart1_7xx.hw,  CK_7XX),
+       CLK(NULL,       "uart2_ck",     &uart2_ck.hw,   CK_16XX | CK_1510 | CK_310),
+       CLK(NULL,       "uart2_ck",     &uart2_7xx.hw,  CK_7XX),
+       CLK(NULL,       "uart3_ck",     &uart3_1510.hw, CK_1510 | CK_310),
+       CLK(NULL,       "uart3_ck",     &uart3_16xx.clk.hw, CK_16XX),
+       CLK(NULL,       "usb_clko",     &usb_clko.hw,   CK_16XX | CK_1510 | CK_310),
+       CLK(NULL,       "usb_hhc_ck",   &usb_hhc_ck1510.hw, CK_1510 | CK_310),
+       CLK(NULL,       "usb_hhc_ck",   &usb_hhc_ck16xx.hw, CK_16XX),
+       CLK(NULL,       "usb_dc_ck",    &usb_dc_ck.hw,  CK_16XX | CK_7XX),
+       CLK(NULL,       "mclk",         &mclk_1510.hw,  CK_1510 | CK_310),
+       CLK(NULL,       "mclk",         &mclk_16xx.hw,  CK_16XX),
+       CLK(NULL,       "bclk",         &bclk_1510.hw,  CK_1510 | CK_310),
+       CLK(NULL,       "bclk",         &bclk_16xx.hw,  CK_16XX),
+       CLK("mmci-omap.0", "fck",       &mmc1_ck.hw,    CK_16XX | CK_1510 | CK_310),
+       CLK("mmci-omap.0", "fck",       &mmc3_ck.hw,    CK_7XX),
+       CLK("mmci-omap.0", "ick",       &armper_ck.clk.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
+       CLK("mmci-omap.1", "fck",       &mmc2_ck.hw,    CK_16XX),
+       CLK("mmci-omap.1", "ick",       &armper_ck.clk.hw, CK_16XX),
        /* Virtual clocks */
-       CLK(NULL,       "mpu",          &virtual_ck_mpu, CK_16XX | CK_1510 | CK_310),
-       CLK("omap_i2c.1", "fck",        &i2c_fck,       CK_16XX | CK_1510 | CK_310 | CK_7XX),
-       CLK("omap_i2c.1", "ick",        &i2c_ick,       CK_16XX),
-       CLK("omap_i2c.1", "ick",        &dummy_ck,      CK_1510 | CK_310 | CK_7XX),
-       CLK("omap1_spi100k.1", "fck",   &dummy_ck,      CK_7XX),
-       CLK("omap1_spi100k.1", "ick",   &dummy_ck,      CK_7XX),
-       CLK("omap1_spi100k.2", "fck",   &dummy_ck,      CK_7XX),
-       CLK("omap1_spi100k.2", "ick",   &dummy_ck,      CK_7XX),
-       CLK("omap_uwire", "fck",        &armxor_ck.clk, CK_16XX | CK_1510 | CK_310),
-       CLK("omap-mcbsp.1", "ick",      &dspper_ck,     CK_16XX),
-       CLK("omap-mcbsp.1", "ick",      &dummy_ck,      CK_1510 | CK_310),
-       CLK("omap-mcbsp.2", "ick",      &armper_ck.clk, CK_16XX),
-       CLK("omap-mcbsp.2", "ick",      &dummy_ck,      CK_1510 | CK_310),
-       CLK("omap-mcbsp.3", "ick",      &dspper_ck,     CK_16XX),
-       CLK("omap-mcbsp.3", "ick",      &dummy_ck,      CK_1510 | CK_310),
-       CLK("omap-mcbsp.1", "fck",      &dspxor_ck,     CK_16XX | CK_1510 | CK_310),
-       CLK("omap-mcbsp.2", "fck",      &armper_ck.clk, CK_16XX | CK_1510 | CK_310),
-       CLK("omap-mcbsp.3", "fck",      &dspxor_ck,     CK_16XX | CK_1510 | CK_310),
+       CLK(NULL,       "mpu",          &virtual_ck_mpu.hw, CK_16XX | CK_1510 | CK_310),
+       CLK("omap_i2c.1", "fck",        &i2c_fck.hw,    CK_16XX | CK_1510 | CK_310 | CK_7XX),
+       CLK("omap_i2c.1", "ick",        &i2c_ick.hw,    CK_16XX),
+       CLK("omap_i2c.1", "ick",        &dummy_ck.hw,   CK_1510 | CK_310 | CK_7XX),
+       CLK("omap1_spi100k.1", "fck",   &dummy_ck.hw,   CK_7XX),
+       CLK("omap1_spi100k.1", "ick",   &dummy_ck.hw,   CK_7XX),
+       CLK("omap1_spi100k.2", "fck",   &dummy_ck.hw,   CK_7XX),
+       CLK("omap1_spi100k.2", "ick",   &dummy_ck.hw,   CK_7XX),
+       CLK("omap_uwire", "fck",        &armxor_ck.clk.hw, CK_16XX | CK_1510 | CK_310),
+       CLK("omap-mcbsp.1", "ick",      &dspper_ck.hw,  CK_16XX),
+       CLK("omap-mcbsp.1", "ick",      &dummy_ck.hw,   CK_1510 | CK_310),
+       CLK("omap-mcbsp.2", "ick",      &armper_ck.clk.hw, CK_16XX),
+       CLK("omap-mcbsp.2", "ick",      &dummy_ck.hw,   CK_1510 | CK_310),
+       CLK("omap-mcbsp.3", "ick",      &dspper_ck.hw,  CK_16XX),
+       CLK("omap-mcbsp.3", "ick",      &dummy_ck.hw,   CK_1510 | CK_310),
+       CLK("omap-mcbsp.1", "fck",      &dspxor_ck.hw,  CK_16XX | CK_1510 | CK_310),
+       CLK("omap-mcbsp.2", "fck",      &armper_ck.clk.hw, CK_16XX | CK_1510 | CK_310),
+       CLK("omap-mcbsp.3", "fck",      &dspxor_ck.hw,  CK_16XX | CK_1510 | CK_310),
 };
 
 /*
@@ -778,9 +713,6 @@ int __init omap1_clk_init(void)
        /* By default all idlect1 clocks are allowed to idle */
        arm_idlect1_mask = ~0;
 
-       for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++)
-               clk_preinit(c->lk.clk);
-
        cpu_mask = 0;
        if (cpu_is_omap1710())
                cpu_mask |= CK_1710;
@@ -793,16 +725,10 @@ int __init omap1_clk_init(void)
        if (cpu_is_omap310())
                cpu_mask |= CK_310;
 
-       for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++)
-               if (c->cpu & cpu_mask) {
-                       clkdev_add(&c->lk);
-                       clk_register(c->lk.clk);
-               }
-
        /* Pointers to these clocks are needed by code in clock.c */
-       api_ck_p = clk_get(NULL, "api_ck");
-       ck_dpll1_p = clk_get(NULL, "ck_dpll1");
-       ck_ref_p = clk_get(NULL, "ck_ref");
+       api_ck_p = &api_ck.clk;
+       ck_dpll1_p = &ck_dpll1;
+       ck_ref_p = &ck_ref;
 
        if (cpu_is_omap7xx())
                ck_ref.rate = 13000000;
@@ -844,10 +770,7 @@ int __init omap1_clk_init(void)
                        }
                }
        }
-       propagate_rate(&ck_dpll1);
-       /* Cache rates for clocks connected to ck_ref (not dpll1) */
-       propagate_rate(&ck_ref);
-       omap1_show_rates();
+
        if (machine_is_omap_perseus2() || machine_is_omap_fsample()) {
                /* Select slicer output as OMAP input clock */
                omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1,
@@ -879,16 +802,28 @@ int __init omap1_clk_init(void)
         */
        omap_writew(0x0000, ARM_IDLECT2);       /* Turn LCD clock off also */
 
-       /*
-        * Only enable those clocks we will need, let the drivers
-        * enable other clocks as necessary
-        */
-       clk_enable(&armper_ck.clk);
-       clk_enable(&armxor_ck.clk);
-       clk_enable(&armtim_ck.clk); /* This should be done by timer code */
+       for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++) {
+               if (!(c->cpu & cpu_mask))
+                       continue;
+
+               if (c->lk.clk_hw->init) { /* NULL if provider already registered */
+                       const struct clk_init_data *init = c->lk.clk_hw->init;
+                       const char *name = c->lk.clk_hw->init->name;
+                       int err;
+
+                       err = clk_hw_register(NULL, c->lk.clk_hw);
+                       if (err < 0) {
+                               pr_err("failed to register clock \"%s\"! (%d)\n", name, err);
+                               /* may be tried again, restore init data */
+                               c->lk.clk_hw->init = init;
+                               continue;
+                       }
+               }
+
+               clk_hw_register_clkdev(c->lk.clk_hw, c->lk.con_id, c->lk.dev_id);
+       }
 
-       if (cpu_is_omap15xx())
-               clk_enable(&arm_gpio_ck);
+       omap1_show_rates();
 
        return 0;
 }
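
For reference, a minimal self-contained sketch of the registration pattern the rewritten loop above relies on; example_ck, example_ops and "example-dev.0" are placeholder names rather than symbols from this patch, while CLK_HW_INIT_NO_PARENT(), clk_hw_register() and clk_hw_register_clkdev() are the generic common-clock and clkdev APIs:

#include <linux/clk-provider.h>
#include <linux/clkdev.h>

static const struct clk_ops example_ops;	/* callbacks omitted for brevity */

static struct clk_hw example_hw = {
	/* name, ops and flags supplied as static init data, as above */
	.init = CLK_HW_INIT_NO_PARENT("example_ck", &example_ops, 0),
};

static int __init example_clk_register(void)
{
	int err = clk_hw_register(NULL, &example_hw);

	if (err)
		return err;

	/* lets clk_get() on dev "example-dev.0", con_id "fck" find this clock */
	return clk_hw_register_clkdev(&example_hw, "fck", "example-dev.0");
}
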
@@ -900,7 +835,7 @@ void __init omap1_clk_late_init(void)
        unsigned long rate = ck_dpll1.rate;
 
        /* Find the highest supported frequency and enable it */
-       if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
+       if (omap1_select_table_rate(&virtual_ck_mpu, ~0, arm_ck.rate)) {
                pr_err("System frequencies not set, using default. Check your config.\n");
                /*
                 * Reprogramming the DPLL is tricky, it must be done from SRAM.
index 738becb..c228234 100644 (file)
@@ -64,7 +64,7 @@ static inline u32 omap_cs3_phys(void)
 #define OMAP1_IO_OFFSET                0x00f00000      /* Virtual IO = 0xff0b0000 */
 #define OMAP1_IO_ADDRESS(pa)   IOMEM((pa) - OMAP1_IO_OFFSET)
 
-#include <mach/serial.h>
+#include "serial.h"
 
 /*
  * ---------------------------------------------------------------------------
diff --git a/arch/arm/mach-omap1/include/mach/uncompress.h b/arch/arm/mach-omap1/include/mach/uncompress.h
deleted file mode 100644 (file)
index 9cca6a5..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * arch/arm/plat-omap/include/mach/uncompress.h
- *
- * Serial port stubs for kernel decompress status messages
- *
- * Initially based on:
- * linux-2.4.15-rmk1-dsplinux1.6/arch/arm/plat-omap/include/mach1510/uncompress.h
- * Copyright (C) 2000 RidgeRun, Inc.
- * Author: Greg Lonnon <glonnon@ridgerun.com>
- *
- * Rewritten by:
- * Author: <source@mvista.com>
- * 2004 (c) MontaVista Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/types.h>
-#include <linux/serial_reg.h>
-
-#include <asm/memory.h>
-#include <asm/mach-types.h>
-
-#include "serial.h"
-
-#define MDR1_MODE_MASK                 0x07
-
-volatile u8 *uart_base;
-int uart_shift;
-
-/*
- * Store the DEBUG_LL uart number into memory.
- * See also debug-macro.S, and serial.c for related code.
- */
-static void set_omap_uart_info(unsigned char port)
-{
-       /*
-        * Get address of some.bss variable and round it down
-        * a la CONFIG_AUTO_ZRELADDR.
-        */
-       u32 ram_start = (u32)&uart_shift & 0xf8000000;
-       u32 *uart_info = (u32 *)(ram_start + OMAP_UART_INFO_OFS);
-       *uart_info = port;
-}
-
-static inline void putc(int c)
-{
-       if (!uart_base)
-               return;
-
-       /* Check for UART 16x mode */
-       if ((uart_base[UART_OMAP_MDR1 << uart_shift] & MDR1_MODE_MASK) != 0)
-               return;
-
-       while (!(uart_base[UART_LSR << uart_shift] & UART_LSR_THRE))
-               barrier();
-       uart_base[UART_TX << uart_shift] = c;
-}
-
-static inline void flush(void)
-{
-}
-
-/*
- * Macros to configure UART1 and debug UART
- */
-#define _DEBUG_LL_ENTRY(mach, dbg_uart, dbg_shft, dbg_id)              \
-       if (machine_is_##mach()) {                                      \
-               uart_base = (volatile u8 *)(dbg_uart);                  \
-               uart_shift = (dbg_shft);                                \
-               port = (dbg_id);                                        \
-               set_omap_uart_info(port);                               \
-               break;                                                  \
-       }
-
-#define DEBUG_LL_OMAP7XX(p, mach)                                      \
-       _DEBUG_LL_ENTRY(mach, OMAP1_UART##p##_BASE, OMAP7XX_PORT_SHIFT, \
-               OMAP1UART##p)
-
-#define DEBUG_LL_OMAP1(p, mach)                                                \
-       _DEBUG_LL_ENTRY(mach, OMAP1_UART##p##_BASE, OMAP_PORT_SHIFT,    \
-               OMAP1UART##p)
-
-static inline void arch_decomp_setup(void)
-{
-       int port = 0;
-
-       /*
-        * Initialize the port based on the machine ID from the bootloader.
-        * Note that we're using macros here instead of switch statement
-        * as machine_is functions are optimized out for the boards that
-        * are not selected.
-        */
-       do {
-               /* omap7xx/8xx based boards using UART1 with shift 0 */
-               DEBUG_LL_OMAP7XX(1, herald);
-               DEBUG_LL_OMAP7XX(1, omap_perseus2);
-
-               /* omap15xx/16xx based boards using UART1 */
-               DEBUG_LL_OMAP1(1, ams_delta);
-               DEBUG_LL_OMAP1(1, nokia770);
-               DEBUG_LL_OMAP1(1, omap_h2);
-               DEBUG_LL_OMAP1(1, omap_h3);
-               DEBUG_LL_OMAP1(1, omap_innovator);
-               DEBUG_LL_OMAP1(1, omap_osk);
-               DEBUG_LL_OMAP1(1, omap_palmte);
-               DEBUG_LL_OMAP1(1, omap_palmz71);
-
-               /* omap15xx/16xx based boards using UART2 */
-               DEBUG_LL_OMAP1(2, omap_palmtt);
-
-               /* omap15xx/16xx based boards using UART3 */
-               DEBUG_LL_OMAP1(3, sx1);
-       } while (0);
-}
index 05ee260..d2db9b8 100644 (file)
 #include <asm/mach/map.h>
 
 #include "tc.h"
-#include "mux.h"
 #include "iomap.h"
 #include "common.h"
-#include "clock.h"
 
 /*
  * The machine specific code may provide the extra mapping besides the
@@ -125,11 +123,6 @@ void __init omap1_init_early(void)
         */
        omap_writew(0x0, MPU_PUBLIC_TIPB_CNTL);
        omap_writew(0x0, MPU_PRIVATE_TIPB_CNTL);
-
-       /* Must init clocks early to assure that timer interrupt works
-        */
-       omap1_clk_init();
-       omap1_mux_init();
 }
 
 void __init omap1_init_late(void)
index 299ae11..88928fc 100644 (file)
@@ -19,8 +19,7 @@
 
 #include <asm/mach-types.h>
 
-#include <mach/serial.h>
-
+#include "serial.h"
 #include "mux.h"
 #include "pm.h"
 #include "soc.h"
index c34b9af..d5e1278 100644 (file)
 #include <asm/mach/time.h>
 
 #include "hardware.h"
+#include "mux.h"
 #include "iomap.h"
 #include "common.h"
+#include "clock.h"
 
 #ifdef CONFIG_OMAP_MPU_TIMER
 
@@ -224,6 +226,9 @@ static inline void omap_mpu_timer_init(void)
  */
 void __init omap1_timer_init(void)
 {
+       omap1_clk_init();
+       omap1_mux_init();
+
        if (omap_32k_timer_init() != 0)
                omap_mpu_timer_init();
 }
index 57f0be4..a5df1d9 100644 (file)
@@ -1,4 +1,19 @@
 # SPDX-License-Identifier: GPL-2.0-only
+menuconfig ARCH_PXA
+       bool "PXA2xx/PXA3xx-based"
+       depends on ARCH_MULTI_V5
+       depends on CPU_LITTLE_ENDIAN
+       select ARM_CPU_SUSPEND if PM
+       select CLKSRC_PXA
+       select CLKSRC_MMIO
+       select TIMER_OF
+       select CPU_XSCALE if !CPU_XSC3
+       select GPIO_PXA
+       select GPIOLIB
+       select PLAT_PXA
+       help
+         Support for Intel/Marvell's PXA2xx/PXA3xx processor line.
+
 if ARCH_PXA
 
 menu "Intel PXA2xx/PXA3xx Implementations"
index 68730ce..0aec36e 100644 (file)
@@ -37,7 +37,8 @@ obj-$(CONFIG_MACH_SAAR)               += saar.o
 obj-$(CONFIG_ARCH_PXA_IDP)     += idp.o
 obj-$(CONFIG_ARCH_VIPER)       += viper.o
 obj-$(CONFIG_MACH_ARCOM_ZEUS)  += zeus.o
-obj-$(CONFIG_MACH_BALLOON3)    += balloon3.o
+obj-$(CONFIG_ARCOM_PCMCIA)     += viper-pcmcia.o
+obj-$(CONFIG_MACH_BALLOON3)    += balloon3.o balloon3-pcmcia.o
 obj-$(CONFIG_MACH_CSB726)      += csb726.o
 obj-$(CONFIG_CSB726_CSB701)    += csb701.o
 obj-$(CONFIG_MACH_CM_X300)      += cm-x300.o
@@ -47,18 +48,20 @@ obj-$(CONFIG_GUMSTIX_AM200EPD)      += am200epd.o
 obj-$(CONFIG_GUMSTIX_AM300EPD) += am300epd.o
 obj-$(CONFIG_MACH_XCEP)         += xcep.o
 obj-$(CONFIG_MACH_TRIZEPS4)    += trizeps4.o
+obj-$(CONFIG_TRIZEPS_PCMCIA)   += trizeps4-pcmcia.o
 obj-$(CONFIG_MACH_LOGICPD_PXA270)      += lpd270.o
 obj-$(CONFIG_MACH_PCM027)              += pcm027.o
 obj-$(CONFIG_MACH_PCM990_BASEBOARD)    += pcm990-baseboard.o
-obj-$(CONFIG_MACH_COLIBRI)                     += colibri-pxa270.o
+obj-$(CONFIG_MACH_COLIBRI)             += colibri-pxa270.o colibri-pcmcia.o
 obj-$(CONFIG_MACH_COLIBRI_EVALBOARD)   += colibri-evalboard.o
 obj-$(CONFIG_MACH_COLIBRI_PXA270_INCOME)       += colibri-pxa270-income.o
 obj-$(CONFIG_MACH_COLIBRI300)  += colibri-pxa3xx.o colibri-pxa300.o
-obj-$(CONFIG_MACH_COLIBRI320)  += colibri-pxa3xx.o colibri-pxa320.o
-obj-$(CONFIG_MACH_VPAC270)     += vpac270.o
+obj-$(CONFIG_MACH_COLIBRI320)  += colibri-pxa3xx.o colibri-pxa320.o colibri-pcmcia.o
+obj-$(CONFIG_MACH_VPAC270)     += vpac270.o vpac270-pcmcia.o
 
 # End-user Products
 obj-$(CONFIG_MACH_H4700)       += hx4700.o
+obj-$(CONFIG_MACH_H4700)       += hx4700-pcmcia.o
 obj-$(CONFIG_MACH_H5000)       += h5000.o
 obj-$(CONFIG_MACH_HIMALAYA)    += himalaya.o
 obj-$(CONFIG_MACH_MAGICIAN)    += magician.o
@@ -66,12 +69,12 @@ obj-$(CONFIG_MACH_MIOA701)  += mioa701.o mioa701_bootresume.o
 obj-$(CONFIG_PXA_EZX)           += ezx.o
 obj-$(CONFIG_MACH_MP900C)      += mp900.o
 obj-$(CONFIG_MACH_PALMTE2)     += palmte2.o
-obj-$(CONFIG_MACH_PALMTC)      += palmtc.o
+obj-$(CONFIG_MACH_PALMTC)      += palmtc.o palmtc-pcmcia.o
 obj-$(CONFIG_MACH_PALM27X)     += palm27x.o
 obj-$(CONFIG_MACH_PALMT5)      += palmt5.o
-obj-$(CONFIG_MACH_PALMTX)      += palmtx.o
+obj-$(CONFIG_MACH_PALMTX)      += palmtx.o palmtx-pcmcia.o
 obj-$(CONFIG_MACH_PALMZ72)     += palmz72.o
-obj-$(CONFIG_MACH_PALMLD)      += palmld.o
+obj-$(CONFIG_MACH_PALMLD)      += palmld.o palmld-pcmcia.o
 obj-$(CONFIG_PALM_TREO)                += palmtreo.o
 obj-$(CONFIG_PXA_SHARP_C7xx)   += corgi.o sharpsl_pm.o corgi_pm.o
 obj-$(CONFIG_PXA_SHARP_Cxx00)  += spitz.o sharpsl_pm.o spitz_pm.o
@@ -79,6 +82,7 @@ obj-$(CONFIG_MACH_POODLE)     += poodle.o
 obj-$(CONFIG_MACH_TOSA)                += tosa.o
 obj-$(CONFIG_MACH_ICONTROL)     += icontrol.o mxm8x10.o
 obj-$(CONFIG_ARCH_PXA_ESERIES) += eseries.o
+obj-$(CONFIG_MACH_E740)                += e740-pcmcia.o
 obj-$(CONFIG_MACH_ZIPIT2)      += z2.o
 
 obj-$(CONFIG_PXA_SYSTEMS_CPLDS)        += pxa_cplds_irqs.o
index 17d08ab..4b55bc8 100644 (file)
@@ -30,7 +30,7 @@
 
 #include "gumstix.h"
 #include "mfp-pxa25x.h"
-#include <mach/irqs.h>
+#include "irqs.h"
 #include <linux/platform_data/video-pxafb.h>
 
 #include "generic.h"
similarity index 98%
rename from drivers/pcmcia/pxa2xx_balloon3.c
rename to arch/arm/mach-pxa/balloon3-pcmcia.c
index 5fe1da7..6a27b76 100644 (file)
 #include <linux/irq.h>
 #include <linux/io.h>
 
-#include <mach/balloon3.h>
+#include "balloon3.h"
 
 #include <asm/mach-types.h>
 
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
 
 static int balloon3_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
 {
index 2614024..896d47d 100644 (file)
@@ -40,8 +40,8 @@
 #include <asm/mach/flash.h>
 
 #include "pxa27x.h"
-#include <mach/balloon3.h>
-#include <mach/audio.h>
+#include "balloon3.h"
+#include <linux/platform_data/asoc-pxa.h>
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include "udc.h"
index 2e35354..01f364a 100644 (file)
@@ -40,6 +40,8 @@
 #include <linux/spi/spi_gpio.h>
 #include <linux/spi/tdo24m.h>
 
+#include <linux/soc/pxa/cpu.h>
+
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/setup.h>
@@ -51,7 +53,7 @@
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/usb-ohci-pxa27x.h>
 #include <linux/platform_data/mtd-nand-pxa3xx.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 #include <linux/platform_data/usb-pxa3xx-ulpi.h>
 
 #include <asm/mach/map.h>
@@ -354,13 +356,13 @@ static struct platform_device cm_x300_spi_gpio = {
 static struct gpiod_lookup_table cm_x300_spi_gpiod_table = {
        .dev_id         = "spi_gpio",
        .table          = {
-               GPIO_LOOKUP("gpio-pxa", GPIO_LCD_SCL,
+               GPIO_LOOKUP("pca9555.1", GPIO_LCD_SCL - GPIO_LCD_BASE,
                            "sck", GPIO_ACTIVE_HIGH),
-               GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DIN,
+               GPIO_LOOKUP("pca9555.1", GPIO_LCD_DIN - GPIO_LCD_BASE,
                            "mosi", GPIO_ACTIVE_HIGH),
-               GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DOUT,
+               GPIO_LOOKUP("pca9555.1", GPIO_LCD_DOUT - GPIO_LCD_BASE,
                            "miso", GPIO_ACTIVE_HIGH),
-               GPIO_LOOKUP("gpio-pxa", GPIO_LCD_CS,
+               GPIO_LOOKUP("pca9555.1", GPIO_LCD_CS - GPIO_LCD_BASE,
                            "cs", GPIO_ACTIVE_HIGH),
                { },
        },
index b9c173e..b62af07 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/interrupt.h>
 #include <linux/gpio/machine.h>
 #include <asm/mach-types.h>
-#include <mach/hardware.h>
 #include <asm/mach/arch.h>
 #include <linux/i2c.h>
 #include <linux/platform_data/i2c-pxa.h>
similarity index 99%
rename from drivers/pcmcia/pxa2xx_colibri.c
rename to arch/arm/mach-pxa/colibri-pcmcia.c
index f0f725e..9da7b47 100644 (file)
@@ -14,7 +14,7 @@
 
 #include <asm/mach-types.h>
 
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
 
 #define        COLIBRI270_RESET_GPIO   53
 #define        COLIBRI270_PPEN_GPIO    107
index e5879e8..f6eaf46 100644 (file)
@@ -25,7 +25,6 @@
 #include <asm/irq.h>
 #include <asm/mach-types.h>
 
-#include <mach/hardware.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/usb-ohci-pxa27x.h>
 #include "pxa27x.h"
index 2f2cd2a..5dc6697 100644 (file)
@@ -23,7 +23,7 @@
 #include <asm/mach-types.h>
 #include <linux/sizes.h>
 
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 #include "colibri.h"
 #include "pxa27x.h"
 
index 82052df..11ca6c4 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
+#include <linux/soc/pxa/cpu.h>
 
 #include <asm/mach-types.h>
 #include <linux/sizes.h>
@@ -23,7 +24,7 @@
 #include "colibri.h"
 #include <linux/platform_data/usb-ohci-pxa27x.h>
 #include <linux/platform_data/video-pxafb.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 
 #include "generic.h"
 #include "devices.h"
index 35dd3ad..1a59056 100644 (file)
@@ -24,7 +24,7 @@
 #include "colibri.h"
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 #include "pxa27x-udc.h"
 #include "udc.h"
 
index 3cead80..77d6ef5 100644 (file)
 #include <linux/gpio.h>
 #include <linux/etherdevice.h>
 #include <asm/mach-types.h>
-#include <mach/hardware.h>
 #include <linux/sizes.h>
 #include <asm/system_info.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/irq.h>
-#include <mach/pxa3xx-regs.h>
+#include "pxa3xx-regs.h"
 #include "mfp-pxa300.h"
 #include "colibri.h"
 #include <linux/platform_data/mmc-pxamci.h>
index 85525d4..01a46f3 100644 (file)
@@ -3,7 +3,7 @@
 #define _COLIBRI_H_
 
 #include <net/ax88796.h>
-#include <mach/mfp.h>
+#include "mfp.h"
 
 /*
  * base board glue for PXA270 module
index 44659fb..c546356 100644 (file)
@@ -39,7 +39,6 @@
 #include <asm/setup.h>
 #include <asm/memory.h>
 #include <asm/mach-types.h>
-#include <mach/hardware.h>
 #include <asm/irq.h>
 
 #include <asm/mach/arch.h>
@@ -50,7 +49,7 @@
 #include <linux/platform_data/irda-pxaficp.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include "udc.h"
-#include <mach/corgi.h>
+#include "corgi.h"
 #include "sharpsl_pm.h"
 
 #include <asm/mach/sharpsl_param.h>
@@ -473,6 +472,25 @@ static struct platform_device corgiled_device = {
        },
 };
 
+static struct gpiod_lookup_table corgi_audio_gpio_table = {
+       .dev_id = "corgi-audio",
+       .table = {
+               GPIO_LOOKUP("sharp-scoop",
+                           CORGI_GPIO_MUTE_L - CORGI_SCOOP_GPIO_BASE,
+                           "mute-l", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("sharp-scoop",
+                           CORGI_GPIO_MUTE_R - CORGI_SCOOP_GPIO_BASE,
+                           "mute-r", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("sharp-scoop",
+                           CORGI_GPIO_APM_ON - CORGI_SCOOP_GPIO_BASE,
+                           "apm-on", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("sharp-scoop",
+                           CORGI_GPIO_MIC_BIAS - CORGI_SCOOP_GPIO_BASE,
+                           "mic-bias", GPIO_ACTIVE_HIGH),
+               { },
+       },
+};
+
 /*
  * Corgi Audio
  */
@@ -745,6 +763,7 @@ static void __init corgi_init(void)
 
        pxa_set_udc_info(&udc_info);
        gpiod_add_lookup_table(&corgi_mci_gpio_table);
+       gpiod_add_lookup_table(&corgi_audio_gpio_table);
        pxa_set_mci_info(&corgi_mci_platform_data);
        pxa_set_ficp_info(&corgi_ficp_platform_data);
        pxa_set_i2c_info(NULL);
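
The corgi_audio_gpio_table added above follows the usual gpiod lookup pattern, as do the e740/e750/e800 and hx4700 tables further down: board code maps chip-local GPIO offsets to function names, and the machine driver then requests the lines by those names. A consumer-side sketch (example_probe() is a placeholder, not the actual corgi-audio probe):

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct gpio_desc *mute_l;

	/* resolved through the "corgi-audio" lookup table by con_id "mute-l" */
	mute_l = devm_gpiod_get(&pdev->dev, "mute-l", GPIOD_OUT_HIGH);
	if (IS_ERR(mute_l))
		return PTR_ERR(mute_l);

	gpiod_set_value(mute_l, 0);	/* release the mute once probing is done */
	return 0;
}
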
index 092dcb9..555a5c1 100644 (file)
 
 #include <asm/irq.h>
 #include <asm/mach-types.h>
-#include <mach/hardware.h>
 
-#include <mach/corgi.h>
-#include <mach/pxa2xx-regs.h>
+#include "corgi.h"
+#include "pxa2xx-regs.h"
 #include "sharpsl_pm.h"
 
 #include "generic.h"
index 98fcdc6..410b1af 100644 (file)
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
+
 #include "csb726.h"
 #include "pxa27x.h"
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <mach/audio.h>
-#include <mach/smemc.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "smemc.h"
 
 #include "generic.h"
 #include "devices.h"
index 30d7cf9..6289287 100644 (file)
@@ -7,7 +7,7 @@
 #ifndef CSB726_H
 #define CSB726_H
 
-#include <mach/irqs.h> /* PXA_GPIO_TO_IRQ */
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
 
 #define CSB726_GPIO_IRQ_LAN    52
 #define CSB726_GPIO_IRQ_SM501  53
index 09b8495..a7b92dd 100644 (file)
@@ -9,21 +9,23 @@
 #include <linux/dmaengine.h>
 #include <linux/spi/pxa2xx_spi.h>
 #include <linux/platform_data/i2c-pxa.h>
+#include <linux/soc/pxa/cpu.h>
 
 #include "udc.h"
 #include <linux/platform_data/usb-pxa3xx-ulpi.h>
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/irda-pxaficp.h>
-#include <mach/irqs.h>
+#include "irqs.h"
 #include <linux/platform_data/usb-ohci-pxa27x.h>
 #include <linux/platform_data/keypad-pxa27x.h>
 #include <linux/platform_data/media/camera-pxa.h>
-#include <mach/audio.h>
-#include <mach/hardware.h>
+#include <linux/platform_data/asoc-pxa.h>
 #include <linux/platform_data/mmp_dma.h>
 #include <linux/platform_data/mtd-nand-pxa3xx.h>
 
+#include "regs-ost.h"
+#include "reset.h"
 #include "devices.h"
 #include "generic.h"
 
@@ -1118,3 +1120,12 @@ void __init pxa2xx_set_dmac_info(struct mmp_dma_platdata *dma_pdata)
 {
        pxa_register_device(&pxa2xx_pxa_dma, dma_pdata);
 }
+
+void __init pxa_register_wdt(unsigned int reset_status)
+{
+       struct resource res = DEFINE_RES_MEM(OST_PHYS, OST_LEN);
+
+       reset_status &= RESET_STATUS_WATCHDOG;
+       platform_device_register_resndata(NULL, "sa1100_wdt", -1, &res, 1,
+                                         &reset_status, sizeof(reset_status));
+}
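
The new pxa_register_wdt() above hands the boot reset status to the sa1100_wdt driver as platform data, next to the OST register resource. A hedged sketch of how a driver could read that back (example_wdt_probe() is a placeholder, not the actual sa1100_wdt probe function):

#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_wdt_probe(struct platform_device *pdev)
{
	/* copy of reset_status passed via platform_device_register_resndata() */
	const unsigned int *reset_status = dev_get_platdata(&pdev->dev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return -ENODEV;

	if (reset_status && *reset_status)
		dev_info(&pdev->dev, "last reboot was caused by the watchdog\n");

	return 0;
}
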
similarity index 98%
rename from drivers/pcmcia/pxa2xx_e740.c
rename to arch/arm/mach-pxa/e740-pcmcia.c
index 72caa6d..11a2c5d 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 
-#include <mach/eseries-gpio.h>
+#include "eseries-gpio.h"
 
 #include <asm/irq.h>
 #include <asm/mach-types.h>
 
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
 
 static int e740_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
 {
index f37c44b..08f8737 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/memblock.h>
+#include <linux/gpio/machine.h>
 
 #include <video/w100fb.h>
 
@@ -32,9 +33,9 @@
 #include <asm/mach-types.h>
 
 #include "pxa25x.h"
-#include <mach/eseries-gpio.h>
+#include "eseries-gpio.h"
 #include "eseries-irq.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 #include <linux/platform_data/video-pxafb.h>
 #include "udc.h"
 #include <linux/platform_data/irda-pxaficp.h>
@@ -520,6 +521,16 @@ static struct platform_device e740_audio_device = {
        .id             = -1,
 };
 
+static struct gpiod_lookup_table e740_audio_gpio_table = {
+       .dev_id = "e740-audio",
+       .table = {
+               GPIO_LOOKUP("gpio-pxa",  GPIO_E740_WM9705_nAVDD2, "Audio power",  GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("gpio-pxa",  GPIO_E740_AMP_ON, "Output amp",  GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("gpio-pxa",  GPIO_E740_MIC_ON, "Mic amp", GPIO_ACTIVE_HIGH),
+               { },
+       },
+};
+
 /* ----------------------------------------------------------------------- */
 
 static struct platform_device *e740_devices[] __initdata = {
@@ -540,6 +551,7 @@ static void __init e740_init(void)
                        "UDCCLK", &pxa25x_device_udc.dev),
        eseries_get_tmio_gpios();
        gpiod_add_lookup_table(&e7xx_gpio_vbus_gpiod_table);
+       gpiod_add_lookup_table(&e740_audio_gpio_table);
        platform_add_devices(ARRAY_AND_SIZE(e740_devices));
        pxa_set_ac97_info(NULL);
        pxa_set_ficp_info(&e7xx_ficp_platform_data);
@@ -699,7 +711,6 @@ static struct tc6393xb_platform_data e750_tc6393xb_info = {
        .irq_base       = IRQ_BOARD_START,
        .scr_pll2cr     = 0x0cc1,
        .scr_gper       = 0,
-       .gpio_base      = -1,
        .suspend        = &eseries_tmio_suspend,
        .resume         = &eseries_tmio_resume,
        .enable         = &eseries_tmio_enable,
@@ -716,6 +727,15 @@ static struct platform_device e750_tc6393xb_device = {
        .resource      = eseries_tmio_resources,
 };
 
+static struct gpiod_lookup_table e750_audio_gpio_table = {
+       .dev_id = "e750-audio",
+       .table = {
+               GPIO_LOOKUP("gpio-pxa",  GPIO_E750_HP_AMP_OFF, "Output amp",  GPIO_ACTIVE_LOW),
+               GPIO_LOOKUP("gpio-pxa",  GPIO_E750_SPK_AMP_OFF, "Mic amp", GPIO_ACTIVE_LOW),
+               { },
+       },
+};
+
 static struct platform_device e750_audio_device = {
        .name           = "e750-audio",
        .id             = -1,
@@ -740,6 +760,7 @@ static void __init e750_init(void)
                        "GPIO11_CLK", NULL),
        eseries_get_tmio_gpios();
        gpiod_add_lookup_table(&e7xx_gpio_vbus_gpiod_table);
+       gpiod_add_lookup_table(&e750_audio_gpio_table);
        platform_add_devices(ARRAY_AND_SIZE(e750_devices));
        pxa_set_ac97_info(NULL);
        pxa_set_ficp_info(&e7xx_ficp_platform_data);
@@ -918,7 +939,6 @@ static struct tc6393xb_platform_data e800_tc6393xb_info = {
        .irq_base       = IRQ_BOARD_START,
        .scr_pll2cr     = 0x0cc1,
        .scr_gper       = 0,
-       .gpio_base      = -1,
        .suspend        = &eseries_tmio_suspend,
        .resume         = &eseries_tmio_resume,
        .enable         = &eseries_tmio_enable,
@@ -935,6 +955,15 @@ static struct platform_device e800_tc6393xb_device = {
        .resource      = eseries_tmio_resources,
 };
 
+static struct gpiod_lookup_table e800_audio_gpio_table = {
+       .dev_id = "e800-audio",
+       .table = {
+               GPIO_LOOKUP("gpio-pxa",  GPIO_E800_HP_AMP_OFF, "Output amp",  GPIO_ACTIVE_LOW),
+               GPIO_LOOKUP("gpio-pxa",  GPIO_E800_SPK_AMP_ON, "Mic amp", GPIO_ACTIVE_HIGH),
+               { },
+       },
+};
+
 static struct platform_device e800_audio_device = {
        .name           = "e800-audio",
        .id             = -1,
@@ -959,6 +988,7 @@ static void __init e800_init(void)
                        "GPIO11_CLK", NULL),
        eseries_get_tmio_gpios();
        gpiod_add_lookup_table(&e800_gpio_vbus_gpiod_table);
+       gpiod_add_lookup_table(&e800_audio_gpio_table);
        platform_add_devices(ARRAY_AND_SIZE(e800_devices));
        pxa_set_ac97_info(NULL);
 }
index eb85950..69c2ec0 100644 (file)
@@ -29,7 +29,6 @@
 #include "pxa27x.h"
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <mach/hardware.h>
 #include <linux/platform_data/keypad-pxa27x.h>
 #include <linux/platform_data/media/camera-pxa.h>
 
index ab7cdff..02fdde7 100644 (file)
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/soc/pxa/smemc.h>
+#include <linux/clk/pxa.h>
 
-#include <mach/hardware.h>
 #include <asm/mach/map.h>
 #include <asm/mach-types.h>
 
-#include <mach/irqs.h>
-#include <mach/reset.h>
-#include <mach/smemc.h>
-#include <mach/pxa3xx-regs.h>
+#include "addr-map.h"
+#include "irqs.h"
+#include "reset.h"
+#include "smemc.h"
+#include "pxa3xx-regs.h"
 
 #include "generic.h"
 #include <clocksource/pxa.h>
@@ -46,28 +49,47 @@ void clear_reset_status(unsigned int mask)
 void __init pxa_timer_init(void)
 {
        if (cpu_is_pxa25x())
-               pxa25x_clocks_init();
+               pxa25x_clocks_init(io_p2v(0x41300000));
        if (cpu_is_pxa27x())
-               pxa27x_clocks_init();
+               pxa27x_clocks_init(io_p2v(0x41300000));
        if (cpu_is_pxa3xx())
-               pxa3xx_clocks_init();
+               pxa3xx_clocks_init(io_p2v(0x41340000), io_p2v(0x41350000));
        pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000));
 }
 
-/*
- * Get the clock frequency as reflected by CCCR and the turbo flag.
- * We assume these values have been applied via a fcs.
- * If info is not 0 we also display the current settings.
- */
-unsigned int get_clk_frequency_khz(int info)
+void pxa_smemc_set_pcmcia_timing(int sock, u32 mcmem, u32 mcatt, u32 mcio)
 {
-       if (cpu_is_pxa25x())
-               return pxa25x_get_clk_frequency_khz(info);
-       else if (cpu_is_pxa27x())
-               return pxa27x_get_clk_frequency_khz(info);
-       return 0;
+       __raw_writel(mcmem, MCMEM(sock));
+       __raw_writel(mcatt, MCATT(sock));
+       __raw_writel(mcio, MCIO(sock));
+}
+EXPORT_SYMBOL_GPL(pxa_smemc_set_pcmcia_timing);
+
+void pxa_smemc_set_pcmcia_socket(int nr)
+{
+       switch (nr) {
+       case 0:
+               __raw_writel(0, MECR);
+               break;
+       case 1:
+               /*
+                * We have at least one socket, so set MECR:CIT
+                * (Card Is There)
+                */
+               __raw_writel(MECR_CIT, MECR);
+               break;
+       case 2:
+               /* Set CIT and MECR:NOS (Number Of Sockets) */
+               __raw_writel(MECR_CIT | MECR_NOS, MECR);
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(pxa_smemc_set_pcmcia_socket);
+
+void __iomem *pxa_smemc_get_mdrefr(void)
+{
+       return MDREFR;
 }
-EXPORT_SYMBOL(get_clk_frequency_khz);
 
 /*
  * Intel PXA2xx internal register mapping.
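
The helpers exported above let the relocated PCMCIA socket code program the static memory controller without reaching into <mach/smemc.h>. A hypothetical caller, assuming the prototypes live in the <linux/soc/pxa/smemc.h> header included earlier in this file; the timing words below are placeholders, not real board values:

#include <linux/soc/pxa/smemc.h>

static void example_apply_pcmcia_timing(void)
{
	/* placeholder MCMEM/MCATT/MCIO encodings for socket 0 */
	pxa_smemc_set_pcmcia_timing(0, 0x00010138, 0x0001020e, 0x00010101);
	/* one populated socket: sets MECR:CIT, as handled above */
	pxa_smemc_set_pcmcia_socket(1);
}
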
index 3b7873f..7bb1499 100644 (file)
@@ -10,7 +10,6 @@
 
 struct irq_data;
 
-extern unsigned int get_clk_frequency_khz(int info);
 extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *,
                                             unsigned int));
 extern void __init pxa_map_io(void);
@@ -23,19 +22,16 @@ extern void pxa_timer_init(void);
 #define ARRAY_AND_SIZE(x)      (x), ARRAY_SIZE(x)
 
 #define pxa25x_handle_irq icip_handle_irq
-extern int __init pxa25x_clocks_init(void);
 extern void __init pxa25x_init_irq(void);
 extern void __init pxa25x_map_io(void);
 extern void __init pxa26x_init_irq(void);
 
 #define pxa27x_handle_irq ichp_handle_irq
-extern int __init pxa27x_clocks_init(void);
 extern unsigned        pxa27x_get_clk_frequency_khz(int);
 extern void __init pxa27x_init_irq(void);
 extern void __init pxa27x_map_io(void);
 
 #define pxa3xx_handle_irq ichp_handle_irq
-extern int __init pxa3xx_clocks_init(void);
 extern void __init pxa3xx_init_irq(void);
 extern void __init pxa3xx_map_io(void);
 
@@ -71,8 +67,3 @@ extern unsigned pxa25x_get_clk_frequency_khz(int);
 #define pxa27x_get_clk_frequency_khz(x)                (0)
 #endif
 
-#ifdef CONFIG_PXA3xx
-extern unsigned        pxa3xx_get_clk_frequency_khz(int);
-#else
-#define pxa3xx_get_clk_frequency_khz(x)                (0)
-#endif
index 49dd618..72b08a9 100644 (file)
@@ -28,7 +28,6 @@
 #include <asm/setup.h>
 #include <asm/memory.h>
 #include <asm/mach-types.h>
-#include <mach/hardware.h>
 #include <asm/irq.h>
 #include <linux/sizes.h>
 
index 470250c..9005b3c 100644 (file)
@@ -3,7 +3,7 @@
  *  arch/arm/mach-pxa/include/mach/gumstix.h
  */
 
-#include <mach/irqs.h> /* PXA_GPIO_TO_IRQ */
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
 
 /* BTRESET - Reset line to Bluetooth module, active low signal. */
 #define GPIO_GUMSTIX_BTRESET          7
index ece1e71..212efe2 100644 (file)
@@ -29,7 +29,7 @@
 #include "pxa25x.h"
 #include "h5000.h"
 #include "udc.h"
-#include <mach/smemc.h>
+#include "smemc.h"
 
 #include "generic.h"
 
similarity index 98%
rename from drivers/pcmcia/pxa2xx_hx4700.c
rename to arch/arm/mach-pxa/hx4700-pcmcia.c
index 87b6a16..e2331df 100644 (file)
@@ -10,9 +10,9 @@
 #include <linux/irq.h>
 
 #include <asm/mach-types.h>
-#include <mach/hx4700.h>
+#include "hx4700.h"
 
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
 
 static struct gpio gpios[] = {
        { GPIO114_HX4700_CF_RESET,    GPIOF_OUT_INIT_LOW,   "CF reset"        },
index e1870fb..2ae06ed 100644 (file)
 #include <linux/spi/pxa2xx_spi.h>
 #include <linux/platform_data/i2c-pxa.h>
 
-#include <mach/hardware.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
 #include "pxa27x.h"
-#include <mach/hx4700.h>
+#include "addr-map.h"
+#include "hx4700.h"
 #include <linux/platform_data/irda-pxaficp.h>
 
 #include <sound/ak4641.h>
@@ -834,6 +834,19 @@ static struct i2c_board_info i2c_board_info[] __initdata = {
        },
 };
 
+static struct gpiod_lookup_table hx4700_audio_gpio_table = {
+       .dev_id = "hx4700-audio",
+       .table = {
+               GPIO_LOOKUP("gpio-pxa", GPIO75_HX4700_EARPHONE_nDET,
+                           "earphone-det", GPIO_ACTIVE_LOW),
+               GPIO_LOOKUP("gpio-pxa", GPIO92_HX4700_HP_DRIVER,
+                           "hp-driver", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("gpio-pxa", GPIO107_HX4700_SPK_nSD,
+                           "spk-sd", GPIO_ACTIVE_LOW),
+               { },
+       },
+};
+
 static struct platform_device audio = {
        .name   = "hx4700-audio",
        .id     = -1,
@@ -895,6 +908,7 @@ static void __init hx4700_init(void)
 
        gpiod_add_lookup_table(&bq24022_gpiod_table);
        gpiod_add_lookup_table(&gpio_vbus_gpiod_table);
+       gpiod_add_lookup_table(&hx4700_audio_gpio_table);
        platform_add_devices(devices, ARRAY_SIZE(devices));
        pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup));
 
index fb0850a..525d01d 100644 (file)
@@ -22,7 +22,6 @@
 #include <asm/setup.h>
 #include <asm/memory.h>
 #include <asm/mach-types.h>
-#include <mach/hardware.h>
 #include <asm/irq.h>
 
 #include <asm/mach/arch.h>
@@ -31,7 +30,6 @@
 #include "pxa25x.h"
 #include "idp.h"
 #include <linux/platform_data/video-pxafb.h>
-#include <mach/bitfield.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/smc91x.h>
 
index a89e672..81b9bd9 100644 (file)
@@ -20,7 +20,7 @@
  * IDP hardware.
  */
 
-#include <mach/irqs.h> /* PXA_GPIO_TO_IRQ */
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
 
 #define IDP_FLASH_PHYS         (PXA_CS0_PHYS)
 #define IDP_ALT_FLASH_PHYS     (PXA_CS1_PHYS)
diff --git a/arch/arm/mach-pxa/include/mach/bitfield.h b/arch/arm/mach-pxa/include/mach/bitfield.h
deleted file mode 100644 (file)
index fe2ca44..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- *     FILE            bitfield.h
- *
- *     Version         1.1
- *     Author          Copyright (c) Marc A. Viredaz, 1998
- *                     DEC Western Research Laboratory, Palo Alto, CA
- *     Date            April 1998 (April 1997)
- *     System          Advanced RISC Machine (ARM)
- *     Language        C or ARM Assembly
- *     Purpose         Definition of macros to operate on bit fields.
- */
-
-
-
-#ifndef __BITFIELD_H
-#define __BITFIELD_H
-
-#ifndef __ASSEMBLY__
-#define UData(Data)    ((unsigned long) (Data))
-#else
-#define UData(Data)    (Data)
-#endif
-
-
-/*
- * MACRO: Fld
- *
- * Purpose
- *    The macro "Fld" encodes a bit field, given its size and its shift value
- *    with respect to bit 0.
- *
- * Note
- *    A more intuitive way to encode bit fields would have been to use their
- *    mask. However, extracting size and shift value information from a bit
- *    field's mask is cumbersome and might break the assembler (255-character
- *    line-size limit).
- *
- * Input
- *    Size             Size of the bit field, in number of bits.
- *    Shft             Shift value of the bit field with respect to bit 0.
- *
- * Output
- *    Fld              Encoded bit field.
- */
-
-#define Fld(Size, Shft)        (((Size) << 16) + (Shft))
-
-
-/*
- * MACROS: FSize, FShft, FMsk, FAlnMsk, F1stBit
- *
- * Purpose
- *    The macros "FSize", "FShft", "FMsk", "FAlnMsk", and "F1stBit" return
- *    the size, shift value, mask, aligned mask, and first bit of a
- *    bit field.
- *
- * Input
- *    Field            Encoded bit field (using the macro "Fld").
- *
- * Output
- *    FSize            Size of the bit field, in number of bits.
- *    FShft            Shift value of the bit field with respect to bit 0.
- *    FMsk             Mask for the bit field.
- *    FAlnMsk          Mask for the bit field, aligned on bit 0.
- *    F1stBit          First bit of the bit field.
- */
-
-#define FSize(Field)   ((Field) >> 16)
-#define FShft(Field)   ((Field) & 0x0000FFFF)
-#define FMsk(Field)    (((UData (1) << FSize (Field)) - 1) << FShft (Field))
-#define FAlnMsk(Field) ((UData (1) << FSize (Field)) - 1)
-#define F1stBit(Field) (UData (1) << FShft (Field))
-
-
-/*
- * MACRO: FInsrt
- *
- * Purpose
- *    The macro "FInsrt" inserts a value into a bit field by shifting the
- *    former appropriately.
- *
- * Input
- *    Value            Bit-field value.
- *    Field            Encoded bit field (using the macro "Fld").
- *
- * Output
- *    FInsrt           Bit-field value positioned appropriately.
- */
-
-#define FInsrt(Value, Field) \
-                       (UData (Value) << FShft (Field))
-
-
-/*
- * MACRO: FExtr
- *
- * Purpose
- *    The macro "FExtr" extracts the value of a bit field by masking and
- *    shifting it appropriately.
- *
- * Input
- *    Data             Data containing the bit-field to be extracted.
- *    Field            Encoded bit field (using the macro "Fld").
- *
- * Output
- *    FExtr            Bit-field value.
- */
-
-#define FExtr(Data, Field) \
-                       ((UData (Data) >> FShft (Field)) & FAlnMsk (Field))
-
-
-#endif /* __BITFIELD_H */
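The deleted bitfield.h encoded a register field as (size << 16) + shift rather than as a mask, a convention that still shows up in older PXA register definitions. A standalone sketch of the arithmetic (plain C, no kernel headers needed, with FExtr's aligned mask inlined):

#include <stdio.h>

#define UData(x)		((unsigned long)(x))
#define Fld(Size, Shft)		(((Size) << 16) + (Shft))
#define FSize(Field)		((Field) >> 16)
#define FShft(Field)		((Field) & 0x0000FFFF)
#define FMsk(Field)		(((UData(1) << FSize(Field)) - 1) << FShft(Field))
#define FInsrt(Value, Field)	(UData(Value) << FShft(Field))
#define FExtr(Data, Field)	((UData(Data) >> FShft(Field)) & ((UData(1) << FSize(Field)) - 1))

int main(void)
{
	unsigned long fld = Fld(3, 4);	/* 3-bit field occupying bits 6..4 */

	printf("mask    = 0x%lx\n", FMsk(fld));		/* 0x70 */
	printf("insert  = 0x%lx\n", FInsrt(5, fld));	/* 5 << 4 = 0x50 */
	printf("extract = %lu\n", FExtr(0x50, fld));	/* 5 */
	return 0;
}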
diff --git a/arch/arm/mach-pxa/include/mach/dma.h b/arch/arm/mach-pxa/include/mach/dma.h
deleted file mode 100644 (file)
index 79f9842..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- *  arch/arm/mach-pxa/include/mach/dma.h
- *
- *  Author:    Nicolas Pitre
- *  Created:   Jun 15, 2001
- *  Copyright: MontaVista Software, Inc.
- */
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#include <mach/hardware.h>
-
-/* DMA Controller Registers Definitions */
-#define DMAC_REGS_VIRT io_p2v(0x40000000)
-
-#endif /* _ASM_ARCH_DMA_H */
diff --git a/arch/arm/mach-pxa/include/mach/generic.h b/arch/arm/mach-pxa/include/mach/generic.h
deleted file mode 100644 (file)
index 665542e..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../generic.h"
diff --git a/arch/arm/mach-pxa/include/mach/mtd-xip.h b/arch/arm/mach-pxa/include/mach/mtd-xip.h
deleted file mode 100644 (file)
index 4b31bef..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * MTD primitives for XIP support. Architecture specific functions
- *
- * Do not include this file directly. It's included from linux/mtd/xip.h
- * 
- * Author:     Nicolas Pitre
- * Created:    Nov 2, 2004
- * Copyright:  (C) 2004 MontaVista Software, Inc.
- */
-
-#ifndef __ARCH_PXA_MTD_XIP_H__
-#define __ARCH_PXA_MTD_XIP_H__
-
-#include <mach/regs-ost.h>
-
-/* restored July 2017, this did not build since 2011! */
-
-#define ICIP                   io_p2v(0x40d00000)
-#define ICMR                   io_p2v(0x40d00004)
-#define xip_irqpending()       (readl(ICIP) & readl(ICMR))
-
-/* we sample OSCR and convert desired delta to usec (1/4 ~= 1000000/3686400) */
-#define xip_currtime()         readl(OSCR)
-#define xip_elapsed_since(x)   (signed)((readl(OSCR) - (x)) / 4)
-
-/*
- * xip_cpu_idle() is used when waiting for a delay equal or larger than
- * the system timer tick period.  This should put the CPU into idle mode
- * to save power and to be woken up only when some interrupts are pending.
- * As above, this should not rely upon standard kernel code.
- */
-
-#define xip_cpu_idle()  asm volatile ("mcr p14, 0, %0, c7, c0, 0" :: "r" (1))
-
-#endif /* __ARCH_PXA_MTD_XIP_H__ */
diff --git a/arch/arm/mach-pxa/include/mach/uncompress.h b/arch/arm/mach-pxa/include/mach/uncompress.h
deleted file mode 100644 (file)
index 1ed629e..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-pxa/include/mach/uncompress.h
- *
- * Author:     Nicolas Pitre
- * Copyright:  (C) 2001 MontaVista Software Inc.
- */
-
-#include <linux/serial_reg.h>
-#include <asm/mach-types.h>
-
-#define FFUART_BASE    (0x40100000)
-#define BTUART_BASE    (0x40200000)
-#define STUART_BASE    (0x40700000)
-
-unsigned long uart_base;
-unsigned int uart_shift;
-unsigned int uart_is_pxa;
-
-static inline unsigned char uart_read(int offset)
-{
-       return *(volatile unsigned char *)(uart_base + (offset << uart_shift));
-}
-
-static inline void uart_write(unsigned char val, int offset)
-{
-       *(volatile unsigned char *)(uart_base + (offset << uart_shift)) = val;
-}
-
-static inline int uart_is_enabled(void)
-{
-       /* assume enabled by default for non-PXA uarts */
-       return uart_is_pxa ? uart_read(UART_IER) & UART_IER_UUE : 1;
-}
-
-static inline void putc(char c)
-{
-       if (!uart_is_enabled())
-               return;
-
-       while (!(uart_read(UART_LSR) & UART_LSR_THRE))
-               barrier();
-
-       uart_write(c, UART_TX);
-}
-
-/*
- * This does not append a newline
- */
-static inline void flush(void)
-{
-}
-
-static inline void arch_decomp_setup(void)
-{
-       /* initialize to default */
-       uart_base = FFUART_BASE;
-       uart_shift = 2;
-       uart_is_pxa = 1;
-
-       if (machine_is_littleton() ||  machine_is_csb726() ||
-           machine_is_cm_x300() || machine_is_balloon3())
-               uart_base = STUART_BASE;
-
-       if (machine_is_arcom_zeus()) {
-               uart_base = 0x10000000; /* nCS4 */
-               uart_shift = 1;
-               uart_is_pxa = 0;
-       }
-}
index 74efc3a..96f33ef 100644 (file)
 #include <linux/irq.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/soc/pxa/cpu.h>
 
 #include <asm/exception.h>
 
-#include <mach/hardware.h>
-#include <mach/irqs.h>
+#include "irqs.h"
 
 #include "generic.h"
+#include "pxa-regs.h"
 
 #define ICIP                   (0x000)
 #define ICMR                   (0x004)
index 73f5953..f98dc61 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/setup.h>
 #include <asm/memory.h>
 #include <asm/mach-types.h>
-#include <mach/hardware.h>
 #include <asm/irq.h>
 
 #include <asm/mach/arch.h>
index 6fc40bc..0e4123c 100644 (file)
@@ -28,7 +28,6 @@
 #include <asm/setup.h>
 #include <asm/memory.h>
 #include <asm/mach-types.h>
-#include <mach/hardware.h>
 #include <asm/irq.h>
 #include <linux/sizes.h>
 
 
 #include "pxa27x.h"
 #include "lpd270.h"
-#include <mach/audio.h>
+#include "addr-map.h"
+#include <linux/platform_data/asoc-pxa.h>
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/irda-pxaficp.h>
 #include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <mach/smemc.h>
+#include "smemc.h"
 
 #include "generic.h"
 #include "devices.h"
index e241197..4f0944f 100644 (file)
@@ -34,7 +34,6 @@
 #include <asm/setup.h>
 #include <asm/memory.h>
 #include <asm/mach-types.h>
-#include <mach/hardware.h>
 #include <asm/irq.h>
 #include <linux/sizes.h>
 
 #include <asm/hardware/sa1111.h>
 
 #include "pxa25x.h"
-#include <mach/audio.h>
-#include <mach/lubbock.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "lubbock.h"
 #include "udc.h"
 #include <linux/platform_data/irda-pxaficp.h>
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include "pm.h"
-#include <mach/smemc.h>
+#include "smemc.h"
 
 #include "generic.h"
 #include "devices.h"
@@ -132,6 +131,13 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
        // no D+ pullup; lubbock can't connect/disconnect in software
 };
 
+static struct resource lubbock_udc_resources[] = {
+       DEFINE_RES_MEM(0x40600000, 0x10000),
+       DEFINE_RES_IRQ(IRQ_USB),
+       DEFINE_RES_IRQ(LUBBOCK_USB_IRQ),
+       DEFINE_RES_IRQ(LUBBOCK_USB_DISC_IRQ),
+};
+
 /* GPIOs for SA1111 PCMCIA */
 static struct gpiod_lookup_table sa1111_pcmcia_gpio_table = {
        .dev_id = "1800",
@@ -497,6 +503,9 @@ static void __init lubbock_init(void)
        lubbock_init_pcmcia();
 
        clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL);
+       /* lubbock has two extra IRQs */
+       pxa25x_device_udc.resource = lubbock_udc_resources;
+       pxa25x_device_udc.num_resources = ARRAY_SIZE(lubbock_udc_resources);
        pxa_set_udc_info(&udc_info);
        pxa_set_fb_info(NULL, &sharp_lm8v31);
        pxa_set_mci_info(&lubbock_mci_platform_data);
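The Lubbock hunk swaps in a board-specific resource array before pxa25x_device_udc is registered, so the UDC driver can pick up the two extra carrier-board IRQs by index. A hypothetical sketch of the lookup on the driver side, assuming positional platform_get_irq() indices (the function name and exact handling are illustrative only):

#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/printk.h>

static int lubbock_udc_get_irqs_sketch(struct platform_device *pdev)
{
	struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);		/* IRQ_USB */
	int usb_irq = platform_get_irq(pdev, 1);	/* LUBBOCK_USB_IRQ, if provided */
	int usb_disc_irq = platform_get_irq(pdev, 2);	/* LUBBOCK_USB_DISC_IRQ, if provided */

	if (!regs || irq < 0)
		return -ENODEV;

	/* Boards without the extra resources simply see the last two as negative. */
	pr_info("udc regs %pR, irqs %d/%d/%d\n", regs, irq, usb_irq, usb_disc_irq);
	return 0;
}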
similarity index 95%
rename from arch/arm/mach-pxa/include/mach/lubbock.h
rename to arch/arm/mach-pxa/lubbock.h
index a3af4a2..55cf91e 100644 (file)
@@ -1,13 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- *  arch/arm/mach-pxa/include/mach/lubbock.h
- *
  *  Author:    Nicolas Pitre
  *  Created:   Jun 15, 2001
  *  Copyright: MontaVista Software Inc.
  */
 
-#include <mach/irqs.h>
+#include "irqs.h"
 
 #define LUBBOCK_ETH_PHYS       PXA_CS3_PHYS
 
index 200fd35..20456a5 100644 (file)
 #include <linux/regulator/machine.h>
 #include <linux/platform_data/i2c-pxa.h>
 
-#include <mach/hardware.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/system_info.h>
 
 #include "pxa27x.h"
-#include <mach/magician.h>
+#include "addr-map.h"
+#include "magician.h"
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/irda-pxaficp.h>
@@ -53,6 +53,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/pxa2xx_spi.h>
 #include <linux/spi/ads7846.h>
+#include <sound/uda1380.h>
 
 static unsigned long magician_pin_config[] __initdata = {
 
@@ -681,7 +682,7 @@ static struct platform_device bq24022 = {
 static struct gpiod_lookup_table bq24022_gpiod_table = {
        .dev_id = "gpio-regulator",
        .table = {
-               GPIO_LOOKUP("gpio-pxa", EGPIO_MAGICIAN_BQ24022_ISET2,
+               GPIO_LOOKUP("htc-egpio-0", EGPIO_MAGICIAN_BQ24022_ISET2 - MAGICIAN_EGPIO_BASE,
                            NULL, GPIO_ACTIVE_HIGH),
                GPIO_LOOKUP("gpio-pxa", GPIO30_MAGICIAN_BQ24022_nCHARGE_EN,
                            "enable", GPIO_ACTIVE_LOW),
@@ -899,6 +900,53 @@ static struct platform_device strataflash = {
 };
 
 /*
+ * audio support
+ */
+static struct uda1380_platform_data uda1380_info = {
+       .gpio_power = EGPIO_MAGICIAN_CODEC_POWER,
+       .gpio_reset = EGPIO_MAGICIAN_CODEC_RESET,
+       .dac_clk    = UDA1380_DAC_CLK_WSPLL,
+};
+
+static struct i2c_board_info magician_audio_i2c_board_info[] = {
+       {
+               I2C_BOARD_INFO("uda1380", 0x18),
+               .platform_data = &uda1380_info,
+       },
+};
+
+static struct gpiod_lookup_table magician_audio_gpio_table = {
+       .dev_id = "magician-audio",
+       .table = {
+               GPIO_LOOKUP("htc-egpio-0",
+                           EGPIO_MAGICIAN_SPK_POWER - MAGICIAN_EGPIO_BASE,
+                           "SPK_POWER", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("htc-egpio-0",
+                           EGPIO_MAGICIAN_EP_POWER - MAGICIAN_EGPIO_BASE,
+                           "EP_POWER", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("htc-egpio-0",
+                           EGPIO_MAGICIAN_MIC_POWER - MAGICIAN_EGPIO_BASE,
+                           "MIC_POWER", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("htc-egpio-0",
+                           EGPIO_MAGICIAN_IN_SEL0 - MAGICIAN_EGPIO_BASE,
+                           "IN_SEL0", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("htc-egpio-0",
+                           EGPIO_MAGICIAN_IN_SEL1 - MAGICIAN_EGPIO_BASE,
+                           "IN_SEL1", GPIO_ACTIVE_HIGH),
+               { },
+       },
+};
+
+static void magician_audio_init(void)
+{
+       i2c_register_board_info(0,
+               ARRAY_AND_SIZE(magician_audio_i2c_board_info));
+
+       gpiod_add_lookup_table(&magician_audio_gpio_table);
+       platform_device_register_simple("magician-audio", -1, NULL, 0);
+}
+
+/*
  * PXA I2C main controller
  */
 
@@ -1048,6 +1096,8 @@ static void __init magician_init(void)
        gpiod_add_lookup_table(&bq24022_gpiod_table);
        gpiod_add_lookup_table(&gpio_vbus_gpiod_table);
        platform_add_devices(ARRAY_AND_SIZE(devices));
+
+       magician_audio_init();
 }
 
 MACHINE_START(MAGICIAN, "HTC Magician")
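The magician changes hand the UDA1380 codec its power/reset GPIOs and DAC clock selection as I2C platform data. A rough sketch of how that data reaches the codec's probe; the function name is made up and the real driver does far more than print the values:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/printk.h>
#include <sound/uda1380.h>

static int uda1380_i2c_probe_sketch(struct i2c_client *i2c)
{
	struct uda1380_platform_data *pdata = dev_get_platdata(&i2c->dev);

	if (!pdata)
		return -EINVAL;

	/* gpio_power/gpio_reset/dac_clk come straight from the board file. */
	pr_info("uda1380: power gpio %d, reset gpio %d, dac_clk %d\n",
		pdata->gpio_power, pdata->gpio_reset, pdata->dac_clk);
	return 0;
}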
similarity index 99%
rename from arch/arm/mach-pxa/include/mach/magician.h
rename to arch/arm/mach-pxa/magician.h
index 7d3af56..e1e4f9f 100644 (file)
@@ -9,7 +9,7 @@
 #define _MAGICIAN_H_
 
 #include <linux/gpio.h>
-#include <mach/irqs.h>
+#include "irqs.h"
 
 /*
  * PXA GPIOs
index d237bd0..fd386f1 100644 (file)
@@ -35,7 +35,6 @@
 #include <asm/setup.h>
 #include <asm/memory.h>
 #include <asm/mach-types.h>
-#include <mach/hardware.h>
 #include <asm/irq.h>
 #include <linux/sizes.h>
 
 #include <asm/mach/flash.h>
 
 #include "pxa27x.h"
-#include <mach/mainstone.h>
-#include <mach/audio.h>
+#include "mainstone.h"
+#include <linux/platform_data/asoc-pxa.h>
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/irda-pxaficp.h>
 #include <linux/platform_data/usb-ohci-pxa27x.h>
 #include <linux/platform_data/keypad-pxa27x.h>
-#include <mach/smemc.h>
+#include "addr-map.h"
+#include "smemc.h"
 
 #include "generic.h"
 #include "devices.h"
@@ -548,6 +548,14 @@ static struct gpiod_lookup_table mainstone_pcmcia_gpio_table = {
        },
 };
 
+static struct gpiod_lookup_table mainstone_wm97xx_gpio_table = {
+       .dev_id = "wm97xx-touch",
+       .table = {
+               GPIO_LOOKUP("gpio-pxa", 4, "touch", GPIO_ACTIVE_HIGH),
+               { },
+       },
+};
+
 static void __init mainstone_init(void)
 {
        int SW7 = 0;  /* FIXME: get from SCR (Mst doc section 3.2.1.1) */
@@ -562,6 +570,7 @@ static void __init mainstone_init(void)
                      "mst-pcmcia1", MST_PCMCIA_INPUTS, 0, NULL,
                      NULL, mst_pcmcia1_irqs);
        gpiod_add_lookup_table(&mainstone_pcmcia_gpio_table);
+       gpiod_add_lookup_table(&mainstone_wm97xx_gpio_table);
 
        pxa_set_ffuart_info(NULL);
        pxa_set_btuart_info(NULL);
similarity index 98%
rename from arch/arm/mach-pxa/include/mach/mainstone.h
rename to arch/arm/mach-pxa/mainstone.h
index 1698f2f..f116c56 100644 (file)
@@ -1,7 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- *  arch/arm/mach-pxa/include/mach/mainstone.h
- *
  *  Author:    Nicolas Pitre
  *  Created:   Nov 14, 2002
  *  Copyright: MontaVista Software Inc.
@@ -10,7 +8,7 @@
 #ifndef ASM_ARCH_MAINSTONE_H
 #define ASM_ARCH_MAINSTONE_H
 
-#include <mach/irqs.h>
+#include "irqs.h"
 
 #define MST_ETH_PHYS           PXA_CS4_PHYS
 
index 6a5451b..57b0782 100644 (file)
@@ -16,8 +16,9 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/syscore_ops.h>
+#include <linux/soc/pxa/cpu.h>
 
-#include <mach/pxa2xx-regs.h>
+#include "pxa2xx-regs.h"
 #include "mfp-pxa2xx.h"
 
 #include "generic.h"
index 980145e..683a3ea 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef __ASM_ARCH_MFP_PXA2XX_H
 #define __ASM_ARCH_MFP_PXA2XX_H
 
-#include <plat/mfp.h>
+#include <linux/soc/pxa/mfp.h>
 
 /*
  * the following MFP_xxx bit definitions in mfp.h are re-used for pxa2xx:
index 56114df..d16ab74 100644 (file)
@@ -16,9 +16,8 @@
 #include <linux/io.h>
 #include <linux/syscore_ops.h>
 
-#include <mach/hardware.h>
 #include "mfp-pxa3xx.h"
-#include <mach/pxa3xx-regs.h>
+#include "pxa3xx-regs.h"
 
 #ifdef CONFIG_PM
 /*
index cdd8309..81fec4f 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef __ASM_ARCH_MFP_PXA3XX_H
 #define __ASM_ARCH_MFP_PXA3XX_H
 
-#include <plat/mfp.h>
+#include <linux/soc/pxa/mfp.h>
 
 #define MFPR_BASE      (0x40e10000)
 
similarity index 91%
rename from arch/arm/mach-pxa/include/mach/mfp.h
rename to arch/arm/mach-pxa/mfp.h
index dbb961f..7e0879b 100644 (file)
@@ -13,6 +13,6 @@
 #ifndef __ASM_ARCH_MFP_H
 #define __ASM_ARCH_MFP_H
 
-#include <plat/mfp.h>
+#include <linux/soc/pxa/mfp.h>
 
 #endif /* __ASM_ARCH_MFP_H */
index a79f296..d08f962 100644 (file)
@@ -41,8 +41,8 @@
 #include "udc.h"
 #include "pxa27x-udc.h"
 #include <linux/platform_data/media/camera-pxa.h>
-#include <mach/audio.h>
-#include <mach/smemc.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "smemc.h"
 
 #include "mioa701.h"
 
index fde386f..35546b5 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/usb-ohci-pxa27x.h>
+#include <linux/platform_data/asoc-pxa.h>
 #include "pxa320.h"
 
 #include "mxm8x10.h"
@@ -356,14 +357,9 @@ void __init mxm_8x10_usb_host_init(void)
        pxa_set_ohci_info(&mxm_8x10_ohci_platform_data);
 }
 
-/* AC97 Sound Support */
-static struct platform_device mxm_8x10_ac97_device = {
-       .name = "pxa2xx-ac97"
-};
-
 void __init mxm_8x10_ac97_init(void)
 {
-       platform_device_register(&mxm_8x10_ac97_device);
+       pxa_set_ac97_info(NULL);
 }
 
 /* NAND flash Support */
index 6230381..1a8d25e 100644 (file)
@@ -25,7 +25,7 @@
 #include <asm/mach/map.h>
 
 #include "pxa27x.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/irda-pxaficp.h>
similarity index 98%
rename from drivers/pcmcia/pxa2xx_palmld.c
rename to arch/arm/mach-pxa/palmld-pcmcia.c
index cfff41a..720294a 100644 (file)
@@ -13,8 +13,9 @@
 #include <linux/gpio.h>
 
 #include <asm/mach-types.h>
-#include <mach/palmld.h>
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
+
+#include "palmld.h"
 
 static struct gpio palmld_pcmcia_gpios[] = {
        { GPIO_NR_PALMLD_PCMCIA_POWER,  GPIOF_INIT_LOW, "PCMCIA Power" },
index 5f73716..32308c6 100644 (file)
@@ -29,8 +29,8 @@
 #include <asm/mach/map.h>
 
 #include "pxa27x.h"
-#include <mach/audio.h>
-#include <mach/palmld.h>
+#include "palmld.h"
+#include <linux/platform_data/asoc-pxa.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/irda-pxaficp.h>
@@ -279,9 +279,15 @@ static inline void palmld_leds_init(void) {}
  * HDD
  ******************************************************************************/
 #if defined(CONFIG_PATA_PALMLD) || defined(CONFIG_PATA_PALMLD_MODULE)
+static struct resource palmld_ide_resources[] = {
+       DEFINE_RES_MEM(PALMLD_IDE_PHYS, 0x1000),
+};
+
 static struct platform_device palmld_ide_device = {
-       .name   = "pata_palmld",
-       .id     = -1,
+       .name           = "pata_palmld",
+       .id             = -1,
+       .resource       = palmld_ide_resources,
+       .num_resources  = ARRAY_SIZE(palmld_ide_resources),
 };
 
 static struct gpiod_lookup_table palmld_ide_gpio_table = {
@@ -341,6 +347,14 @@ static struct gpiod_lookup_table palmld_mci_gpio_table = {
        },
 };
 
+static struct gpiod_lookup_table palmld_wm97xx_touch_gpio_table = {
+       .dev_id = "wm97xx-touch",
+       .table = {
+               GPIO_LOOKUP("gpio-pxa", 27, "touch", GPIO_ACTIVE_HIGH),
+               { },
+       },
+};
+
 static void __init palmld_init(void)
 {
        pxa2xx_mfp_config(ARRAY_AND_SIZE(palmld_pin_config));
@@ -349,6 +363,7 @@ static void __init palmld_init(void)
        pxa_set_stuart_info(NULL);
 
        palm27x_mmc_init(&palmld_mci_gpio_table);
+       gpiod_add_lookup_table(&palmld_wm97xx_touch_gpio_table);
        palm27x_pm_init(PALMLD_STR_BASE);
        palm27x_lcd_init(-1, &palm_320x480_lcd_mode);
        palm27x_irda_init(GPIO_NR_PALMLD_IR_DISABLE);
index 7c7cbb4..463b62e 100644 (file)
@@ -29,7 +29,7 @@
 #include <asm/mach/map.h>
 
 #include "pxa27x.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 #include "palmt5.h"
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/video-pxafb.h>
@@ -190,6 +190,14 @@ static struct gpiod_lookup_table palmt5_mci_gpio_table = {
        },
 };
 
+static struct gpiod_lookup_table palmt5_wm97xx_touch_gpio_table = {
+       .dev_id = "wm97xx-touch",
+       .table = {
+               GPIO_LOOKUP("gpio-pxa", 27, "touch", GPIO_ACTIVE_HIGH),
+               { },
+       },
+};
+
 static void __init palmt5_init(void)
 {
        pxa2xx_mfp_config(ARRAY_AND_SIZE(palmt5_pin_config));
@@ -198,6 +206,7 @@ static void __init palmt5_init(void)
        pxa_set_stuart_info(NULL);
 
        palm27x_mmc_init(&palmt5_mci_gpio_table);
+       gpiod_add_lookup_table(&palmt5_wm97xx_touch_gpio_table);
        palm27x_pm_init(PALMT5_STR_BASE);
        palm27x_lcd_init(-1, &palm_320x480_lcd_mode);
        palm27x_udc_init(GPIO_NR_PALMT5_USB_DETECT_N,
index 1fb1da7..cf84aed 100644 (file)
@@ -11,7 +11,7 @@
 #ifndef _INCLUDE_PALMT5_H_
 #define _INCLUDE_PALMT5_H_
 
-#include <mach/irqs.h> /* PXA_GPIO_TO_IRQ */
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
 
 /** HERE ARE GPIOs **/
 
similarity index 98%
rename from drivers/pcmcia/pxa2xx_palmtc.c
rename to arch/arm/mach-pxa/palmtc-pcmcia.c
index 8fe0561..8e3f382 100644 (file)
@@ -14,8 +14,8 @@
 #include <linux/delay.h>
 
 #include <asm/mach-types.h>
-#include <mach/palmtc.h>
-#include "soc_common.h"
+#include "palmtc.h"
+#include <pcmcia/soc_common.h>
 
 static struct gpio palmtc_pcmcia_gpios[] = {
        { GPIO_NR_PALMTC_PCMCIA_POWER1, GPIOF_INIT_LOW, "PCMCIA Power 1" },
index 455cb8c..3054ffa 100644 (file)
@@ -29,8 +29,8 @@
 #include <asm/mach/map.h>
 
 #include "pxa25x.h"
-#include <mach/audio.h>
-#include <mach/palmtc.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "palmtc.h"
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/irda-pxaficp.h>
index a2b10db..fedac67 100644 (file)
@@ -29,7 +29,7 @@
 #include <asm/mach/map.h>
 
 #include "pxa25x.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 #include "palmte2.h"
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/video-pxafb.h>
index 2bf0f7f..238a31f 100644 (file)
@@ -29,7 +29,7 @@
 
 #include "pxa27x.h"
 #include "pxa27x-udc.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 #include "palmtreo.h"
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/video-pxafb.h>
@@ -37,7 +37,7 @@
 #include <linux/platform_data/keypad-pxa27x.h>
 #include "udc.h"
 #include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <mach/pxa2xx-regs.h>
+#include "pxa2xx-regs.h"
 #include <linux/platform_data/asoc-palm27x.h>
 #include <linux/platform_data/media/camera-pxa.h>
 #include "palm27x.h"
similarity index 98%
rename from drivers/pcmcia/pxa2xx_palmtx.c
rename to arch/arm/mach-pxa/palmtx-pcmcia.c
index c449ca7..8c2aaad 100644 (file)
@@ -12,8 +12,8 @@
 #include <linux/gpio.h>
 
 #include <asm/mach-types.h>
-#include <mach/palmtx.h>
-#include "soc_common.h"
+#include "palmtx.h"
+#include <pcmcia/soc_common.h>
 
 static struct gpio palmtx_pcmcia_gpios[] = {
        { GPIO_NR_PALMTX_PCMCIA_POWER1, GPIOF_INIT_LOW, "PCMCIA Power 1" },
index 07332c9..c0d0762 100644 (file)
@@ -32,8 +32,8 @@
 #include <asm/mach/map.h>
 
 #include "pxa27x.h"
-#include <mach/audio.h>
-#include <mach/palmtx.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "palmtx.h"
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/irda-pxaficp.h>
@@ -345,6 +345,14 @@ static struct gpiod_lookup_table palmtx_mci_gpio_table = {
        },
 };
 
+static struct gpiod_lookup_table palmtx_wm97xx_touch_gpio_table = {
+       .dev_id = "wm97xx-touch",
+       .table = {
+               GPIO_LOOKUP("gpio-pxa", 27, "touch", GPIO_ACTIVE_HIGH),
+               { },
+       },
+};
+
 static void __init palmtx_init(void)
 {
        pxa2xx_mfp_config(ARRAY_AND_SIZE(palmtx_pin_config));
@@ -353,6 +361,7 @@ static void __init palmtx_init(void)
        pxa_set_stuart_info(NULL);
 
        palm27x_mmc_init(&palmtx_mci_gpio_table);
+       gpiod_add_lookup_table(&palmtx_wm97xx_touch_gpio_table);
        palm27x_pm_init(PALMTX_STR_BASE);
        palm27x_lcd_init(-1, &palm_320x480_lcd_mode);
        palm27x_udc_init(GPIO_NR_PALMTX_USB_DETECT_N,
index b4a5fe0..66e8fe6 100644 (file)
@@ -34,7 +34,7 @@
 #include <asm/mach/map.h>
 
 #include "pxa27x.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 #include "palmz72.h"
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/video-pxafb.h>
index 0c4ab63..58ade4a 100644 (file)
@@ -10,7 +10,7 @@
  * Definitions of CPU card resources only
  */
 
-#include <mach/irqs.h> /* PXA_GPIO_TO_IRQ */
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
 
 /* phyCORE-PXA270 (PCM027) Interrupts */
 #define PCM027_IRQ(x)          (IRQ_BOARD_START + (x))
index 8dfcc36..33a9d2e 100644 (file)
@@ -26,7 +26,7 @@
 
 #include <asm/mach/map.h>
 #include "pxa27x.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/usb-ohci-pxa27x.h>
 #include "pcm990_baseboard.h"
index 5be11d1..18cf71d 100644 (file)
@@ -7,7 +7,7 @@
  */
 
 #include "pcm027.h"
-#include <mach/irqs.h> /* PXA_GPIO_TO_IRQ */
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
 
 /*
  * definitions relevant only when the PCM-990
index 58cfa43..7772a39 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/mtd/sharpsl.h>
 #include <linux/memblock.h>
 
-#include <mach/hardware.h>
 #include <asm/mach-types.h>
 #include <asm/irq.h>
 #include <asm/setup.h>
 #include <asm/mach/irq.h>
 
 #include "pxa25x.h"
-#include <linux/platform_data/mmc-pxamci.h>
 #include "udc.h"
+#include "poodle.h"
+
+#include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/irda-pxaficp.h>
-#include <mach/poodle.h>
 #include <linux/platform_data/video-pxafb.h>
+#include <linux/platform_data/asoc-poodle.h>
 
 #include <asm/hardware/scoop.h>
 #include <asm/hardware/locomo.h>
@@ -156,12 +157,6 @@ static struct scoop_pcmcia_config poodle_pcmcia_config = {
 
 EXPORT_SYMBOL(poodle_scoop_device);
 
-
-static struct platform_device poodle_audio_device = {
-       .name   = "poodle-audio",
-       .id     = -1,
-};
-
 /* LoCoMo device */
 static struct resource locomo_resources[] = {
        [0] = {
@@ -180,7 +175,7 @@ static struct locomo_platform_data locomo_info = {
        .irq_base       = IRQ_BOARD_START,
 };
 
-struct platform_device poodle_locomo_device = {
+static struct platform_device poodle_locomo_device = {
        .name           = "locomo",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(locomo_resources),
@@ -190,7 +185,21 @@ struct platform_device poodle_locomo_device = {
        },
 };
 
-EXPORT_SYMBOL(poodle_locomo_device);
+static struct poodle_audio_platform_data poodle_audio_pdata = {
+       .locomo_dev     = &poodle_locomo_device.dev,
+
+       .gpio_amp_on    = POODLE_LOCOMO_GPIO_AMP_ON,
+       .gpio_mute_l    = POODLE_LOCOMO_GPIO_MUTE_L,
+       .gpio_mute_r    = POODLE_LOCOMO_GPIO_MUTE_R,
+       .gpio_232vcc_on = POODLE_LOCOMO_GPIO_232VCC_ON,
+       .gpio_jk_b      = POODLE_LOCOMO_GPIO_JK_B,
+};
+
+static struct platform_device poodle_audio_device = {
+       .name   = "poodle-audio",
+       .id     = -1,
+       .dev.platform_data = &poodle_audio_pdata,
+};
 
 #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
 static struct pxa2xx_spi_controller poodle_spi_info = {
similarity index 98%
rename from arch/arm/mach-pxa/include/mach/poodle.h
rename to arch/arm/mach-pxa/poodle.h
index b56b193..00798b4 100644 (file)
@@ -89,6 +89,4 @@
 
 #define POODLE_NR_IRQS         (IRQ_BOARD_START + 4)   /* 4 for LoCoMo */
 
-extern struct platform_device poodle_locomo_device;
-
 #endif /* __ASM_ARCH_POODLE_H  */
index d32d5c8..5e5d543 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/of_platform.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
-#include <mach/irqs.h>
+#include "irqs.h"
 
 #include "generic.h"
 
diff --git a/arch/arm/mach-pxa/pxa-regs.h b/arch/arm/mach-pxa/pxa-regs.h
new file mode 100644 (file)
index 0000000..ba5120c
--- /dev/null
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Author:    Nicolas Pitre
+ *  Created:   Jun 15, 2001
+ *  Copyright: MontaVista Software Inc.
+ */
+#ifndef __ASM_MACH_PXA_REGS_H
+#define __ASM_MACH_PXA_REGS_H
+
+/*
+ * Workarounds for at least 2 errata so far require this.
+ * The mapping is set in mach-pxa/generic.c.
+ */
+#define UNCACHED_PHYS_0                0xfe000000
+#define UNCACHED_PHYS_0_SIZE   0x00100000
+
+/*
+ * Intel PXA2xx internal register mapping:
+ *
+ * 0x40000000 - 0x41ffffff <--> 0xf2000000 - 0xf3ffffff
+ * 0x44000000 - 0x45ffffff <--> 0xf4000000 - 0xf5ffffff
+ * 0x48000000 - 0x49ffffff <--> 0xf6000000 - 0xf7ffffff
+ * 0x4c000000 - 0x4dffffff <--> 0xf8000000 - 0xf9ffffff
+ * 0x50000000 - 0x51ffffff <--> 0xfa000000 - 0xfbffffff
+ * 0x54000000 - 0x55ffffff <--> 0xfc000000 - 0xfdffffff
+ * 0x58000000 - 0x59ffffff <--> 0xfe000000 - 0xffffffff
+ *
+ * Note that not all PXA2xx chips implement all those addresses, and the
+ * kernel only maps the minimum needed range of this mapping.
+ */
+#define io_v2p(x) (0x3c000000 + ((x) & 0x01ffffff) + (((x) & 0x0e000000) << 1))
+#define io_p2v(x) IOMEM(0xf2000000 + ((x) & 0x01ffffff) + (((x) & 0x1c000000) >> 1))
+
+#ifndef __ASSEMBLY__
+# define __REG(x)      (*((volatile u32 __iomem *)io_p2v(x)))
+
+/* With indexed regs we don't want to feed the index through io_p2v()
+   especially if it is a variable, otherwise horrible code will result. */
+# define __REG2(x,y)   \
+       (*(volatile u32 __iomem*)((u32)&__REG(x) + (y)))
+
+# define __PREG(x)     (io_v2p((u32)&(x)))
+
+#else
+
+# define __REG(x)      io_p2v(x)
+# define __PREG(x)     io_v2p(x)
+
+#endif
+
+
+#endif
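The new pxa-regs.h keeps the fixed peripheral window used by __REG(). A small host-side sketch of the same address arithmetic, handy for checking a mapping by hand (IOMEM() dropped, since in the kernel it is only a cast):

#include <stdio.h>

#define io_v2p(x) (0x3c000000 + ((x) & 0x01ffffff) + (((x) & 0x0e000000) << 1))
#define io_p2v(x) (0xf2000000 + ((x) & 0x01ffffff) + (((x) & 0x1c000000) >> 1))

int main(void)
{
	unsigned long ost_phys = 0x40a00000;	/* OS timer block */
	unsigned long virt = io_p2v(ost_phys);

	/* 0x40a00000 -> 0xf2a00000, and io_v2p() maps it back. */
	printf("phys 0x%08lx -> virt 0x%08lx -> phys 0x%08lx\n",
	       ost_phys, virt, (unsigned long)io_v2p(virt));
	return 0;
}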
index 678641a..6b34d7c 100644 (file)
 #include <linux/irq.h>
 #include <linux/irqchip.h>
 #include <linux/platform_data/mmp_dma.h>
+#include <linux/soc/pxa/cpu.h>
 
 #include <asm/mach/map.h>
 #include <asm/suspend.h>
-#include <mach/hardware.h>
-#include <mach/irqs.h>
+#include "irqs.h"
 #include "pxa25x.h"
-#include <mach/reset.h>
+#include "reset.h"
 #include "pm.h"
-#include <mach/dma.h>
-#include <mach/smemc.h>
+#include "addr-map.h"
+#include "smemc.h"
 
 #include "generic.h"
 #include "devices.h"
@@ -240,7 +240,7 @@ static int __init pxa25x_init(void)
 
        if (cpu_is_pxa25x()) {
 
-               reset_status = RCSR;
+               pxa_register_wdt(RCSR);
 
                pxa25x_init_pm();
 
index b58d0fb..eaaa876 100644 (file)
@@ -2,9 +2,9 @@
 #ifndef __MACH_PXA25x_H
 #define __MACH_PXA25x_H
 
-#include <mach/hardware.h>
-#include <mach/pxa2xx-regs.h>
+#include "addr-map.h"
+#include "pxa2xx-regs.h"
 #include "mfp-pxa25x.h"
-#include <mach/irqs.h>
+#include "irqs.h"
 
 #endif /* __MACH_PXA25x_H */
index faf7380..2d3df3b 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef _ASM_ARCH_PXA27X_UDC_H
 #define _ASM_ARCH_PXA27X_UDC_H
 
+#include "pxa-regs.h"
+
 #ifdef _ASM_ARCH_PXA25X_UDC_H
 #error You cannot include both PXA25x and PXA27x UDC support
 #endif
index f0ba7ed..afbf6ac 100644 (file)
 #include <linux/irq.h>
 #include <linux/platform_data/i2c-pxa.h>
 #include <linux/platform_data/mmp_dma.h>
+#include <linux/soc/pxa/cpu.h>
 
 #include <asm/mach/map.h>
-#include <mach/hardware.h>
 #include <asm/irq.h>
 #include <asm/suspend.h>
-#include <mach/irqs.h>
+#include "irqs.h"
 #include "pxa27x.h"
-#include <mach/reset.h>
+#include "reset.h"
 #include <linux/platform_data/usb-ohci-pxa27x.h>
 #include "pm.h"
-#include <mach/dma.h>
-#include <mach/smemc.h>
+#include "addr-map.h"
+#include "smemc.h"
 
 #include "generic.h"
 #include "devices.h"
@@ -337,7 +337,7 @@ static int __init pxa27x_init(void)
 
        if (cpu_is_pxa27x()) {
 
-               reset_status = RCSR;
+               pxa_register_wdt(RCSR);
 
                pxa27x_init_pm();
 
index abdc02f..ede96f3 100644 (file)
@@ -3,10 +3,10 @@
 #define __MACH_PXA27x_H
 
 #include <linux/suspend.h>
-#include <mach/hardware.h>
-#include <mach/pxa2xx-regs.h>
+#include "addr-map.h"
+#include "pxa2xx-regs.h"
 #include "mfp-pxa27x.h"
-#include <mach/irqs.h>
+#include "irqs.h"
 
 #define ARB_CNTRL      __REG(0x48000048)  /* Arbiter Control Register */
 
similarity index 76%
rename from arch/arm/mach-pxa/include/mach/pxa2xx-regs.h
rename to arch/arm/mach-pxa/pxa2xx-regs.h
index fa121e1..0b7eaf6 100644 (file)
@@ -11,7 +11,7 @@
 #ifndef __PXA2XX_REGS_H
 #define __PXA2XX_REGS_H
 
-#include <mach/hardware.h>
+#include "pxa-regs.h"
 
 /*
  * Power Manager
 #define CKEN           io_p2v(0x41300004)  /* Clock Enable Register */
 #define OSCC           io_p2v(0x41300008)  /* Oscillator Configuration Register */
 
-#define CCCR_N_MASK    0x0380  /* Run Mode Frequency to Turbo Mode Frequency Multiplier */
-#define CCCR_M_MASK    0x0060  /* Memory Frequency to Run Mode Frequency Multiplier */
-#define CCCR_L_MASK    0x001f  /* Crystal Frequency to Memory Frequency Multiplier */
-
-#define CCCR_CPDIS_BIT (31)
-#define CCCR_PPDIS_BIT (30)
-#define CCCR_LCD_26_BIT        (27)
-#define CCCR_A_BIT     (25)
-
-#define CCSR_N2_MASK   CCCR_N_MASK
-#define CCSR_M_MASK    CCCR_M_MASK
-#define CCSR_L_MASK    CCCR_L_MASK
-#define CCSR_N2_SHIFT  7
-
-#define CKEN_AC97CONF   (31)    /* AC97 Controller Configuration */
-#define CKEN_CAMERA    (24)    /* Camera Interface Clock Enable */
-#define CKEN_SSP1      (23)    /* SSP1 Unit Clock Enable */
-#define CKEN_MEMC      (22)    /* Memory Controller Clock Enable */
-#define CKEN_MEMSTK    (21)    /* Memory Stick Host Controller */
-#define CKEN_IM                (20)    /* Internal Memory Clock Enable */
-#define CKEN_KEYPAD    (19)    /* Keypad Interface Clock Enable */
-#define CKEN_USIM      (18)    /* USIM Unit Clock Enable */
-#define CKEN_MSL       (17)    /* MSL Unit Clock Enable */
-#define CKEN_LCD       (16)    /* LCD Unit Clock Enable */
-#define CKEN_PWRI2C    (15)    /* PWR I2C Unit Clock Enable */
-#define CKEN_I2C       (14)    /* I2C Unit Clock Enable */
-#define CKEN_FICP      (13)    /* FICP Unit Clock Enable */
-#define CKEN_MMC       (12)    /* MMC Unit Clock Enable */
-#define CKEN_USB       (11)    /* USB Unit Clock Enable */
-#define CKEN_ASSP      (10)    /* ASSP (SSP3) Clock Enable */
-#define CKEN_USBHOST   (10)    /* USB Host Unit Clock Enable */
-#define CKEN_OSTIMER   (9)     /* OS Timer Unit Clock Enable */
-#define CKEN_NSSP      (9)     /* NSSP (SSP2) Clock Enable */
-#define CKEN_I2S       (8)     /* I2S Unit Clock Enable */
-#define CKEN_BTUART    (7)     /* BTUART Unit Clock Enable */
-#define CKEN_FFUART    (6)     /* FFUART Unit Clock Enable */
-#define CKEN_STUART    (5)     /* STUART Unit Clock Enable */
-#define CKEN_HWUART    (4)     /* HWUART Unit Clock Enable */
-#define CKEN_SSP3      (4)     /* SSP3 Unit Clock Enable */
-#define CKEN_SSP       (3)     /* SSP Unit Clock Enable */
-#define CKEN_SSP2      (3)     /* SSP2 Unit Clock Enable */
-#define CKEN_AC97      (2)     /* AC97 Unit Clock Enable */
-#define CKEN_PWM1      (1)     /* PWM1 Clock Enable */
-#define CKEN_PWM0      (0)     /* PWM0 Clock Enable */
-
 #define OSCC_OON       (1 << 1)        /* 32.768kHz OON (write-once only bit) */
 #define OSCC_OOK       (1 << 0)        /* 32.768kHz OOK (read-only bit) */
 
index 2d26cd2..4aafd69 100644 (file)
 #include <linux/device.h>
 #include <linux/io.h>
 
-#include <mach/hardware.h>
-#include <mach/pxa2xx-regs.h>
+#include "pxa2xx-regs.h"
 #include "mfp-pxa25x.h"
-#include <mach/reset.h>
+#include "generic.h"
+#include "reset.h"
+#include "smemc.h"
+#include <linux/soc/pxa/smemc.h>
 #include <linux/platform_data/irda-pxaficp.h>
 
 void pxa2xx_clear_reset_status(unsigned int mask)
@@ -51,3 +53,27 @@ void pxa2xx_transceiver_mode(struct device *dev, int mode)
                BUG();
 }
 EXPORT_SYMBOL_GPL(pxa2xx_transceiver_mode);
+
+#define MDCNFG_DRAC2(mdcnfg)   (((mdcnfg) >> 21) & 0x3)
+#define MDCNFG_DRAC0(mdcnfg)   (((mdcnfg) >> 5) & 0x3)
+
+int pxa2xx_smemc_get_sdram_rows(void)
+{
+       static int sdram_rows;
+       unsigned int drac2 = 0, drac0 = 0;
+       u32 mdcnfg;
+
+       if (sdram_rows)
+               return sdram_rows;
+
+       mdcnfg = readl_relaxed(MDCNFG);
+
+       if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
+               drac2 = MDCNFG_DRAC2(mdcnfg);
+
+       if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
+               drac0 = MDCNFG_DRAC0(mdcnfg);
+
+       sdram_rows = 1 << (11 + max(drac0, drac2));
+       return sdram_rows;
+}
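pxa2xx_smemc_get_sdram_rows() now derives the SDRAM row count from the MDCNFG DRAC fields instead of a board constant. A standalone sketch of the decode with a made-up register value (the kernel code above additionally checks the MDCNFG_DE* enable bits before trusting each field):

#include <stdio.h>

#define MDCNFG_DRAC2(mdcnfg)	(((mdcnfg) >> 21) & 0x3)
#define MDCNFG_DRAC0(mdcnfg)	(((mdcnfg) >> 5) & 0x3)

int main(void)
{
	unsigned int mdcnfg = 2u << 5;		/* made-up value: DRAC0 field = 2 */
	unsigned int drac0 = MDCNFG_DRAC0(mdcnfg);
	unsigned int drac2 = MDCNFG_DRAC2(mdcnfg);	/* 0 here */
	unsigned int rows = 1u << (11 + (drac0 > drac2 ? drac0 : drac2));

	printf("rows = %u\n", rows);	/* 1 << 13 = 8192 */
	return 0;
}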
index 7f2f5a6..f77ec11 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
+#include <linux/soc/pxa/cpu.h>
 
 #include "pxa300.h"
 
index 78abcc7..e372e6c 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
+#include <linux/soc/pxa/cpu.h>
 
 #include "pxa320.h"
 
similarity index 61%
rename from arch/arm/mach-pxa/include/mach/pxa3xx-regs.h
rename to arch/arm/mach-pxa/pxa3xx-regs.h
index 070f6c7..4b11cf8 100644 (file)
@@ -10,7 +10,7 @@
 #ifndef __ASM_ARCH_PXA3XX_REGS_H
 #define __ASM_ARCH_PXA3XX_REGS_H
 
-#include <mach/hardware.h>
+#include "pxa-regs.h"
 
 /*
  * Oscillator Configuration Register (OSCC)
 #define CKENC          __REG(0x41340024)       /* C Clock Enable Register */
 #define AC97_DIV       __REG(0x41340014)       /* AC97 clock divisor value register */
 
-#define ACCR_XPDIS             (1 << 31)       /* Core PLL Output Disable */
-#define ACCR_SPDIS             (1 << 30)       /* System PLL Output Disable */
-#define ACCR_D0CS              (1 << 26)       /* D0 Mode Clock Select */
-#define ACCR_PCCE              (1 << 11)       /* Power Mode Change Clock Enable */
-#define ACCR_DDR_D0CS          (1 << 7)        /* DDR SDRAM clock frequency in D0CS (PXA31x only) */
-
-#define ACCR_SMCFS_MASK                (0x7 << 23)     /* Static Memory Controller Frequency Select */
-#define ACCR_SFLFS_MASK                (0x3 << 18)     /* Frequency Select for Internal Memory Controller */
-#define ACCR_XSPCLK_MASK       (0x3 << 16)     /* Core Frequency during Frequency Change */
-#define ACCR_HSS_MASK          (0x3 << 14)     /* System Bus-Clock Frequency Select */
-#define ACCR_DMCFS_MASK                (0x3 << 12)     /* Dynamic Memory Controller Clock Frequency Select */
-#define ACCR_XN_MASK           (0x7 << 8)      /* Core PLL Turbo-Mode-to-Run-Mode Ratio */
-#define ACCR_XL_MASK           (0x1f)          /* Core PLL Run-Mode-to-Oscillator Ratio */
-
-#define ACCR_SMCFS(x)          (((x) & 0x7) << 23)
-#define ACCR_SFLFS(x)          (((x) & 0x3) << 18)
-#define ACCR_XSPCLK(x)         (((x) & 0x3) << 16)
-#define ACCR_HSS(x)            (((x) & 0x3) << 14)
-#define ACCR_DMCFS(x)          (((x) & 0x3) << 12)
-#define ACCR_XN(x)             (((x) & 0x7) << 8)
-#define ACCR_XL(x)             ((x) & 0x1f)
-
-/*
- * Clock Enable Bit
- */
-#define CKEN_LCD       1       /* < LCD Clock Enable */
-#define CKEN_USBH      2       /* < USB host clock enable */
-#define CKEN_CAMERA    3       /* < Camera interface clock enable */
-#define CKEN_NAND      4       /* < NAND Flash Controller Clock Enable */
-#define CKEN_USB2      6       /* < USB 2.0 client clock enable. */
-#define CKEN_DMC       8       /* < Dynamic Memory Controller clock enable */
-#define CKEN_SMC       9       /* < Static Memory Controller clock enable */
-#define CKEN_ISC       10      /* < Internal SRAM Controller clock enable */
-#define CKEN_BOOT      11      /* < Boot rom clock enable */
-#define CKEN_MMC1      12      /* < MMC1 Clock enable */
-#define CKEN_MMC2      13      /* < MMC2 clock enable */
-#define CKEN_KEYPAD    14      /* < Keypand Controller Clock Enable */
-#define CKEN_CIR       15      /* < Consumer IR Clock Enable */
-#define CKEN_USIM0     17      /* < USIM[0] Clock Enable */
-#define CKEN_USIM1     18      /* < USIM[1] Clock Enable */
-#define CKEN_TPM       19      /* < TPM clock enable */
-#define CKEN_UDC       20      /* < UDC clock enable */
-#define CKEN_BTUART    21      /* < BTUART clock enable */
-#define CKEN_FFUART    22      /* < FFUART clock enable */
-#define CKEN_STUART    23      /* < STUART clock enable */
-#define CKEN_AC97      24      /* < AC97 clock enable */
-#define CKEN_TOUCH     25      /* < Touch screen Interface Clock Enable */
-#define CKEN_SSP1      26      /* < SSP1 clock enable */
-#define CKEN_SSP2      27      /* < SSP2 clock enable */
-#define CKEN_SSP3      28      /* < SSP3 clock enable */
-#define CKEN_SSP4      29      /* < SSP4 clock enable */
-#define CKEN_MSL0      30      /* < MSL0 clock enable */
-#define CKEN_PWM0      32      /* < PWM[0] clock enable */
-#define CKEN_PWM1      33      /* < PWM[1] clock enable */
-#define CKEN_I2C       36      /* < I2C clock enable */
-#define CKEN_INTC      38      /* < Interrupt controller clock enable */
-#define CKEN_GPIO      39      /* < GPIO clock enable */
-#define CKEN_1WIRE     40      /* < 1-wire clock enable */
-#define CKEN_HSIO2     41      /* < HSIO2 clock enable */
-#define CKEN_MINI_IM   48      /* < Mini-IM */
-#define CKEN_MINI_LCD  49      /* < Mini LCD */
-
-#define CKEN_MMC3      5       /* < MMC3 Clock Enable */
-#define CKEN_MVED      43      /* < MVED clock enable */
-
-/* Note: GCU clock enable bit differs on PXA300/PXA310 and PXA320 */
-#define CKEN_PXA300_GCU                42      /* Graphics controller clock enable */
-#define CKEN_PXA320_GCU                7       /* Graphics controller clock enable */
-
 #endif /* __ASM_ARCH_PXA3XX_REGS_H */
index 4bd7da1..c29a7f0 100644 (file)
@@ -21,8 +21,8 @@
 #include <linux/clk.h>
 #include <linux/usb.h>
 #include <linux/usb/otg.h>
+#include <linux/soc/pxa/cpu.h>
 
-#include <mach/hardware.h>
 #include "regs-u2d.h"
 #include <linux/platform_data/usb-pxa3xx-ulpi.h>
 
index 5601606..979642a 100644 (file)
 #include <linux/syscore_ops.h>
 #include <linux/platform_data/i2c-pxa.h>
 #include <linux/platform_data/mmp_dma.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/clk/pxa.h>
 
 #include <asm/mach/map.h>
 #include <asm/suspend.h>
-#include <mach/hardware.h>
-#include <mach/pxa3xx-regs.h>
-#include <mach/reset.h>
+#include "pxa3xx-regs.h"
+#include "reset.h"
 #include <linux/platform_data/usb-ohci-pxa27x.h>
 #include "pm.h"
-#include <mach/dma.h>
-#include <mach/smemc.h>
-#include <mach/irqs.h>
+#include "addr-map.h"
+#include "smemc.h"
+#include "irqs.h"
 
 #include "generic.h"
 #include "devices.h"
@@ -51,6 +52,10 @@ extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));
 #define NDCR_ND_ARB_EN         (1 << 12)
 #define NDCR_ND_ARB_CNTL       (1 << 19)
 
+#define CKEN_BOOT              11      /* < Boot rom clock enable */
+#define CKEN_TPM               19      /* < TPM clock enable */
+#define CKEN_HSIO2             41      /* < HSIO2 clock enable */
+
 #ifdef CONFIG_PM
 
 #define ISRAM_START    0x5c000000
@@ -463,7 +468,7 @@ static int __init pxa3xx_init(void)
 
        if (cpu_is_pxa3xx()) {
 
-               reset_status = ARSR;
+               pxa_register_wdt(ARSR);
 
                /*
                 * clear RDH bit every time after reset
index 6d4502a..81825f7 100644 (file)
@@ -2,8 +2,8 @@
 #ifndef __MACH_PXA3XX_H        
 #define __MACH_PXA3XX_H
 
-#include <mach/hardware.h>
-#include <mach/pxa3xx-regs.h>
-#include <mach/irqs.h>
+#include "addr-map.h"
+#include "pxa3xx-regs.h"
+#include "irqs.h"
 
 #endif /* __MACH_PXA3XX_H */
index bf91de4..b9021a4 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/irq.h>
 #include <linux/gpio-pxa.h>
 #include <linux/platform_device.h>
+#include <linux/soc/pxa/cpu.h>
 
 #include "pxa930.h"
 
similarity index 94%
rename from arch/arm/mach-pxa/include/mach/regs-ost.h
rename to arch/arm/mach-pxa/regs-ost.h
index deb564e..c8001cf 100644 (file)
@@ -2,11 +2,13 @@
 #ifndef __ASM_MACH_REGS_OST_H
 #define __ASM_MACH_REGS_OST_H
 
-#include <mach/hardware.h>
+#include "pxa-regs.h"
 
 /*
  * OS Timer & Match Registers
  */
+#define OST_PHYS       0x40A00000
+#define OST_LEN                0x00000020
 
 #define OSMR0          io_p2v(0x40A00000)  /* */
 #define OSMR1          io_p2v(0x40A00004)  /* */
index b1f9ff1..96255a0 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef __ASM_MACH_REGS_RTC_H
 #define __ASM_MACH_REGS_RTC_H
 
-#include <mach/hardware.h>
+#include "pxa-regs.h"
 
 /*
  * Real Time Clock
index fe4c80a..ab517ba 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef __ASM_ARCH_PXA3xx_U2D_H
 #define __ASM_ARCH_PXA3xx_U2D_H
 
-#include <mach/bitfield.h>
-
 /*
  * USB2 device controller registers and bits definitions
  */
similarity index 99%
rename from arch/arm/mach-pxa/include/mach/regs-uart.h
rename to arch/arm/mach-pxa/regs-uart.h
index 9a168f8..490e9ca 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef __ASM_ARCH_REGS_UART_H
 #define __ASM_ARCH_REGS_UART_H
 
+#include "pxa-regs.h"
+
 /*
  * UARTs
  */
index af78405..f0be905 100644 (file)
@@ -7,12 +7,9 @@
 #include <asm/proc-fns.h>
 #include <asm/system_misc.h>
 
-#include <mach/regs-ost.h>
-#include <mach/reset.h>
-#include <mach/smemc.h>
-
-unsigned int reset_status;
-EXPORT_SYMBOL(reset_status);
+#include "regs-ost.h"
+#include "reset.h"
+#include "smemc.h"
 
 static void do_hw_reset(void);
 
similarity index 92%
rename from arch/arm/mach-pxa/include/mach/reset.h
rename to arch/arm/mach-pxa/reset.h
index e1c4d10..963dd19 100644 (file)
@@ -8,8 +8,8 @@
 #define RESET_STATUS_GPIO      (1 << 3)        /* GPIO Reset */
 #define RESET_STATUS_ALL       (0xf)
 
-extern unsigned int reset_status;
 extern void clear_reset_status(unsigned int mask);
+extern void pxa_register_wdt(unsigned int reset_status);
 
 /**
  * init_gpio_reset() - register GPIO as reset generator
index 83cfbb8..a829baf 100644 (file)
@@ -24,7 +24,7 @@
 
 #include <asm/mach-types.h>
 #include "pm.h"
-#include <mach/pxa2xx-regs.h>
+#include "pxa2xx-regs.h"
 #include "regs-rtc.h"
 #include "sharpsl_pm.h"
 
index 6c5b3ff..d58cf52 100644 (file)
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
-#include <mach/hardware.h>
-#include <mach/smemc.h>
-#include <mach/pxa2xx-regs.h>
+#include "smemc.h"
+#include "pxa2xx-regs.h"
 
 #define MDREFR_KDIV    0x200a4000      // all banks
 #define CCCR_SLEEP     0x00000107      // L=7 2N=2 A=0 PPDIS=0 CPDIS=0
-
+#define CCCR_N_MASK     0x00000380
+#define CCCR_M_MASK     0x00000060
+#define CCCR_L_MASK     0x0000001f
                .text
 
 #ifdef CONFIG_PXA3xx
index 32e82cc..2d2a321 100644 (file)
@@ -8,9 +8,10 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/syscore_ops.h>
+#include <linux/soc/pxa/cpu.h>
 
-#include <mach/hardware.h>
-#include <mach/smemc.h>
+#include "smemc.h"
+#include <linux/soc/pxa/smemc.h>
 
 #ifdef CONFIG_PM
 static unsigned long msc[2];
@@ -70,3 +71,11 @@ static int __init smemc_init(void)
 }
 subsys_initcall(smemc_init);
 #endif
+
+static const unsigned int df_clkdiv[4] = { 1, 2, 4, 1 };
+unsigned int pxa3xx_smemc_get_memclkdiv(void)
+{
+       unsigned long memclkcfg = __raw_readl(MEMCLKCFG);
+
+       return  df_clkdiv[(memclkcfg >> 16) & 0x3];
+}
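pxa3xx_smemc_get_memclkdiv() exposes the memory-clock divider encoded in MEMCLKCFG bits [17:16], presumably for the code behind the newly included linux/soc/pxa/smemc.h. The decode is a two-bit table index; a standalone sketch with an assumed register value:

#include <stdio.h>

static const unsigned int df_clkdiv[4] = { 1, 2, 4, 1 };

int main(void)
{
	unsigned long memclkcfg = 0x00010000;	/* assumed value: field = 1 */

	/* Field values 0 and 3 mean divide by 1, 1 means /2, 2 means /4. */
	printf("memclkdiv = %u\n", df_clkdiv[(memclkcfg >> 16) & 0x3]);
	return 0;
}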
index a648e70..dd88953 100644 (file)
 
 #include "pxa27x.h"
 #include "pxa27x-udc.h"
-#include <mach/reset.h>
+#include "reset.h"
 #include <linux/platform_data/irda-pxaficp.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/usb-ohci-pxa27x.h>
 #include <linux/platform_data/video-pxafb.h>
-#include <mach/spitz.h>
+#include "spitz.h"
 #include "sharpsl_pm.h"
-#include <mach/smemc.h>
+#include "smemc.h"
 
 #include "generic.h"
 #include "devices.h"
@@ -962,11 +962,42 @@ static void __init spitz_i2c_init(void)
 static inline void spitz_i2c_init(void) {}
 #endif
 
+static struct gpiod_lookup_table spitz_audio_gpio_table = {
+       .dev_id = "spitz-audio",
+       .table = {
+               GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_L - SPITZ_SCP_GPIO_BASE,
+                           "mute-l", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_R - SPITZ_SCP_GPIO_BASE,
+                           "mute-r", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("sharp-scoop.1", SPITZ_GPIO_MIC_BIAS - SPITZ_SCP2_GPIO_BASE,
+                           "mic", GPIO_ACTIVE_HIGH),
+               { },
+       },
+};
+
+static struct gpiod_lookup_table akita_audio_gpio_table = {
+       .dev_id = "spitz-audio",
+       .table = {
+               GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_L - SPITZ_SCP_GPIO_BASE,
+                           "mute-l", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_R - SPITZ_SCP_GPIO_BASE,
+                           "mute-r", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("i2c-max7310", AKITA_GPIO_MIC_BIAS - AKITA_IOEXP_GPIO_BASE,
+                           "mic", GPIO_ACTIVE_HIGH),
+               { },
+       },
+};
+
 /******************************************************************************
  * Audio devices
  ******************************************************************************/
 static inline void spitz_audio_init(void)
 {
+       if (machine_is_akita())
+               gpiod_add_lookup_table(&akita_audio_gpio_table);
+       else
+               gpiod_add_lookup_table(&spitz_audio_gpio_table);
+
        platform_device_register_simple("spitz-audio", -1, NULL, 0);
 }
 
index 25a1f8c..6689b67 100644 (file)
@@ -18,9 +18,8 @@
 
 #include <asm/irq.h>
 #include <asm/mach-types.h>
-#include <mach/hardware.h>
 
-#include <mach/spitz.h>
+#include "spitz.h"
 #include "pxa27x.h"
 #include "sharpsl_pm.h"
 
index eab1645..938310b 100644 (file)
@@ -11,9 +11,8 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
-#include <mach/hardware.h>
 
-#include <mach/pxa2xx-regs.h>
+#include "pxa2xx-regs.h"
 
                .text
 
index 4317097..6af8bc4 100644 (file)
 #include <asm/mach-types.h>
 
 #include "pxa25x.h"
-#include <mach/reset.h>
+#include "reset.h"
 #include <linux/platform_data/irda-pxaficp.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include "udc.h"
 #include "tosa_bt.h"
-#include <mach/audio.h>
-#include <mach/smemc.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "smemc.h"
 
 #include <asm/mach/arch.h>
-#include <mach/tosa.h>
+#include "tosa.h"
 
 #include <asm/hardware/scoop.h>
 #include <asm/mach/sharpsl_param.h>
@@ -296,9 +296,9 @@ static struct gpiod_lookup_table tosa_mci_gpio_table = {
        .table = {
                GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_nSD_DETECT,
                            "cd", GPIO_ACTIVE_LOW),
-               GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_SD_WP,
+               GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_SD_WP - TOSA_SCOOP_GPIO_BASE,
                            "wp", GPIO_ACTIVE_LOW),
-               GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_PWR_ON,
+               GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_PWR_ON - TOSA_SCOOP_GPIO_BASE,
                            "power", GPIO_ACTIVE_HIGH),
                { },
        },
@@ -616,6 +616,22 @@ static struct resource tc6393xb_resources[] = {
        },
 };
 
+static struct gpiod_lookup_table tosa_battery_gpio_table = {
+       .dev_id = "wm97xx-battery",
+       .table = {
+               GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_BAT0_CRG,
+                           "main battery full", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_BAT1_CRG,
+                           "jacket battery full", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_BAT0_LOW,
+                           "main battery low", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_BAT1_LOW,
+                           "jacket battery low", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_JACKET_DETECT,
+                           "jacket detect", GPIO_ACTIVE_HIGH),
+               { },
+       },
+};
 
 static int tosa_tc6393xb_enable(struct platform_device *dev)
 {
@@ -709,31 +725,6 @@ static struct tmio_nand_data tosa_tc6393xb_nand_config = {
        .part_parsers = probes,
 };
 
-static int tosa_tc6393xb_setup(struct platform_device *dev)
-{
-       int rc;
-
-       rc = gpio_request(TOSA_GPIO_CARD_VCC_ON, "CARD_VCC_ON");
-       if (rc)
-               goto err_req;
-
-       rc = gpio_direction_output(TOSA_GPIO_CARD_VCC_ON, 1);
-       if (rc)
-               goto err_dir;
-
-       return rc;
-
-err_dir:
-       gpio_free(TOSA_GPIO_CARD_VCC_ON);
-err_req:
-       return rc;
-}
-
-static void tosa_tc6393xb_teardown(struct platform_device *dev)
-{
-       gpio_free(TOSA_GPIO_CARD_VCC_ON);
-}
-
 #ifdef CONFIG_MFD_TC6393XB
 static struct fb_videomode tosa_tc6393xb_lcd_mode[] = {
        {
@@ -778,9 +769,6 @@ static struct tc6393xb_platform_data tosa_tc6393xb_data = {
        .scr_gper       = 0x3300,
 
        .irq_base       = IRQ_BOARD_START,
-       .gpio_base      = TOSA_TC6393XB_GPIO_BASE,
-       .setup          = tosa_tc6393xb_setup,
-       .teardown       = tosa_tc6393xb_teardown,
 
        .enable         = tosa_tc6393xb_enable,
        .disable        = tosa_tc6393xb_disable,
@@ -821,26 +809,6 @@ static struct pxa2xx_spi_controller pxa_ssp_master_info = {
        .num_chipselect = 1,
 };
 
-static struct gpiod_lookup_table tosa_lcd_gpio_table = {
-       .dev_id = "spi2.0",
-       .table = {
-               GPIO_LOOKUP("tc6393xb",
-                           TOSA_GPIO_TG_ON - TOSA_TC6393XB_GPIO_BASE,
-                           "tg #pwr", GPIO_ACTIVE_HIGH),
-               { },
-       },
-};
-
-static struct gpiod_lookup_table tosa_lcd_bl_gpio_table = {
-       .dev_id = "i2c-tosa-bl",
-       .table = {
-               GPIO_LOOKUP("tc6393xb",
-                           TOSA_GPIO_BL_C20MA - TOSA_TC6393XB_GPIO_BASE,
-                           "backlight", GPIO_ACTIVE_HIGH),
-               { },
-       },
-};
-
 static struct spi_board_info spi_board_info[] __initdata = {
        {
                .modalias       = "tosa-lcd",
@@ -943,6 +911,8 @@ static void __init tosa_init(void)
        /* enable batt_fault */
        PMCR = 0x01;
 
+       gpiod_add_lookup_table(&tosa_battery_gpio_table);
+
        gpiod_add_lookup_table(&tosa_mci_gpio_table);
        pxa_set_mci_info(&tosa_mci_platform_data);
        pxa_set_ficp_info(&tosa_ficp_platform_data);
@@ -951,8 +921,6 @@ static void __init tosa_init(void)
        platform_scoop_config = &tosa_pcmcia_config;
 
        pxa2xx_set_spi_info(2, &pxa_ssp_master_info);
-       gpiod_add_lookup_table(&tosa_lcd_gpio_table);
-       gpiod_add_lookup_table(&tosa_lcd_bl_gpio_table);
        spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
 
        clk_add_alias("CLK_CK3P6MI", tc6393xb_device.name, "GPIO11_CLK", NULL);
similarity index 88%
rename from arch/arm/mach-pxa/include/mach/tosa.h
rename to arch/arm/mach-pxa/tosa.h
index 8bfaca3..3b3efa0 100644 (file)
 #define TOSA_SCOOP_JC_IO_DIR (TOSA_SCOOP_JC_CARD_LIMIT_SEL)
 
 /*
- * TC6393XB GPIOs
- */
-#define TOSA_TC6393XB_GPIO_BASE                (PXA_NR_BUILTIN_GPIO + 2 * 12)
-
-#define TOSA_GPIO_TG_ON                        (TOSA_TC6393XB_GPIO_BASE + 0)
-#define TOSA_GPIO_L_MUTE               (TOSA_TC6393XB_GPIO_BASE + 1)
-#define TOSA_GPIO_BL_C20MA             (TOSA_TC6393XB_GPIO_BASE + 3)
-#define TOSA_GPIO_CARD_VCC_ON          (TOSA_TC6393XB_GPIO_BASE + 4)
-#define TOSA_GPIO_CHARGE_OFF           (TOSA_TC6393XB_GPIO_BASE + 6)
-#define TOSA_GPIO_CHARGE_OFF_JC                (TOSA_TC6393XB_GPIO_BASE + 7)
-#define TOSA_GPIO_BAT0_V_ON            (TOSA_TC6393XB_GPIO_BASE + 9)
-#define TOSA_GPIO_BAT1_V_ON            (TOSA_TC6393XB_GPIO_BASE + 10)
-#define TOSA_GPIO_BU_CHRG_ON           (TOSA_TC6393XB_GPIO_BASE + 11)
-#define TOSA_GPIO_BAT_SW_ON            (TOSA_TC6393XB_GPIO_BASE + 12)
-#define TOSA_GPIO_BAT0_TH_ON           (TOSA_TC6393XB_GPIO_BASE + 14)
-#define TOSA_GPIO_BAT1_TH_ON           (TOSA_TC6393XB_GPIO_BASE + 15)
-
-/*
  * PXA GPIOs
  */
 #define TOSA_GPIO_POWERON              (0)
similarity index 98%
rename from drivers/pcmcia/pxa2xx_trizeps4.c
rename to arch/arm/mach-pxa/trizeps4-pcmcia.c
index 6db8fe8..25e3637 100644 (file)
 #include <asm/mach-types.h>
 #include <asm/irq.h>
 
-#include <mach/pxa2xx-regs.h>
-#include <mach/trizeps4.h>
+#include "pxa2xx-regs.h"
+#include "trizeps4.h"
 
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
 
 extern void board_pcmcia_power(int power);
 
index f76f8be..716cce8 100644 (file)
 #include <asm/mach/flash.h>
 
 #include "pxa27x.h"
-#include <mach/trizeps4.h>
-#include <mach/audio.h>
+#include "trizeps4.h"
+#include <linux/platform_data/asoc-pxa.h>
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/irda-pxaficp.h>
 #include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <mach/smemc.h>
+#include "smemc.h"
 
 #include "generic.h"
 #include "devices.h"
similarity index 99%
rename from arch/arm/mach-pxa/include/mach/trizeps4.h
rename to arch/arm/mach-pxa/trizeps4.h
index 3cddb14..b6c19d1 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef _TRIPEPS4_H_
 #define _TRIPEPS4_H_
 
+#include "addr-map.h"
 #include "irqs.h" /* PXA_GPIO_TO_IRQ */
 
 /* physical memory regions */
similarity index 97%
rename from drivers/pcmcia/pxa2xx_viper.c
rename to arch/arm/mach-pxa/viper-pcmcia.c
index 7ac6647..26599dc 100644 (file)
 #include <linux/gpio.h>
 
 #include <pcmcia/ss.h>
+#include <pcmcia/soc_common.h>
 
 #include <asm/irq.h>
 
-#include <linux/platform_data/pcmcia-pxa2xx_viper.h>
-
-#include "soc_common.h"
-#include "pxa2xx_base.h"
+#include "viper-pcmcia.h"
 
 static struct platform_device *arcom_pcmcia_dev;
 
index 3aa34e9..5b43351 100644 (file)
 #include <linux/syscore_ops.h>
 
 #include "pxa25x.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 #include <linux/platform_data/video-pxafb.h>
-#include <mach/regs-uart.h>
-#include <linux/platform_data/pcmcia-pxa2xx_viper.h>
+#include "regs-uart.h"
+#include "viper-pcmcia.h"
 #include "viper.h"
 
 #include <asm/setup.h>
@@ -851,7 +851,7 @@ static void __init viper_init_vcore_gpios(void)
                goto err_dir;
 
        /* c/should assume redboot set the correct level ??? */
-       viper_set_core_cpu_voltage(get_clk_frequency_khz(0), 1);
+       viper_set_core_cpu_voltage(pxa25x_get_clk_frequency_khz(0), 1);
 
        return;
 
@@ -998,6 +998,18 @@ static struct map_desc viper_io_desc[] __initdata = {
                .length  = 0x00800000,
                .type    = MT_DEVICE,
        },
+       {
+               /*
+                * ISA I/O space mapping:
+                * -  ports 0x0000-0x0fff are PC/104
+                * -  ports 0x10000-0x10fff are PCMCIA slot 1
+                * -  ports 0x11000-0x11fff are PC/104
+                */
+               .virtual = PCI_IO_VIRT_BASE,
+               .pfn     = __phys_to_pfn(0x30000000),
+               .length  = 0x1000,
+               .type    = MT_DEVICE,
+       },
 };
 
 static void __init viper_map_io(void)
similarity index 98%
rename from drivers/pcmcia/pxa2xx_vpac270.c
rename to arch/arm/mach-pxa/vpac270-pcmcia.c
index 3565add..9fd990c 100644 (file)
@@ -13,9 +13,9 @@
 
 #include <asm/mach-types.h>
 
-#include <mach/vpac270.h>
+#include "vpac270.h"
 
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
 
 static struct gpio vpac270_pcmcia_gpios[] = {
        { GPIO107_VPAC270_PCMCIA_PPEN,  GPIOF_INIT_LOW, "PCMCIA PPEN" },
index 14505e8..8f74baf 100644 (file)
@@ -29,8 +29,8 @@
 #include <asm/mach/arch.h>
 
 #include "pxa27x.h"
-#include <mach/audio.h>
-#include <mach/vpac270.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "vpac270.h"
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/usb-ohci-pxa27x.h>
index f485146..6bb02b6 100644 (file)
@@ -24,9 +24,9 @@
 #include <asm/mach/irq.h>
 #include <asm/mach/map.h>
 
-#include <mach/hardware.h>
 #include "pxa25x.h"
-#include <mach/smemc.h>
+#include "addr-map.h"
+#include "smemc.h"
 
 #include "generic.h"
 #include "devices.h"
index 7eaeda2..d035205 100644 (file)
@@ -34,7 +34,7 @@
 
 #include "pxa27x.h"
 #include "mfp-pxa27x.h"
-#include <mach/z2.h>
+#include "z2.h"
 #include <linux/platform_data/video-pxafb.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/keypad-pxa27x.h>
@@ -651,6 +651,15 @@ static void __init z2_spi_init(void)
 static inline void z2_spi_init(void) {}
 #endif
 
+static struct gpiod_lookup_table z2_audio_gpio_table = {
+       .dev_id = "soc-audio",
+       .table = {
+               GPIO_LOOKUP("gpio-pxa", GPIO37_ZIPITZ2_HEADSET_DETECT,
+                           "hsdet-gpio", GPIO_ACTIVE_HIGH),
+               { },
+       },
+};
+
 /******************************************************************************
  * Core power regulator
  ******************************************************************************/
@@ -755,6 +764,8 @@ static void __init z2_init(void)
        z2_keys_init();
        z2_pmic_init();
 
+       gpiod_add_lookup_table(&z2_audio_gpio_table);
+
        pm_power_off = z2_power_off;
 }
 
index 9770042..ff0d8bb 100644 (file)
 
 #include "pxa27x.h"
 #include "devices.h"
-#include <mach/regs-uart.h>
+#include "regs-uart.h"
 #include <linux/platform_data/usb-ohci-pxa27x.h>
 #include <linux/platform_data/mmc-pxamci.h>
 #include "pxa27x-udc.h"
 #include "udc.h"
 #include <linux/platform_data/video-pxafb.h>
 #include "pm.h"
-#include <mach/audio.h>
-#include <linux/platform_data/pcmcia-pxa2xx_viper.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include "viper-pcmcia.h"
 #include "zeus.h"
-#include <mach/smemc.h>
+#include "smemc.h"
 
 #include "generic.h"
 
@@ -929,6 +929,18 @@ static struct map_desc zeus_io_desc[] __initdata = {
                .length  = 0x00800000,
                .type    = MT_DEVICE,
        },
+       {
+               /*
+                * ISA I/O space mapping:
+                * -  ports 0x0000-0x0fff are PC/104
+                * -  ports 0x10000-0x10fff are PCMCIA slot 1
+                * -  ports 0x11000-0x11fff are PC/104
+                */
+               .virtual = PCI_IO_VIRT_BASE,
+               .pfn     = __phys_to_pfn(ZEUS_PC104IO_PHYS),
+               .length  = 0x1000,
+               .type    = MT_DEVICE,
+       },
 };
 
 static void __init zeus_map_io(void)
index 79f0025..8ed75ac 100644 (file)
 #include <linux/pwm.h>
 #include <linux/pwm_backlight.h>
 #include <linux/smc91x.h>
+#include <linux/soc/pxa/cpu.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include "pxa3xx.h"
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 #include <linux/platform_data/video-pxafb.h>
 #include "zylonite.h"
 #include <linux/platform_data/mmc-pxamci.h>
 #include <linux/platform_data/usb-ohci-pxa27x.h>
 #include <linux/platform_data/keypad-pxa27x.h>
 #include <linux/platform_data/mtd-nand-pxa3xx.h>
+#include "mfp.h"
 
 #include "devices.h"
 #include "generic.h"
@@ -424,6 +426,35 @@ static void __init zylonite_init_ohci(void)
 static inline void zylonite_init_ohci(void) {}
 #endif /* CONFIG_USB_OHCI_HCD || CONFIG_USB_OHCI_HCD_MODULE */
 
+static struct gpiod_lookup_table zylonite_wm97xx_touch_gpio15_table = {
+       .dev_id = "wm97xx-touch.0",
+       .table = {
+               GPIO_LOOKUP("gpio-pxa", mfp_to_gpio(MFP_PIN_GPIO15),
+                           "touch", GPIO_ACTIVE_LOW),
+               { },
+       },
+};
+
+static struct gpiod_lookup_table zylonite_wm97xx_touch_gpio26_table = {
+       .dev_id = "wm97xx-touch.0",
+       .table = {
+               GPIO_LOOKUP("gpio-pxa", mfp_to_gpio(MFP_PIN_GPIO26),
+                           "touch", GPIO_ACTIVE_LOW),
+               { },
+       },
+};
+
+static void __init zylonite_init_wm97xx_touch(void)
+{
+       if (!IS_ENABLED(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE))
+               return;
+
+       if (cpu_is_pxa320())
+               gpiod_add_lookup_table(&zylonite_wm97xx_touch_gpio15_table);
+       else
+               gpiod_add_lookup_table(&zylonite_wm97xx_touch_gpio26_table);
+}
+
 static void __init zylonite_init(void)
 {
        pxa_set_ffuart_info(NULL);
@@ -449,6 +480,7 @@ static void __init zylonite_init(void)
        zylonite_init_nand();
        zylonite_init_leds();
        zylonite_init_ohci();
+       zylonite_init_wm97xx_touch();
 }
 
 MACHINE_START(ZYLONITE, "PXA3xx Platform Development Kit (aka Zylonite)")
index 7300ec2..afe3efc 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef __ASM_ARCH_ZYLONITE_H
 #define __ASM_ARCH_ZYLONITE_H
 
+#include <linux/soc/pxa/cpu.h>
+
 #define ZYLONITE_ETH_PHYS      0x14000000
 
 #define EXT_GPIO(x)            (128 + (x))
index 956fec1..50a8a35 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/platform_data/i2c-pxa.h>
 #include <linux/platform_data/pca953x.h>
 #include <linux/gpio.h>
+#include <linux/soc/pxa/cpu.h>
 
 #include "pxa300.h"
 #include "devices.h"
index 94cb834..67cab4f 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/gpio.h>
+#include <linux/soc/pxa/cpu.h>
 
 #include "pxa320.h"
 #include "zylonite.h"
index 4dfb755..6c21f21 100644 (file)
@@ -39,9 +39,6 @@
 #include "generic.h"
 #include <clocksource/pxa.h>
 
-unsigned int reset_status;
-EXPORT_SYMBOL(reset_status);
-
 #define NR_FREQS       16
 
 /*
@@ -319,10 +316,13 @@ static struct platform_device *sa11x0_devices[] __initdata = {
 
 static int __init sa1100_init(void)
 {
+       struct resource wdt_res = DEFINE_RES_MEM(0x90000000, 0x20);
        pm_power_off = sa1100_power_off;
 
        regulator_has_full_constraints();
 
+       platform_device_register_simple("sa1100_wdt", -1, &wdt_res, 1);
+
        return platform_add_devices(sa11x0_devices, ARRAY_SIZE(sa11x0_devices));
 }
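The platform_device_register_simple() call added above creates the "sa1100_wdt" device on the fly, carrying its register window as a memory resource; whichever driver declares the same name binds to it and maps that window. A hypothetical consumer-side skeleton (illustrative only, not the real drivers/watchdog code):

#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int sa1100_wdt_probe(struct platform_device *pdev)
{
	/* Maps the DEFINE_RES_MEM(0x90000000, 0x20) window registered above. */
	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... set up and register a watchdog_device here ... */
	return 0;
}

static struct platform_driver sa1100_wdt_driver = {
	.probe	= sa1100_wdt_probe,
	.driver	= {
		.name	= "sa1100_wdt",	/* must match the device name above */
	},
};
module_platform_driver(sa1100_wdt_driver);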
 
index 2769565..a6723d4 100644 (file)
@@ -10,7 +10,6 @@
 #define RESET_STATUS_GPIO      (1 << 3)        /* GPIO Reset */
 #define RESET_STATUS_ALL       (0xf)
 
-extern unsigned int reset_status;
 static inline void clear_reset_status(unsigned int mask)
 {
        RCSR = mask;
index 6f0909d..c86e796 100644 (file)
@@ -29,6 +29,7 @@ static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
        int tmp;
 
        asm volatile ("\
+.arch xscale                                   \n\
        pld     [%1, #0]                        \n\
        pld     [%1, #32]                       \n\
 1:     pld     [%1, #64]                       \n\
@@ -80,6 +81,7 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
        void *ptr, *kaddr = kmap_atomic(page);
        asm volatile ("\
+.arch xscale                                   \n\
        mov     r1, %2                          \n\
        mov     r2, #0                          \n\
        mov     r3, #0                          \n\
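The .arch xscale directives added to these inline assembly blocks appear to be there so the assembler accepts XScale/ARMv5TE-only instructions such as pld even when the rest of the multi-platform build targets an older architecture level; the directive takes effect from that point in the emitted assembly onwards. A small standalone illustration of the same idiom (the helper name is made up):

/* Illustration only: prefetch one cache line with pld, an ARMv5TE instruction. */
static inline void xscale_prefetch(const void *p)
{
	asm volatile(
	".arch xscale			\n"	/* widen the accepted instruction set */
	"	pld	[%0]		\n"
	: : "r" (p));
}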
index 2907023..576c0e6 100644 (file)
@@ -455,7 +455,7 @@ void iounmap(volatile void __iomem *cookie)
 }
 EXPORT_SYMBOL(iounmap);
 
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
 static int pci_ioremap_mem_type = MT_DEVICE;
 
 void pci_ioremap_set_mem_type(int mem_type)
index a496884..1652a98 100644 (file)
@@ -2157,10 +2157,6 @@ config DMI
 
 endmenu # "Boot options"
 
-config SYSVIPC_COMPAT
-       def_bool y
-       depends on COMPAT && SYSVIPC
-
 menu "Power management options"
 
 source "kernel/power/Kconfig"
index 0b54774..c2a7238 100644 (file)
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
-dtb-$(CONFIG_ARCH_INTEL_SOCFPGA) += socfpga_agilex_socdk.dtb \
+dtb-$(CONFIG_ARCH_INTEL_SOCFPGA) += socfpga_agilex_n6000.dtb \
+                               socfpga_agilex_socdk.dtb \
                                socfpga_agilex_socdk_nand.dtb \
                                socfpga_n5x_socdk.dtb
 dtb-$(CONFIG_ARCH_KEEMBAY) += keembay-evm.dtb
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_n6000.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_n6000.dts
new file mode 100644 (file)
index 0000000..6231a69
--- /dev/null
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021-2022, Intel Corporation
+ */
+#include "socfpga_agilex.dtsi"
+
+/ {
+       model = "SoCFPGA Agilex n6000";
+       compatible = "intel,socfpga-agilex-n6000", "intel,socfpga-agilex";
+
+       aliases {
+               serial0 = &uart1;
+               serial1 = &uart0;
+               ethernet0 = &gmac0;
+               ethernet1 = &gmac1;
+               ethernet2 = &gmac2;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+
+       memory@0 {
+               device_type = "memory";
+               /* We expect the bootloader to fill in the reg */
+               reg = <0 0 0 0>;
+       };
+
+       soc {
+               bus@80000000 {
+                       compatible = "simple-bus";
+                       reg = <0x80000000 0x60000000>,
+                               <0xf9000000 0x00100000>;
+                       reg-names = "axi_h2f", "axi_h2f_lw";
+                       #address-cells = <2>;
+                       #size-cells = <1>;
+                       ranges = <0x00000000 0x00000000 0xf9000000 0x00001000>;
+
+                       dma-controller@0 {
+                               compatible = "intel,hps-copy-engine";
+                               reg = <0x00000000 0x00000000 0x00001000>;
+                               #dma-cells = <1>;
+                       };
+               };
+       };
+};
+
+&osc1 {
+       clock-frequency = <25000000>;
+};
+
+&uart0 {
+       status = "okay";
+};
+
+&uart1 {
+       status = "okay";
+};
+
+&watchdog0 {
+       status = "okay";
+};
+
+&fpga_mgr {
+       status = "disabled";
+};
index 56e54ce..49afbb1 100644 (file)
 &usb2 {
        status = "okay";
        extcon = <&usb2_id>;
+};
 
-       dwc3@7600000 {
-               extcon = <&usb2_id>;
-               dr_mode = "otg";
-               maximum-speed = "high-speed";
-       };
+&usb2_dwc3 {
+       extcon = <&usb2_id>;
+       dr_mode = "otg";
+       maximum-speed = "high-speed";
 };
 
 &usb3 {
        status = "okay";
        extcon = <&usb3_id>;
+};
 
-       dwc3@6a00000 {
-               extcon = <&usb3_id>;
-               dr_mode = "otg";
-       };
+&usb3_dwc3 {
+       extcon = <&usb3_id>;
+       dr_mode = "otg";
 };
 
 &usb3phy {
index a4d363c..c89499e 100644 (file)
                        status = "disabled";
                };
 
-               usb2: usb2@7000000 {
+               usb2: usb@70f8800 {
                        compatible = "qcom,ipq6018-dwc3", "qcom,dwc3";
                        reg = <0x0 0x070F8800 0x0 0x400>;
                        #address-cells = <2>;
                        clocks = <&gcc GCC_USB1_MASTER_CLK>,
                                 <&gcc GCC_USB1_SLEEP_CLK>,
                                 <&gcc GCC_USB1_MOCK_UTMI_CLK>;
-                       clock-names = "master",
+                       clock-names = "core",
                                      "sleep",
                                      "mock_utmi";
 
                        status = "disabled";
                };
 
-               usb3: usb3@8A00000 {
+               usb3: usb@8af8800 {
                        compatible = "qcom,ipq6018-dwc3", "qcom,dwc3";
                        reg = <0x0 0x8AF8800 0x0 0x400>;
                        #address-cells = <2>;
                                <&gcc GCC_USB0_MASTER_CLK>,
                                <&gcc GCC_USB0_SLEEP_CLK>,
                                <&gcc GCC_USB0_MOCK_UTMI_CLK>;
-                       clock-names = "sys_noc_axi",
-                               "master",
+                       clock-names = "cfg_noc",
+                               "core",
                                "sleep",
                                "mock_utmi";
 
                        resets = <&gcc GCC_USB0_BCR>;
                        status = "disabled";
 
-                       dwc_0: usb@8A00000 {
+                       dwc_0: usb@8a00000 {
                                compatible = "snps,dwc3";
                                reg = <0x0 0x8A00000 0x0 0xcd00>;
                                interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
index 943243d..4c38b15 100644 (file)
                };
 
                usb_0: usb@8af8800 {
-                       compatible = "qcom,dwc3";
+                       compatible = "qcom,ipq8074-dwc3", "qcom,dwc3";
                        reg = <0x08af8800 0x400>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                                <&gcc GCC_USB0_MASTER_CLK>,
                                <&gcc GCC_USB0_SLEEP_CLK>,
                                <&gcc GCC_USB0_MOCK_UTMI_CLK>;
-                       clock-names = "sys_noc_axi",
-                               "master",
+                       clock-names = "cfg_noc",
+                               "core",
                                "sleep",
                                "mock_utmi";
 
                        resets = <&gcc GCC_USB0_BCR>;
                        status = "disabled";
 
-                       dwc_0: dwc3@8a00000 {
+                       dwc_0: usb@8a00000 {
                                compatible = "snps,dwc3";
                                reg = <0x8a00000 0xcd00>;
                                interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
                };
 
                usb_1: usb@8cf8800 {
-                       compatible = "qcom,dwc3";
+                       compatible = "qcom,ipq8074-dwc3", "qcom,dwc3";
                        reg = <0x08cf8800 0x400>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                                <&gcc GCC_USB1_MASTER_CLK>,
                                <&gcc GCC_USB1_SLEEP_CLK>,
                                <&gcc GCC_USB1_MOCK_UTMI_CLK>;
-                       clock-names = "sys_noc_axi",
-                               "master",
+                       clock-names = "cfg_noc",
+                               "core",
                                "sleep",
                                "mock_utmi";
 
                        resets = <&gcc GCC_USB1_BCR>;
                        status = "disabled";
 
-                       dwc_1: dwc3@8c00000 {
+                       dwc_1: usb@8c00000 {
                                compatible = "snps,dwc3";
                                reg = <0x8c00000 0xcd00>;
                                interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
index 49903a6..ffc3ec2 100644 (file)
                        clocks = <&gcc GCC_USB_PHY_CFG_AHB_CLK>,
                                 <&gcc GCC_USB30_MASTER_CLK>,
                                 <&gcc GCC_PCNOC_USB3_AXI_CLK>,
-                                <&gcc GCC_USB30_MOCK_UTMI_CLK>,
-                                <&gcc GCC_USB30_SLEEP_CLK>;
-                       clock-names = "cfg_noc", "core", "iface",
-                                     "mock_utmi", "sleep";
+                                <&gcc GCC_USB30_SLEEP_CLK>,
+                                <&gcc GCC_USB30_MOCK_UTMI_CLK>;
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi";
 
                        assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_MASTER_CLK>;
index 367ed91..0318d42 100644 (file)
                };
 
                usb3: usb@f92f8800 {
-                       compatible = "qcom,msm8996-dwc3", "qcom,dwc3";
+                       compatible = "qcom,msm8994-dwc3", "qcom,dwc3";
                        reg = <0xf92f8800 0x400>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                                 <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
                                 <&gcc GCC_USB30_SLEEP_CLK>,
                                 <&gcc GCC_USB30_MOCK_UTMI_CLK>;
-                       clock-names = "core", "iface", "sleep", "mock_utmi", "ref", "xo";
+                       clock-names = "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi";
 
                        assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_MASTER_CLK>;
index be4f643..a7090be 100644 (file)
        extcon = <&typec>;
 
        qcom,select-utmi-as-pipe-clk;
+};
 
-       dwc3@6a00000 {
-               extcon = <&typec>;
+&usb3_dwc3 {
+       extcon = <&typec>;
 
-               /* usb3-phy is not used on this device */
-               phys = <&hsusb_phy1>;
-               phy-names = "usb2-phy";
+       /* usb3-phy is not used on this device */
+       phys = <&hsusb_phy1>;
+       phy-names = "usb2-phy";
 
-               maximum-speed = "high-speed";
-               snps,is-utmi-l1-suspend;
-               snps,usb2-gadget-lpm-disable;
-               snps,hird-threshold = /bits/ 8 <0>;
-       };
+       maximum-speed = "high-speed";
+       snps,is-utmi-l1-suspend;
+       snps,usb2-gadget-lpm-disable;
+       snps,hird-threshold = /bits/ 8 <0>;
 };
 
 &hsusb_phy1 {
index 205af7b..9932186 100644 (file)
                        interrupt-names = "hs_phy_irq", "ss_phy_irq";
 
                        clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
-                               <&gcc GCC_USB30_MASTER_CLK>,
-                               <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
-                               <&gcc GCC_USB30_MOCK_UTMI_CLK>,
-                               <&gcc GCC_USB30_SLEEP_CLK>,
-                               <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+                                <&gcc GCC_USB30_MASTER_CLK>,
+                                <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+                                <&gcc GCC_USB30_SLEEP_CLK>,
+                                <&gcc GCC_USB30_MOCK_UTMI_CLK>;
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi";
 
                        assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_MASTER_CLK>;
                        power-domains = <&gcc USB30_GDSC>;
                        status = "disabled";
 
-                       usb3_dwc3: dwc3@6a00000 {
+                       usb3_dwc3: usb@6a00000 {
                                compatible = "snps,dwc3";
                                reg = <0x06a00000 0xcc00>;
                                interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
                                <&gcc GCC_USB20_MOCK_UTMI_CLK>,
                                <&gcc GCC_USB20_SLEEP_CLK>,
                                <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi";
 
                        assigned-clocks = <&gcc GCC_USB20_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB20_MASTER_CLK>;
                        qcom,select-utmi-as-pipe-clk;
                        status = "disabled";
 
-                       dwc3@7600000 {
+                       usb2_dwc3: usb@7600000 {
                                compatible = "snps,dwc3";
                                reg = <0x07600000 0xcc00>;
                                interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
index 4a84de6..758c45b 100644 (file)
                        clocks = <&gcc GCC_CFG_NOC_USB3_AXI_CLK>,
                                 <&gcc GCC_USB30_MASTER_CLK>,
                                 <&gcc GCC_AGGRE1_USB3_AXI_CLK>,
-                                <&gcc GCC_USB30_MOCK_UTMI_CLK>,
-                                <&gcc GCC_USB30_SLEEP_CLK>;
-                       clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                                     "sleep";
+                                <&gcc GCC_USB30_SLEEP_CLK>,
+                                <&gcc GCC_USB30_MOCK_UTMI_CLK>;
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi";
 
                        assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_MASTER_CLK>;
 
                        resets = <&gcc GCC_USB_30_BCR>;
 
-                       usb3_dwc3: dwc3@a800000 {
+                       usb3_dwc3: usb@a800000 {
                                compatible = "snps,dwc3";
                                reg = <0x0a800000 0xcd00>;
                                interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
index a80c578..2f3104a 100644 (file)
 &usb3 {
        status = "okay";
 
-       dwc3@7580000 {
-               dr_mode = "host";
-       };
+};
+
+&usb3_dwc3 {
+       dr_mode = "host";
 };
 
 &usb2_phy_prim {
index bc446c6..d912166 100644 (file)
                };
 
                usb3: usb@7678800 {
-                       compatible = "qcom,dwc3";
+                       compatible = "qcom,qcs404-dwc3", "qcom,dwc3";
                        reg = <0x07678800 0x400>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        assigned-clock-rates = <19200000>, <200000000>;
                        status = "disabled";
 
-                       dwc3@7580000 {
+                       usb3_dwc3: usb@7580000 {
                                compatible = "snps,dwc3";
                                reg = <0x07580000 0xcd00>;
                                interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
                };
 
                usb2: usb@79b8800 {
-                       compatible = "qcom,dwc3";
+                       compatible = "qcom,qcs404-dwc3", "qcom,dwc3";
                        reg = <0x079b8800 0x400>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        assigned-clock-rates = <19200000>, <133333333>;
                        status = "disabled";
 
-                       dwc3@78c0000 {
+                       usb@78c0000 {
                                compatible = "snps,dwc3";
                                reg = <0x078c0000 0xcc00>;
                                interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
index 82fa009..5dcaac2 100644 (file)
                        clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
                                 <&gcc GCC_USB30_PRIM_MASTER_CLK>,
                                 <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
-                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
-                                <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
-                       clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                                     "sleep";
+                                <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi";
 
                        assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_PRIM_MASTER_CLK>;
                                        <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3 0>;
                        interconnect-names = "usb-ddr", "apps-usb";
 
-                       usb_1_dwc3: dwc3@a600000 {
+                       usb_1_dwc3: usb@a600000 {
                                compatible = "snps,dwc3";
                                reg = <0 0x0a600000 0 0xe000>;
                                interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
index f72451f..e66fc67 100644 (file)
                        clocks = <&gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
                                 <&gcc GCC_USB30_SEC_MASTER_CLK>,
                                 <&gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
-                                <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
-                                <&gcc GCC_USB30_SEC_SLEEP_CLK>;
-                       clock-names = "cfg_noc", "core", "iface","mock_utmi",
-                                     "sleep";
+                                <&gcc GCC_USB30_SEC_SLEEP_CLK>,
+                                <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>;
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi";
 
                        assigned-clocks = <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_SEC_MASTER_CLK>;
                                phys = <&usb_2_hsphy>;
                                phy-names = "usb2-phy";
                                maximum-speed = "high-speed";
+                               usb-role-switch;
+                               port {
+                                       usb2_role_switch: endpoint {
+                                               remote-endpoint = <&eud_ep>;
+                                       };
+                               };
                        };
                };
 
                        interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
                };
 
+               eud: eud@88e0000 {
+                       compatible = "qcom,sc7280-eud","qcom,eud";
+                       reg = <0 0x88e0000 0 0x2000>,
+                             <0 0x88e2000 0 0x1000>;
+                       interrupts-extended = <&pdc 11 IRQ_TYPE_LEVEL_HIGH>;
+                       ports {
+                               port@0 {
+                                       eud_ep: endpoint {
+                                               remote-endpoint = <&usb2_role_switch>;
+                                       };
+                               };
+                               port@1 {
+                                       eud_con: endpoint {
+                                               remote-endpoint = <&con_eud>;
+                                       };
+                               };
+                       };
+               };
+
+               eud_typec: connector {
+                       compatible = "usb-c-connector";
+                       ports {
+                               port@0 {
+                                       con_eud: endpoint {
+                                               remote-endpoint = <&eud_con>;
+                                       };
+                               };
+                       };
+               };
+
                nsp_noc: interconnect@a0c0000 {
                        reg = <0 0x0a0c0000 0 0x10000>;
                        compatible = "qcom,sc7280-nsp-noc";
                        clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
                                 <&gcc GCC_USB30_PRIM_MASTER_CLK>,
                                 <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
-                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
-                                <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
-                       clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                                     "sleep";
+                                <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi";
 
                        assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_PRIM_MASTER_CLK>;
                        assigned-clock-rates = <19200000>, <200000000>;
 
                        interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
-                                             <&pdc 14 IRQ_TYPE_EDGE_BOTH>,
+                                             <&pdc 17 IRQ_TYPE_EDGE_BOTH>,
                                              <&pdc 15 IRQ_TYPE_EDGE_BOTH>,
-                                             <&pdc 17 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupt-names = "hs_phy_irq", "dp_hs_phy_irq",
-                                         "dm_hs_phy_irq", "ss_phy_irq";
+                                             <&pdc 14 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "hs_phy_irq",
+                                         "ss_phy_irq",
+                                         "dm_hs_phy_irq",
+                                         "dp_hs_phy_irq";
 
                        power-domains = <&gcc GCC_USB30_PRIM_GDSC>;
 
index 7f875bf..b72e8e6 100644 (file)
                        clocks = <&gcc GCC_CFG_NOC_USB3_AXI_CLK>,
                                 <&gcc GCC_USB30_MASTER_CLK>,
                                 <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
-                                <&rpmcc RPM_SMD_AGGR2_NOC_CLK>,
+                                <&gcc GCC_USB30_SLEEP_CLK>,
                                 <&gcc GCC_USB30_MOCK_UTMI_CLK>,
-                                <&gcc GCC_USB30_SLEEP_CLK>;
-                       clock-names = "cfg_noc", "core", "iface", "bus",
-                                     "mock_utmi", "sleep";
+                                <&rpmcc RPM_SMD_AGGR2_NOC_CLK>;
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi",
+                                     "bus";
 
                        assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_MASTER_CLK>,
index 692cf4b..0692ae0 100644 (file)
                        clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
                                 <&gcc GCC_USB30_PRIM_MASTER_CLK>,
                                 <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
-                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
-                                <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
-                       clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                                     "sleep";
+                                <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi";
 
                        assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_PRIM_MASTER_CLK>;
                                        <&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_0 0>;
                        interconnect-names = "usb-ddr", "apps-usb";
 
-                       usb_1_dwc3: dwc3@a600000 {
+                       usb_1_dwc3: usb@a600000 {
                                compatible = "snps,dwc3";
                                reg = <0 0x0a600000 0 0xcd00>;
                                interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
                                 <&gcc GCC_USB30_SEC_MASTER_CLK>,
                                 <&gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
-                                <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
-                                <&gcc GCC_USB30_SEC_SLEEP_CLK>;
-                       clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                                     "sleep";
+                                <&gcc GCC_USB30_SEC_SLEEP_CLK>,
+                                <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>;
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi";
 
                        assigned-clocks = <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_SEC_MASTER_CLK>;
                                        <&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_1 0>;
                        interconnect-names = "usb-ddr", "apps-usb";
 
-                       usb_2_dwc3: dwc3@a800000 {
+                       usb_2_dwc3: usb@a800000 {
                                compatible = "snps,dwc3";
                                reg = <0 0x0a800000 0 0xcd00>;
                                interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
index e81b2a7..135e6e0 100644 (file)
                };
 
                usb3: usb@4ef8800 {
-                       compatible = "qcom,msm8996-dwc3", "qcom,dwc3";
+                       compatible = "qcom,sm6125-dwc3", "qcom,dwc3";
                        reg = <0x04ef8800 0x400>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
 
-                       clocks = <&gcc GCC_USB30_PRIM_MASTER_CLK>,
+                       clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
+                                <&gcc GCC_USB30_PRIM_MASTER_CLK>,
                                 <&gcc GCC_SYS_NOC_USB3_PRIM_AXI_CLK>,
-                                <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
-                                <&gcc GCC_USB3_PRIM_CLKREF_CLK>,
                                 <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
-                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+                                <&gcc GCC_USB3_PRIM_CLKREF_CLK>;
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi",
+                                     "xo";
 
                        assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_PRIM_MASTER_CLK>;
index fb1a0f6..d4f8f33 100644 (file)
                        clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
                                 <&gcc GCC_USB30_PRIM_MASTER_CLK>,
                                 <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
-                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
-                                <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
-                       clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                                     "sleep";
+                                <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi";
 
                        interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
                                              <&pdc 17 IRQ_TYPE_LEVEL_HIGH>,
index f70ae4c..8ea44c4 100644 (file)
                        clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
                                 <&gcc GCC_USB30_PRIM_MASTER_CLK>,
                                 <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
-                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                                 <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                                 <&gcc GCC_USB3_SEC_CLKREF_CLK>;
-                       clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                                     "sleep", "xo";
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi",
+                                     "xo";
 
                        assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_PRIM_MASTER_CLK>;
 
                        resets = <&gcc GCC_USB30_PRIM_BCR>;
 
-                       usb_1_dwc3: dwc3@a600000 {
+                       usb_1_dwc3: usb@a600000 {
                                compatible = "snps,dwc3";
                                reg = <0 0x0a600000 0 0xcd00>;
                                interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
                                 <&gcc GCC_USB30_SEC_MASTER_CLK>,
                                 <&gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
-                                <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
                                 <&gcc GCC_USB30_SEC_SLEEP_CLK>,
+                                <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
                                 <&gcc GCC_USB3_SEC_CLKREF_CLK>;
-                       clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                                     "sleep", "xo";
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi",
+                                     "xo";
 
                        assigned-clocks = <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_SEC_MASTER_CLK>;
index dc25620..cf0c97b 100644 (file)
                        clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
                                 <&gcc GCC_USB30_PRIM_MASTER_CLK>,
                                 <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
-                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                                 <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                                 <&gcc GCC_USB3_SEC_CLKREF_EN>;
-                       clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                                     "sleep", "xo";
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi",
+                                     "xo";
 
                        assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_PRIM_MASTER_CLK>;
                        clocks = <&gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
                                 <&gcc GCC_USB30_SEC_MASTER_CLK>,
                                 <&gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
-                                <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
                                 <&gcc GCC_USB30_SEC_SLEEP_CLK>,
+                                <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
                                 <&gcc GCC_USB3_SEC_CLKREF_EN>;
-                       clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                                     "sleep", "xo";
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi",
+                                     "xo";
 
                        assigned-clocks = <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_SEC_MASTER_CLK>;
index c0137bd..743cba9 100644 (file)
                        clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
                                 <&gcc GCC_USB30_PRIM_MASTER_CLK>,
                                 <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
-                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
-                                <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
-                       clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                                     "sleep";
+                                <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>;
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi";
 
                        assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_PRIM_MASTER_CLK>;
                        clocks = <&gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
                                 <&gcc GCC_USB30_SEC_MASTER_CLK>,
                                 <&gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
-                                <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
                                 <&gcc GCC_USB30_SEC_SLEEP_CLK>,
+                                <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
                                 <&gcc GCC_USB3_SEC_CLKREF_EN>;
-                       clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                                     "sleep", "xo";
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi",
+                                     "xo";
 
                        assigned-clocks = <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_SEC_MASTER_CLK>;
index 7f52c3c..7d08fad 100644 (file)
                        clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
                                 <&gcc GCC_USB30_PRIM_MASTER_CLK>,
                                 <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
-                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                                 <&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+                                <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                                 <&gcc GCC_USB3_0_CLKREF_EN>;
-                       clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-                                     "sleep", "xo";
+                       clock-names = "cfg_noc",
+                                     "core",
+                                     "iface",
+                                     "sleep",
+                                     "mock_utmi",
+                                     "xo";
 
                        assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
                                          <&gcc GCC_USB30_PRIM_MASTER_CLK>;
                        assigned-clock-rates = <19200000>, <200000000>;
 
                        interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
-                                             <&pdc 14 IRQ_TYPE_EDGE_BOTH>,
+                                             <&pdc 17 IRQ_TYPE_LEVEL_HIGH>,
                                              <&pdc 15 IRQ_TYPE_EDGE_BOTH>,
-                                             <&pdc 17 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupt-names = "hs_phy_irq", "dp_hs_phy_irq",
-                                         "dm_hs_phy_irq", "ss_phy_irq";
+                                             <&pdc 14 IRQ_TYPE_EDGE_BOTH>;
+                       interrupt-names = "hs_phy_irq",
+                                         "ss_phy_irq",
+                                         "dm_hs_phy_irq",
+                                         "dp_hs_phy_irq";
 
                        power-domains = <&gcc USB30_PRIM_GDSC>;
 
index 1cbe212..2dfa67f 100644 (file)
        cru: clock-controller@ff500000 {
                compatible = "rockchip,rk3308-cru";
                reg = <0x0 0xff500000 0x0 0x1000>;
+               clocks = <&xin24m>;
+               clock-names = "xin24m";
+               rockchip,grf = <&grf>;
                #clock-cells = <1>;
                #reset-cells = <1>;
-               rockchip,grf = <&grf>;
-
                assigned-clocks = <&cru SCLK_RTC32K>;
                assigned-clock-rates = <32768>;
        };
index c99da90..4f0b5fe 100644 (file)
        cru: clock-controller@ff760000 {
                compatible = "rockchip,rk3368-cru";
                reg = <0x0 0xff760000 0x0 0x1000>;
+               clocks = <&xin24m>;
+               clock-names = "xin24m";
                rockchip,grf = <&grf>;
                #clock-cells = <1>;
                #reset-cells = <1>;
index 141a433..1534e11 100644 (file)
                compatible = "brcm,bcm43438-bt";
                clocks = <&rk817 1>;
                clock-names = "lpo";
-               device-wake-gpios = <&gpio2 RK_PC1 GPIO_ACTIVE_HIGH>;
-               host-wake-gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>;
+               device-wakeup-gpios = <&gpio2 RK_PC1 GPIO_ACTIVE_HIGH>;
+               host-wakeup-gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>;
                shutdown-gpios = <&gpio2 RK_PB7 GPIO_ACTIVE_HIGH>;
                pinctrl-names = "default";
                pinctrl-0 = <&bt_host_wake_l &bt_wake_l &bt_enable_h>;
index 1042e68..914f13c 100644 (file)
        cru: clock-controller@fdd20000 {
                compatible = "rockchip,rk3568-cru";
                reg = <0x0 0xfdd20000 0x0 0x1000>;
+               clocks = <&xin24m>;
+               clock-names = "xin24m";
                #clock-cells = <1>;
                #reset-cells = <1>;
                assigned-clocks = <&cru PLL_GPLL>, <&pmucru PLL_PPLL>;
index 79b9591..89d91ab 100644 (file)
                                reg = <0 0x20100000 0 0x4000>;
                                interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
                                #dma-cells = <1>;
+                               /* For backwards compatibility: */
                                #dma-channels = <32>;
+                               dma-channels = <32>;
                                clock-names = "enable";
                                clocks = <&apahb_gate CLK_DMA_EB>;
                        };
                                compatible = "sprd,sc9860-dma";
                                reg = <0 0x41580000 0 0x4000>;
                                #dma-cells = <1>;
+                               /* For backwards compatibility: */
                                #dma-channels = <32>;
+                               dma-channels = <32>;
                                clock-names = "enable", "ashb_eb";
                                clocks = <&agcp_gate CLK_AGCP_DMAAP_EB>,
                                       <&agcp_gate CLK_AGCP_AP_ASHB_EB>;
index eaa6ca0..9f36227 100644 (file)
@@ -8,6 +8,15 @@
 #define compat_mode_t compat_mode_t
 typedef u16            compat_mode_t;
 
+#define __compat_uid_t __compat_uid_t
+typedef u16            __compat_uid_t;
+typedef u16            __compat_gid_t;
+
+#define compat_ipc_pid_t compat_ipc_pid_t
+typedef u16            compat_ipc_pid_t;
+
+#define compat_statfs  compat_statfs
+
 #include <asm-generic/compat.h>
 
 #ifdef CONFIG_COMPAT
@@ -19,21 +28,15 @@ typedef u16         compat_mode_t;
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 
-#define COMPAT_USER_HZ         100
 #ifdef __AARCH64EB__
 #define COMPAT_UTS_MACHINE     "armv8b\0\0"
 #else
 #define COMPAT_UTS_MACHINE     "armv8l\0\0"
 #endif
 
-typedef u16            __compat_uid_t;
-typedef u16            __compat_gid_t;
 typedef u16            __compat_uid16_t;
 typedef u16            __compat_gid16_t;
-typedef u32            compat_dev_t;
 typedef s32            compat_nlink_t;
-typedef u16            compat_ipc_pid_t;
-typedef __kernel_fsid_t        compat_fsid_t;
 
 struct compat_stat {
 #ifdef __AARCH64EB__
@@ -65,26 +68,6 @@ struct compat_stat {
        compat_ulong_t  __unused4[2];
 };
 
-struct compat_flock {
-       short           l_type;
-       short           l_whence;
-       compat_off_t    l_start;
-       compat_off_t    l_len;
-       compat_pid_t    l_pid;
-};
-
-#define F_GETLK64      12      /*  using 'struct flock64' */
-#define F_SETLK64      13
-#define F_SETLKW64     14
-
-struct compat_flock64 {
-       short           l_type;
-       short           l_whence;
-       compat_loff_t   l_start;
-       compat_loff_t   l_len;
-       compat_pid_t    l_pid;
-};
-
 struct compat_statfs {
        int             f_type;
        int             f_bsize;
@@ -107,64 +90,6 @@ struct compat_statfs {
 #define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
 #define COMPAT_MINSIGSTKSZ     2048
 
-struct compat_ipc64_perm {
-       compat_key_t key;
-       __compat_uid32_t uid;
-       __compat_gid32_t gid;
-       __compat_uid32_t cuid;
-       __compat_gid32_t cgid;
-       unsigned short mode;
-       unsigned short __pad1;
-       unsigned short seq;
-       unsigned short __pad2;
-       compat_ulong_t unused1;
-       compat_ulong_t unused2;
-};
-
-struct compat_semid64_ds {
-       struct compat_ipc64_perm sem_perm;
-       compat_ulong_t sem_otime;
-       compat_ulong_t sem_otime_high;
-       compat_ulong_t sem_ctime;
-       compat_ulong_t sem_ctime_high;
-       compat_ulong_t sem_nsems;
-       compat_ulong_t __unused3;
-       compat_ulong_t __unused4;
-};
-
-struct compat_msqid64_ds {
-       struct compat_ipc64_perm msg_perm;
-       compat_ulong_t msg_stime;
-       compat_ulong_t msg_stime_high;
-       compat_ulong_t msg_rtime;
-       compat_ulong_t msg_rtime_high;
-       compat_ulong_t msg_ctime;
-       compat_ulong_t msg_ctime_high;
-       compat_ulong_t msg_cbytes;
-       compat_ulong_t msg_qnum;
-       compat_ulong_t msg_qbytes;
-       compat_pid_t   msg_lspid;
-       compat_pid_t   msg_lrpid;
-       compat_ulong_t __unused4;
-       compat_ulong_t __unused5;
-};
-
-struct compat_shmid64_ds {
-       struct compat_ipc64_perm shm_perm;
-       compat_size_t  shm_segsz;
-       compat_ulong_t shm_atime;
-       compat_ulong_t shm_atime_high;
-       compat_ulong_t shm_dtime;
-       compat_ulong_t shm_dtime_high;
-       compat_ulong_t shm_ctime;
-       compat_ulong_t shm_ctime_high;
-       compat_pid_t   shm_cpid;
-       compat_pid_t   shm_lpid;
-       compat_ulong_t shm_nattch;
-       compat_ulong_t __unused4;
-       compat_ulong_t __unused5;
-};
-
 static inline int is_compat_task(void)
 {
        return test_thread_flag(TIF_32BIT);
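The compat.h hunk leans on the asm-generic/compat.h override convention visible at its top: an architecture that needs a non-default definition both typedefs the name and #defines it to itself before including the generic header, which is assumed to provide each default only under an #ifndef guard. A sketch of the two halves of the pattern, shown in shape only:

/* arch/<arch>/include/asm/compat.h: override, then pull in the defaults. */
#define __compat_uid_t __compat_uid_t
typedef u16 __compat_uid_t;

#include <asm-generic/compat.h>

/* include/asm-generic/compat.h: supply a fallback only when not overridden. */
#ifndef __compat_uid_t
typedef u32 __compat_uid_t;
#endif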
index 4e65da3..037feba 100644 (file)
@@ -3,6 +3,7 @@
  * Copyright (C) 2012 ARM Ltd.
  */
 #ifdef CONFIG_COMPAT
+#define __ARCH_WANT_COMPAT_STAT
 #define __ARCH_WANT_COMPAT_STAT64
 #define __ARCH_WANT_SYS_GETHOSTNAME
 #define __ARCH_WANT_SYS_PAUSE
index 6328308..2e24834 100644 (file)
@@ -427,7 +427,7 @@ int swsusp_arch_resume(void)
                return rc;
 
        /*
-        * We need a zero page that is zero before & after resume in order to
+        * We need a zero page that is zero before & after resume in order
         * to break before make on the ttbr1 page tables.
         */
        zero_page = (void *)get_safe_page(GFP_ATOMIC);
index 9734c9f..92bcc17 100644 (file)
@@ -111,8 +111,7 @@ void machine_power_off(void)
 {
        local_irq_disable();
        smp_send_stop();
-       if (pm_power_off)
-               pm_power_off();
+       do_kernel_power_off();
 }
 
 /*
@@ -344,9 +343,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 
 asmlinkage void ret_from_fork(void) asm("ret_from_fork");
 
-int copy_thread(unsigned long clone_flags, unsigned long stack_start,
-               unsigned long stk_sz, struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long stack_start = args->stack;
+       unsigned long tls = args->tls;
        struct pt_regs *childregs = task_pt_regs(p);
 
        memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
@@ -362,7 +363,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 
        ptrauth_thread_init_kernel(p);
 
-       if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
+       if (likely(!args->fn)) {
                *childregs = *current_pt_regs();
                childregs->regs[0] = 0;
 
@@ -400,8 +401,8 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
 
-               p->thread.cpu_context.x19 = stack_start;
-               p->thread.cpu_context.x20 = stk_sz;
+               p->thread.cpu_context.x19 = (unsigned long)args->fn;
+               p->thread.cpu_context.x20 = (unsigned long)args->fn_arg;
        }
        p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
        p->thread.cpu_context.sp = (unsigned long)childregs;
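
The hunks above, and the matching csky, hexagon and ia64 hunks further down, all apply the same tree-wide conversion: copy_thread() now takes a single struct kernel_clone_args, and a non-NULL args->fn marks a kernel thread, replacing the old PF_KTHREAD/PF_IO_WORKER flag test. Below is a minimal stand-alone C model of that dispatch; the struct fields mirror the ones visible in the diff (flags, stack, tls, fn, fn_arg), everything else is illustrative and is not kernel code.

    #include <stdio.h>

    /* Illustrative model only: mirrors the kernel_clone_args fields used above. */
    struct clone_args_model {
        unsigned long flags;
        unsigned long stack;
        unsigned long tls;
        int (*fn)(void *);   /* non-NULL => kernel thread */
        void *fn_arg;
    };

    static int copy_thread_model(const struct clone_args_model *args)
    {
        if (args->fn) {
            /* Kernel thread: stash fn/fn_arg so ret_from_fork can call them. */
            printf("kernel thread: fn_arg=%p\n", args->fn_arg);
        } else {
            /* User thread: the child starts from a copy of the parent's registers. */
            printf("user thread: stack=%#lx tls=%#lx\n", args->stack, args->tls);
        }
        return 0;
    }

    static int demo_fn(void *arg) { (void)arg; return 0; }

    int main(void)
    {
        struct clone_args_model kthread = { .fn = demo_fn };
        struct clone_args_model uthread = { .stack = 0x7ffff000UL, .tls = 0x1000UL };

        copy_thread_model(&kthread);
        copy_thread_model(&uthread);
        return 0;
    }
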
index fea3223..cf3a759 100644 (file)
@@ -303,13 +303,14 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
        early_fixmap_init();
        early_ioremap_init();
 
-       setup_machine_fdt(__fdt_pointer);
-
        /*
         * Initialise the static keys early as they may be enabled by the
-        * cpufeature code and early parameters.
+        * cpufeature code, early parameters, and DT setup.
         */
        jump_label_init();
+
+       setup_machine_fdt(__fdt_pointer);
+
        parse_early_param();
 
        /*
index edb2d92..b0980fb 100644 (file)
@@ -385,7 +385,7 @@ static int preserve_za_context(struct za_context __user *ctx)
        return err ? -EFAULT : 0;
 }
 
-static int restore_za_context(struct user_ctxs __user *user)
+static int restore_za_context(struct user_ctxs *user)
 {
        int err;
        unsigned int vq;
index 923ee4e..86ee202 100644 (file)
@@ -9,16 +9,14 @@ EXPORT_SYMBOL(pm_power_off);
 void machine_power_off(void)
 {
        local_irq_disable();
-       if (pm_power_off)
-               pm_power_off();
+       do_kernel_power_off();
        asm volatile ("bkpt");
 }
 
 void machine_halt(void)
 {
        local_irq_disable();
-       if (pm_power_off)
-               pm_power_off();
+       do_kernel_power_off();
        asm volatile ("bkpt");
 }
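
This file, like the arm64 and ia64 hunks around it, replaces the open-coded "if (pm_power_off) pm_power_off();" with do_kernel_power_off(). Roughly speaking, that helper runs the registered sys-off handlers and keeps the legacy pm_power_off hook working as a fallback. The stand-alone sketch below only illustrates that idea; the handler-list representation and names are simplified assumptions, not the kernel's real data structures.

    #include <stdio.h>

    typedef void (*power_off_fn)(void);

    static power_off_fn sys_off_handlers[4];   /* simplified handler chain */
    static int nr_sys_off_handlers;
    static void (*legacy_pm_power_off)(void);  /* old single hook */

    static void do_kernel_power_off_model(void)
    {
        for (int i = 0; i < nr_sys_off_handlers; i++)
            sys_off_handlers[i]();             /* registered handlers run first */

        if (!nr_sys_off_handlers && legacy_pm_power_off)
            legacy_pm_power_off();             /* legacy fallback */
    }

    static void firmware_off(void) { puts("firmware power-off call"); }

    int main(void)
    {
        sys_off_handlers[nr_sys_off_handlers++] = firmware_off;
        do_kernel_power_off_model();
        return 0;
    }
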
 
index 5de0470..eedddb1 100644 (file)
@@ -29,12 +29,11 @@ asmlinkage void ret_from_kernel_thread(void);
  */
 void flush_thread(void){}
 
-int copy_thread(unsigned long clone_flags,
-               unsigned long usp,
-               unsigned long kthread_arg,
-               struct task_struct *p,
-               unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long usp = args->stack;
+       unsigned long tls = args->tls;
        struct switch_stack *childstack;
        struct pt_regs *childregs = task_pt_regs(p);
 
@@ -48,11 +47,11 @@ int copy_thread(unsigned long clone_flags,
        /* setup thread.sp for switch_to !!! */
        p->thread.sp = (unsigned long)childstack;
 
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                memset(childregs, 0, sizeof(struct pt_regs));
                childstack->r15 = (unsigned long) ret_from_kernel_thread;
-               childstack->r10 = kthread_arg;
-               childstack->r9 = usp;
+               childstack->r10 = (unsigned long) args->fn_arg;
+               childstack->r9 = (unsigned long) args->fn;
                childregs->sr = mfcr("psr");
        } else {
                *childregs = *(current_pt_regs());
index eab03c6..f0552f9 100644 (file)
@@ -50,9 +50,11 @@ void arch_cpu_idle(void)
 /*
  * Copy architecture-specific thread state
  */
-int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
-               struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long usp = args->stack;
+       unsigned long tls = args->tls;
        struct thread_info *ti = task_thread_info(p);
        struct hexagon_switch_stack *ss;
        struct pt_regs *childregs;
@@ -73,11 +75,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
                                                    sizeof(*ss));
        ss->lr = (unsigned long)ret_from_fork;
        p->thread.switch_sp = ss;
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                memset(childregs, 0, sizeof(struct pt_regs));
                /* r24 <- fn, r25 <- arg */
-               ss->r24 = usp;
-               ss->r25 = arg;
+               ss->r24 = (unsigned long)args->fn;
+               ss->r25 = (unsigned long)args->fn_arg;
                pt_set_kmode(childregs);
                return 0;
        }
index a10a498..4028744 100644 (file)
@@ -139,10 +139,6 @@ static inline long regs_return_value(struct pt_regs *regs)
   #define arch_ptrace_stop_needed() \
        (!test_thread_flag(TIF_RESTORE_RSE))
 
-  extern void ptrace_attach_sync_user_rbs (struct task_struct *);
-  #define arch_ptrace_attach(child) \
-       ptrace_attach_sync_user_rbs(child)
-
   #define arch_has_single_step()  (1)
   #define arch_has_block_step()   (1)
 
index d7a256b..416305e 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/notifier.h>
 #include <linux/personality.h>
+#include <linux/reboot.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/hotplug.h>
@@ -295,9 +296,12 @@ ia64_load_extra (struct task_struct *task)
  * so there is nothing to worry about.
  */
 int
-copy_thread(unsigned long clone_flags, unsigned long user_stack_base,
-           unsigned long user_stack_size, struct task_struct *p, unsigned long tls)
+copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long user_stack_base = args->stack;
+       unsigned long user_stack_size = args->stack_size;
+       unsigned long tls = args->tls;
        extern char ia64_ret_from_clone;
        struct switch_stack *child_stack, *stack;
        unsigned long rbs, child_rbs, rbs_size;
@@ -338,14 +342,14 @@ copy_thread(unsigned long clone_flags, unsigned long user_stack_base,
 
        ia64_drop_fpu(p);       /* don't pick up stale state from a CPU's fph */
 
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
-               if (unlikely(!user_stack_base)) {
+       if (unlikely(args->fn)) {
+               if (unlikely(args->idle)) {
                        /* fork_idle() called us */
                        return 0;
                }
                memset(child_stack, 0, sizeof(*child_ptregs) + sizeof(*child_stack));
-               child_stack->r4 = user_stack_base;      /* payload */
-               child_stack->r5 = user_stack_size;      /* argument */
+               child_stack->r4 = (unsigned long) args->fn;
+               child_stack->r5 = (unsigned long) args->fn_arg;
                /*
                 * Preserve PSR bits, except for bits 32-34 and 37-45,
                 * which we can't read.
@@ -599,8 +603,7 @@ machine_halt (void)
 void
 machine_power_off (void)
 {
-       if (pm_power_off)
-               pm_power_off();
+       do_kernel_power_off();
        machine_halt();
 }
 
index 4fc6e38..ab8aeb3 100644 (file)
@@ -618,63 +618,6 @@ void ia64_sync_krbs(void)
 }
 
 /*
- * After PTRACE_ATTACH, a thread's register backing store area in user
- * space is assumed to contain correct data whenever the thread is
- * stopped.  arch_ptrace_stop takes care of this on tracing stops.
- * But if the child was already stopped for job control when we attach
- * to it, then it might not ever get into ptrace_stop by the time we
- * want to examine the user memory containing the RBS.
- */
-void
-ptrace_attach_sync_user_rbs (struct task_struct *child)
-{
-       int stopped = 0;
-       struct unw_frame_info info;
-
-       /*
-        * If the child is in TASK_STOPPED, we need to change that to
-        * TASK_TRACED momentarily while we operate on it.  This ensures
-        * that the child won't be woken up and return to user mode while
-        * we are doing the sync.  (It can only be woken up for SIGKILL.)
-        */
-
-       read_lock(&tasklist_lock);
-       if (child->sighand) {
-               spin_lock_irq(&child->sighand->siglock);
-               if (READ_ONCE(child->__state) == TASK_STOPPED &&
-                   !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
-                       set_notify_resume(child);
-
-                       WRITE_ONCE(child->__state, TASK_TRACED);
-                       stopped = 1;
-               }
-               spin_unlock_irq(&child->sighand->siglock);
-       }
-       read_unlock(&tasklist_lock);
-
-       if (!stopped)
-               return;
-
-       unw_init_from_blocked_task(&info, child);
-       do_sync_rbs(&info, ia64_sync_user_rbs);
-
-       /*
-        * Now move the child back into TASK_STOPPED if it should be in a
-        * job control stop, so that SIGCONT can be used to wake it up.
-        */
-       read_lock(&tasklist_lock);
-       if (child->sighand) {
-               spin_lock_irq(&child->sighand->siglock);
-               if (READ_ONCE(child->__state) == TASK_TRACED &&
-                   (child->signal->flags & SIGNAL_STOP_STOPPED)) {
-                       WRITE_ONCE(child->__state, TASK_STOPPED);
-               }
-               spin_unlock_irq(&child->sighand->siglock);
-       }
-       read_unlock(&tasklist_lock);
-}
-
-/*
  * Write f32-f127 back to task->thread.fph if it has been modified.
  */
 inline void
index 5010348..fd6301e 100644 (file)
@@ -572,7 +572,7 @@ setup_arch (char **cmdline_p)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
        prefill_possible_map();
 #endif
-       per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
+       per_cpu_scan_finalize((cpumask_empty(&early_cpu_possible_map) ?
                32 : cpumask_weight(&early_cpu_possible_map)),
                additional_cpus > 0 ? additional_cpus : 0);
 #endif /* CONFIG_ACPI_NUMA */
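
The cpumask_empty() conversion above is behaviour-preserving but cheaper: an emptiness test can bail out at the first non-zero word, while cpumask_weight() has to count every bit. A simplified user-space model of the difference (not the kernel's cpumask implementation):

    #include <stdio.h>

    #define NWORDS 4   /* model a 256-bit mask as 4 x 64-bit words */

    static int mask_weight(const unsigned long long *m)
    {
        int w = 0;
        for (int i = 0; i < NWORDS; i++)
            w += __builtin_popcountll(m[i]);   /* counts every word */
        return w;
    }

    static int mask_empty(const unsigned long long *m)
    {
        for (int i = 0; i < NWORDS; i++)
            if (m[i])
                return 0;                      /* stops at the first set word */
        return 1;
    }

    int main(void)
    {
        unsigned long long mask[NWORDS] = { 0x1ULL, 0, 0, 0 };

        printf("weight=%d empty=%d\n", mask_weight(mask), mask_empty(mask));
        return 0;
    }
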
index d10f780..d0e935c 100644 (file)
@@ -576,8 +576,6 @@ clear_cpu_sibling_map(int cpu)
 static void
 remove_siblinginfo(int cpu)
 {
-       int last = 0;
-
        if (cpu_data(cpu)->threads_per_core == 1 &&
            cpu_data(cpu)->cores_per_socket == 1) {
                cpumask_clear_cpu(cpu, &cpu_core_map[cpu]);
@@ -585,8 +583,6 @@ remove_siblinginfo(int cpu)
                return;
        }
 
-       last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0);
-
        /* remove it from all sibling map's */
        clear_cpu_sibling_map(cpu);
 }
diff --git a/arch/loongarch/Kbuild b/arch/loongarch/Kbuild
new file mode 100644 (file)
index 0000000..ab5373d
--- /dev/null
@@ -0,0 +1,6 @@
+obj-y += kernel/
+obj-y += mm/
+obj-y += vdso/
+
+# for cleaning
+subdir- += boot
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
new file mode 100644 (file)
index 0000000..80657bf
--- /dev/null
@@ -0,0 +1,438 @@
+# SPDX-License-Identifier: GPL-2.0
+config LOONGARCH
+       bool
+       default y
+       select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
+       select ARCH_BINFMT_ELF_STATE
+       select ARCH_ENABLE_MEMORY_HOTPLUG
+       select ARCH_ENABLE_MEMORY_HOTREMOVE
+       select ARCH_HAS_ACPI_TABLE_UPGRADE      if ACPI
+       select ARCH_HAS_PHYS_TO_DMA
+       select ARCH_HAS_PTE_SPECIAL
+       select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+       select ARCH_INLINE_READ_LOCK if !PREEMPTION
+       select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
+       select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
+       select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
+       select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
+       select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
+       select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
+       select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
+       select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
+       select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
+       select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
+       select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
+       select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
+       select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
+       select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
+       select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
+       select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
+       select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
+       select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
+       select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
+       select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
+       select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
+       select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
+       select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
+       select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
+       select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
+       select ARCH_MIGHT_HAVE_PC_PARPORT
+       select ARCH_MIGHT_HAVE_PC_SERIO
+       select ARCH_SPARSEMEM_ENABLE
+       select ARCH_SUPPORTS_ACPI
+       select ARCH_SUPPORTS_ATOMIC_RMW
+       select ARCH_SUPPORTS_HUGETLBFS
+       select ARCH_SUPPORTS_NUMA_BALANCING
+       select ARCH_USE_BUILTIN_BSWAP
+       select ARCH_USE_CMPXCHG_LOCKREF
+       select ARCH_USE_QUEUED_RWLOCKS
+       select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+       select ARCH_WANTS_NO_INSTR
+       select BUILDTIME_TABLE_SORT
+       select COMMON_CLK
+       select GENERIC_CLOCKEVENTS
+       select GENERIC_CMOS_UPDATE
+       select GENERIC_CPU_AUTOPROBE
+       select GENERIC_ENTRY
+       select GENERIC_FIND_FIRST_BIT
+       select GENERIC_GETTIMEOFDAY
+       select GENERIC_IRQ_MULTI_HANDLER
+       select GENERIC_IRQ_PROBE
+       select GENERIC_IRQ_SHOW
+       select GENERIC_LIB_ASHLDI3
+       select GENERIC_LIB_ASHRDI3
+       select GENERIC_LIB_CMPDI2
+       select GENERIC_LIB_LSHRDI3
+       select GENERIC_LIB_UCMPDI2
+       select GENERIC_PCI_IOMAP
+       select GENERIC_SCHED_CLOCK
+       select GENERIC_SMP_IDLE_THREAD
+       select GENERIC_TIME_VSYSCALL
+       select GPIOLIB
+       select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_COMPILER_H
+       select HAVE_ARCH_MMAP_RND_BITS if MMU
+       select HAVE_ARCH_SECCOMP_FILTER
+       select HAVE_ARCH_TRACEHOOK
+       select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+       select HAVE_ASM_MODVERSIONS
+       select HAVE_CONTEXT_TRACKING
+       select HAVE_COPY_THREAD_TLS
+       select HAVE_DEBUG_STACKOVERFLOW
+       select HAVE_DMA_CONTIGUOUS
+       select HAVE_EXIT_THREAD
+       select HAVE_FAST_GUP
+       select HAVE_GENERIC_VDSO
+       select HAVE_IOREMAP_PROT
+       select HAVE_IRQ_EXIT_ON_IRQ_STACK
+       select HAVE_IRQ_TIME_ACCOUNTING
+       select HAVE_MEMBLOCK
+       select HAVE_MEMBLOCK_NODE_MAP
+       select HAVE_MOD_ARCH_SPECIFIC
+       select HAVE_NMI
+       select HAVE_PERF_EVENTS
+       select HAVE_REGS_AND_STACK_ACCESS_API
+       select HAVE_RSEQ
+       select HAVE_SETUP_PER_CPU_AREA if NUMA
+       select HAVE_SYSCALL_TRACEPOINTS
+       select HAVE_TIF_NOHZ
+       select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
+       select IRQ_FORCED_THREADING
+       select IRQ_LOONGARCH_CPU
+       select MODULES_USE_ELF_RELA if MODULES
+       select NEED_PER_CPU_EMBED_FIRST_CHUNK
+       select NEED_PER_CPU_PAGE_FIRST_CHUNK
+       select OF
+       select OF_EARLY_FLATTREE
+       select PERF_USE_VMALLOC
+       select RTC_LIB
+       select SPARSE_IRQ
+       select SYSCTL_EXCEPTION_TRACE
+       select SWIOTLB
+       select TRACE_IRQFLAGS_SUPPORT
+       select USE_PERCPU_NUMA_NODE_ID
+       select ZONE_DMA32
+
+config 32BIT
+       bool
+
+config 64BIT
+       def_bool y
+
+config CPU_HAS_FPU
+       bool
+       default y
+
+config CPU_HAS_PREFETCH
+       bool
+       default y
+
+config GENERIC_CALIBRATE_DELAY
+       def_bool y
+
+config GENERIC_CSUM
+       def_bool y
+
+config GENERIC_HWEIGHT
+       def_bool y
+
+config L1_CACHE_SHIFT
+       int
+       default "6"
+
+config LOCKDEP_SUPPORT
+       bool
+       default y
+
+# MACH_LOONGSON32 and MACH_LOONGSON64 are deliberately carried over from the
+# MIPS Loongson code, to preserve Loongson-specific code paths in drivers that
+# are shared between architectures and specifically expect these symbols.
+config MACH_LOONGSON32
+       def_bool 32BIT
+
+config MACH_LOONGSON64
+       def_bool 64BIT
+
+config PAGE_SIZE_4KB
+       bool
+
+config PAGE_SIZE_16KB
+       bool
+
+config PAGE_SIZE_64KB
+       bool
+
+config PGTABLE_2LEVEL
+       bool
+
+config PGTABLE_3LEVEL
+       bool
+
+config PGTABLE_4LEVEL
+       bool
+
+config PGTABLE_LEVELS
+       int
+       default 2 if PGTABLE_2LEVEL
+       default 3 if PGTABLE_3LEVEL
+       default 4 if PGTABLE_4LEVEL
+
+config SCHED_OMIT_FRAME_POINTER
+       bool
+       default y
+
+menu "Kernel type and options"
+
+source "kernel/Kconfig.hz"
+
+choice
+       prompt "Page Table Layout"
+       default 16KB_2LEVEL if 32BIT
+       default 16KB_3LEVEL if 64BIT
+       help
+         Allows choosing the page table layout, which is a combination
+         of page size and page table levels. The size of the virtual
+         address space is determined by the page table layout; a worked
+         example of the arithmetic follows this choice block.
+
+config 4KB_3LEVEL
+       bool "4KB with 3 levels"
+       select PAGE_SIZE_4KB
+       select PGTABLE_3LEVEL
+       help
+         This option selects 4KB page size with 3 level page tables, which
+         support a maximum of 39 bits of application virtual memory.
+
+config 4KB_4LEVEL
+       bool "4KB with 4 levels"
+       select PAGE_SIZE_4KB
+       select PGTABLE_4LEVEL
+       help
+         This option selects 4KB page size with 4 level page tables, which
+         support a maximum of 48 bits of application virtual memory.
+
+config 16KB_2LEVEL
+       bool "16KB with 2 levels"
+       select PAGE_SIZE_16KB
+       select PGTABLE_2LEVEL
+       help
+         This option selects 16KB page size with 2 level page tables, which
+         support a maximum of 36 bits of application virtual memory.
+
+config 16KB_3LEVEL
+       bool "16KB with 3 levels"
+       select PAGE_SIZE_16KB
+       select PGTABLE_3LEVEL
+       help
+         This option selects 16KB page size with 3 level page tables, which
+         support a maximum of 47 bits of application virtual memory.
+
+config 64KB_2LEVEL
+       bool "64KB with 2 levels"
+       select PAGE_SIZE_64KB
+       select PGTABLE_2LEVEL
+       help
+         This option selects 64KB page size with 2 level page tables, which
+         support a maximum of 42 bits of application virtual memory.
+
+config 64KB_3LEVEL
+       bool "64KB with 3 levels"
+       select PAGE_SIZE_64KB
+       select PGTABLE_3LEVEL
+       help
+         This option selects 64KB page size with 3 level page tables, which
+         support a maximum of 55 bits of application virtual memory.
+
+endchoice
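
The virtual-address widths quoted in the help texts above follow directly from the geometry: with 8-byte page-table entries each level resolves page_shift - 3 bits, on top of page_shift bits of page offset. A quick user-space check of that arithmetic (not part of the kernel sources):

    #include <stdio.h>

    /* va_bits = page_shift + levels * (page_shift - 3), assuming 8-byte PTEs */
    static int va_bits(int page_shift, int levels)
    {
        return page_shift + levels * (page_shift - 3);
    }

    int main(void)
    {
        printf("4KB, 3 levels:  %d bits\n", va_bits(12, 3));  /* 39 */
        printf("4KB, 4 levels:  %d bits\n", va_bits(12, 4));  /* 48 */
        printf("16KB, 2 levels: %d bits\n", va_bits(14, 2));  /* 36 */
        printf("16KB, 3 levels: %d bits\n", va_bits(14, 3));  /* 47 */
        printf("64KB, 2 levels: %d bits\n", va_bits(16, 2));  /* 42 */
        printf("64KB, 3 levels: %d bits\n", va_bits(16, 3));  /* 55 */
        return 0;
    }
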
+
+config CMDLINE
+       string "Built-in kernel command line"
+       help
+         For most platforms, the arguments for the kernel's command line
+         are provided at run-time, during boot. However, there are cases
+         where either no arguments are being provided or the provided
+         arguments are insufficient or even invalid.
+
+         When that occurs, it is possible to define a built-in command
+         line here and choose how the kernel should use it later on.
+
+choice
+       prompt "Kernel command line type"
+       default CMDLINE_BOOTLOADER
+       help
+         Choose how the kernel will handle the built-in command line
+         relative to the one passed in by the bootloader; a sketch of the
+         three behaviours follows this choice block.
+
+config CMDLINE_BOOTLOADER
+       bool "Use bootloader kernel arguments if available"
+       help
+         Prefer the command-line passed by the boot loader if available.
+         Use the built-in command line as fallback in case we get nothing
+         during boot. This is the default behaviour.
+
+config CMDLINE_EXTEND
+       bool "Use built-in to extend bootloader kernel arguments"
+       help
+         The command-line arguments provided during boot will be
+         appended to the built-in command line. This is useful in
+         cases where the provided arguments are insufficient and
+         you don't want to or cannot modify them.
+
+config CMDLINE_FORCE
+       bool "Always use the built-in kernel command string"
+       help
+         Always use the built-in command line, even if we get one during
+         boot. This is useful in case you need to override the provided
+         command line on systems where you don't have or want control
+         over it.
+
+endchoice
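
A stand-alone sketch of the three behaviours chosen above; the function and buffer handling are purely illustrative and are not the kernel's actual command-line code:

    #include <stdio.h>

    enum cmdline_mode { USE_BOOTLOADER, EXTEND, FORCE };

    static void build_cmdline(enum cmdline_mode mode, const char *builtin,
                              const char *bootloader, char *out, size_t len)
    {
        switch (mode) {
        case USE_BOOTLOADER:   /* prefer bootloader args, built-in is the fallback */
            snprintf(out, len, "%s", *bootloader ? bootloader : builtin);
            break;
        case EXTEND:           /* boot-time args appended to the built-in line */
            snprintf(out, len, "%s %s", builtin, bootloader);
            break;
        case FORCE:            /* always the built-in line, ignore the bootloader */
            snprintf(out, len, "%s", builtin);
            break;
        }
    }

    int main(void)
    {
        char buf[256];

        build_cmdline(EXTEND, "console=ttyS0,115200", "root=/dev/sda1", buf, sizeof(buf));
        printf("%s\n", buf);
        return 0;
    }
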
+
+config DMI
+       bool "Enable DMI scanning"
+       select DMI_SCAN_MACHINE_NON_EFI_FALLBACK
+       default y
+       help
+         This enables the SMBIOS/DMI feature and the scanning of DMI to
+         identify machine quirks.
+
+config EFI
+       bool "EFI runtime service support"
+       select UCS2_STRING
+       select EFI_PARAMS_FROM_FDT
+       select EFI_RUNTIME_WRAPPERS
+       help
+         This enables the kernel to use EFI runtime services that are
+         available (such as the EFI variable services).
+
+config SMP
+       bool "Multi-Processing support"
+       help
+         This enables support for systems with more than one CPU. If you have
+         a system with only one CPU, say N. If you have a system with more
+         than one CPU, say Y.
+
+         If you say N here, the kernel will run on uni- and multiprocessor
+         machines, but will use only one CPU of a multiprocessor machine. If
+         you say Y here, the kernel will run on many, but not all,
+         uniprocessor machines. On a uniprocessor machine, the kernel
+         will run faster if you say N here.
+
+         See also the SMP-HOWTO available at <http://www.tldp.org/docs.html#howto>.
+
+         If you don't know what to do here, say N.
+
+config HOTPLUG_CPU
+       bool "Support for hot-pluggable CPUs"
+       depends on SMP
+       select GENERIC_IRQ_MIGRATION
+       help
+         Say Y here to allow turning CPUs off and on. CPUs can be
+         controlled through /sys/devices/system/cpu.
+         (Note: power management support will enable this option
+         automatically on SMP systems.)
+         Say N if you want to disable CPU hotplug.
+
+config NR_CPUS
+       int "Maximum number of CPUs (2-256)"
+       range 2 256
+       depends on SMP
+       default "64"
+       help
+         This allows you to specify the maximum number of CPUs which this
+         kernel will support.
+
+config NUMA
+       bool "NUMA Support"
+       select ACPI_NUMA if ACPI
+       help
+         Say Y to compile the kernel with NUMA (Non-Uniform Memory Access)
+         support.  This option improves performance on systems with more
+         than one NUMA node; on single node systems it is generally better
+         to leave it disabled.
+
+config NODES_SHIFT
+       int
+       default "6"
+       depends on NUMA
+
+config FORCE_MAX_ZONEORDER
+       int "Maximum zone order"
+       range 14 64 if PAGE_SIZE_64KB
+       default "14" if PAGE_SIZE_64KB
+       range 12 64 if PAGE_SIZE_16KB
+       default "12" if PAGE_SIZE_16KB
+       range 11 64
+       default "11"
+       help
+         The kernel memory allocator divides physically contiguous memory
+         blocks into "zones", where each zone is a power of two number of
+         pages.  This option selects the largest power of two that the kernel
+         keeps in the memory allocator.  If you need to allocate very large
+         blocks of physically contiguous memory, then you may need to
+         increase this value.
+
+         This config option is actually maximum order plus one. For example,
+         a value of 11 means that the largest free memory block is 2^10 pages.
+
+         The page size is not necessarily 4KB.  Keep this in mind
+         when choosing a value for this option.
+
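
To make the "maximum order plus one" wording above concrete, the per-page-size defaults give the following largest contiguous buddy allocations (plain user-space arithmetic, not kernel code):

    #include <stdio.h>

    /* Largest buddy block: order = FORCE_MAX_ZONEORDER - 1 */
    static unsigned long largest_kib(int page_shift, int max_zoneorder)
    {
        return (1UL << (max_zoneorder - 1)) << (page_shift - 10);
    }

    int main(void)
    {
        printf("4KB pages,  order 11: %6lu KiB\n", largest_kib(12, 11)); /* 4 MiB   */
        printf("16KB pages, order 12: %6lu KiB\n", largest_kib(14, 12)); /* 32 MiB  */
        printf("64KB pages, order 14: %6lu KiB\n", largest_kib(16, 14)); /* 512 MiB */
        return 0;
    }
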
+config SECCOMP
+       bool "Enable seccomp to safely compute untrusted bytecode"
+       depends on PROC_FS
+       default y
+       help
+         This kernel feature is useful for number crunching applications
+         that may need to compute untrusted bytecode during their
+         execution. By using pipes or other transports made available to
+         the process as file descriptors supporting the read/write
+         syscalls, it's possible to isolate those applications in
+         their own address space using seccomp. Once seccomp is
+         enabled via /proc/<pid>/seccomp, it cannot be disabled
+         and the task is only allowed to execute a few safe syscalls
+         defined by each seccomp mode.
+
+         If unsure, say Y. Only embedded should say N here.
+
+endmenu
+
+config ARCH_SELECT_MEMORY_MODEL
+       def_bool y
+
+config ARCH_FLATMEM_ENABLE
+       def_bool y
+       depends on !NUMA
+
+config ARCH_SPARSEMEM_ENABLE
+       def_bool y
+       help
+         Say Y to support efficient handling of sparse physical memory,
+         for architectures which are either NUMA (Non-Uniform Memory Access)
+         or have huge holes in the physical address space for other reasons.
+         See <file:Documentation/vm/numa.rst> for more.
+
+config ARCH_ENABLE_THP_MIGRATION
+       def_bool y
+       depends on TRANSPARENT_HUGEPAGE
+
+config ARCH_MEMORY_PROBE
+       def_bool y
+       depends on MEMORY_HOTPLUG
+
+config MMU
+       bool
+       default y
+
+config ARCH_MMAP_RND_BITS_MIN
+       default 12
+
+config ARCH_MMAP_RND_BITS_MAX
+       default 18
+
+menu "Power management options"
+
+source "drivers/acpi/Kconfig"
+
+endmenu
+
+source "drivers/firmware/Kconfig"
diff --git a/arch/loongarch/Kconfig.debug b/arch/loongarch/Kconfig.debug
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
new file mode 100644 (file)
index 0000000..fbe4277
--- /dev/null
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Author: Huacai Chen <chenhuacai@loongson.cn>
+# Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+
+boot   := arch/loongarch/boot
+
+KBUILD_DEFCONFIG := loongson3_defconfig
+
+KBUILD_IMAGE   = $(boot)/vmlinux
+
+#
+# Select the object file format to substitute into the linker script.
+#
+64bit-tool-archpref    = loongarch64
+32bit-bfd              = elf32-loongarch
+64bit-bfd              = elf64-loongarch
+32bit-emul             = elf32loongarch
+64bit-emul             = elf64loongarch
+
+ifdef CONFIG_64BIT
+tool-archpref          = $(64bit-tool-archpref)
+UTS_MACHINE            := loongarch64
+endif
+
+ifneq ($(SUBARCH),$(ARCH))
+  ifeq ($(CROSS_COMPILE),)
+    CROSS_COMPILE := $(call cc-cross-prefix, $(tool-archpref)-linux-  $(tool-archpref)-linux-gnu-  $(tool-archpref)-unknown-linux-gnu-)
+  endif
+endif
+
+ifdef CONFIG_64BIT
+ld-emul                        = $(64bit-emul)
+cflags-y               += -mabi=lp64s
+endif
+
+cflags-y                       += -G0 -pipe -msoft-float
+LDFLAGS_vmlinux                        += -G0 -static -n -nostdlib
+KBUILD_AFLAGS_KERNEL           += -Wa,-mla-global-with-pcrel
+KBUILD_CFLAGS_KERNEL           += -Wa,-mla-global-with-pcrel
+KBUILD_AFLAGS_MODULE           += -Wa,-mla-global-with-abs
+KBUILD_CFLAGS_MODULE           += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs
+
+cflags-y += -ffreestanding
+cflags-y += $(call cc-option, -mno-check-zero-division)
+
+load-y         = 0x9000000000200000
+bootvars-y     = VMLINUX_LOAD_ADDRESS=$(load-y)
+
+KBUILD_AFLAGS  += $(cflags-y)
+KBUILD_CFLAGS  += $(cflags-y)
+KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
+
+# This is required to get dwarf unwinding tables into .debug_frame
+# instead of .eh_frame so we don't discard them.
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+
+# Don't emit unaligned accesses.
+# Not all LoongArch cores support unaligned access, and the kernel can't
+# rely on anyone else to provide emulation for these accesses.
+KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
+
+KBUILD_CFLAGS += -isystem $(shell $(CC) -print-file-name=include)
+
+KBUILD_LDFLAGS += -m $(ld-emul)
+
+ifdef CONFIG_LOONGARCH
+CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
+       egrep -vw '__GNUC_(MINOR|PATCHLEVEL)?__' | \
+       sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
+endif
+
+head-y := arch/loongarch/kernel/head.o
+
+libs-y += arch/loongarch/lib/
+
+ifeq ($(KBUILD_EXTMOD),)
+prepare: vdso_prepare
+vdso_prepare: prepare0
+       $(Q)$(MAKE) $(build)=arch/loongarch/vdso include/generated/vdso-offsets.h
+endif
+
+PHONY += vdso_install
+vdso_install:
+       $(Q)$(MAKE) $(build)=arch/loongarch/vdso $@
+
+all:   $(KBUILD_IMAGE)
+
+$(KBUILD_IMAGE): vmlinux
+       $(Q)$(MAKE) $(build)=$(boot) $(bootvars-y) $@
+
+install:
+       $(Q)install -D -m 755 $(KBUILD_IMAGE) $(INSTALL_PATH)/vmlinux-$(KERNELRELEASE)
+       $(Q)install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE)
+       $(Q)install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE)
+
+define archhelp
+       echo '  install              - install kernel into $(INSTALL_PATH)'
+       echo
+endef
similarity index 57%
rename from arch/arm/mach-pxa/Makefile.boot
rename to arch/loongarch/boot/.gitignore
index bb6e353..49423ee 100644 (file)
@@ -1,3 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0-only
-   zreladdr-y  += 0xa0008000
-
+vmlinux*
diff --git a/arch/loongarch/boot/Makefile b/arch/loongarch/boot/Makefile
new file mode 100644 (file)
index 0000000..0125b17
--- /dev/null
@@ -0,0 +1,16 @@
+#
+# arch/loongarch/boot/Makefile
+#
+# Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+#
+
+drop-sections := .comment .note .options .note.gnu.build-id
+strip-flags   := $(addprefix --remove-section=,$(drop-sections)) -S
+OBJCOPYFLAGS_vmlinux.efi := -O binary $(strip-flags)
+
+targets := vmlinux
+quiet_cmd_strip = STRIP          $@
+      cmd_strip = $(STRIP) -s -o $@ $<
+
+$(obj)/vmlinux: vmlinux FORCE
+       $(call if_changed,strip)
diff --git a/arch/loongarch/boot/dts/Makefile b/arch/loongarch/boot/dts/Makefile
new file mode 100644 (file)
index 0000000..5f1f55e
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+dtstree        := $(srctree)/$(src)
+
+dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
new file mode 100644 (file)
index 0000000..eb91497
--- /dev/null
@@ -0,0 +1,771 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_PREEMPT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_USERFAULTFD=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_LOONGARCH=y
+CONFIG_64BIT=y
+CONFIG_MACH_LOONGSON64=y
+CONFIG_DMI=y
+CONFIG_EFI=y
+CONFIG_SMP=y
+CONFIG_HOTPLUG_CPU=y
+CONFIG_NR_CPUS=64
+CONFIG_NUMA=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_250=y
+CONFIG_ACPI=y
+CONFIG_ACPI_SPCR_TABLE=y
+CONFIG_ACPI_HOTPLUG_CPU=y
+CONFIG_ACPI_TAD=y
+CONFIG_ACPI_DOCK=y
+CONFIG_ACPI_IPMI=m
+CONFIG_ACPI_PCI_SLOT=y
+CONFIG_ACPI_HOTPLUG_MEMORY=y
+CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y
+CONFIG_EFI_CAPSULE_LOADER=m
+CONFIG_EFI_TEST=m
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_BINFMT_MISC=m
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_ZSWAP=y
+CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
+CONFIG_ZPOOL=y
+CONFIG_ZBUD=y
+CONFIG_Z3FOLD=y
+CONFIG_ZSMALLOC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_INET_ESP=m
+CONFIG_INET_UDP_DIAG=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BBR=m
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_NETWORK_PHY_TIMESTAMPING=y
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_OBJREF=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
+CONFIG_NFT_REJECT=m
+CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_NFCT=y
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
+CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_LOG_ARP=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_TABLES_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_SRH=m
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BPFILTER=y
+CONFIG_IP_SCTP=m
+CONFIG_RDS=y
+CONFIG_L2TP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_CGROUP=m
+CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_BPF=m
+CONFIG_OPENVSWITCH=m
+CONFIG_NETLINK_DIAG=y
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_BT=m
+CONFIG_BT_HCIBTUSB=m
+# CONFIG_BT_HCIBTUSB_BCM is not set
+CONFIG_CFG80211=m
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=m
+CONFIG_RFKILL=m
+CONFIG_RFKILL_INPUT=y
+CONFIG_NET_9P=y
+CONFIG_CEPH_LIB=m
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCIEAER=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_SHPC=y
+CONFIG_PCCARD=m
+CONFIG_YENTA=m
+CONFIG_RAPIDIO=y
+CONFIG_RAPIDIO_TSI721=y
+CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y
+CONFIG_RAPIDIO_ENUM_BASIC=m
+CONFIG_RAPIDIO_CHMAN=m
+CONFIG_RAPIDIO_MPORT_CDEV=m
+CONFIG_UEVENT_HELPER=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_CFI=m
+CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_CFI_INTELEXT=m
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_RAM=m
+CONFIG_MTD_ROM=m
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+CONFIG_PARPORT_SERIAL=y
+CONFIG_PARPORT_PC_FIFO=y
+CONFIG_ZRAM=m
+CONFIG_ZRAM_DEF_COMP_ZSTD=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_RBD=m
+CONFIG_BLK_DEV_NVME=y
+CONFIG_EEPROM_AT24=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_ISCSI_TCP=m
+CONFIG_SCSI_MVSAS=y
+# CONFIG_SCSI_MVSAS_DEBUG is not set
+CONFIG_SCSI_MVSAS_TASKLET=y
+CONFIG_SCSI_MVUMI=y
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=y
+CONFIG_MEGARAID_MAILBOX=y
+CONFIG_MEGARAID_LEGACY=y
+CONFIG_MEGARAID_SAS=y
+CONFIG_SCSI_MPT2SAS=y
+CONFIG_LIBFC=m
+CONFIG_LIBFCOE=m
+CONFIG_FCOE=m
+CONFIG_SCSI_QLOGIC_1280=m
+CONFIG_SCSI_QLA_FC=m
+CONFIG_TCM_QLA2XXX=m
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_SCSI_LPFC=m
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_PATA_ATIIXP=y
+CONFIG_PATA_PCMCIA=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_BCACHE=m
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
+CONFIG_DM_WRITECACHE=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_USER2=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_ISCSI_TARGET=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=y
+CONFIG_WIREGUARD=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
+CONFIG_VXLAN=y
+CONFIG_RIONET=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+CONFIG_BNX2=y
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+CONFIG_CHELSIO_T1=m
+CONFIG_CHELSIO_T1_1G=y
+CONFIG_CHELSIO_T3=m
+CONFIG_CHELSIO_T4=m
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_I825XX is not set
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IXGB=y
+CONFIG_IXGBE=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+CONFIG_R8169=y
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+# CONFIG_USB_NET_AX8817X is not set
+# CONFIG_USB_NET_AX88179_178A is not set
+CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_HUAWEI_CDC_NCM=m
+CONFIG_USB_NET_CDC_MBIM=m
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_ARMLINUX is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_ATH9K=m
+CONFIG_ATH9K_HTC=m
+CONFIG_IWLWIFI=m
+CONFIG_IWLDVM=m
+CONFIG_IWLMVM=m
+CONFIG_IWLWIFI_BCAST_FILTERING=y
+CONFIG_HOSTAP=m
+CONFIG_MT7601U=m
+CONFIG_RT2X00=m
+CONFIG_RT2800USB=m
+CONFIG_RTL8192CE=m
+CONFIG_RTL8192SE=m
+CONFIG_RTL8192DE=m
+CONFIG_RTL8723AE=m
+CONFIG_RTL8723BE=m
+CONFIG_RTL8188EE=m
+CONFIG_RTL8192EE=m
+CONFIG_RTL8821AE=m
+CONFIG_RTL8192CU=m
+# CONFIG_RTLWIFI_DEBUG is not set
+CONFIG_RTL8XXXU=m
+CONFIG_ZD1211RW=m
+CONFIG_USB_NET_RNDIS_WLAN=m
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_XTKBD=m
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_MOUSE_PS2_SENTELIC=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_RAW=m
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=16
+CONFIG_SERIAL_8250_RUNTIME_UARTS=16
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_PRINTER=m
+CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_HW_RANDOM=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_PIIX4=y
+CONFIG_I2C_GPIO=y
+CONFIG_SPI=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_LOONGSON=y
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM93=m
+CONFIG_SENSORS_W83795=m
+CONFIG_SENSORS_W83627HF=m
+CONFIG_RC_CORE=m
+CONFIG_LIRC=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_NEC_DECODER=m
+CONFIG_IR_RC5_DECODER=m
+CONFIG_IR_RC6_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_SONY_DECODER=m
+CONFIG_IR_SANYO_DECODER=m
+CONFIG_IR_SHARP_DECODER=m
+CONFIG_IR_MCE_KBD_DECODER=m
+CONFIG_IR_XMP_DECODER=m
+CONFIG_IR_IMON_DECODER=m
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_MEDIA_PCI_SUPPORT=y
+CONFIG_VIDEO_BT848=m
+CONFIG_DVB_BT8XX=m
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_RADEON_USERPTR=y
+CONFIG_DRM_AMDGPU=m
+CONFIG_DRM_AMDGPU_SI=y
+CONFIG_DRM_AMDGPU_CIK=y
+CONFIG_DRM_AMDGPU_USERPTR=y
+CONFIG_DRM_AST=y
+CONFIG_FB=y
+CONFIG_FB_EFI=y
+CONFIG_FB_RADEON=y
+CONFIG_LCD_PLATFORM=m
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+# CONFIG_SND_ISA is not set
+CONFIG_SND_BT87X=m
+CONFIG_SND_BT87X_OVERCLOCK=y
+CONFIG_SND_HDA_INTEL=y
+CONFIG_SND_HDA_HWDEP=y
+CONFIG_SND_HDA_INPUT_BEEP=y
+CONFIG_SND_HDA_PATCH_LOADER=y
+CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_SIGMATEL=y
+CONFIG_SND_HDA_CODEC_HDMI=y
+CONFIG_SND_HDA_CODEC_CONEXANT=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_HIDRAW=y
+CONFIG_UHID=m
+CONFIG_HID_A4TECH=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MULTITOUCH=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_STORAGE=m
+CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_USB_UAS=m
+CONFIG_USB_DWC2=y
+CONFIG_USB_DWC2_HOST=y
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_GADGET=y
+CONFIG_INFINIBAND=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_DMADEVICES=y
+CONFIG_UIO=m
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_DMEM_GENIRQ=m
+CONFIG_UIO_PCI_GENERIC=m
+# CONFIG_VIRTIO_MENU is not set
+CONFIG_COMEDI=m
+CONFIG_COMEDI_PCI_DRIVERS=m
+CONFIG_COMEDI_8255_PCI=m
+CONFIG_COMEDI_ADL_PCI6208=m
+CONFIG_COMEDI_ADL_PCI7X3X=m
+CONFIG_COMEDI_ADL_PCI8164=m
+CONFIG_COMEDI_ADL_PCI9111=m
+CONFIG_COMEDI_ADL_PCI9118=m
+CONFIG_COMEDI_ADV_PCI1710=m
+CONFIG_COMEDI_ADV_PCI1720=m
+CONFIG_COMEDI_ADV_PCI1723=m
+CONFIG_COMEDI_ADV_PCI1724=m
+CONFIG_COMEDI_ADV_PCI1760=m
+CONFIG_COMEDI_ADV_PCI_DIO=m
+CONFIG_COMEDI_NI_LABPC_PCI=m
+CONFIG_COMEDI_NI_PCIDIO=m
+CONFIG_COMEDI_NI_PCIMIO=m
+CONFIG_STAGING=y
+CONFIG_R8188EU=m
+# CONFIG_88EU_AP_MODE is not set
+CONFIG_PM_DEVFREQ=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_PWM=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_BTRFS_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_OVERLAY_FS=y
+CONFIG_OVERLAY_FS_INDEX=y
+CONFIG_OVERLAY_FS_XINO_AUTO=y
+CONFIG_OVERLAY_FS_METACOPY=y
+CONFIG_FSCACHE=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=936
+CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_BLOCKLAYOUT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_UTF8=y
+CONFIG_KEY_DH_OPERATIONS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_APPARMOR=y
+CONFIG_SECURITY_YAMA=y
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_PRINTK_TIME=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
new file mode 100644 (file)
index 0000000..83bc068
--- /dev/null
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += dma-contiguous.h
+generic-y += export.h
+generic-y += parport.h
+generic-y += early_ioremap.h
+generic-y += qrwlock.h
+generic-y += qrwlock_types.h
+generic-y += spinlock.h
+generic-y += spinlock_types.h
+generic-y += rwsem.h
+generic-y += segment.h
+generic-y += user.h
+generic-y += stat.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += statfs.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += termios.h
+generic-y += termbits.h
+generic-y += poll.h
+generic-y += param.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += kvm_para.h
diff --git a/arch/loongarch/include/asm/acenv.h b/arch/loongarch/include/asm/acenv.h
new file mode 100644 (file)
index 0000000..52f298f
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * LoongArch specific ACPICA environments and implementation
+ *
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_ACENV_H
+#define _ASM_LOONGARCH_ACENV_H
+
+/*
+ * This header is required by ACPI core, but we have nothing to fill in
+ * right now. Will be updated later when needed.
+ */
+
+#endif /* _ASM_LOONGARCH_ACENV_H */
diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
new file mode 100644 (file)
index 0000000..62044cd
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_ACPI_H
+#define _ASM_LOONGARCH_ACPI_H
+
+#ifdef CONFIG_ACPI
+extern int acpi_strict;
+extern int acpi_disabled;
+extern int acpi_pci_disabled;
+extern int acpi_noirq;
+
+#define acpi_os_ioremap acpi_os_ioremap
+void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+
+static inline void disable_acpi(void)
+{
+       acpi_disabled = 1;
+       acpi_pci_disabled = 1;
+       acpi_noirq = 1;
+}
+
+static inline bool acpi_has_cpu_in_madt(void)
+{
+       return true;
+}
+
+extern struct list_head acpi_wakeup_device_list;
+
+#endif /* !CONFIG_ACPI */
+
+#define ACPI_TABLE_UPGRADE_MAX_PHYS ARCH_LOW_ADDRESS_LIMIT
+
+#endif /* _ASM_LOONGARCH_ACPI_H */
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
new file mode 100644 (file)
index 0000000..b91e073
--- /dev/null
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1996, 99 Ralf Baechle
+ * Copyright (C) 2000, 2002  Maciej W. Rozycki
+ * Copyright (C) 1990, 1999 by Silicon Graphics, Inc.
+ */
+#ifndef _ASM_ADDRSPACE_H
+#define _ASM_ADDRSPACE_H
+
+#include <linux/const.h>
+
+#include <asm/loongarch.h>
+
+/*
+ * This gives the physical RAM offset.
+ */
+#ifndef __ASSEMBLY__
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET    _AC(0, UL)
+#endif
+extern unsigned long vm_map_base;
+#endif /* __ASSEMBLY__ */
+
+#ifndef IO_BASE
+#define IO_BASE                        CSR_DMW0_BASE
+#endif
+
+#ifndef CACHE_BASE
+#define CACHE_BASE             CSR_DMW1_BASE
+#endif
+
+#ifndef UNCACHE_BASE
+#define UNCACHE_BASE           CSR_DMW0_BASE
+#endif
+
+#define DMW_PABITS     48
+#define TO_PHYS_MASK   ((1ULL << DMW_PABITS) - 1)
+
+/*
+ * Memory above this physical address will be considered highmem.
+ */
+#ifndef HIGHMEM_START
+#define HIGHMEM_START          (_AC(1, UL) << _AC(DMW_PABITS, UL))
+#endif
+
+#define TO_PHYS(x)             (               ((x) & TO_PHYS_MASK))
+#define TO_CACHE(x)            (CACHE_BASE   | ((x) & TO_PHYS_MASK))
+#define TO_UNCACHE(x)          (UNCACHE_BASE | ((x) & TO_PHYS_MASK))
+
+/*
+ * This handles the memory map.
+ */
+#ifndef PAGE_OFFSET
+#define PAGE_OFFSET            (CACHE_BASE + PHYS_OFFSET)
+#endif
+
+#ifndef FIXADDR_TOP
+#define FIXADDR_TOP            ((unsigned long)(long)(int)0xfffe0000)
+#endif
+
+#ifdef __ASSEMBLY__
+#define _ATYPE_
+#define _ATYPE32_
+#define _ATYPE64_
+#define _CONST64_(x)   x
+#else
+#define _ATYPE_                __PTRDIFF_TYPE__
+#define _ATYPE32_      int
+#define _ATYPE64_      __s64
+#ifdef CONFIG_64BIT
+#define _CONST64_(x)   x ## L
+#else
+#define _CONST64_(x)   x ## LL
+#endif
+#endif
+
+/*
+ *  32/64-bit LoongArch address spaces
+ */
+#ifdef __ASSEMBLY__
+#define _ACAST32_
+#define _ACAST64_
+#else
+#define _ACAST32_              (_ATYPE_)(_ATYPE32_)    /* widen if necessary */
+#define _ACAST64_              (_ATYPE64_)             /* do _not_ narrow */
+#endif
+
+#ifdef CONFIG_32BIT
+
+#define UVRANGE                        0x00000000
+#define KPRANGE0               0x80000000
+#define KPRANGE1               0xa0000000
+#define KVRANGE                        0xc0000000
+
+#else
+
+#define XUVRANGE               _CONST64_(0x0000000000000000)
+#define XSPRANGE               _CONST64_(0x4000000000000000)
+#define XKPRANGE               _CONST64_(0x8000000000000000)
+#define XKVRANGE               _CONST64_(0xc000000000000000)
+
+#endif
+
+/*
+ * Returns the physical address of a KPRANGEx / XKPRANGE address
+ */
+#define PHYSADDR(a)            ((_ACAST64_(a)) & TO_PHYS_MASK)
+
+#endif /* _ASM_ADDRSPACE_H */
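
The TO_PHYS/TO_CACHE/TO_UNCACHE macros above just mask an address down to the low DMW_PABITS bits and OR in a direct-mapped-window base. The stand-alone example below assumes the usual Loongson window bases (0x8000000000000000 for the uncached DMW0 window, 0x9000000000000000 for the cached DMW1 window); those constants reflect typical firmware settings and are not taken from this diff:

    #include <stdio.h>
    #include <stdint.h>

    #define DMW_PABITS    48
    #define TO_PHYS_MASK  ((1ULL << DMW_PABITS) - 1)

    /* Assumed window bases (how CSR.DMW0/CSR.DMW1 are typically programmed). */
    #define UNCACHE_BASE  0x8000000000000000ULL
    #define CACHE_BASE    0x9000000000000000ULL

    #define TO_PHYS(x)    (               ((x) & TO_PHYS_MASK))
    #define TO_CACHE(x)   (CACHE_BASE   | ((x) & TO_PHYS_MASK))
    #define TO_UNCACHE(x) (UNCACHE_BASE | ((x) & TO_PHYS_MASK))

    int main(void)
    {
        uint64_t phys = 0x200000;  /* the load offset used by the new Makefile */

        printf("cached VA:   %#018llx\n", (unsigned long long)TO_CACHE(phys));
        printf("uncached VA: %#018llx\n", (unsigned long long)TO_UNCACHE(phys));
        printf("back to PA:  %#018llx\n", (unsigned long long)TO_PHYS(TO_CACHE(phys)));
        return 0;
    }

With these assumptions the cached VA comes out as 0x9000000000200000, which is the load-y value used in the new arch/loongarch/Makefile above.
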
diff --git a/arch/loongarch/include/asm/asm-offsets.h b/arch/loongarch/include/asm/asm-offsets.h
new file mode 100644 (file)
index 0000000..d9ad88d
--- /dev/null
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <generated/asm-offsets.h>
diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h
new file mode 100644 (file)
index 0000000..ed06d39
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/uaccess.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
diff --git a/arch/loongarch/include/asm/asm.h b/arch/loongarch/include/asm/asm.h
new file mode 100644 (file)
index 0000000..40eea6a
--- /dev/null
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Some useful macros for LoongArch assembler code
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1995, 1996, 1997, 1999, 2001 by Ralf Baechle
+ * Copyright (C) 1999 by Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2002  Maciej W. Rozycki
+ */
+#ifndef __ASM_ASM_H
+#define __ASM_ASM_H
+
+/* LoongArch pref instruction. */
+#ifdef CONFIG_CPU_HAS_PREFETCH
+
+#define PREF(hint, addr, offs)                         \
+               preld   hint, addr, offs;               \
+
+#define PREFX(hint, addr, index)                       \
+               preldx  hint, addr, index;              \
+
+#else /* !CONFIG_CPU_HAS_PREFETCH */
+
+#define PREF(hint, addr, offs)
+#define PREFX(hint, addr, index)
+
+#endif /* !CONFIG_CPU_HAS_PREFETCH */
+
+/*
+ * Stack alignment
+ */
+#define STACK_ALIGN    ~(0xf)
+
+/*
+ * Macros to handle different pointer/register sizes for 32/64-bit code
+ */
+
+/*
+ * Size of a register
+ */
+#ifndef __loongarch64
+#define SZREG  4
+#else
+#define SZREG  8
+#endif
+
+/*
+ * Use the following macros in assembler code to load/store registers,
+ * pointers etc.
+ */
+#if (SZREG == 4)
+#define REG_L          ld.w
+#define REG_S          st.w
+#define REG_ADD                add.w
+#define REG_SUB                sub.w
+#else /* SZREG == 8 */
+#define REG_L          ld.d
+#define REG_S          st.d
+#define REG_ADD                add.d
+#define REG_SUB                sub.d
+#endif
+
+/*
+ * How to add/sub/load/store/shift C int variables.
+ */
+#if (__SIZEOF_INT__ == 4)
+#define INT_ADD                add.w
+#define INT_ADDI       addi.w
+#define INT_SUB                sub.w
+#define INT_L          ld.w
+#define INT_S          st.w
+#define INT_SLL                slli.w
+#define INT_SLLV       sll.w
+#define INT_SRL                srli.w
+#define INT_SRLV       srl.w
+#define INT_SRA                srai.w
+#define INT_SRAV       sra.w
+#endif
+
+#if (__SIZEOF_INT__ == 8)
+#define INT_ADD                add.d
+#define INT_ADDI       addi.d
+#define INT_SUB                sub.d
+#define INT_L          ld.d
+#define INT_S          st.d
+#define INT_SLL                slli.d
+#define INT_SLLV       sll.d
+#define INT_SRL                srli.d
+#define INT_SRLV       srl.d
+#define INT_SRA                srai.d
+#define INT_SRAV       sra.d
+#endif
+
+/*
+ * How to add/sub/load/store/shift C long variables.
+ */
+#if (__SIZEOF_LONG__ == 4)
+#define LONG_ADD       add.w
+#define LONG_ADDI      addi.w
+#define LONG_SUB       sub.w
+#define LONG_L         ld.w
+#define LONG_S         st.w
+#define LONG_SLL       slli.w
+#define LONG_SLLV      sll.w
+#define LONG_SRL       srli.w
+#define LONG_SRLV      srl.w
+#define LONG_SRA       srai.w
+#define LONG_SRAV      sra.w
+
+#ifdef __ASSEMBLY__
+#define LONG           .word
+#endif
+#define LONGSIZE       4
+#define LONGMASK       3
+#define LONGLOG                2
+#endif
+
+#if (__SIZEOF_LONG__ == 8)
+#define LONG_ADD       add.d
+#define LONG_ADDI      addi.d
+#define LONG_SUB       sub.d
+#define LONG_L         ld.d
+#define LONG_S         st.d
+#define LONG_SLL       slli.d
+#define LONG_SLLV      sll.d
+#define LONG_SRL       srli.d
+#define LONG_SRLV      srl.d
+#define LONG_SRA       srai.d
+#define LONG_SRAV      sra.d
+
+#ifdef __ASSEMBLY__
+#define LONG           .dword
+#endif
+#define LONGSIZE       8
+#define LONGMASK       7
+#define LONGLOG                3
+#endif
+
+/*
+ * How to add/sub/load/store/shift pointers.
+ */
+#if (__SIZEOF_POINTER__ == 4)
+#define PTR_ADD                add.w
+#define PTR_ADDI       addi.w
+#define PTR_SUB                sub.w
+#define PTR_L          ld.w
+#define PTR_S          st.w
+#define PTR_LI         li.w
+#define PTR_SLL                slli.w
+#define PTR_SLLV       sll.w
+#define PTR_SRL                srli.w
+#define PTR_SRLV       srl.w
+#define PTR_SRA                srai.w
+#define PTR_SRAV       sra.w
+
+#define PTR_SCALESHIFT 2
+
+#ifdef __ASSEMBLY__
+#define PTR            .word
+#endif
+#define PTRSIZE                4
+#define PTRLOG         2
+#endif
+
+#if (__SIZEOF_POINTER__ == 8)
+#define PTR_ADD                add.d
+#define PTR_ADDI       addi.d
+#define PTR_SUB                sub.d
+#define PTR_L          ld.d
+#define PTR_S          st.d
+#define PTR_LI         li.d
+#define PTR_SLL                slli.d
+#define PTR_SLLV       sll.d
+#define PTR_SRL                srli.d
+#define PTR_SRLV       srl.d
+#define PTR_SRA                srai.d
+#define PTR_SRAV       sra.d
+
+#define PTR_SCALESHIFT 3
+
+#ifdef __ASSEMBLY__
+#define PTR            .dword
+#endif
+#define PTRSIZE                8
+#define PTRLOG         3
+#endif
+
+#endif /* __ASM_ASM_H */
diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h
new file mode 100644 (file)
index 0000000..a1a0408
--- /dev/null
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_ASMMACRO_H
+#define _ASM_ASMMACRO_H
+
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+#include <asm/loongarch.h>
+
+       .macro  parse_v var val
+       \var    = \val
+       .endm
+
+       .macro  parse_r var r
+       \var    = -1
+       .ifc    \r, $r0
+       \var    = 0
+       .endif
+       .ifc    \r, $r1
+       \var    = 1
+       .endif
+       .ifc    \r, $r2
+       \var    = 2
+       .endif
+       .ifc    \r, $r3
+       \var    = 3
+       .endif
+       .ifc    \r, $r4
+       \var    = 4
+       .endif
+       .ifc    \r, $r5
+       \var    = 5
+       .endif
+       .ifc    \r, $r6
+       \var    = 6
+       .endif
+       .ifc    \r, $r7
+       \var    = 7
+       .endif
+       .ifc    \r, $r8
+       \var    = 8
+       .endif
+       .ifc    \r, $r9
+       \var    = 9
+       .endif
+       .ifc    \r, $r10
+       \var    = 10
+       .endif
+       .ifc    \r, $r11
+       \var    = 11
+       .endif
+       .ifc    \r, $r12
+       \var    = 12
+       .endif
+       .ifc    \r, $r13
+       \var    = 13
+       .endif
+       .ifc    \r, $r14
+       \var    = 14
+       .endif
+       .ifc    \r, $r15
+       \var    = 15
+       .endif
+       .ifc    \r, $r16
+       \var    = 16
+       .endif
+       .ifc    \r, $r17
+       \var    = 17
+       .endif
+       .ifc    \r, $r18
+       \var    = 18
+       .endif
+       .ifc    \r, $r19
+       \var    = 19
+       .endif
+       .ifc    \r, $r20
+       \var    = 20
+       .endif
+       .ifc    \r, $r21
+       \var    = 21
+       .endif
+       .ifc    \r, $r22
+       \var    = 22
+       .endif
+       .ifc    \r, $r23
+       \var    = 23
+       .endif
+       .ifc    \r, $r24
+       \var    = 24
+       .endif
+       .ifc    \r, $r25
+       \var    = 25
+       .endif
+       .ifc    \r, $r26
+       \var    = 26
+       .endif
+       .ifc    \r, $r27
+       \var    = 27
+       .endif
+       .ifc    \r, $r28
+       \var    = 28
+       .endif
+       .ifc    \r, $r29
+       \var    = 29
+       .endif
+       .ifc    \r, $r30
+       \var    = 30
+       .endif
+       .ifc    \r, $r31
+       \var    = 31
+       .endif
+       .iflt   \var
+       .error  "Unable to parse register name \r"
+       .endif
+       .endm
+
+       .macro  cpu_save_nonscratch thread
+       stptr.d s0, \thread, THREAD_REG23
+       stptr.d s1, \thread, THREAD_REG24
+       stptr.d s2, \thread, THREAD_REG25
+       stptr.d s3, \thread, THREAD_REG26
+       stptr.d s4, \thread, THREAD_REG27
+       stptr.d s5, \thread, THREAD_REG28
+       stptr.d s6, \thread, THREAD_REG29
+       stptr.d s7, \thread, THREAD_REG30
+       stptr.d s8, \thread, THREAD_REG31
+       stptr.d sp, \thread, THREAD_REG03
+       stptr.d fp, \thread, THREAD_REG22
+       .endm
+
+       .macro  cpu_restore_nonscratch thread
+       ldptr.d s0, \thread, THREAD_REG23
+       ldptr.d s1, \thread, THREAD_REG24
+       ldptr.d s2, \thread, THREAD_REG25
+       ldptr.d s3, \thread, THREAD_REG26
+       ldptr.d s4, \thread, THREAD_REG27
+       ldptr.d s5, \thread, THREAD_REG28
+       ldptr.d s6, \thread, THREAD_REG29
+       ldptr.d s7, \thread, THREAD_REG30
+       ldptr.d s8, \thread, THREAD_REG31
+       ldptr.d ra, \thread, THREAD_REG01
+       ldptr.d sp, \thread, THREAD_REG03
+       ldptr.d fp, \thread, THREAD_REG22
+       .endm
+
+       .macro fpu_save_csr thread tmp
+       movfcsr2gr      \tmp, fcsr0
+       stptr.w \tmp, \thread, THREAD_FCSR
+       .endm
+
+       .macro fpu_restore_csr thread tmp
+       ldptr.w \tmp, \thread, THREAD_FCSR
+       movgr2fcsr      fcsr0, \tmp
+       .endm
+
+       .macro fpu_save_cc thread tmp0 tmp1
+       movcf2gr        \tmp0, $fcc0
+       move    \tmp1, \tmp0
+       movcf2gr        \tmp0, $fcc1
+       bstrins.d       \tmp1, \tmp0, 15, 8
+       movcf2gr        \tmp0, $fcc2
+       bstrins.d       \tmp1, \tmp0, 23, 16
+       movcf2gr        \tmp0, $fcc3
+       bstrins.d       \tmp1, \tmp0, 31, 24
+       movcf2gr        \tmp0, $fcc4
+       bstrins.d       \tmp1, \tmp0, 39, 32
+       movcf2gr        \tmp0, $fcc5
+       bstrins.d       \tmp1, \tmp0, 47, 40
+       movcf2gr        \tmp0, $fcc6
+       bstrins.d       \tmp1, \tmp0, 55, 48
+       movcf2gr        \tmp0, $fcc7
+       bstrins.d       \tmp1, \tmp0, 63, 56
+       stptr.d         \tmp1, \thread, THREAD_FCC
+       .endm
+
+       .macro fpu_restore_cc thread tmp0 tmp1
+       ldptr.d \tmp0, \thread, THREAD_FCC
+       bstrpick.d      \tmp1, \tmp0, 7, 0
+       movgr2cf        $fcc0, \tmp1
+       bstrpick.d      \tmp1, \tmp0, 15, 8
+       movgr2cf        $fcc1, \tmp1
+       bstrpick.d      \tmp1, \tmp0, 23, 16
+       movgr2cf        $fcc2, \tmp1
+       bstrpick.d      \tmp1, \tmp0, 31, 24
+       movgr2cf        $fcc3, \tmp1
+       bstrpick.d      \tmp1, \tmp0, 39, 32
+       movgr2cf        $fcc4, \tmp1
+       bstrpick.d      \tmp1, \tmp0, 47, 40
+       movgr2cf        $fcc5, \tmp1
+       bstrpick.d      \tmp1, \tmp0, 55, 48
+       movgr2cf        $fcc6, \tmp1
+       bstrpick.d      \tmp1, \tmp0, 63, 56
+       movgr2cf        $fcc7, \tmp1
+       .endm
+
+       .macro  fpu_save_double thread tmp
+       li.w    \tmp, THREAD_FPR0
+       PTR_ADD \tmp, \tmp, \thread
+       fst.d   $f0, \tmp, THREAD_FPR0  - THREAD_FPR0
+       fst.d   $f1, \tmp, THREAD_FPR1  - THREAD_FPR0
+       fst.d   $f2, \tmp, THREAD_FPR2  - THREAD_FPR0
+       fst.d   $f3, \tmp, THREAD_FPR3  - THREAD_FPR0
+       fst.d   $f4, \tmp, THREAD_FPR4  - THREAD_FPR0
+       fst.d   $f5, \tmp, THREAD_FPR5  - THREAD_FPR0
+       fst.d   $f6, \tmp, THREAD_FPR6  - THREAD_FPR0
+       fst.d   $f7, \tmp, THREAD_FPR7  - THREAD_FPR0
+       fst.d   $f8, \tmp, THREAD_FPR8  - THREAD_FPR0
+       fst.d   $f9, \tmp, THREAD_FPR9  - THREAD_FPR0
+       fst.d   $f10, \tmp, THREAD_FPR10 - THREAD_FPR0
+       fst.d   $f11, \tmp, THREAD_FPR11 - THREAD_FPR0
+       fst.d   $f12, \tmp, THREAD_FPR12 - THREAD_FPR0
+       fst.d   $f13, \tmp, THREAD_FPR13 - THREAD_FPR0
+       fst.d   $f14, \tmp, THREAD_FPR14 - THREAD_FPR0
+       fst.d   $f15, \tmp, THREAD_FPR15 - THREAD_FPR0
+       fst.d   $f16, \tmp, THREAD_FPR16 - THREAD_FPR0
+       fst.d   $f17, \tmp, THREAD_FPR17 - THREAD_FPR0
+       fst.d   $f18, \tmp, THREAD_FPR18 - THREAD_FPR0
+       fst.d   $f19, \tmp, THREAD_FPR19 - THREAD_FPR0
+       fst.d   $f20, \tmp, THREAD_FPR20 - THREAD_FPR0
+       fst.d   $f21, \tmp, THREAD_FPR21 - THREAD_FPR0
+       fst.d   $f22, \tmp, THREAD_FPR22 - THREAD_FPR0
+       fst.d   $f23, \tmp, THREAD_FPR23 - THREAD_FPR0
+       fst.d   $f24, \tmp, THREAD_FPR24 - THREAD_FPR0
+       fst.d   $f25, \tmp, THREAD_FPR25 - THREAD_FPR0
+       fst.d   $f26, \tmp, THREAD_FPR26 - THREAD_FPR0
+       fst.d   $f27, \tmp, THREAD_FPR27 - THREAD_FPR0
+       fst.d   $f28, \tmp, THREAD_FPR28 - THREAD_FPR0
+       fst.d   $f29, \tmp, THREAD_FPR29 - THREAD_FPR0
+       fst.d   $f30, \tmp, THREAD_FPR30 - THREAD_FPR0
+       fst.d   $f31, \tmp, THREAD_FPR31 - THREAD_FPR0
+       .endm
+
+       .macro  fpu_restore_double thread tmp
+       li.w    \tmp, THREAD_FPR0
+       PTR_ADD \tmp, \tmp, \thread
+       fld.d   $f0, \tmp, THREAD_FPR0  - THREAD_FPR0
+       fld.d   $f1, \tmp, THREAD_FPR1  - THREAD_FPR0
+       fld.d   $f2, \tmp, THREAD_FPR2  - THREAD_FPR0
+       fld.d   $f3, \tmp, THREAD_FPR3  - THREAD_FPR0
+       fld.d   $f4, \tmp, THREAD_FPR4  - THREAD_FPR0
+       fld.d   $f5, \tmp, THREAD_FPR5  - THREAD_FPR0
+       fld.d   $f6, \tmp, THREAD_FPR6  - THREAD_FPR0
+       fld.d   $f7, \tmp, THREAD_FPR7  - THREAD_FPR0
+       fld.d   $f8, \tmp, THREAD_FPR8  - THREAD_FPR0
+       fld.d   $f9, \tmp, THREAD_FPR9  - THREAD_FPR0
+       fld.d   $f10, \tmp, THREAD_FPR10 - THREAD_FPR0
+       fld.d   $f11, \tmp, THREAD_FPR11 - THREAD_FPR0
+       fld.d   $f12, \tmp, THREAD_FPR12 - THREAD_FPR0
+       fld.d   $f13, \tmp, THREAD_FPR13 - THREAD_FPR0
+       fld.d   $f14, \tmp, THREAD_FPR14 - THREAD_FPR0
+       fld.d   $f15, \tmp, THREAD_FPR15 - THREAD_FPR0
+       fld.d   $f16, \tmp, THREAD_FPR16 - THREAD_FPR0
+       fld.d   $f17, \tmp, THREAD_FPR17 - THREAD_FPR0
+       fld.d   $f18, \tmp, THREAD_FPR18 - THREAD_FPR0
+       fld.d   $f19, \tmp, THREAD_FPR19 - THREAD_FPR0
+       fld.d   $f20, \tmp, THREAD_FPR20 - THREAD_FPR0
+       fld.d   $f21, \tmp, THREAD_FPR21 - THREAD_FPR0
+       fld.d   $f22, \tmp, THREAD_FPR22 - THREAD_FPR0
+       fld.d   $f23, \tmp, THREAD_FPR23 - THREAD_FPR0
+       fld.d   $f24, \tmp, THREAD_FPR24 - THREAD_FPR0
+       fld.d   $f25, \tmp, THREAD_FPR25 - THREAD_FPR0
+       fld.d   $f26, \tmp, THREAD_FPR26 - THREAD_FPR0
+       fld.d   $f27, \tmp, THREAD_FPR27 - THREAD_FPR0
+       fld.d   $f28, \tmp, THREAD_FPR28 - THREAD_FPR0
+       fld.d   $f29, \tmp, THREAD_FPR29 - THREAD_FPR0
+       fld.d   $f30, \tmp, THREAD_FPR30 - THREAD_FPR0
+       fld.d   $f31, \tmp, THREAD_FPR31 - THREAD_FPR0
+       .endm
+
+.macro not dst src
+       nor     \dst, \src, zero
+.endm
+
+.macro bgt r0 r1 label
+       blt     \r1, \r0, \label
+.endm
+
+.macro bltz r0 label
+       blt     \r0, zero, \label
+.endm
+
+.macro bgez r0 label
+       bge     \r0, zero, \label
+.endm
+
+#endif /* _ASM_ASMMACRO_H */
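
fpu_save_cc packs the eight condition-flag registers $fcc0..$fcc7 into one 64-bit word, one byte per flag (fcc0 in bits 7:0, fcc1 in bits 15:8, and so on), and fpu_restore_cc reverses the packing with bstrpick.d/movgr2cf. A plain C model of the same layout, purely illustrative since the real save/restore must run in assembly with movcf2gr/movgr2cf:

#include <linux/types.h>

/* Pack: flag i lands in bits [8*i + 7 : 8*i], matching the bstrins.d fields above. */
static u64 pack_fcc(const u8 fcc[8])
{
	u64 word = 0;
	int i;

	for (i = 0; i < 8; i++)
		word |= (u64)fcc[i] << (8 * i);
	return word;
}

/* Unpack: mirror of the bstrpick.d fields in fpu_restore_cc. */
static void unpack_fcc(u64 word, u8 fcc[8])
{
	int i;

	for (i = 0; i < 8; i++)
		fcc[i] = (word >> (8 * i)) & 0xff;
}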
diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h
new file mode 100644 (file)
index 0000000..979367a
--- /dev/null
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Atomic operations.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <linux/types.h>
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+#include <asm/compiler.h>
+
+#if __SIZEOF_LONG__ == 4
+#define __LL           "ll.w   "
+#define __SC           "sc.w   "
+#define __AMADD                "amadd.w        "
+#define __AMAND_DB     "amand_db.w     "
+#define __AMOR_DB      "amor_db.w      "
+#define __AMXOR_DB     "amxor_db.w     "
+#elif __SIZEOF_LONG__ == 8
+#define __LL           "ll.d   "
+#define __SC           "sc.d   "
+#define __AMADD                "amadd.d        "
+#define __AMAND_DB     "amand_db.d     "
+#define __AMOR_DB      "amor_db.d      "
+#define __AMXOR_DB     "amxor_db.d     "
+#endif
+
+#define ATOMIC_INIT(i)   { (i) }
+
+/*
+ * arch_atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define arch_atomic_read(v)    READ_ONCE((v)->counter)
+
+/*
+ * arch_atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#define arch_atomic_set(v, i)  WRITE_ONCE((v)->counter, (i))
+
+#define ATOMIC_OP(op, I, asm_op)                                       \
+static inline void arch_atomic_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       __asm__ __volatile__(                                           \
+       "am"#asm_op"_db.w" " $zero, %1, %0      \n"                     \
+       : "+ZB" (v->counter)                                            \
+       : "r" (I)                                                       \
+       : "memory");                                                    \
+}
+
+#define ATOMIC_OP_RETURN(op, I, asm_op, c_op)                          \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)        \
+{                                                                      \
+       int result;                                                     \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       "am"#asm_op"_db.w" " %1, %2, %0         \n"                     \
+       : "+ZB" (v->counter), "=&r" (result)                            \
+       : "r" (I)                                                       \
+       : "memory");                                                    \
+                                                                       \
+       return result c_op I;                                           \
+}
+
+#define ATOMIC_FETCH_OP(op, I, asm_op)                                 \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+{                                                                      \
+       int result;                                                     \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       "am"#asm_op"_db.w" " %1, %2, %0         \n"                     \
+       : "+ZB" (v->counter), "=&r" (result)                            \
+       : "r" (I)                                                       \
+       : "memory");                                                    \
+                                                                       \
+       return result;                                                  \
+}
+
+#define ATOMIC_OPS(op, I, asm_op, c_op)                                        \
+       ATOMIC_OP(op, I, asm_op)                                        \
+       ATOMIC_OP_RETURN(op, I, asm_op, c_op)                           \
+       ATOMIC_FETCH_OP(op, I, asm_op)
+
+ATOMIC_OPS(add, i, add, +)
+ATOMIC_OPS(sub, -i, add, +)
+
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed  arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed  arch_atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+
+#define ATOMIC_OPS(op, I, asm_op)                                      \
+       ATOMIC_OP(op, I, asm_op)                                        \
+       ATOMIC_FETCH_OP(op, I, asm_op)
+
+ATOMIC_OPS(and, i, and)
+ATOMIC_OPS(or, i, or)
+ATOMIC_OPS(xor, i, xor)
+
+#define arch_atomic_fetch_and_relaxed  arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed   arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed  arch_atomic_fetch_xor_relaxed
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+       int prev, rc;
+
+       __asm__ __volatile__ (
+               "0:     ll.w    %[p],  %[c]\n"
+               "       beq     %[p],  %[u], 1f\n"
+               "       add.w   %[rc], %[p], %[a]\n"
+               "       sc.w    %[rc], %[c]\n"
+               "       beqz    %[rc], 0b\n"
+               "       b       2f\n"
+               "1:\n"
+               __WEAK_LLSC_MB
+               "2:\n"
+               : [p]"=&r" (prev), [rc]"=&r" (rc),
+                 [c]"=ZB" (v->counter)
+               : [a]"r" (a), [u]"r" (u)
+               : "memory");
+
+       return prev;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
+
+/*
+ * arch_atomic_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically test @v and subtract @i if @v is greater than or equal to @i.
+ * The function returns the old value of @v minus @i.
+ */
+static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
+{
+       int result;
+       int temp;
+
+       if (__builtin_constant_p(i)) {
+               __asm__ __volatile__(
+               "1:     ll.w    %1, %2          # atomic_sub_if_positive\n"
+               "       addi.w  %0, %1, %3                              \n"
+               "       or      %1, %0, $zero                           \n"
+               "       blt     %0, $zero, 2f                           \n"
+               "       sc.w    %1, %2                                  \n"
+               "       beq     $zero, %1, 1b                           \n"
+               "2:                                                     \n"
+               __WEAK_LLSC_MB
+               : "=&r" (result), "=&r" (temp),
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)
+               : "I" (-i));
+       } else {
+               __asm__ __volatile__(
+               "1:     ll.w    %1, %2          # atomic_sub_if_positive\n"
+               "       sub.w   %0, %1, %3                              \n"
+               "       or      %1, %0, $zero                           \n"
+               "       blt     %0, $zero, 2f                           \n"
+               "       sc.w    %1, %2                                  \n"
+               "       beq     $zero, %1, 1b                           \n"
+               "2:                                                     \n"
+               __WEAK_LLSC_MB
+               : "=&r" (result), "=&r" (temp),
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)
+               : "r" (i));
+       }
+
+       return result;
+}
+
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
+
+/*
+ * arch_atomic_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ */
+#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(1, v)
+
+#ifdef CONFIG_64BIT
+
+#define ATOMIC64_INIT(i)    { (i) }
+
+/*
+ * arch_atomic64_read - read atomic variable
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define arch_atomic64_read(v)  READ_ONCE((v)->counter)
+
+/*
+ * arch_atomic64_set - set atomic variable
+ * @v: pointer of type atomic64_t
+ * @i: required value
+ */
+#define arch_atomic64_set(v, i)        WRITE_ONCE((v)->counter, (i))
+
+#define ATOMIC64_OP(op, I, asm_op)                                     \
+static inline void arch_atomic64_##op(long i, atomic64_t *v)           \
+{                                                                      \
+       __asm__ __volatile__(                                           \
+       "am"#asm_op"_db.d " " $zero, %1, %0     \n"                     \
+       : "+ZB" (v->counter)                                            \
+       : "r" (I)                                                       \
+       : "memory");                                                    \
+}
+
+#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op)                                        \
+static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)  \
+{                                                                              \
+       long result;                                                            \
+       __asm__ __volatile__(                                                   \
+       "am"#asm_op"_db.d " " %1, %2, %0                \n"                     \
+       : "+ZB" (v->counter), "=&r" (result)                                    \
+       : "r" (I)                                                               \
+       : "memory");                                                            \
+                                                                               \
+       return result c_op I;                                                   \
+}
+
+#define ATOMIC64_FETCH_OP(op, I, asm_op)                                       \
+static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v)   \
+{                                                                              \
+       long result;                                                            \
+                                                                               \
+       __asm__ __volatile__(                                                   \
+       "am"#asm_op"_db.d " " %1, %2, %0                \n"                     \
+       : "+ZB" (v->counter), "=&r" (result)                                    \
+       : "r" (I)                                                               \
+       : "memory");                                                            \
+                                                                               \
+       return result;                                                          \
+}
+
+#define ATOMIC64_OPS(op, I, asm_op, c_op)                                    \
+       ATOMIC64_OP(op, I, asm_op)                                            \
+       ATOMIC64_OP_RETURN(op, I, asm_op, c_op)                               \
+       ATOMIC64_FETCH_OP(op, I, asm_op)
+
+ATOMIC64_OPS(add, i, add, +)
+ATOMIC64_OPS(sub, -i, add, +)
+
+#define arch_atomic64_add_return_relaxed       arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed       arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed                arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed                arch_atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+
+#define ATOMIC64_OPS(op, I, asm_op)                                          \
+       ATOMIC64_OP(op, I, asm_op)                                            \
+       ATOMIC64_FETCH_OP(op, I, asm_op)
+
+ATOMIC64_OPS(and, i, and)
+ATOMIC64_OPS(or, i, or)
+ATOMIC64_OPS(xor, i, xor)
+
+#define arch_atomic64_fetch_and_relaxed        arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed        arch_atomic64_fetch_xor_relaxed
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+{
+       long prev, rc;
+
+       __asm__ __volatile__ (
+               "0:     ll.d    %[p],  %[c]\n"
+               "       beq     %[p],  %[u], 1f\n"
+               "       add.d   %[rc], %[p], %[a]\n"
+               "       sc.d    %[rc], %[c]\n"
+               "       beqz    %[rc], 0b\n"
+               "       b       2f\n"
+               "1:\n"
+               __WEAK_LLSC_MB
+               "2:\n"
+               : [p]"=&r" (prev), [rc]"=&r" (rc),
+                 [c] "=ZB" (v->counter)
+               : [a]"r" (a), [u]"r" (u)
+               : "memory");
+
+       return prev;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
+
+/*
+ * arch_atomic64_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically test @v and subtract @i if @v is greater than or equal to @i.
+ * The function returns the old value of @v minus @i.
+ */
+static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
+{
+       long result;
+       long temp;
+
+       if (__builtin_constant_p(i)) {
+               __asm__ __volatile__(
+               "1:     ll.d    %1, %2  # atomic64_sub_if_positive      \n"
+               "       addi.d  %0, %1, %3                              \n"
+               "       or      %1, %0, $zero                           \n"
+               "       blt     %0, $zero, 2f                           \n"
+               "       sc.d    %1, %2                                  \n"
+               "       beq     %1, $zero, 1b                           \n"
+               "2:                                                     \n"
+               __WEAK_LLSC_MB
+               : "=&r" (result), "=&r" (temp),
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)
+               : "I" (-i));
+       } else {
+               __asm__ __volatile__(
+               "1:     ll.d    %1, %2  # atomic64_sub_if_positive      \n"
+               "       sub.d   %0, %1, %3                              \n"
+               "       or      %1, %0, $zero                           \n"
+               "       blt     %0, $zero, 2f                           \n"
+               "       sc.d    %1, %2                                  \n"
+               "       beq     %1, $zero, 1b                           \n"
+               "2:                                                     \n"
+               __WEAK_LLSC_MB
+               : "=&r" (result), "=&r" (temp),
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)
+               : "r" (i));
+       }
+
+       return result;
+}
+
+#define arch_atomic64_cmpxchg(v, o, n) \
+       ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
+
+/*
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic64_t
+ */
+#define arch_atomic64_dec_if_positive(v)       arch_atomic64_sub_if_positive(1, v)
+
+#endif /* CONFIG_64BIT */
+
+#endif /* _ASM_ATOMIC_H */
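
arch_atomic_sub_if_positive() (and its 64-bit twin) retries an LL/SC loop: it computes old - i, skips the store and bails out when the result would be negative, and otherwise keeps retrying until the sc succeeds, always returning old - i. A portable semantic model of that loop, shown only to illustrate the behaviour; it is not how the kernel implements it:

/* Semantic model of the ll.w/sc.w loop in arch_atomic_sub_if_positive(). */
static int model_sub_if_positive(int i, atomic_t *v)
{
	int old, new;

	do {
		old = arch_atomic_read(v);
		new = old - i;
		if (new < 0)
			return new;	/* counter left untouched, caller sees old - i */
	} while (arch_cmpxchg(&v->counter, old, new) != old);

	return new;
}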
diff --git a/arch/loongarch/include/asm/barrier.h b/arch/loongarch/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..b6517ee
--- /dev/null
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#define __sync()       __asm__ __volatile__("dbar 0" : : : "memory")
+
+#define fast_wmb()     __sync()
+#define fast_rmb()     __sync()
+#define fast_mb()      __sync()
+#define fast_iob()     __sync()
+#define wbflush()      __sync()
+
+#define wmb()          fast_wmb()
+#define rmb()          fast_rmb()
+#define mb()           fast_mb()
+#define iob()          fast_iob()
+
+#define __smp_mb()     __asm__ __volatile__("dbar 0" : : : "memory")
+#define __smp_rmb()    __asm__ __volatile__("dbar 0" : : : "memory")
+#define __smp_wmb()    __asm__ __volatile__("dbar 0" : : : "memory")
+
+#ifdef CONFIG_SMP
+#define __WEAK_LLSC_MB         "       dbar 0  \n"
+#else
+#define __WEAK_LLSC_MB         "               \n"
+#endif
+
+#define __smp_mb__before_atomic()      barrier()
+#define __smp_mb__after_atomic()       barrier()
+
+/**
+ * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ *
+ * Returns:
+ *     0 - (@index < @size)
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+                                                   unsigned long size)
+{
+       unsigned long mask;
+
+       __asm__ __volatile__(
+               "sltu   %0, %1, %2\n\t"
+#if (__SIZEOF_LONG__ == 4)
+               "sub.w  %0, $r0, %0\n\t"
+#elif (__SIZEOF_LONG__ == 8)
+               "sub.d  %0, $r0, %0\n\t"
+#endif
+               : "=r" (mask)
+               : "r" (index), "r" (size)
+               :);
+
+       return mask;
+}
+
+#define __smp_load_acquire(p)                                                  \
+({                                                                             \
+       union { typeof(*p) __val; char __c[1]; } __u;                           \
+       unsigned long __tmp = 0;                                                        \
+       compiletime_assert_atomic_type(*p);                                     \
+       switch (sizeof(*p)) {                                                   \
+       case 1:                                                                 \
+               *(__u8 *)__u.__c = *(volatile __u8 *)p;                         \
+               __smp_mb();                                                     \
+               break;                                                          \
+       case 2:                                                                 \
+               *(__u16 *)__u.__c = *(volatile __u16 *)p;                       \
+               __smp_mb();                                                     \
+               break;                                                          \
+       case 4:                                                                 \
+               __asm__ __volatile__(                                           \
+               "amor_db.w %[val], %[tmp], %[mem]       \n"                             \
+               : [val] "=&r" (*(__u32 *)__u.__c)                               \
+               : [mem] "ZB" (*(u32 *) p), [tmp] "r" (__tmp)                    \
+               : "memory");                                                    \
+               break;                                                          \
+       case 8:                                                                 \
+               __asm__ __volatile__(                                           \
+               "amor_db.d %[val], %[tmp], %[mem]       \n"                             \
+               : [val] "=&r" (*(__u64 *)__u.__c)                               \
+               : [mem] "ZB" (*(u64 *) p), [tmp] "r" (__tmp)                    \
+               : "memory");                                                    \
+               break;                                                          \
+       }                                                                       \
+       (typeof(*p))__u.__val;                                                          \
+})
+
+#define __smp_store_release(p, v)                                              \
+do {                                                                           \
+       union { typeof(*p) __val; char __c[1]; } __u =                          \
+               { .__val = (__force typeof(*p)) (v) };                          \
+       unsigned long __tmp;                                                    \
+       compiletime_assert_atomic_type(*p);                                     \
+       switch (sizeof(*p)) {                                                   \
+       case 1:                                                                 \
+               __smp_mb();                                                     \
+               *(volatile __u8 *)p = *(__u8 *)__u.__c;                         \
+               break;                                                          \
+       case 2:                                                                 \
+               __smp_mb();                                                     \
+               *(volatile __u16 *)p = *(__u16 *)__u.__c;                       \
+               break;                                                          \
+       case 4:                                                                 \
+               __asm__ __volatile__(                                           \
+               "amswap_db.w %[tmp], %[val], %[mem]     \n"                     \
+               : [mem] "+ZB" (*(u32 *)p), [tmp] "=&r" (__tmp)                  \
+               : [val] "r" (*(__u32 *)__u.__c)                                 \
+               : );                                                            \
+               break;                                                          \
+       case 8:                                                                 \
+               __asm__ __volatile__(                                           \
+               "amswap_db.d %[tmp], %[val], %[mem]     \n"                     \
+               : [mem] "+ZB" (*(u64 *)p), [tmp] "=&r" (__tmp)                  \
+               : [val] "r" (*(__u64 *)__u.__c)                                 \
+               : );                                                            \
+               break;                                                          \
+       }                                                                       \
+} while (0)
+
+#define __smp_store_mb(p, v)                                                   \
+do {                                                                           \
+       union { typeof(p) __val; char __c[1]; } __u =                           \
+               { .__val = (__force typeof(p)) (v) };                           \
+       unsigned long __tmp;                                                    \
+       switch (sizeof(p)) {                                                    \
+       case 1:                                                                 \
+               *(volatile __u8 *)&p = *(__u8 *)__u.__c;                        \
+               __smp_mb();                                                     \
+               break;                                                          \
+       case 2:                                                                 \
+               *(volatile __u16 *)&p = *(__u16 *)__u.__c;                      \
+               __smp_mb();                                                     \
+               break;                                                          \
+       case 4:                                                                 \
+               __asm__ __volatile__(                                           \
+               "amswap_db.w %[tmp], %[val], %[mem]     \n"                     \
+               : [mem] "+ZB" (*(u32 *)&p), [tmp] "=&r" (__tmp)                 \
+               : [val] "r" (*(__u32 *)__u.__c)                                 \
+               : );                                                            \
+               break;                                                          \
+       case 8:                                                                 \
+               __asm__ __volatile__(                                           \
+               "amswap_db.d %[tmp], %[val], %[mem]     \n"                     \
+               : [mem] "+ZB" (*(u64 *)&p), [tmp] "=&r" (__tmp)                 \
+               : [val] "r" (*(__u64 *)__u.__c)                                 \
+               : );                                                            \
+               break;                                                          \
+       }                                                                       \
+} while (0)
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASM_BARRIER_H */
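
__smp_store_release() and __smp_load_acquire() above get their ordering from the amswap_db/amor_db instructions' built-in barrier (or an explicit dbar for sub-word sizes) rather than a separate fence. A small hypothetical producer/consumer showing how the generic smp_store_release()/smp_load_acquire() wrappers built on top of these are normally paired:

static int payload;
static int ready;

/* Producer: all stores before the release are visible once 'ready' reads 1. */
static void publish(int val)
{
	payload = val;
	smp_store_release(&ready, 1);
}

/* Consumer: the acquire load orders the flag check before the payload read. */
static int consume(int *out)
{
	if (!smp_load_acquire(&ready))
		return 0;
	*out = payload;
	return 1;
}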
diff --git a/arch/loongarch/include/asm/bitops.h b/arch/loongarch/include/asm/bitops.h
new file mode 100644 (file)
index 0000000..69e00f8
--- /dev/null
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_BITOPS_H
+#define _ASM_BITOPS_H
+
+#include <linux/compiler.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm/barrier.h>
+
+#include <asm-generic/bitops/builtin-ffs.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-__fls.h>
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls64.h>
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* _ASM_BITOPS_H */
diff --git a/arch/loongarch/include/asm/bitrev.h b/arch/loongarch/include/asm/bitrev.h
new file mode 100644 (file)
index 0000000..46f275b
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __LOONGARCH_ASM_BITREV_H__
+#define __LOONGARCH_ASM_BITREV_H__
+
+#include <linux/swab.h>
+
+static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
+{
+       u32 ret;
+
+       asm("bitrev.4b  %0, %1" : "=r"(ret) : "r"(__swab32(x)));
+       return ret;
+}
+
+static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
+{
+       u16 ret;
+
+       asm("bitrev.4b  %0, %1" : "=r"(ret) : "r"(__swab16(x)));
+       return ret;
+}
+
+static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
+{
+       u8 ret;
+
+       asm("bitrev.4b  %0, %1" : "=r"(ret) : "r"(x));
+       return ret;
+}
+
+#endif /* __LOONGARCH_ASM_BITREV_H__ */
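
bitrev.4b reverses the bit order within each byte, so byte-swapping first (__swab32()) and then reversing each byte yields a full 32-bit bit reversal. A portable C model of what __arch_bitrev32() computes, for illustration only:

#include <linux/types.h>

/* Reverse all 32 bits: bit i of x ends up at bit (31 - i) of the result. */
static u32 model_bitrev32(u32 x)
{
	u32 r = 0;
	int i;

	for (i = 0; i < 32; i++)
		r |= ((x >> i) & 1U) << (31 - i);
	return r;
}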
diff --git a/arch/loongarch/include/asm/bootinfo.h b/arch/loongarch/include/asm/bootinfo.h
new file mode 100644 (file)
index 0000000..9b8d49d
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_BOOTINFO_H
+#define _ASM_BOOTINFO_H
+
+#include <linux/types.h>
+#include <asm/setup.h>
+
+const char *get_system_type(void);
+
+extern void init_environ(void);
+extern void memblock_init(void);
+extern void platform_init(void);
+extern void plat_swiotlb_setup(void);
+extern int __init init_numa_memory(void);
+
+struct loongson_board_info {
+       int bios_size;
+       const char *bios_vendor;
+       const char *bios_version;
+       const char *bios_release_date;
+       const char *board_name;
+       const char *board_vendor;
+};
+
+struct loongson_system_configuration {
+       int nr_cpus;
+       int nr_nodes;
+       int nr_io_pics;
+       int boot_cpu_id;
+       int cores_per_node;
+       int cores_per_package;
+       const char *cpuname;
+};
+
+extern u64 efi_system_table;
+extern unsigned long fw_arg0, fw_arg1;
+extern struct loongson_board_info b_info;
+extern struct loongson_system_configuration loongson_sysconf;
+
+#endif /* _ASM_BOOTINFO_H */
diff --git a/arch/loongarch/include/asm/branch.h b/arch/loongarch/include/asm/branch.h
new file mode 100644 (file)
index 0000000..3f33c89
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_BRANCH_H
+#define _ASM_BRANCH_H
+
+#include <asm/ptrace.h>
+
+static inline unsigned long exception_era(struct pt_regs *regs)
+{
+       return regs->csr_era;
+}
+
+static inline int compute_return_era(struct pt_regs *regs)
+{
+       regs->csr_era += 4;
+       return 0;
+}
+
+#endif /* _ASM_BRANCH_H */
diff --git a/arch/loongarch/include/asm/bug.h b/arch/loongarch/include/asm/bug.h
new file mode 100644 (file)
index 0000000..bda4910
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BUG_H
+#define __ASM_BUG_H
+
+#include <linux/compiler.h>
+
+#ifdef CONFIG_BUG
+
+#include <asm/break.h>
+
+static inline void __noreturn BUG(void)
+{
+       __asm__ __volatile__("break %0" : : "i" (BRK_BUG));
+       unreachable();
+}
+
+#define HAVE_ARCH_BUG
+
+#endif
+
+#include <asm-generic/bug.h>
+
+#endif /* __ASM_BUG_H */
diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h
new file mode 100644 (file)
index 0000000..1b6d096
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_CACHE_H
+#define _ASM_CACHE_H
+
+#define L1_CACHE_SHIFT         CONFIG_L1_CACHE_SHIFT
+#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+
+#define __read_mostly __section(".data..read_mostly")
+
+#endif /* _ASM_CACHE_H */
diff --git a/arch/loongarch/include/asm/cacheflush.h b/arch/loongarch/include/asm/cacheflush.h
new file mode 100644 (file)
index 0000000..6709001
--- /dev/null
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/cpu-features.h>
+#include <asm/cacheops.h>
+
+extern void local_flush_icache_range(unsigned long start, unsigned long end);
+
+#define flush_icache_range     local_flush_icache_range
+#define flush_icache_user_range        local_flush_icache_range
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+
+#define flush_cache_all()                              do { } while (0)
+#define flush_cache_mm(mm)                             do { } while (0)
+#define flush_cache_dup_mm(mm)                         do { } while (0)
+#define flush_cache_range(vma, start, end)             do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)             do { } while (0)
+#define flush_cache_vmap(start, end)                   do { } while (0)
+#define flush_cache_vunmap(start, end)                 do { } while (0)
+#define flush_icache_page(vma, page)                   do { } while (0)
+#define flush_icache_user_page(vma, page, addr, len)   do { } while (0)
+#define flush_dcache_page(page)                                do { } while (0)
+#define flush_dcache_mmap_lock(mapping)                        do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)              do { } while (0)
+
+#define cache_op(op, addr)                                             \
+       __asm__ __volatile__(                                           \
+       "       cacop   %0, %1                                  \n"     \
+       :                                                               \
+       : "i" (op), "ZC" (*(unsigned char *)(addr)))
+
+static inline void flush_icache_line_indexed(unsigned long addr)
+{
+       cache_op(Index_Invalidate_I, addr);
+}
+
+static inline void flush_dcache_line_indexed(unsigned long addr)
+{
+       cache_op(Index_Writeback_Inv_D, addr);
+}
+
+static inline void flush_vcache_line_indexed(unsigned long addr)
+{
+       cache_op(Index_Writeback_Inv_V, addr);
+}
+
+static inline void flush_scache_line_indexed(unsigned long addr)
+{
+       cache_op(Index_Writeback_Inv_S, addr);
+}
+
+static inline void flush_icache_line(unsigned long addr)
+{
+       cache_op(Hit_Invalidate_I, addr);
+}
+
+static inline void flush_dcache_line(unsigned long addr)
+{
+       cache_op(Hit_Writeback_Inv_D, addr);
+}
+
+static inline void flush_vcache_line(unsigned long addr)
+{
+       cache_op(Hit_Writeback_Inv_V, addr);
+}
+
+static inline void flush_scache_line(unsigned long addr)
+{
+       cache_op(Hit_Writeback_Inv_S, addr);
+}
+
+#include <asm-generic/cacheflush.h>
+
+#endif /* _ASM_CACHEFLUSH_H */
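
The flush_*_line() helpers above act on a single cache line via one cacop instruction, so flushing a region means walking it in line-sized steps. A hypothetical sketch (the helper and its name are not part of this patch) that writes back and invalidates the D-cache lines covering a range, using the line size reported by cpu-features.h:

/* Hypothetical: write back and invalidate D-cache lines covering [start, end). */
static void flush_dcache_range_sketch(unsigned long start, unsigned long end)
{
	unsigned long lsize = cpu_dcache_line_size();
	unsigned long addr;

	for (addr = start & ~(lsize - 1); addr < end; addr += lsize)
		flush_dcache_line(addr);
}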
diff --git a/arch/loongarch/include/asm/cacheops.h b/arch/loongarch/include/asm/cacheops.h
new file mode 100644 (file)
index 0000000..dc280ef
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cache operations for the cache instruction.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_CACHEOPS_H
+#define __ASM_CACHEOPS_H
+
+/*
+ * Most cache ops are split into a 2 bit field identifying the cache, and a 3
+ * bit field identifying the cache operation.
+ */
+#define CacheOp_Cache                  0x03
+#define CacheOp_Op                     0x1c
+
+#define Cache_I                                0x00
+#define Cache_D                                0x01
+#define Cache_V                                0x02
+#define Cache_S                                0x03
+
+#define Index_Invalidate               0x08
+#define Index_Writeback_Inv            0x08
+#define Hit_Invalidate                 0x10
+#define Hit_Writeback_Inv              0x10
+#define CacheOp_User_Defined           0x18
+
+#define Index_Invalidate_I             (Cache_I | Index_Invalidate)
+#define Index_Writeback_Inv_D          (Cache_D | Index_Writeback_Inv)
+#define Index_Writeback_Inv_V          (Cache_V | Index_Writeback_Inv)
+#define Index_Writeback_Inv_S          (Cache_S | Index_Writeback_Inv)
+#define Hit_Invalidate_I               (Cache_I | Hit_Invalidate)
+#define Hit_Writeback_Inv_D            (Cache_D | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_V            (Cache_V | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_S            (Cache_S | Hit_Writeback_Inv)
+
+#endif /* __ASM_CACHEOPS_H */
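
Each constant ORs a cache selector (low two bits) with an operation code (bits 4:2); for example, Hit_Writeback_Inv_D = Cache_D | Hit_Writeback_Inv = 0x11. A small illustrative decoder, not part of the patch:

/* Illustrative: split a cacop constant back into its cache and op fields. */
static void decode_cacheop(unsigned int op)
{
	unsigned int cache = op & CacheOp_Cache;	/* 0=I, 1=D, 2=V, 3=S */
	unsigned int kind  = op & CacheOp_Op;		/* 0x08=index, 0x10=hit */

	pr_debug("cacop 0x%02x: cache %u, op 0x%02x\n", op, cache, kind);
}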
diff --git a/arch/loongarch/include/asm/clocksource.h b/arch/loongarch/include/asm/clocksource.h
new file mode 100644 (file)
index 0000000..58e64aa
--- /dev/null
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_CLOCKSOURCE_H
+#define __ASM_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif /* __ASM_CLOCKSOURCE_H */
diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h
new file mode 100644 (file)
index 0000000..75b3a44
--- /dev/null
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <asm/barrier.h>
+#include <linux/build_bug.h>
+
+#define __xchg_asm(amswap_db, m, val)          \
+({                                             \
+               __typeof(val) __ret;            \
+                                               \
+               __asm__ __volatile__ (          \
+               " "amswap_db" %1, %z2, %0 \n"   \
+               : "+ZB" (*m), "=&r" (__ret)     \
+               : "Jr" (val)                    \
+               : "memory");                    \
+                                               \
+               __ret;                          \
+})
+
+static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
+                                  int size)
+{
+       switch (size) {
+       case 4:
+               return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
+
+       case 8:
+               return __xchg_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x);
+
+       default:
+               BUILD_BUG();
+       }
+
+       return 0;
+}
+
+#define arch_xchg(ptr, x)                                              \
+({                                                                     \
+       __typeof__(*(ptr)) __res;                                       \
+                                                                       \
+       __res = (__typeof__(*(ptr)))                                    \
+               __xchg((ptr), (unsigned long)(x), sizeof(*(ptr)));      \
+                                                                       \
+       __res;                                                          \
+})
+
+#define __cmpxchg_asm(ld, st, m, old, new)                             \
+({                                                                     \
+       __typeof(old) __ret;                                            \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       "1:     " ld "  %0, %2          # __cmpxchg_asm \n"             \
+       "       bne     %0, %z3, 2f                     \n"             \
+       "       or      $t0, %z4, $zero                 \n"             \
+       "       " st "  $t0, %1                         \n"             \
+       "       beq     $zero, $t0, 1b                  \n"             \
+       "2:                                             \n"             \
+       __WEAK_LLSC_MB                                                  \
+       : "=&r" (__ret), "=ZB"(*m)                                      \
+       : "ZB"(*m), "Jr" (old), "Jr" (new)                              \
+       : "t0", "memory");                                              \
+                                                                       \
+       __ret;                                                          \
+})
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+                                     unsigned long new, unsigned int size)
+{
+       switch (size) {
+       case 4:
+               return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr,
+                                    (u32)old, new);
+
+       case 8:
+               return __cmpxchg_asm("ll.d", "sc.d", (volatile u64 *)ptr,
+                                    (u64)old, new);
+
+       default:
+               BUILD_BUG();
+       }
+
+       return 0;
+}
+
+#define arch_cmpxchg_local(ptr, old, new)                              \
+       ((__typeof__(*(ptr)))                                           \
+               __cmpxchg((ptr),                                        \
+                         (unsigned long)(__typeof__(*(ptr)))(old),     \
+                         (unsigned long)(__typeof__(*(ptr)))(new),     \
+                         sizeof(*(ptr))))
+
+#define arch_cmpxchg(ptr, old, new)                                    \
+({                                                                     \
+       __typeof__(*(ptr)) __res;                                       \
+                                                                       \
+       __res = arch_cmpxchg_local((ptr), (old), (new));                \
+                                                                       \
+       __res;                                                          \
+})
+
+#ifdef CONFIG_64BIT
+#define arch_cmpxchg64_local(ptr, o, n)                                        \
+  ({                                                                   \
+       BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
+       arch_cmpxchg_local((ptr), (o), (n));                            \
+  })
+
+#define arch_cmpxchg64(ptr, o, n)                                      \
+  ({                                                                   \
+       BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
+       arch_cmpxchg((ptr), (o), (n));                                  \
+  })
+#else
+#include <asm-generic/cmpxchg-local.h>
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
+#endif
+
+#endif /* __ASM_CMPXCHG_H */
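
arch_cmpxchg()/arch_xchg() back the generic cmpxchg()/xchg() macros from linux/atomic.h, which are typically used in a read-modify-retry loop. A hypothetical example (the helper and its name are not part of this patch) that bumps a 32-bit count only if it has not already dropped to zero:

/* Hypothetical: take a reference unless the count has already hit zero. */
static bool get_ref_unless_zero(unsigned int *count)
{
	unsigned int old, prev;

	for (old = READ_ONCE(*count); old; old = prev) {
		prev = cmpxchg(count, old, old + 1);
		if (prev == old)
			return true;
	}
	return false;
}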
diff --git a/arch/loongarch/include/asm/compiler.h b/arch/loongarch/include/asm/compiler.h
new file mode 100644 (file)
index 0000000..657cebe
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_COMPILER_H
+#define _ASM_COMPILER_H
+
+#define GCC_OFF_SMALL_ASM() "ZC"
+
+#define LOONGARCH_ISA_LEVEL "loongarch"
+#define LOONGARCH_ISA_ARCH_LEVEL "arch=loongarch"
+#define LOONGARCH_ISA_LEVEL_RAW loongarch
+#define LOONGARCH_ISA_ARCH_LEVEL_RAW LOONGARCH_ISA_LEVEL_RAW
+
+#endif /* _ASM_COMPILER_H */
diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h
new file mode 100644 (file)
index 0000000..a8d87c4
--- /dev/null
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004  Maciej W. Rozycki
+ */
+#ifndef __ASM_CPU_FEATURES_H
+#define __ASM_CPU_FEATURES_H
+
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+
+#define cpu_opt(opt)                   (cpu_data[0].options & (opt))
+#define cpu_has(feat)                  (cpu_data[0].options & BIT_ULL(feat))
+
+#define cpu_has_loongarch              (cpu_has_loongarch32 | cpu_has_loongarch64)
+#define cpu_has_loongarch32            (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_32BIT)
+#define cpu_has_loongarch64            (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
+
+#define cpu_icache_line_size()         cpu_data[0].icache.linesz
+#define cpu_dcache_line_size()         cpu_data[0].dcache.linesz
+#define cpu_vcache_line_size()         cpu_data[0].vcache.linesz
+#define cpu_scache_line_size()         cpu_data[0].scache.linesz
+
+#ifdef CONFIG_32BIT
+# define cpu_has_64bits                        (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
+# define cpu_vabits                    31
+# define cpu_pabits                    31
+#endif
+
+#ifdef CONFIG_64BIT
+# define cpu_has_64bits                        1
+# define cpu_vabits                    cpu_data[0].vabits
+# define cpu_pabits                    cpu_data[0].pabits
+# define __NEED_ADDRBITS_PROBE
+#endif
+
+/*
+ * SMP assumption: Options of CPU 0 are a superset of those of all other processors.
+ * This is true for all known LoongArch systems.
+ */
+#define cpu_has_cpucfg         cpu_opt(LOONGARCH_CPU_CPUCFG)
+#define cpu_has_lam            cpu_opt(LOONGARCH_CPU_LAM)
+#define cpu_has_ual            cpu_opt(LOONGARCH_CPU_UAL)
+#define cpu_has_fpu            cpu_opt(LOONGARCH_CPU_FPU)
+#define cpu_has_lsx            cpu_opt(LOONGARCH_CPU_LSX)
+#define cpu_has_lasx           cpu_opt(LOONGARCH_CPU_LASX)
+#define cpu_has_complex                cpu_opt(LOONGARCH_CPU_COMPLEX)
+#define cpu_has_crypto         cpu_opt(LOONGARCH_CPU_CRYPTO)
+#define cpu_has_lvz            cpu_opt(LOONGARCH_CPU_LVZ)
+#define cpu_has_lbt_x86                cpu_opt(LOONGARCH_CPU_LBT_X86)
+#define cpu_has_lbt_arm                cpu_opt(LOONGARCH_CPU_LBT_ARM)
+#define cpu_has_lbt_mips       cpu_opt(LOONGARCH_CPU_LBT_MIPS)
+#define cpu_has_lbt            (cpu_has_lbt_x86|cpu_has_lbt_arm|cpu_has_lbt_mips)
+#define cpu_has_csr            cpu_opt(LOONGARCH_CPU_CSR)
+#define cpu_has_tlb            cpu_opt(LOONGARCH_CPU_TLB)
+#define cpu_has_watch          cpu_opt(LOONGARCH_CPU_WATCH)
+#define cpu_has_vint           cpu_opt(LOONGARCH_CPU_VINT)
+#define cpu_has_csripi         cpu_opt(LOONGARCH_CPU_CSRIPI)
+#define cpu_has_extioi         cpu_opt(LOONGARCH_CPU_EXTIOI)
+#define cpu_has_prefetch       cpu_opt(LOONGARCH_CPU_PREFETCH)
+#define cpu_has_pmp            cpu_opt(LOONGARCH_CPU_PMP)
+#define cpu_has_perf           cpu_opt(LOONGARCH_CPU_PMP)
+#define cpu_has_scalefreq      cpu_opt(LOONGARCH_CPU_SCALEFREQ)
+#define cpu_has_flatmode       cpu_opt(LOONGARCH_CPU_FLATMODE)
+#define cpu_has_eiodecode      cpu_opt(LOONGARCH_CPU_EIODECODE)
+#define cpu_has_guestid                cpu_opt(LOONGARCH_CPU_GUESTID)
+#define cpu_has_hypervisor     cpu_opt(LOONGARCH_CPU_HYPERVISOR)
+
+#endif /* __ASM_CPU_FEATURES_H */
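
A small illustration, not part of the patch, of how the cpu_has_* accessors above are consumed; the reporting function is hypothetical.

#include <linux/printk.h>
#include <asm/cpu-features.h>

static void report_simd(void)
{
	/* Each cpu_has_* macro tests an option bit probed on CPU 0. */
	if (cpu_has_lasx)
		pr_info("LASX (256-bit SIMD) supported\n");
	else if (cpu_has_lsx)
		pr_info("LSX (128-bit SIMD) supported\n");
	else
		pr_info("no SIMD extension detected\n");
}
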
diff --git a/arch/loongarch/include/asm/cpu-info.h b/arch/loongarch/include/asm/cpu-info.h
new file mode 100644 (file)
index 0000000..b6c4f96
--- /dev/null
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_CPU_INFO_H
+#define __ASM_CPU_INFO_H
+
+#include <linux/cache.h>
+#include <linux/types.h>
+
+#include <asm/loongarch.h>
+
+/*
+ * Descriptor for a cache
+ */
+struct cache_desc {
+       unsigned int waysize;   /* Bytes per way */
+       unsigned short sets;    /* Number of lines per set */
+       unsigned char ways;     /* Number of ways */
+       unsigned char linesz;   /* Size of line in bytes */
+       unsigned char waybit;   /* Bits to select in a cache set */
+       unsigned char flags;    /* Flags describing cache properties */
+};
+
+struct cpuinfo_loongarch {
+       u64                     asid_cache;
+       unsigned long           asid_mask;
+
+       /*
+        * Capability and feature descriptor structure for LoongArch CPU
+        */
+       unsigned long long      options;
+       unsigned int            processor_id;
+       unsigned int            fpu_vers;
+       unsigned int            fpu_csr0;
+       unsigned int            fpu_mask;
+       unsigned int            cputype;
+       int                     isa_level;
+       int                     tlbsize;
+       int                     tlbsizemtlb;
+       int                     tlbsizestlbsets;
+       int                     tlbsizestlbways;
+       struct cache_desc       icache; /* Primary I-cache */
+       struct cache_desc       dcache; /* Primary D or combined I/D cache */
+       struct cache_desc       vcache; /* Victim cache, between pcache and scache */
+       struct cache_desc       scache; /* Secondary cache */
+       struct cache_desc       tcache; /* Tertiary/split secondary cache */
+       int                     core;   /* physical core number in package */
+       int                     package;/* physical package number */
+       int                     vabits; /* Virtual Address size in bits */
+       int                     pabits; /* Physical Address size in bits */
+       unsigned int            ksave_mask; /* Usable KSave mask. */
+       unsigned int            watch_dreg_count;   /* Number of data breakpoints */
+       unsigned int            watch_ireg_count;   /* Number of instruction breakpoints */
+       unsigned int            watch_reg_use_cnt; /* min(NUM_WATCH_REGS, watch_dreg_count + watch_ireg_count), Usable by ptrace */
+} __aligned(SMP_CACHE_BYTES);
+
+extern struct cpuinfo_loongarch cpu_data[];
+#define boot_cpu_data cpu_data[0]
+#define current_cpu_data cpu_data[smp_processor_id()]
+#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
+
+extern void cpu_probe(void);
+
+extern const char *__cpu_family[];
+extern const char *__cpu_full_name[];
+#define cpu_family_string()    __cpu_family[raw_smp_processor_id()]
+#define cpu_full_name_string() __cpu_full_name[raw_smp_processor_id()]
+
+struct seq_file;
+struct notifier_block;
+
+extern int register_proc_cpuinfo_notifier(struct notifier_block *nb);
+extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v);
+
+#define proc_cpuinfo_notifier(fn, pri)                                 \
+({                                                                     \
+       static struct notifier_block fn##_nb = {                        \
+               .notifier_call = fn,                                    \
+               .priority = pri                                         \
+       };                                                              \
+                                                                       \
+       register_proc_cpuinfo_notifier(&fn##_nb);                       \
+})
+
+struct proc_cpuinfo_notifier_args {
+       struct seq_file *m;
+       unsigned long n;
+};
+
+static inline bool cpus_are_siblings(int cpua, int cpub)
+{
+       struct cpuinfo_loongarch *infoa = &cpu_data[cpua];
+       struct cpuinfo_loongarch *infob = &cpu_data[cpub];
+
+       if (infoa->package != infob->package)
+               return false;
+
+       if (infoa->core != infob->core)
+               return false;
+
+       return true;
+}
+
+static inline unsigned long cpu_asid_mask(struct cpuinfo_loongarch *cpuinfo)
+{
+       return cpuinfo->asid_mask;
+}
+
+static inline void set_cpu_asid_mask(struct cpuinfo_loongarch *cpuinfo,
+                                    unsigned long asid_mask)
+{
+       cpuinfo->asid_mask = asid_mask;
+}
+
+#endif /* __ASM_CPU_INFO_H */
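
An illustrative sketch, not part of the patch, of the proc_cpuinfo_notifier() hook above; the callback name and the extra field printed are hypothetical.

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <asm/cpu-info.h>

static int show_package(struct notifier_block *nb, unsigned long action, void *data)
{
	struct proc_cpuinfo_notifier_args *args = data;

	/* args->m is the /proc/cpuinfo seq_file; args->n is presumably the
	 * index of the CPU being printed. */
	seq_printf(args->m, "package\t\t\t: %d\n", cpu_data[args->n].package);

	return NOTIFY_OK;
}

static int __init package_info_init(void)
{
	/* Expands to a static notifier_block and registers it. */
	return proc_cpuinfo_notifier(show_package, 0);
}
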
diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h
new file mode 100644 (file)
index 0000000..754f285
--- /dev/null
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cpu.h: Values of the PRID register used to match up
+ *       various LoongArch CPU types.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_CPU_H
+#define _ASM_CPU_H
+
+/*
+ * As described in LoongArch specs from Loongson Technology, the PRID register
+ * (CPUCFG.00) has the following layout:
+ *
+ * +---------------+----------------+------------+--------------------+
+ * | Reserved      | Company ID     | Series ID  |  Product ID        |
+ * +---------------+----------------+------------+--------------------+
+ *  31          24 23            16 15        12 11                 0
+ */
+
+/*
+ * Assigned Company values for bits 23:16 of the PRID register.
+ */
+
+#define PRID_COMP_MASK         0xff0000
+
+#define PRID_COMP_LOONGSON     0x140000
+
+/*
+ * Assigned Series ID values for bits 15:12 of the PRID register. In order
+ * to detect a certain CPU type exactly, additional registers may eventually
+ * need to be examined.
+ */
+
+#define PRID_SERIES_MASK       0xf000
+
+#define PRID_SERIES_LA132      0x8000  /* Loongson 32bit */
+#define PRID_SERIES_LA264      0xa000  /* Loongson 64bit, 2-issue */
+#define PRID_SERIES_LA364      0xb000  /* Loongson 64bit, 3-issue */
+#define PRID_SERIES_LA464      0xc000  /* Loongson 64bit, 4-issue */
+#define PRID_SERIES_LA664      0xd000  /* Loongson 64bit, 6-issue */
+
+/*
+ * Particular Product ID values for bits 11:0 of the PRID register.
+ */
+
+#define PRID_PRODUCT_MASK      0x0fff
+
+#if !defined(__ASSEMBLY__)
+
+enum cpu_type_enum {
+       CPU_UNKNOWN,
+       CPU_LOONGSON32,
+       CPU_LOONGSON64,
+       CPU_LAST
+};
+
+#endif /* !__ASSEMBLY */
+
+/*
+ * ISA Level encodings
+ *
+ */
+
+#define LOONGARCH_CPU_ISA_LA32R 0x00000001
+#define LOONGARCH_CPU_ISA_LA32S 0x00000002
+#define LOONGARCH_CPU_ISA_LA64  0x00000004
+
+#define LOONGARCH_CPU_ISA_32BIT (LOONGARCH_CPU_ISA_LA32R | LOONGARCH_CPU_ISA_LA32S)
+#define LOONGARCH_CPU_ISA_64BIT LOONGARCH_CPU_ISA_LA64
+
+/*
+ * CPU Option encodings
+ */
+#define CPU_FEATURE_CPUCFG             0       /* CPU has CPUCFG */
+#define CPU_FEATURE_LAM                        1       /* CPU has Atomic instructions */
+#define CPU_FEATURE_UAL                        2       /* CPU supports unaligned access */
+#define CPU_FEATURE_FPU                        3       /* CPU has FPU */
+#define CPU_FEATURE_LSX                        4       /* CPU has LSX (128-bit SIMD) */
+#define CPU_FEATURE_LASX               5       /* CPU has LASX (256-bit SIMD) */
+#define CPU_FEATURE_COMPLEX            6       /* CPU has Complex instructions */
+#define CPU_FEATURE_CRYPTO             7       /* CPU has Crypto instructions */
+#define CPU_FEATURE_LVZ                        8       /* CPU has Virtualization extension */
+#define CPU_FEATURE_LBT_X86            9       /* CPU has X86 Binary Translation */
+#define CPU_FEATURE_LBT_ARM            10      /* CPU has ARM Binary Translation */
+#define CPU_FEATURE_LBT_MIPS           11      /* CPU has MIPS Binary Translation */
+#define CPU_FEATURE_TLB                        12      /* CPU has TLB */
+#define CPU_FEATURE_CSR                        13      /* CPU has CSR */
+#define CPU_FEATURE_WATCH              14      /* CPU has watchpoint registers */
+#define CPU_FEATURE_VINT               15      /* CPU has vectored interrupts */
+#define CPU_FEATURE_CSRIPI             16      /* CPU has CSR-IPI */
+#define CPU_FEATURE_EXTIOI             17      /* CPU has EXT-IOI */
+#define CPU_FEATURE_PREFETCH           18      /* CPU has prefetch instructions */
+#define CPU_FEATURE_PMP                        19      /* CPU has performance counter */
+#define CPU_FEATURE_SCALEFREQ          20      /* CPU supports cpufreq scaling */
+#define CPU_FEATURE_FLATMODE           21      /* CPU has flat mode */
+#define CPU_FEATURE_EIODECODE          22      /* CPU has EXTIOI interrupt pin decode mode */
+#define CPU_FEATURE_GUESTID            23      /* CPU has GuestID feature */
+#define CPU_FEATURE_HYPERVISOR         24      /* CPU has hypervisor (running in VM) */
+
+#define LOONGARCH_CPU_CPUCFG           BIT_ULL(CPU_FEATURE_CPUCFG)
+#define LOONGARCH_CPU_LAM              BIT_ULL(CPU_FEATURE_LAM)
+#define LOONGARCH_CPU_UAL              BIT_ULL(CPU_FEATURE_UAL)
+#define LOONGARCH_CPU_FPU              BIT_ULL(CPU_FEATURE_FPU)
+#define LOONGARCH_CPU_LSX              BIT_ULL(CPU_FEATURE_LSX)
+#define LOONGARCH_CPU_LASX             BIT_ULL(CPU_FEATURE_LASX)
+#define LOONGARCH_CPU_COMPLEX          BIT_ULL(CPU_FEATURE_COMPLEX)
+#define LOONGARCH_CPU_CRYPTO           BIT_ULL(CPU_FEATURE_CRYPTO)
+#define LOONGARCH_CPU_LVZ              BIT_ULL(CPU_FEATURE_LVZ)
+#define LOONGARCH_CPU_LBT_X86          BIT_ULL(CPU_FEATURE_LBT_X86)
+#define LOONGARCH_CPU_LBT_ARM          BIT_ULL(CPU_FEATURE_LBT_ARM)
+#define LOONGARCH_CPU_LBT_MIPS         BIT_ULL(CPU_FEATURE_LBT_MIPS)
+#define LOONGARCH_CPU_TLB              BIT_ULL(CPU_FEATURE_TLB)
+#define LOONGARCH_CPU_CSR              BIT_ULL(CPU_FEATURE_CSR)
+#define LOONGARCH_CPU_WATCH            BIT_ULL(CPU_FEATURE_WATCH)
+#define LOONGARCH_CPU_VINT             BIT_ULL(CPU_FEATURE_VINT)
+#define LOONGARCH_CPU_CSRIPI           BIT_ULL(CPU_FEATURE_CSRIPI)
+#define LOONGARCH_CPU_EXTIOI           BIT_ULL(CPU_FEATURE_EXTIOI)
+#define LOONGARCH_CPU_PREFETCH         BIT_ULL(CPU_FEATURE_PREFETCH)
+#define LOONGARCH_CPU_PMP              BIT_ULL(CPU_FEATURE_PMP)
+#define LOONGARCH_CPU_SCALEFREQ                BIT_ULL(CPU_FEATURE_SCALEFREQ)
+#define LOONGARCH_CPU_FLATMODE         BIT_ULL(CPU_FEATURE_FLATMODE)
+#define LOONGARCH_CPU_EIODECODE                BIT_ULL(CPU_FEATURE_EIODECODE)
+#define LOONGARCH_CPU_GUESTID          BIT_ULL(CPU_FEATURE_GUESTID)
+#define LOONGARCH_CPU_HYPERVISOR       BIT_ULL(CPU_FEATURE_HYPERVISOR)
+
+#endif /* _ASM_CPU_H */
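
A small sketch, not part of the patch, showing how the PRID masks above decompose a raw PRID value; the helper name is hypothetical.

#include <asm/cpu.h>

static inline int prid_is_la464(unsigned int prid)
{
	/* Company ID in bits 23:16, Series ID in bits 15:12 (see layout above). */
	return (prid & PRID_COMP_MASK) == PRID_COMP_LOONGSON &&
	       (prid & PRID_SERIES_MASK) == PRID_SERIES_LA464;
}
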
diff --git a/arch/loongarch/include/asm/cpufeature.h b/arch/loongarch/include/asm/cpufeature.h
new file mode 100644 (file)
index 0000000..4da22a8
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CPU feature definitions for module loading, used by
+ * module_cpu_feature_match(); see uapi/asm/hwcap.h for LoongArch CPU features.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_CPUFEATURE_H
+#define __ASM_CPUFEATURE_H
+
+#include <uapi/asm/hwcap.h>
+#include <asm/elf.h>
+
+#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
+
+#define cpu_feature(x)         ilog2(HWCAP_ ## x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+       return elf_hwcap & (1UL << num);
+}
+
+#endif /* __ASM_CPUFEATURE_H */
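
A hedged example, not part of the patch: this header lets module_cpu_feature_match() key module autoloading off elf_hwcap. The module below and the HWCAP_LOONGARCH_LSX bit name are assumptions based on uapi/asm/hwcap.h.

#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/module.h>

static int __init lsx_demo_init(void)
{
	return 0;	/* only runs when the CPU advertises LSX in elf_hwcap */
}

/* cpu_feature(LOONGARCH_LSX) maps to ilog2(HWCAP_LOONGARCH_LSX). */
module_cpu_feature_match(LOONGARCH_LSX, lsx_demo_init);
MODULE_LICENSE("GPL");
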
diff --git a/arch/loongarch/include/asm/delay.h b/arch/loongarch/include/asm/delay.h
new file mode 100644 (file)
index 0000000..36d7751
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_DELAY_H
+#define _ASM_DELAY_H
+
+#include <linux/param.h>
+
+extern void __delay(unsigned long cycles);
+extern void __ndelay(unsigned long ns);
+extern void __udelay(unsigned long us);
+
+#define ndelay(ns) __ndelay(ns)
+#define udelay(us) __udelay(us)
+
+/* make sure "usecs *= ..." in udelay does not overflow. */
+#if HZ >= 1000
+#define MAX_UDELAY_MS  1
+#elif HZ <= 200
+#define MAX_UDELAY_MS  5
+#else
+#define MAX_UDELAY_MS  (1000 / HZ)
+#endif
+
+#endif /* _ASM_DELAY_H */
diff --git a/arch/loongarch/include/asm/dma-direct.h b/arch/loongarch/include/asm/dma-direct.h
new file mode 100644 (file)
index 0000000..75ccd80
--- /dev/null
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _LOONGARCH_DMA_DIRECT_H
+#define _LOONGARCH_DMA_DIRECT_H
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+
+#endif /* _LOONGARCH_DMA_DIRECT_H */
diff --git a/arch/loongarch/include/asm/dmi.h b/arch/loongarch/include/asm/dmi.h
new file mode 100644 (file)
index 0000000..6054934
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_DMI_H
+#define _ASM_DMI_H
+
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#define dmi_early_remap(x, l)  dmi_remap(x, l)
+#define dmi_early_unmap(x, l)  dmi_unmap(x)
+#define dmi_alloc(l)           memblock_alloc(l, PAGE_SIZE)
+
+static inline void *dmi_remap(u64 phys_addr, unsigned long size)
+{
+       return ((void *)TO_CACHE(phys_addr));
+}
+
+static inline void dmi_unmap(void *addr)
+{
+}
+
+#endif /* _ASM_DMI_H */
diff --git a/arch/loongarch/include/asm/efi.h b/arch/loongarch/include/asm/efi.h
new file mode 100644 (file)
index 0000000..0127d84
--- /dev/null
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_LOONGARCH_EFI_H
+#define _ASM_LOONGARCH_EFI_H
+
+#include <linux/efi.h>
+
+void __init efi_init(void);
+void __init efi_runtime_init(void);
+void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+
+#define ARCH_EFI_IRQ_FLAGS_MASK  0x00000004  /* Bit 2: CSR.CRMD.IE */
+
+#define arch_efi_call_virt_setup()               \
+({                                               \
+})
+
+#define arch_efi_call_virt(p, f, args...)        \
+({                                               \
+       efi_##f##_t * __f;                       \
+       __f = p->f;                              \
+       __f(args);                               \
+})
+
+#define arch_efi_call_virt_teardown()            \
+({                                               \
+})
+
+#define EFI_ALLOC_ALIGN                SZ_64K
+
+struct screen_info *alloc_screen_info(void);
+void free_screen_info(struct screen_info *si);
+
+static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
+{
+       return ULONG_MAX;
+}
+
+#endif /* _ASM_LOONGARCH_EFI_H */
diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
new file mode 100644 (file)
index 0000000..f3960b1
--- /dev/null
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_ELF_H
+#define _ASM_ELF_H
+
+#include <linux/auxvec.h>
+#include <linux/fs.h>
+#include <uapi/linux/elf.h>
+
+#include <asm/current.h>
+#include <asm/vdso.h>
+
+/* The ABI of a file. */
+#define EF_LOONGARCH_ABI_LP64_SOFT_FLOAT       0x1
+#define EF_LOONGARCH_ABI_LP64_SINGLE_FLOAT     0x2
+#define EF_LOONGARCH_ABI_LP64_DOUBLE_FLOAT     0x3
+
+#define EF_LOONGARCH_ABI_ILP32_SOFT_FLOAT      0x5
+#define EF_LOONGARCH_ABI_ILP32_SINGLE_FLOAT    0x6
+#define EF_LOONGARCH_ABI_ILP32_DOUBLE_FLOAT    0x7
+
+/* LoongArch relocation types used by the dynamic linker */
+#define R_LARCH_NONE                           0
+#define R_LARCH_32                             1
+#define R_LARCH_64                             2
+#define R_LARCH_RELATIVE                       3
+#define R_LARCH_COPY                           4
+#define R_LARCH_JUMP_SLOT                      5
+#define R_LARCH_TLS_DTPMOD32                   6
+#define R_LARCH_TLS_DTPMOD64                   7
+#define R_LARCH_TLS_DTPREL32                   8
+#define R_LARCH_TLS_DTPREL64                   9
+#define R_LARCH_TLS_TPREL32                    10
+#define R_LARCH_TLS_TPREL64                    11
+#define R_LARCH_IRELATIVE                      12
+#define R_LARCH_MARK_LA                                20
+#define R_LARCH_MARK_PCREL                     21
+#define R_LARCH_SOP_PUSH_PCREL                 22
+#define R_LARCH_SOP_PUSH_ABSOLUTE              23
+#define R_LARCH_SOP_PUSH_DUP                   24
+#define R_LARCH_SOP_PUSH_GPREL                 25
+#define R_LARCH_SOP_PUSH_TLS_TPREL             26
+#define R_LARCH_SOP_PUSH_TLS_GOT               27
+#define R_LARCH_SOP_PUSH_TLS_GD                        28
+#define R_LARCH_SOP_PUSH_PLT_PCREL             29
+#define R_LARCH_SOP_ASSERT                     30
+#define R_LARCH_SOP_NOT                                31
+#define R_LARCH_SOP_SUB                                32
+#define R_LARCH_SOP_SL                         33
+#define R_LARCH_SOP_SR                         34
+#define R_LARCH_SOP_ADD                                35
+#define R_LARCH_SOP_AND                                36
+#define R_LARCH_SOP_IF_ELSE                    37
+#define R_LARCH_SOP_POP_32_S_10_5              38
+#define R_LARCH_SOP_POP_32_U_10_12             39
+#define R_LARCH_SOP_POP_32_S_10_12             40
+#define R_LARCH_SOP_POP_32_S_10_16             41
+#define R_LARCH_SOP_POP_32_S_10_16_S2          42
+#define R_LARCH_SOP_POP_32_S_5_20              43
+#define R_LARCH_SOP_POP_32_S_0_5_10_16_S2      44
+#define R_LARCH_SOP_POP_32_S_0_10_10_16_S2     45
+#define R_LARCH_SOP_POP_32_U                   46
+#define R_LARCH_ADD8                           47
+#define R_LARCH_ADD16                          48
+#define R_LARCH_ADD24                          49
+#define R_LARCH_ADD32                          50
+#define R_LARCH_ADD64                          51
+#define R_LARCH_SUB8                           52
+#define R_LARCH_SUB16                          53
+#define R_LARCH_SUB24                          54
+#define R_LARCH_SUB32                          55
+#define R_LARCH_SUB64                          56
+#define R_LARCH_GNU_VTINHERIT                  57
+#define R_LARCH_GNU_VTENTRY                    58
+
+#ifndef ELF_ARCH
+
+/* ELF register definitions */
+
+/*
+ * General purpose have the following registers:
+ *     Register        Number
+ *     GPRs            32
+ *     ORIG_A0         1
+ *     ERA             1
+ *     BADVADDR        1
+ *     CRMD            1
+ *     PRMD            1
+ *     EUEN            1
+ *     ECFG            1
+ *     ESTAT           1
+ *     Reserved        5
+ */
+#define ELF_NGREG      45
+
+/*
+ * The floating point register set has the following registers:
+ *     Register        Number
+ *     FPR             32
+ *     FCC             1
+ *     FCSR            1
+ */
+#define ELF_NFPREG     34
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs);
+
+#ifdef CONFIG_32BIT
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch elf32_check_arch
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS      ELFCLASS32
+
+#define ELF_CORE_COPY_REGS(dest, regs) \
+       loongarch_dump_regs32((u32 *)&(dest), (regs));
+
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_64BIT
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch elf64_check_arch
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS      ELFCLASS64
+
+#define ELF_CORE_COPY_REGS(dest, regs) \
+       loongarch_dump_regs64((u64 *)&(dest), (regs));
+
+#endif /* CONFIG_64BIT */
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_DATA       ELFDATA2LSB
+#define ELF_ARCH       EM_LOONGARCH
+
+#endif /* !defined(ELF_ARCH) */
+
+#define loongarch_elf_check_machine(x) ((x)->e_machine == EM_LOONGARCH)
+
+#define vmcore_elf32_check_arch loongarch_elf_check_machine
+#define vmcore_elf64_check_arch loongarch_elf_check_machine
+
+/*
+ * Return non-zero if HDR identifies a 32bit ELF binary.
+ */
+#define elf32_check_arch(hdr)                                          \
+({                                                                     \
+       int __res = 1;                                                  \
+       struct elfhdr *__h = (hdr);                                     \
+                                                                       \
+       if (!loongarch_elf_check_machine(__h))                          \
+               __res = 0;                                              \
+       if (__h->e_ident[EI_CLASS] != ELFCLASS32)                       \
+               __res = 0;                                              \
+                                                                       \
+       __res;                                                          \
+})
+
+/*
+ * Return non-zero if HDR identifies a 64bit ELF binary.
+ */
+#define elf64_check_arch(hdr)                                          \
+({                                                                     \
+       int __res = 1;                                                  \
+       struct elfhdr *__h = (hdr);                                     \
+                                                                       \
+       if (!loongarch_elf_check_machine(__h))                          \
+               __res = 0;                                              \
+       if (__h->e_ident[EI_CLASS] != ELFCLASS64)                       \
+               __res = 0;                                              \
+                                                                       \
+       __res;                                                          \
+})
+
+#ifdef CONFIG_32BIT
+
+#define SET_PERSONALITY2(ex, state)                                    \
+do {                                                                   \
+       current->thread.vdso = &vdso_info;                              \
+                                                                       \
+       loongarch_set_personality_fcsr(state);                          \
+                                                                       \
+       if (personality(current->personality) != PER_LINUX)             \
+               set_personality(PER_LINUX);                             \
+} while (0)
+
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_64BIT
+
+#define SET_PERSONALITY2(ex, state)                                    \
+do {                                                                   \
+       unsigned int p;                                                 \
+                                                                       \
+       clear_thread_flag(TIF_32BIT_REGS);                              \
+       clear_thread_flag(TIF_32BIT_ADDR);                              \
+                                                                       \
+       current->thread.vdso = &vdso_info;                              \
+       loongarch_set_personality_fcsr(state);                          \
+                                                                       \
+       p = personality(current->personality);                          \
+       if (p != PER_LINUX32 && p != PER_LINUX)                         \
+               set_personality(PER_LINUX);                             \
+} while (0)
+
+#endif /* CONFIG_64BIT */
+
+#define CORE_DUMP_USE_REGSET
+#define ELF_EXEC_PAGESIZE      PAGE_SIZE
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports. This could be done in userspace,
+ * but it's not easy, and we've already done it here.
+ */
+
+#define ELF_HWCAP      (elf_hwcap)
+extern unsigned int elf_hwcap;
+#include <asm/hwcap.h>
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization.         This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+
+#define ELF_PLATFORM  __elf_platform
+extern const char *__elf_platform;
+
+#define ELF_PLAT_INIT(_r, load_addr)   do { \
+       _r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0;      \
+       _r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0;      \
+       _r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0;   \
+       _r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0;  \
+       _r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0;  \
+       _r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0;  \
+       _r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0;  \
+       _r->regs[29] = _r->regs[30] = _r->regs[31] = 0;                 \
+} while (0)
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+
+#define ELF_ET_DYN_BASE                (TASK_SIZE / 3 * 2)
+
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+#define ARCH_DLINFO                                                    \
+do {                                                                   \
+       NEW_AUX_ENT(AT_SYSINFO_EHDR,                                    \
+                   (unsigned long)current->mm->context.vdso);          \
+} while (0)
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+                                      int uses_interp);
+
+struct arch_elf_state {
+       int fp_abi;
+       int interp_fp_abi;
+};
+
+#define LOONGARCH_ABI_FP_ANY   (0)
+
+#define INIT_ARCH_ELF_STATE {                  \
+       .fp_abi = LOONGARCH_ABI_FP_ANY,         \
+       .interp_fp_abi = LOONGARCH_ABI_FP_ANY,  \
+}
+
+#define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT)
+
+extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
+                           bool is_interp, struct arch_elf_state *state);
+
+extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
+                         struct arch_elf_state *state);
+
+extern void loongarch_set_personality_fcsr(struct arch_elf_state *state);
+
+#endif /* _ASM_ELF_H */
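
An illustrative check, not part of the patch, mirroring how the generic ELF loader consults elf_check_arch (elf64_check_arch on 64-bit kernels); the wrapper name is hypothetical.

#include <asm/elf.h>

static int header_is_loadable(struct elfhdr *hdr)
{
	/* Accepts EM_LOONGARCH headers of the matching ELF class only. */
	return elf_check_arch(hdr);
}
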
diff --git a/arch/loongarch/include/asm/entry-common.h b/arch/loongarch/include/asm/entry-common.h
new file mode 100644 (file)
index 0000000..0fe2a09
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_LOONGARCH_ENTRY_COMMON_H
+#define ARCH_LOONGARCH_ENTRY_COMMON_H
+
+#include <linux/sched.h>
+#include <linux/processor.h>
+
+static inline bool on_thread_stack(void)
+{
+       return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
+}
+
+#endif
diff --git a/arch/loongarch/include/asm/exec.h b/arch/loongarch/include/asm/exec.h
new file mode 100644 (file)
index 0000000..ba02208
--- /dev/null
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_EXEC_H
+#define _ASM_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* _ASM_EXEC_H */
diff --git a/arch/loongarch/include/asm/fb.h b/arch/loongarch/include/asm/fb.h
new file mode 100644 (file)
index 0000000..3116bde
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+                               unsigned long off)
+{
+       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+       return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/loongarch/include/asm/fixmap.h b/arch/loongarch/include/asm/fixmap.h
new file mode 100644 (file)
index 0000000..b3541df
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#define NR_FIX_BTMAPS 64
+
+#endif
diff --git a/arch/loongarch/include/asm/fpregdef.h b/arch/loongarch/include/asm/fpregdef.h
new file mode 100644 (file)
index 0000000..adb16e4
--- /dev/null
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for the FPU register names
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_FPREGDEF_H
+#define _ASM_FPREGDEF_H
+
+#define fa0    $f0     /* argument registers, fa0/fa1 reused as fv0/fv1 for return value */
+#define fa1    $f1
+#define fa2    $f2
+#define fa3    $f3
+#define fa4    $f4
+#define fa5    $f5
+#define fa6    $f6
+#define fa7    $f7
+#define ft0    $f8     /* caller saved */
+#define ft1    $f9
+#define ft2    $f10
+#define ft3    $f11
+#define ft4    $f12
+#define ft5    $f13
+#define ft6    $f14
+#define ft7    $f15
+#define ft8    $f16
+#define ft9    $f17
+#define ft10   $f18
+#define ft11   $f19
+#define ft12   $f20
+#define ft13   $f21
+#define ft14   $f22
+#define ft15   $f23
+#define fs0    $f24    /* callee saved */
+#define fs1    $f25
+#define fs2    $f26
+#define fs3    $f27
+#define fs4    $f28
+#define fs5    $f29
+#define fs6    $f30
+#define fs7    $f31
+
+/*
+ * Current binutils expects *GPRs* at FCSR position for the FCSR
+ * operation instructions, so define aliases for those used.
+ */
+#define fcsr0  $r0
+#define fcsr1  $r1
+#define fcsr2  $r2
+#define fcsr3  $r3
+#define vcsr16 $r16
+
+#endif /* _ASM_FPREGDEF_H */
diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h
new file mode 100644 (file)
index 0000000..358b254
--- /dev/null
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_FPU_H
+#define _ASM_FPU_H
+
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/ptrace.h>
+#include <linux/thread_info.h>
+#include <linux/bitops.h>
+
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/current.h>
+#include <asm/loongarch.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+
+struct sigcontext;
+
+extern void _init_fpu(unsigned int);
+extern void _save_fp(struct loongarch_fpu *);
+extern void _restore_fp(struct loongarch_fpu *);
+
+/*
+ * Mask the FCSR Cause bits according to the Enable bits, observing
+ * that Unimplemented is always enabled.
+ */
+static inline unsigned long mask_fcsr_x(unsigned long fcsr)
+{
+       return fcsr & ((fcsr & FPU_CSR_ALL_E) <<
+                       (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)));
+}
+
+static inline int is_fp_enabled(void)
+{
+       return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_FPEN) ?
+               1 : 0;
+}
+
+#define enable_fpu()           set_csr_euen(CSR_EUEN_FPEN)
+
+#define disable_fpu()          clear_csr_euen(CSR_EUEN_FPEN)
+
+#define clear_fpu_owner()      clear_thread_flag(TIF_USEDFPU)
+
+static inline int is_fpu_owner(void)
+{
+       return test_thread_flag(TIF_USEDFPU);
+}
+
+static inline void __own_fpu(void)
+{
+       enable_fpu();
+       set_thread_flag(TIF_USEDFPU);
+       KSTK_EUEN(current) |= CSR_EUEN_FPEN;
+}
+
+static inline void own_fpu_inatomic(int restore)
+{
+       if (cpu_has_fpu && !is_fpu_owner()) {
+               __own_fpu();
+               if (restore)
+                       _restore_fp(&current->thread.fpu);
+       }
+}
+
+static inline void own_fpu(int restore)
+{
+       preempt_disable();
+       own_fpu_inatomic(restore);
+       preempt_enable();
+}
+
+static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
+{
+       if (is_fpu_owner()) {
+               if (save)
+                       _save_fp(&tsk->thread.fpu);
+               disable_fpu();
+               clear_tsk_thread_flag(tsk, TIF_USEDFPU);
+       }
+       KSTK_EUEN(tsk) &= ~(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
+}
+
+static inline void lose_fpu(int save)
+{
+       preempt_disable();
+       lose_fpu_inatomic(save, current);
+       preempt_enable();
+}
+
+static inline void init_fpu(void)
+{
+       unsigned int fcsr = current->thread.fpu.fcsr;
+
+       __own_fpu();
+       _init_fpu(fcsr);
+       set_used_math();
+}
+
+static inline void save_fp(struct task_struct *tsk)
+{
+       if (cpu_has_fpu)
+               _save_fp(&tsk->thread.fpu);
+}
+
+static inline void restore_fp(struct task_struct *tsk)
+{
+       if (cpu_has_fpu)
+               _restore_fp(&tsk->thread.fpu);
+}
+
+static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
+{
+       if (tsk == current) {
+               preempt_disable();
+               if (is_fpu_owner())
+                       _save_fp(&current->thread.fpu);
+               preempt_enable();
+       }
+
+       return tsk->thread.fpu.fpr;
+}
+
+#endif /* _ASM_FPU_H */
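
A brief sketch, not part of the patch, of the ownership helpers above; the function name is hypothetical.

#include <linux/sched.h>
#include <asm/fpu.h>

static void drop_fp_before_signal(void)
{
	/* Saves live FPU state to current->thread.fpu (if owned), disables
	 * the FPU and clears TIF_USEDFPU, all with preemption disabled. */
	lose_fpu(1);
}
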
diff --git a/arch/loongarch/include/asm/futex.h b/arch/loongarch/include/asm/futex.h
new file mode 100644 (file)
index 0000000..9de8231
--- /dev/null
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/barrier.h>
+#include <asm/compiler.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)             \
+{                                                                      \
+       __asm__ __volatile__(                                           \
+       "1:     ll.w    %1, %4 # __futex_atomic_op\n"           \
+       "       " insn  "                               \n"     \
+       "2:     sc.w    $t0, %2                         \n"     \
+       "       beq     $t0, $zero, 1b                  \n"     \
+       "3:                                             \n"     \
+       "       .section .fixup,\"ax\"                  \n"     \
+       "4:     li.w    %0, %6                          \n"     \
+       "       b       3b                              \n"     \
+       "       .previous                               \n"     \
+       "       .section __ex_table,\"a\"               \n"     \
+       "       "__UA_ADDR "\t1b, 4b                    \n"     \
+       "       "__UA_ADDR "\t2b, 4b                    \n"     \
+       "       .previous                               \n"     \
+       : "=r" (ret), "=&r" (oldval),                           \
+         "=ZC" (*uaddr)                                        \
+       : "0" (0), "ZC" (*uaddr), "Jr" (oparg),                 \
+         "i" (-EFAULT)                                         \
+       : "memory", "t0");                                      \
+}
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+       int oldval = 0, ret = 0;
+
+       pagefault_disable();
+
+       switch (op) {
+       case FUTEX_OP_SET:
+               __futex_atomic_op("move $t0, %z5", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_ADD:
+               __futex_atomic_op("add.w $t0, %1, %z5", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_OR:
+               __futex_atomic_op("or   $t0, %1, %z5", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_ANDN:
+               __futex_atomic_op("and  $t0, %1, %z5", ret, oldval, uaddr, ~oparg);
+               break;
+       case FUTEX_OP_XOR:
+               __futex_atomic_op("xor  $t0, %1, %z5", ret, oldval, uaddr, oparg);
+               break;
+       default:
+               ret = -ENOSYS;
+       }
+
+       pagefault_enable();
+
+       if (!ret)
+               *oval = oldval;
+
+       return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval)
+{
+       int ret = 0;
+       u32 val = 0;
+
+       if (!access_ok(uaddr, sizeof(u32)))
+               return -EFAULT;
+
+       __asm__ __volatile__(
+       "# futex_atomic_cmpxchg_inatomic                        \n"
+       "1:     ll.w    %1, %3                                  \n"
+       "       bne     %1, %z4, 3f                             \n"
+       "       or      $t0, %z5, $zero                         \n"
+       "2:     sc.w    $t0, %2                                 \n"
+       "       beq     $zero, $t0, 1b                          \n"
+       "3:                                                     \n"
+       __WEAK_LLSC_MB
+       "       .section .fixup,\"ax\"                          \n"
+       "4:     li.d    %0, %6                                  \n"
+       "       b       3b                                      \n"
+       "       .previous                                       \n"
+       "       .section __ex_table,\"a\"                       \n"
+       "       "__UA_ADDR "\t1b, 4b                            \n"
+       "       "__UA_ADDR "\t2b, 4b                            \n"
+       "       .previous                                       \n"
+       : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+       : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+         "i" (-EFAULT)
+       : "memory", "t0");
+
+       *uval = val;
+
+       return ret;
+}
+
+#endif /* _ASM_FUTEX_H */
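
A usage sketch, not part of the patch, of the arch hook above as driven by the generic futex code; the wrapper and its argument are hypothetical.

#include <linux/futex.h>
#include <asm/futex.h>

static int futex_add_one(u32 __user *uaddr)
{
	int oldval;
	int ret = arch_futex_atomic_op_inuser(FUTEX_OP_ADD, 1, &oldval, uaddr);

	/* Returns the previous user value on success, -EFAULT/-ENOSYS on error. */
	return ret ? ret : oldval;
}
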
diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h
new file mode 100644 (file)
index 0000000..befe818
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_HARDIRQ_H
+#define _ASM_HARDIRQ_H
+
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+extern void ack_bad_irq(unsigned int irq);
+#define ack_bad_irq ack_bad_irq
+
+#define NR_IPI 2
+
+typedef struct {
+       unsigned int ipi_irqs[NR_IPI];
+       unsigned int __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
+
+#define __ARCH_IRQ_STAT
+
+#endif /* _ASM_HARDIRQ_H */
diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h
new file mode 100644 (file)
index 0000000..aa44b3f
--- /dev/null
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_HUGETLB_H
+#define __ASM_HUGETLB_H
+
+#include <asm/page.h>
+
+uint64_t pmd_to_entrylo(unsigned long pmd_val);
+
+#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
+static inline int prepare_hugepage_range(struct file *file,
+                                        unsigned long addr,
+                                        unsigned long len)
+{
+       unsigned long task_size = STACK_TOP;
+       struct hstate *h = hstate_file(file);
+
+       if (len & ~huge_page_mask(h))
+               return -EINVAL;
+       if (addr & ~huge_page_mask(h))
+               return -EINVAL;
+       if (len > task_size)
+               return -ENOMEM;
+       if (task_size - len < addr)
+               return -EINVAL;
+       return 0;
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                                           unsigned long addr, pte_t *ptep)
+{
+       pte_t clear;
+       pte_t pte = *ptep;
+
+       pte_val(clear) = (unsigned long)invalid_pte_table;
+       set_pte_at(mm, addr, ptep, clear);
+       return pte;
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                         unsigned long addr, pte_t *ptep)
+{
+       pte_t pte;
+
+       pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+       flush_tlb_page(vma, addr);
+       return pte;
+}
+
+#define __HAVE_ARCH_HUGE_PTE_NONE
+static inline int huge_pte_none(pte_t pte)
+{
+       unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL;
+       return !val || (val == (unsigned long)invalid_pte_table);
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr,
+                                            pte_t *ptep, pte_t pte,
+                                            int dirty)
+{
+       int changed = !pte_same(*ptep, pte);
+
+       if (changed) {
+               set_pte_at(vma->vm_mm, addr, ptep, pte);
+               /*
+                * There could be some standard sized pages in there,
+                * get them all.
+                */
+               flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
+       }
+       return changed;
+}
+
+#include <asm-generic/hugetlb.h>
+
+#endif /* __ASM_HUGETLB_H */
diff --git a/arch/loongarch/include/asm/hw_irq.h b/arch/loongarch/include/asm/hw_irq.h
new file mode 100644 (file)
index 0000000..af4f4e8
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_HW_IRQ_H
+#define __ASM_HW_IRQ_H
+
+#include <linux/atomic.h>
+
+extern atomic_t irq_err_count;
+
+/*
+ * interrupt-retrigger: NOP for now. This may not be appropriate for all
+ * machines; we'll see ...
+ */
+
+#endif /* __ASM_HW_IRQ_H */
diff --git a/arch/loongarch/include/asm/idle.h b/arch/loongarch/include/asm/idle.h
new file mode 100644 (file)
index 0000000..f7f2b7d
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_IDLE_H
+#define __ASM_IDLE_H
+
+#include <linux/linkage.h>
+
+extern asmlinkage void __arch_cpu_idle(void);
+
+#endif /* __ASM_IDLE_H  */
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
new file mode 100644 (file)
index 0000000..575d1bb
--- /dev/null
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_INST_H
+#define _ASM_INST_H
+
+#include <linux/types.h>
+#include <asm/asm.h>
+
+#define ADDR_IMMMASK_LU52ID    0xFFF0000000000000
+#define ADDR_IMMMASK_LU32ID    0x000FFFFF00000000
+#define ADDR_IMMMASK_ADDU16ID  0x00000000FFFF0000
+
+#define ADDR_IMMSHIFT_LU52ID   52
+#define ADDR_IMMSHIFT_LU32ID   32
+#define ADDR_IMMSHIFT_ADDU16ID 16
+
+#define ADDR_IMM(addr, INSN)   ((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN)
+
+enum reg1i20_op {
+       lu12iw_op       = 0x0a,
+       lu32id_op       = 0x0b,
+};
+
+enum reg2i12_op {
+       lu52id_op       = 0x0c,
+};
+
+enum reg2i16_op {
+       jirl_op         = 0x13,
+};
+
+struct reg0i26_format {
+       unsigned int immediate_h : 10;
+       unsigned int immediate_l : 16;
+       unsigned int opcode : 6;
+};
+
+struct reg1i20_format {
+       unsigned int rd : 5;
+       unsigned int immediate : 20;
+       unsigned int opcode : 7;
+};
+
+struct reg1i21_format {
+       unsigned int immediate_h  : 5;
+       unsigned int rj : 5;
+       unsigned int immediate_l : 16;
+       unsigned int opcode : 6;
+};
+
+struct reg2i12_format {
+       unsigned int rd : 5;
+       unsigned int rj : 5;
+       unsigned int immediate : 12;
+       unsigned int opcode : 10;
+};
+
+struct reg2i16_format {
+       unsigned int rd : 5;
+       unsigned int rj : 5;
+       unsigned int immediate : 16;
+       unsigned int opcode : 6;
+};
+
+union loongarch_instruction {
+       unsigned int word;
+       struct reg0i26_format reg0i26_format;
+       struct reg1i20_format reg1i20_format;
+       struct reg1i21_format reg1i21_format;
+       struct reg2i12_format reg2i12_format;
+       struct reg2i16_format reg2i16_format;
+};
+
+#define LOONGARCH_INSN_SIZE    sizeof(union loongarch_instruction)
+
+enum loongarch_gpr {
+       LOONGARCH_GPR_ZERO = 0,
+       LOONGARCH_GPR_RA = 1,
+       LOONGARCH_GPR_TP = 2,
+       LOONGARCH_GPR_SP = 3,
+       LOONGARCH_GPR_A0 = 4,   /* Reused as V0 for return value */
+       LOONGARCH_GPR_A1,       /* Reused as V1 for return value */
+       LOONGARCH_GPR_A2,
+       LOONGARCH_GPR_A3,
+       LOONGARCH_GPR_A4,
+       LOONGARCH_GPR_A5,
+       LOONGARCH_GPR_A6,
+       LOONGARCH_GPR_A7,
+       LOONGARCH_GPR_T0 = 12,
+       LOONGARCH_GPR_T1,
+       LOONGARCH_GPR_T2,
+       LOONGARCH_GPR_T3,
+       LOONGARCH_GPR_T4,
+       LOONGARCH_GPR_T5,
+       LOONGARCH_GPR_T6,
+       LOONGARCH_GPR_T7,
+       LOONGARCH_GPR_T8,
+       LOONGARCH_GPR_FP = 22,
+       LOONGARCH_GPR_S0 = 23,
+       LOONGARCH_GPR_S1,
+       LOONGARCH_GPR_S2,
+       LOONGARCH_GPR_S3,
+       LOONGARCH_GPR_S4,
+       LOONGARCH_GPR_S5,
+       LOONGARCH_GPR_S6,
+       LOONGARCH_GPR_S7,
+       LOONGARCH_GPR_S8,
+       LOONGARCH_GPR_MAX
+};
+
+u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm);
+u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
+u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest);
+
+#endif /* _ASM_INST_H */
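
An illustrative use, not part of the patch, of the ADDR_IMM() helper defined above; the function is hypothetical.

#include <asm/inst.h>

static void split_address(unsigned long addr)
{
	unsigned long imm52 = ADDR_IMM(addr, LU52ID);	/* bits 63..52 */
	unsigned long imm32 = ADDR_IMM(addr, LU32ID);	/* bits 51..32 */
	unsigned long imm16 = ADDR_IMM(addr, ADDU16ID);	/* bits 31..16 */

	/* These immediates feed larch_insn_gen_lu52id()/larch_insn_gen_lu32id()
	 * when materialising a 64-bit address at runtime. */
	(void)imm52; (void)imm32; (void)imm16;
}
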
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
new file mode 100644 (file)
index 0000000..8845997
--- /dev/null
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#define ARCH_HAS_IOREMAP_WC
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <asm/addrspace.h>
+#include <asm/bug.h>
+#include <asm/byteorder.h>
+#include <asm/cpu.h>
+#include <asm/page.h>
+#include <asm/pgtable-bits.h>
+#include <asm/string.h>
+
+/*
+ * On LoongArch, the I/O port mapping is as follows:
+ *
+ *              |         ....          |
+ *              |-----------------------|
+ *              | pci io ports(64K~32M) |
+ *              |-----------------------|
+ *              | isa io ports(0  ~16K) |
+ * PCI_IOBASE ->|-----------------------|
+ *              |         ....          |
+ */
+#define PCI_IOBASE     ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
+#define PCI_IOSIZE     SZ_32M
+#define ISA_IOSIZE     SZ_16K
+#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page)     ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
+extern void __init early_iounmap(void __iomem *addr, unsigned long size);
+
+#define early_memremap early_ioremap
+#define early_memunmap early_iounmap
+
+static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+                                        unsigned long prot_val)
+{
+       if (prot_val == _CACHE_CC)
+               return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
+       else
+               return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset);
+}
+
+/*
+ * ioremap -   map bus memory into CPU space
+ * @offset:    bus address of the memory
+ * @size:      size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ */
+#define ioremap(offset, size)                                  \
+       ioremap_prot((offset), (size), _CACHE_SUC)
+
+/*
+ * ioremap_wc - map bus memory into CPU space
+ * @offset:     bus address of the memory
+ * @size:       size of the resource to map
+ *
+ * ioremap_wc performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * but accelerated by means of the write-combining feature. It is
+ * specifically useful for PCIe prefetchable windows, which may vastly
+ * improve communication performance. If it is determined at boot that
+ * the CPU CCA doesn't support WUC, the method falls back to the
+ * _CACHE_SUC option (see the cpu_probe() method).
+ */
+#define ioremap_wc(offset, size)                               \
+       ioremap_prot((offset), (size), _CACHE_WUC)
+
+/*
+ * ioremap_cache -  map bus memory into CPU space
+ * @offset:        bus address of the memory
+ * @size:          size of the resource to map
+ *
+ * ioremap_cache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked cachable by
+ * the CPU. It also enables full write-combining, which is useful for some
+ * memory-like regions on I/O busses.
+ */
+#define ioremap_cache(offset, size)                            \
+       ioremap_prot((offset), (size), _CACHE_CC)
+
+static inline void iounmap(const volatile void __iomem *addr)
+{
+}
+
+#define mmiowb() asm volatile ("dbar 0" ::: "memory")
+
+/*
+ * String version of I/O memory access operations.
+ */
+extern void __memset_io(volatile void __iomem *dst, int c, size_t count);
+extern void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count);
+extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count);
+#define memset_io(c, v, l)     __memset_io((c), (v), (l))
+#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
+#define memcpy_toio(c, a, l)   __memcpy_toio((c), (a), (l))
+
+#include <asm-generic/io.h>
+
+#endif /* _ASM_IO_H */
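
A short example, not part of the patch, of the write-combining variant documented above; the driver context (pdev, bar) is hypothetical.

#include <linux/pci.h>
#include <asm/io.h>

static void __iomem *map_prefetchable_bar(struct pci_dev *pdev, int bar)
{
	/* _CACHE_WUC mapping: uncached but write-combining, suited to
	 * prefetchable PCIe windows as the comment above notes. */
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
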
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
new file mode 100644 (file)
index 0000000..ace3ea6
--- /dev/null
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+
+#include <linux/irqdomain.h>
+#include <linux/irqreturn.h>
+
+#define IRQ_STACK_SIZE                 THREAD_SIZE
+#define IRQ_STACK_START                        (IRQ_STACK_SIZE - 16)
+
+DECLARE_PER_CPU(unsigned long, irq_stack);
+
+/*
+ * The highest address on the IRQ stack contains a dummy frame which is
+ * structured as follows:
+ *
+ *   top ------------
+ *       | task sp  | <- irq_stack[cpu] + IRQ_STACK_START
+ *       ------------
+ *       |          | <- First frame of IRQ context
+ *       ------------
+ *
+ * task sp holds a copy of the task stack pointer where the struct pt_regs
+ * from exception entry can be found.
+ */
+
+static inline bool on_irq_stack(int cpu, unsigned long sp)
+{
+       unsigned long low = per_cpu(irq_stack, cpu);
+       unsigned long high = low + IRQ_STACK_SIZE;
+
+       return (low <= sp && sp <= high);
+}
+
+int get_ipi_irq(void);
+int get_pmc_irq(void);
+int get_timer_irq(void);
+void spurious_interrupt(void);
+
+#define NR_IRQS_LEGACY 16
+
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask, bool exclude_self);
+
+#define MAX_IO_PICS 2
+#define NR_IRQS        (64 + (256 * MAX_IO_PICS))
+
+#define CORES_PER_EIO_NODE     4
+
+#define LOONGSON_CPU_UART0_VEC         10 /* CPU UART0 */
+#define LOONGSON_CPU_THSENS_VEC                14 /* CPU Thsens */
+#define LOONGSON_CPU_HT0_VEC           16 /* CPU HT0 irq vector base number */
+#define LOONGSON_CPU_HT1_VEC           24 /* CPU HT1 irq vector base number */
+
+/* IRQ number definitions */
+#define LOONGSON_LPC_IRQ_BASE          0
+#define LOONGSON_LPC_LAST_IRQ          (LOONGSON_LPC_IRQ_BASE + 15)
+
+#define LOONGSON_CPU_IRQ_BASE          16
+#define LOONGSON_CPU_LAST_IRQ          (LOONGSON_CPU_IRQ_BASE + 14)
+
+#define LOONGSON_PCH_IRQ_BASE          64
+#define LOONGSON_PCH_ACPI_IRQ          (LOONGSON_PCH_IRQ_BASE + 47)
+#define LOONGSON_PCH_LAST_IRQ          (LOONGSON_PCH_IRQ_BASE + 64 - 1)
+
+#define LOONGSON_MSI_IRQ_BASE          (LOONGSON_PCH_IRQ_BASE + 64)
+#define LOONGSON_MSI_LAST_IRQ          (LOONGSON_PCH_IRQ_BASE + 256 - 1)
+
+#define GSI_MIN_LPC_IRQ                LOONGSON_LPC_IRQ_BASE
+#define GSI_MAX_LPC_IRQ                (LOONGSON_LPC_IRQ_BASE + 16 - 1)
+#define GSI_MIN_CPU_IRQ                LOONGSON_CPU_IRQ_BASE
+#define GSI_MAX_CPU_IRQ                (LOONGSON_CPU_IRQ_BASE + 48 - 1)
+#define GSI_MIN_PCH_IRQ                LOONGSON_PCH_IRQ_BASE
+#define GSI_MAX_PCH_IRQ                (LOONGSON_PCH_IRQ_BASE + 256 - 1)
+
+extern int find_pch_pic(u32 gsi);
+extern int eiointc_get_node(int id);
+
+static inline void eiointc_enable(void)
+{
+       uint64_t misc;
+
+       misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
+       misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
+       iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
+}
+
+struct acpi_madt_lio_pic;
+struct acpi_madt_eio_pic;
+struct acpi_madt_ht_pic;
+struct acpi_madt_bio_pic;
+struct acpi_madt_msi_pic;
+struct acpi_madt_lpc_pic;
+
+struct irq_domain *loongarch_cpu_irq_init(void);
+
+struct irq_domain *liointc_acpi_init(struct irq_domain *parent,
+                                       struct acpi_madt_lio_pic *acpi_liointc);
+struct irq_domain *eiointc_acpi_init(struct irq_domain *parent,
+                                       struct acpi_madt_eio_pic *acpi_eiointc);
+
+struct irq_domain *htvec_acpi_init(struct irq_domain *parent,
+                                       struct acpi_madt_ht_pic *acpi_htvec);
+struct irq_domain *pch_lpc_acpi_init(struct irq_domain *parent,
+                                       struct acpi_madt_lpc_pic *acpi_pchlpc);
+struct irq_domain *pch_msi_acpi_init(struct irq_domain *parent,
+                                       struct acpi_madt_msi_pic *acpi_pchmsi);
+struct irq_domain *pch_pic_acpi_init(struct irq_domain *parent,
+                                       struct acpi_madt_bio_pic *acpi_pchpic);
+
+extern struct acpi_madt_lio_pic *acpi_liointc;
+extern struct acpi_madt_eio_pic *acpi_eiointc[MAX_IO_PICS];
+
+extern struct acpi_madt_ht_pic *acpi_htintc;
+extern struct acpi_madt_lpc_pic *acpi_pchlpc;
+extern struct acpi_madt_msi_pic *acpi_pchmsi[MAX_IO_PICS];
+extern struct acpi_madt_bio_pic *acpi_pchpic[MAX_IO_PICS];
+
+extern struct irq_domain *cpu_domain;
+extern struct irq_domain *liointc_domain;
+extern struct irq_domain *pch_lpc_domain;
+extern struct irq_domain *pch_msi_domain[MAX_IO_PICS];
+extern struct irq_domain *pch_pic_domain[MAX_IO_PICS];
+
+extern irqreturn_t loongson3_ipi_interrupt(int irq, void *dev);
+
+#include <asm-generic/irq.h>
+
+#endif /* _ASM_IRQ_H */
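The GSI range macros above carve the global interrupt space into LPC (0-15), CPU (16-63) and PCH (64-319) windows. A minimal sketch of how a consumer could classify a GSI against those windows, assuming only the GSI_MIN_*/GSI_MAX_* definitions from this header; the helper name and return values are illustrative, not part of the patch:

  /* Classify a GSI into the window it falls in (sketch only). */
  static int example_gsi_to_pic(u32 gsi)
  {
          if (gsi <= GSI_MAX_LPC_IRQ)             /* GSI_MIN_LPC_IRQ is 0 */
                  return 0;       /* legacy LPC interrupt */
          if (gsi >= GSI_MIN_CPU_IRQ && gsi <= GSI_MAX_CPU_IRQ)
                  return 1;       /* CPU interrupt controller vector */
          if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ)
                  return 2;       /* PCH PIC / MSI range */
          return -1;              /* outside every window */
  }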
diff --git a/arch/loongarch/include/asm/irq_regs.h b/arch/loongarch/include/asm/irq_regs.h
new file mode 100644 (file)
index 0000000..3d62d81
--- /dev/null
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_IRQ_REGS_H
+#define __ASM_IRQ_REGS_H
+
+#define ARCH_HAS_OWN_IRQ_REGS
+
+#include <linux/thread_info.h>
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+       return current_thread_info()->regs;
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+       struct pt_regs *old_regs;
+
+       old_regs = get_irq_regs();
+       current_thread_info()->regs = new_regs;
+
+       return old_regs;
+}
+
+#endif /* __ASM_IRQ_REGS_H */
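get_irq_regs()/set_irq_regs() above keep a pointer to the interrupted context in thread_info so that code running in interrupt context can reach it. A minimal sketch of the usual save/restore pattern around interrupt dispatch, assuming these helpers; the entry-function name and the dispatch step are illustrative:

  #include <linux/hardirq.h>      /* irq_enter()/irq_exit() */

  void example_irq_entry(struct pt_regs *regs)
  {
          struct pt_regs *old_regs = set_irq_regs(regs);

          irq_enter();
          /* ... look up and run the handler for the pending interrupt ... */
          irq_exit();

          set_irq_regs(old_regs);
  }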
diff --git a/arch/loongarch/include/asm/irqflags.h b/arch/loongarch/include/asm/irqflags.h
new file mode 100644 (file)
index 0000000..52121cd
--- /dev/null
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/stringify.h>
+#include <asm/compiler.h>
+#include <asm/loongarch.h>
+
+static inline void arch_local_irq_enable(void)
+{
+       u32 flags = CSR_CRMD_IE;
+       __asm__ __volatile__(
+               "csrxchg %[val], %[mask], %[reg]\n\t"
+               : [val] "+r" (flags)
+               : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+               : "memory");
+}
+
+static inline void arch_local_irq_disable(void)
+{
+       u32 flags = 0;
+       __asm__ __volatile__(
+               "csrxchg %[val], %[mask], %[reg]\n\t"
+               : [val] "+r" (flags)
+               : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+               : "memory");
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+       u32 flags = 0;
+       __asm__ __volatile__(
+               "csrxchg %[val], %[mask], %[reg]\n\t"
+               : [val] "+r" (flags)
+               : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+               : "memory");
+       return flags;
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+       __asm__ __volatile__(
+               "csrxchg %[val], %[mask], %[reg]\n\t"
+               : [val] "+r" (flags)
+               : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+               : "memory");
+}
+
+static inline unsigned long arch_local_save_flags(void)
+{
+       u32 flags;
+       __asm__ __volatile__(
+               "csrrd %[val], %[reg]\n\t"
+               : [val] "=r" (flags)
+               : [reg] "i" (LOONGARCH_CSR_CRMD)
+               : "memory");
+       return flags;
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+       return !(flags & CSR_CRMD_IE);
+}
+
+static inline int arch_irqs_disabled(void)
+{
+       return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#endif /* #ifndef __ASSEMBLY__ */
+
+#endif /* _ASM_IRQFLAGS_H */
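Each helper above is a single csrxchg/csrrd on the IE bit of the CRMD CSR, so interrupt state is changed and restored without a read-modify-write race. A minimal sketch of how they compose into the usual save/restore critical-section pattern; the function name is illustrative:

  static void example_critical_section(void)
  {
          unsigned long flags;

          flags = arch_local_irq_save();  /* clear CRMD.IE, keep the old value */
          /* ... work that must not be interrupted on this CPU ... */
          arch_local_irq_restore(flags);  /* put the saved IE bit back */
  }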
diff --git a/arch/loongarch/include/asm/kdebug.h b/arch/loongarch/include/asm/kdebug.h
new file mode 100644 (file)
index 0000000..d721b4b
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_LOONGARCH_KDEBUG_H
+#define _ASM_LOONGARCH_KDEBUG_H
+
+#include <linux/notifier.h>
+
+enum die_val {
+       DIE_OOPS = 1,
+       DIE_RI,
+       DIE_FP,
+       DIE_SIMD,
+       DIE_TRAP,
+       DIE_PAGE_FAULT,
+       DIE_BREAK,
+       DIE_SSTEPBP,
+       DIE_UPROBE,
+       DIE_UPROBE_XOL,
+};
+
+#endif /* _ASM_LOONGARCH_KDEBUG_H */
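The die_val codes above are what LoongArch passes to the generic die notifier chain. A minimal sketch of reporting one of them through notify_die() from <linux/kdebug.h>; the wrapper name and the error/trap arguments are illustrative:

  #include <linux/kdebug.h>
  #include <linux/signal.h>

  static void example_report_break(struct pt_regs *regs)
  {
          notify_die(DIE_BREAK, "Break", regs, 0, 0, SIGTRAP);
  }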
diff --git a/arch/loongarch/include/asm/linkage.h b/arch/loongarch/include/asm/linkage.h
new file mode 100644 (file)
index 0000000..81b0c4c
--- /dev/null
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN                .align 2
+#define __ALIGN_STR    __stringify(__ALIGN)
+
+#define SYM_FUNC_START(name)                           \
+       SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)      \
+       .cfi_startproc;
+
+#define SYM_FUNC_START_NOALIGN(name)                   \
+       SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)       \
+       .cfi_startproc;
+
+#define SYM_FUNC_START_LOCAL(name)                     \
+       SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)       \
+       .cfi_startproc;
+
+#define SYM_FUNC_START_LOCAL_NOALIGN(name)             \
+       SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)        \
+       .cfi_startproc;
+
+#define SYM_FUNC_START_WEAK(name)                      \
+       SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN)        \
+       .cfi_startproc;
+
+#define SYM_FUNC_START_WEAK_NOALIGN(name)              \
+       SYM_START(name, SYM_L_WEAK, SYM_A_NONE)         \
+       .cfi_startproc;
+
+#define SYM_FUNC_END(name)                             \
+       .cfi_endproc;                                   \
+       SYM_END(name, SYM_T_FUNC)
+
+#endif
diff --git a/arch/loongarch/include/asm/local.h b/arch/loongarch/include/asm/local.h
new file mode 100644 (file)
index 0000000..2052a22
--- /dev/null
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ARCH_LOONGARCH_LOCAL_H
+#define _ARCH_LOONGARCH_LOCAL_H
+
+#include <linux/percpu.h>
+#include <linux/bitops.h>
+#include <linux/atomic.h>
+#include <asm/cmpxchg.h>
+#include <asm/compiler.h>
+
+typedef struct {
+       atomic_long_t a;
+} local_t;
+
+#define LOCAL_INIT(i)  { ATOMIC_LONG_INIT(i) }
+
+#define local_read(l)  atomic_long_read(&(l)->a)
+#define local_set(l, i) atomic_long_set(&(l)->a, (i))
+
+#define local_add(i, l) atomic_long_add((i), (&(l)->a))
+#define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
+#define local_inc(l)   atomic_long_inc(&(l)->a)
+#define local_dec(l)   atomic_long_dec(&(l)->a)
+
+/*
+ * Same as above, but return the result value
+ */
+static inline long local_add_return(long i, local_t *l)
+{
+       unsigned long result;
+
+       __asm__ __volatile__(
+       "   " __AMADD " %1, %2, %0      \n"
+       : "+ZB" (l->a.counter), "=&r" (result)
+       : "r" (i)
+       : "memory");
+       result = result + i;
+
+       return result;
+}
+
+static inline long local_sub_return(long i, local_t *l)
+{
+       unsigned long result;
+
+       __asm__ __volatile__(
+       "   " __AMADD "%1, %2, %0       \n"
+       : "+ZB" (l->a.counter), "=&r" (result)
+       : "r" (-i)
+       : "memory");
+
+       result = result - i;
+
+       return result;
+}
+
+#define local_cmpxchg(l, o, n) \
+       ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to l...
+ * @u: ...unless l is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+#define local_add_unless(l, a, u)                              \
+({                                                             \
+       long c, old;                                            \
+       c = local_read(l);                                      \
+       while (c != (u) && (old = local_cmpxchg((l), c, c + (a))) != c) \
+               c = old;                                        \
+       c != (u);                                               \
+})
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
+#define local_dec_return(l) local_sub_return(1, (l))
+#define local_inc_return(l) local_add_return(1, (l))
+
+/*
+ * local_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @l: pointer of type local_t
+ *
+ * Atomically subtracts @i from @l and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_sub_and_test(i, l) (local_sub_return((i), (l)) == 0)
+
+/*
+ * local_inc_and_test - increment and test
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_inc_and_test(l) (local_inc_return(l) == 0)
+
+/*
+ * local_dec_and_test - decrement by 1 and test
+ * @l: pointer of type local_t
+ *
+ * Atomically decrements @l by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0)
+
+/*
+ * local_add_negative - add and test if negative
+ * @l: pointer of type local_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @l and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+#define local_add_negative(i, l) (local_add_return(i, (l)) < 0)
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations.  Note they take
+ * a variable, not an address.
+ */
+
+#define __local_inc(l)         ((l)->a.counter++)
+#define __local_dec(l)         ((l)->a.counter--)
+#define __local_add(i, l)      ((l)->a.counter += (i))
+#define __local_sub(i, l)      ((l)->a.counter -= (i))
+
+#endif /* _ARCH_LOONGARCH_LOCAL_H */
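local_t above wraps atomic_long_t for counters that are only ever updated by their owning CPU, which is why the relaxed __local_* forms exist alongside the AMO-based ones. A minimal sketch of the typical per-CPU usage, assuming only the operations defined in this header; the counter name and helpers are illustrative:

  #include <linux/percpu.h>

  static DEFINE_PER_CPU(local_t, example_hits) = LOCAL_INIT(0);

  static void example_count_hit(void)     /* caller runs pinned to one CPU */
  {
          local_inc(this_cpu_ptr(&example_hits));
  }

  static long example_read_hits(int cpu)  /* any CPU may read */
  {
          return local_read(per_cpu_ptr(&example_hits, cpu));
  }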
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
new file mode 100644 (file)
index 0000000..3ba4f7e
--- /dev/null
@@ -0,0 +1,1516 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_LOONGARCH_H
+#define _ASM_LOONGARCH_H
+
+#include <linux/bits.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+#ifndef __ASSEMBLY__
+#include <larchintrin.h>
+
+/*
+ * parse_r var, r - Helper assembler macro for parsing register names.
+ *
+ * This converts the register name in $n form provided in \r to the
+ * corresponding register number, which is assigned to the variable \var. It is
+ * needed to allow explicit encoding of instructions in inline assembly where
+ * registers are chosen by the compiler in $n form, allowing us to avoid using
+ * fixed register numbers.
+ *
+ * It also allows newer instructions (not implemented by the assembler) to be
+ * transparently implemented using assembler macros, instead of needing separate
+ * cases depending on toolchain support.
+ *
+ * Simple usage example:
+ * __asm__ __volatile__("parse_r addr, %0\n\t"
+ *                     "#invtlb op, 0, %0\n\t"
+ *                     ".word ((0x6498000) | (addr << 10) | (0 << 5) | op)"
+ *                     : "=r" (status));
+ */
+
+/* Match an individual register number and assign to \var */
+#define _IFC_REG(n)                            \
+       ".ifc   \\r, $r" #n "\n\t"              \
+       "\\var  = " #n "\n\t"                   \
+       ".endif\n\t"
+
+__asm__(".macro        parse_r var r\n\t"
+       "\\var  = -1\n\t"
+       _IFC_REG(0)  _IFC_REG(1)  _IFC_REG(2)  _IFC_REG(3)
+       _IFC_REG(4)  _IFC_REG(5)  _IFC_REG(6)  _IFC_REG(7)
+       _IFC_REG(8)  _IFC_REG(9)  _IFC_REG(10) _IFC_REG(11)
+       _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15)
+       _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19)
+       _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23)
+       _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27)
+       _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31)
+       ".iflt  \\var\n\t"
+       ".error \"Unable to parse register name \\r\"\n\t"
+       ".endif\n\t"
+       ".endm");
+
+#undef _IFC_REG
+
+/* CPUCFG */
+static inline u32 read_cpucfg(u32 reg)
+{
+       return __cpucfg(reg);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef __ASSEMBLY__
+
+/* LoongArch Registers */
+#define REG_ZERO       0x0
+#define REG_RA         0x1
+#define REG_TP         0x2
+#define REG_SP         0x3
+#define REG_A0         0x4 /* Reused as V0 for return value */
+#define REG_A1         0x5 /* Reused as V1 for return value */
+#define REG_A2         0x6
+#define REG_A3         0x7
+#define REG_A4         0x8
+#define REG_A5         0x9
+#define REG_A6         0xa
+#define REG_A7         0xb
+#define REG_T0         0xc
+#define REG_T1         0xd
+#define REG_T2         0xe
+#define REG_T3         0xf
+#define REG_T4         0x10
+#define REG_T5         0x11
+#define REG_T6         0x12
+#define REG_T7         0x13
+#define REG_T8         0x14
+#define REG_U0         0x15 /* Kernel uses it as percpu base */
+#define REG_FP         0x16
+#define REG_S0         0x17
+#define REG_S1         0x18
+#define REG_S2         0x19
+#define REG_S3         0x1a
+#define REG_S4         0x1b
+#define REG_S5         0x1c
+#define REG_S6         0x1d
+#define REG_S7         0x1e
+#define REG_S8         0x1f
+
+#endif /* __ASSEMBLY__ */
+
+/* Bit fields for CPUCFG registers */
+#define LOONGARCH_CPUCFG0              0x0
+#define  CPUCFG0_PRID                  GENMASK(31, 0)
+
+#define LOONGARCH_CPUCFG1              0x1
+#define  CPUCFG1_ISGR32                        BIT(0)
+#define  CPUCFG1_ISGR64                        BIT(1)
+#define  CPUCFG1_PAGING                        BIT(2)
+#define  CPUCFG1_IOCSR                 BIT(3)
+#define  CPUCFG1_PABITS                        GENMASK(11, 4)
+#define  CPUCFG1_VABITS                        GENMASK(19, 12)
+#define  CPUCFG1_UAL                   BIT(20)
+#define  CPUCFG1_RI                    BIT(21)
+#define  CPUCFG1_EP                    BIT(22)
+#define  CPUCFG1_RPLV                  BIT(23)
+#define  CPUCFG1_HUGEPG                        BIT(24)
+#define  CPUCFG1_IOCSRBRD              BIT(25)
+#define  CPUCFG1_MSGINT                        BIT(26)
+
+#define LOONGARCH_CPUCFG2              0x2
+#define  CPUCFG2_FP                    BIT(0)
+#define  CPUCFG2_FPSP                  BIT(1)
+#define  CPUCFG2_FPDP                  BIT(2)
+#define  CPUCFG2_FPVERS                        GENMASK(5, 3)
+#define  CPUCFG2_LSX                   BIT(6)
+#define  CPUCFG2_LASX                  BIT(7)
+#define  CPUCFG2_COMPLEX               BIT(8)
+#define  CPUCFG2_CRYPTO                        BIT(9)
+#define  CPUCFG2_LVZP                  BIT(10)
+#define  CPUCFG2_LVZVER                        GENMASK(13, 11)
+#define  CPUCFG2_LLFTP                 BIT(14)
+#define  CPUCFG2_LLFTPREV              GENMASK(17, 15)
+#define  CPUCFG2_X86BT                 BIT(18)
+#define  CPUCFG2_ARMBT                 BIT(19)
+#define  CPUCFG2_MIPSBT                        BIT(20)
+#define  CPUCFG2_LSPW                  BIT(21)
+#define  CPUCFG2_LAM                   BIT(22)
+
+#define LOONGARCH_CPUCFG3              0x3
+#define  CPUCFG3_CCDMA                 BIT(0)
+#define  CPUCFG3_SFB                   BIT(1)
+#define  CPUCFG3_UCACC                 BIT(2)
+#define  CPUCFG3_LLEXC                 BIT(3)
+#define  CPUCFG3_SCDLY                 BIT(4)
+#define  CPUCFG3_LLDBAR                        BIT(5)
+#define  CPUCFG3_ITLBT                 BIT(6)
+#define  CPUCFG3_ICACHET               BIT(7)
+#define  CPUCFG3_SPW_LVL               GENMASK(10, 8)
+#define  CPUCFG3_SPW_HG_HF             BIT(11)
+#define  CPUCFG3_RVA                   BIT(12)
+#define  CPUCFG3_RVAMAX                        GENMASK(16, 13)
+
+#define LOONGARCH_CPUCFG4              0x4
+#define  CPUCFG4_CCFREQ                        GENMASK(31, 0)
+
+#define LOONGARCH_CPUCFG5              0x5
+#define  CPUCFG5_CCMUL                 GENMASK(15, 0)
+#define  CPUCFG5_CCDIV                 GENMASK(31, 16)
+
+#define LOONGARCH_CPUCFG6              0x6
+#define  CPUCFG6_PMP                   BIT(0)
+#define  CPUCFG6_PAMVER                        GENMASK(3, 1)
+#define  CPUCFG6_PMNUM                 GENMASK(7, 4)
+#define  CPUCFG6_PMBITS                        GENMASK(13, 8)
+#define  CPUCFG6_UPM                   BIT(14)
+
+#define LOONGARCH_CPUCFG16             0x10
+#define  CPUCFG16_L1_IUPRE             BIT(0)
+#define  CPUCFG16_L1_IUUNIFY           BIT(1)
+#define  CPUCFG16_L1_DPRE              BIT(2)
+#define  CPUCFG16_L2_IUPRE             BIT(3)
+#define  CPUCFG16_L2_IUUNIFY           BIT(4)
+#define  CPUCFG16_L2_IUPRIV            BIT(5)
+#define  CPUCFG16_L2_IUINCL            BIT(6)
+#define  CPUCFG16_L2_DPRE              BIT(7)
+#define  CPUCFG16_L2_DPRIV             BIT(8)
+#define  CPUCFG16_L2_DINCL             BIT(9)
+#define  CPUCFG16_L3_IUPRE             BIT(10)
+#define  CPUCFG16_L3_IUUNIFY           BIT(11)
+#define  CPUCFG16_L3_IUPRIV            BIT(12)
+#define  CPUCFG16_L3_IUINCL            BIT(13)
+#define  CPUCFG16_L3_DPRE              BIT(14)
+#define  CPUCFG16_L3_DPRIV             BIT(15)
+#define  CPUCFG16_L3_DINCL             BIT(16)
+
+#define LOONGARCH_CPUCFG17             0x11
+#define  CPUCFG17_L1I_WAYS_M           GENMASK(15, 0)
+#define  CPUCFG17_L1I_SETS_M           GENMASK(23, 16)
+#define  CPUCFG17_L1I_SIZE_M           GENMASK(30, 24)
+#define  CPUCFG17_L1I_WAYS             0
+#define  CPUCFG17_L1I_SETS             16
+#define  CPUCFG17_L1I_SIZE             24
+
+#define LOONGARCH_CPUCFG18             0x12
+#define  CPUCFG18_L1D_WAYS_M           GENMASK(15, 0)
+#define  CPUCFG18_L1D_SETS_M           GENMASK(23, 16)
+#define  CPUCFG18_L1D_SIZE_M           GENMASK(30, 24)
+#define  CPUCFG18_L1D_WAYS             0
+#define  CPUCFG18_L1D_SETS             16
+#define  CPUCFG18_L1D_SIZE             24
+
+#define LOONGARCH_CPUCFG19             0x13
+#define  CPUCFG19_L2_WAYS_M            GENMASK(15, 0)
+#define  CPUCFG19_L2_SETS_M            GENMASK(23, 16)
+#define  CPUCFG19_L2_SIZE_M            GENMASK(30, 24)
+#define  CPUCFG19_L2_WAYS              0
+#define  CPUCFG19_L2_SETS              16
+#define  CPUCFG19_L2_SIZE              24
+
+#define LOONGARCH_CPUCFG20             0x14
+#define  CPUCFG20_L3_WAYS_M            GENMASK(15, 0)
+#define  CPUCFG20_L3_SETS_M            GENMASK(23, 16)
+#define  CPUCFG20_L3_SIZE_M            GENMASK(30, 24)
+#define  CPUCFG20_L3_WAYS              0
+#define  CPUCFG20_L3_SETS              16
+#define  CPUCFG20_L3_SIZE              24
+
+#define LOONGARCH_CPUCFG48             0x30
+#define  CPUCFG48_MCSR_LCK             BIT(0)
+#define  CPUCFG48_NAP_EN               BIT(1)
+#define  CPUCFG48_VFPU_CG              BIT(2)
+#define  CPUCFG48_RAM_CG               BIT(3)
+
+#ifndef __ASSEMBLY__
+
+/* CSR */
+static __always_inline u32 csr_read32(u32 reg)
+{
+       return __csrrd_w(reg);
+}
+
+static __always_inline u64 csr_read64(u32 reg)
+{
+       return __csrrd_d(reg);
+}
+
+static __always_inline void csr_write32(u32 val, u32 reg)
+{
+       __csrwr_w(val, reg);
+}
+
+static __always_inline void csr_write64(u64 val, u32 reg)
+{
+       __csrwr_d(val, reg);
+}
+
+static __always_inline u32 csr_xchg32(u32 val, u32 mask, u32 reg)
+{
+       return __csrxchg_w(val, mask, reg);
+}
+
+static __always_inline u64 csr_xchg64(u64 val, u64 mask, u32 reg)
+{
+       return __csrxchg_d(val, mask, reg);
+}
+
+/* IOCSR */
+static __always_inline u32 iocsr_read32(u32 reg)
+{
+       return __iocsrrd_w(reg);
+}
+
+static __always_inline u64 iocsr_read64(u32 reg)
+{
+       return __iocsrrd_d(reg);
+}
+
+static __always_inline void iocsr_write32(u32 val, u32 reg)
+{
+       __iocsrwr_w(val, reg);
+}
+
+static __always_inline void iocsr_write64(u64 val, u32 reg)
+{
+       __iocsrwr_d(val, reg);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+/* CSR register number */
+
+/* Basic CSR registers */
+#define LOONGARCH_CSR_CRMD             0x0     /* Current mode info */
+#define  CSR_CRMD_WE_SHIFT             9
+#define  CSR_CRMD_WE                   (_ULCAST_(0x1) << CSR_CRMD_WE_SHIFT)
+#define  CSR_CRMD_DACM_SHIFT           7
+#define  CSR_CRMD_DACM_WIDTH           2
+#define  CSR_CRMD_DACM                 (_ULCAST_(0x3) << CSR_CRMD_DACM_SHIFT)
+#define  CSR_CRMD_DACF_SHIFT           5
+#define  CSR_CRMD_DACF_WIDTH           2
+#define  CSR_CRMD_DACF                 (_ULCAST_(0x3) << CSR_CRMD_DACF_SHIFT)
+#define  CSR_CRMD_PG_SHIFT             4
+#define  CSR_CRMD_PG                   (_ULCAST_(0x1) << CSR_CRMD_PG_SHIFT)
+#define  CSR_CRMD_DA_SHIFT             3
+#define  CSR_CRMD_DA                   (_ULCAST_(0x1) << CSR_CRMD_DA_SHIFT)
+#define  CSR_CRMD_IE_SHIFT             2
+#define  CSR_CRMD_IE                   (_ULCAST_(0x1) << CSR_CRMD_IE_SHIFT)
+#define  CSR_CRMD_PLV_SHIFT            0
+#define  CSR_CRMD_PLV_WIDTH            2
+#define  CSR_CRMD_PLV                  (_ULCAST_(0x3) << CSR_CRMD_PLV_SHIFT)
+
+#define PLV_KERN                       0
+#define PLV_USER                       3
+#define PLV_MASK                       0x3
+
+#define LOONGARCH_CSR_PRMD             0x1     /* Prev-exception mode info */
+#define  CSR_PRMD_PWE_SHIFT            3
+#define  CSR_PRMD_PWE                  (_ULCAST_(0x1) << CSR_PRMD_PWE_SHIFT)
+#define  CSR_PRMD_PIE_SHIFT            2
+#define  CSR_PRMD_PIE                  (_ULCAST_(0x1) << CSR_PRMD_PIE_SHIFT)
+#define  CSR_PRMD_PPLV_SHIFT           0
+#define  CSR_PRMD_PPLV_WIDTH           2
+#define  CSR_PRMD_PPLV                 (_ULCAST_(0x3) << CSR_PRMD_PPLV_SHIFT)
+
+#define LOONGARCH_CSR_EUEN             0x2     /* Extended unit enable */
+#define  CSR_EUEN_LBTEN_SHIFT          3
+#define  CSR_EUEN_LBTEN                        (_ULCAST_(0x1) << CSR_EUEN_LBTEN_SHIFT)
+#define  CSR_EUEN_LASXEN_SHIFT         2
+#define  CSR_EUEN_LASXEN               (_ULCAST_(0x1) << CSR_EUEN_LASXEN_SHIFT)
+#define  CSR_EUEN_LSXEN_SHIFT          1
+#define  CSR_EUEN_LSXEN                        (_ULCAST_(0x1) << CSR_EUEN_LSXEN_SHIFT)
+#define  CSR_EUEN_FPEN_SHIFT           0
+#define  CSR_EUEN_FPEN                 (_ULCAST_(0x1) << CSR_EUEN_FPEN_SHIFT)
+
+#define LOONGARCH_CSR_MISC             0x3     /* Misc config */
+
+#define LOONGARCH_CSR_ECFG             0x4     /* Exception config */
+#define  CSR_ECFG_VS_SHIFT             16
+#define  CSR_ECFG_VS_WIDTH             3
+#define  CSR_ECFG_VS                   (_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT)
+#define  CSR_ECFG_IM_SHIFT             0
+#define  CSR_ECFG_IM_WIDTH             13
+#define  CSR_ECFG_IM                   (_ULCAST_(0x1fff) << CSR_ECFG_IM_SHIFT)
+
+#define LOONGARCH_CSR_ESTAT            0x5     /* Exception status */
+#define  CSR_ESTAT_ESUBCODE_SHIFT      22
+#define  CSR_ESTAT_ESUBCODE_WIDTH      9
+#define  CSR_ESTAT_ESUBCODE            (_ULCAST_(0x1ff) << CSR_ESTAT_ESUBCODE_SHIFT)
+#define  CSR_ESTAT_EXC_SHIFT           16
+#define  CSR_ESTAT_EXC_WIDTH           6
+#define  CSR_ESTAT_EXC                 (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT)
+#define  CSR_ESTAT_IS_SHIFT            0
+#define  CSR_ESTAT_IS_WIDTH            15
+#define  CSR_ESTAT_IS                  (_ULCAST_(0x7fff) << CSR_ESTAT_IS_SHIFT)
+
+#define LOONGARCH_CSR_ERA              0x6     /* ERA */
+
+#define LOONGARCH_CSR_BADV             0x7     /* Bad virtual address */
+
+#define LOONGARCH_CSR_BADI             0x8     /* Bad instruction */
+
+#define LOONGARCH_CSR_EENTRY           0xc     /* Exception entry */
+
+/* TLB related CSR registers */
+#define LOONGARCH_CSR_TLBIDX           0x10    /* TLB Index, EHINV, PageSize, NP */
+#define  CSR_TLBIDX_EHINV_SHIFT                31
+#define  CSR_TLBIDX_EHINV              (_ULCAST_(1) << CSR_TLBIDX_EHINV_SHIFT)
+#define  CSR_TLBIDX_PS_SHIFT           24
+#define  CSR_TLBIDX_PS_WIDTH           6
+#define  CSR_TLBIDX_PS                 (_ULCAST_(0x3f) << CSR_TLBIDX_PS_SHIFT)
+#define  CSR_TLBIDX_IDX_SHIFT          0
+#define  CSR_TLBIDX_IDX_WIDTH          12
+#define  CSR_TLBIDX_IDX                        (_ULCAST_(0xfff) << CSR_TLBIDX_IDX_SHIFT)
+#define  CSR_TLBIDX_SIZEM              0x3f000000
+#define  CSR_TLBIDX_SIZE               CSR_TLBIDX_PS_SHIFT
+#define  CSR_TLBIDX_IDXM               0xfff
+#define  CSR_INVALID_ENTRY(e)          (CSR_TLBIDX_EHINV | e)
+
+#define LOONGARCH_CSR_TLBEHI           0x11    /* TLB EntryHi */
+
+#define LOONGARCH_CSR_TLBELO0          0x12    /* TLB EntryLo0 */
+#define  CSR_TLBLO0_RPLV_SHIFT         63
+#define  CSR_TLBLO0_RPLV               (_ULCAST_(0x1) << CSR_TLBLO0_RPLV_SHIFT)
+#define  CSR_TLBLO0_NX_SHIFT           62
+#define  CSR_TLBLO0_NX                 (_ULCAST_(0x1) << CSR_TLBLO0_NX_SHIFT)
+#define  CSR_TLBLO0_NR_SHIFT           61
+#define  CSR_TLBLO0_NR                 (_ULCAST_(0x1) << CSR_TLBLO0_NR_SHIFT)
+#define  CSR_TLBLO0_PFN_SHIFT          12
+#define  CSR_TLBLO0_PFN_WIDTH          36
+#define  CSR_TLBLO0_PFN                        (_ULCAST_(0xfffffffff) << CSR_TLBLO0_PFN_SHIFT)
+#define  CSR_TLBLO0_GLOBAL_SHIFT       6
+#define  CSR_TLBLO0_GLOBAL             (_ULCAST_(0x1) << CSR_TLBLO0_GLOBAL_SHIFT)
+#define  CSR_TLBLO0_CCA_SHIFT          4
+#define  CSR_TLBLO0_CCA_WIDTH          2
+#define  CSR_TLBLO0_CCA                        (_ULCAST_(0x3) << CSR_TLBLO0_CCA_SHIFT)
+#define  CSR_TLBLO0_PLV_SHIFT          2
+#define  CSR_TLBLO0_PLV_WIDTH          2
+#define  CSR_TLBLO0_PLV                        (_ULCAST_(0x3) << CSR_TLBLO0_PLV_SHIFT)
+#define  CSR_TLBLO0_WE_SHIFT           1
+#define  CSR_TLBLO0_WE                 (_ULCAST_(0x1) << CSR_TLBLO0_WE_SHIFT)
+#define  CSR_TLBLO0_V_SHIFT            0
+#define  CSR_TLBLO0_V                  (_ULCAST_(0x1) << CSR_TLBLO0_V_SHIFT)
+
+#define LOONGARCH_CSR_TLBELO1          0x13    /* TLB EntryLo1 */
+#define  CSR_TLBLO1_RPLV_SHIFT         63
+#define  CSR_TLBLO1_RPLV               (_ULCAST_(0x1) << CSR_TLBLO1_RPLV_SHIFT)
+#define  CSR_TLBLO1_NX_SHIFT           62
+#define  CSR_TLBLO1_NX                 (_ULCAST_(0x1) << CSR_TLBLO1_NX_SHIFT)
+#define  CSR_TLBLO1_NR_SHIFT           61
+#define  CSR_TLBLO1_NR                 (_ULCAST_(0x1) << CSR_TLBLO1_NR_SHIFT)
+#define  CSR_TLBLO1_PFN_SHIFT          12
+#define  CSR_TLBLO1_PFN_WIDTH          36
+#define  CSR_TLBLO1_PFN                        (_ULCAST_(0xfffffffff) << CSR_TLBLO1_PFN_SHIFT)
+#define  CSR_TLBLO1_GLOBAL_SHIFT       6
+#define  CSR_TLBLO1_GLOBAL             (_ULCAST_(0x1) << CSR_TLBLO1_GLOBAL_SHIFT)
+#define  CSR_TLBLO1_CCA_SHIFT          4
+#define  CSR_TLBLO1_CCA_WIDTH          2
+#define  CSR_TLBLO1_CCA                        (_ULCAST_(0x3) << CSR_TLBLO1_CCA_SHIFT)
+#define  CSR_TLBLO1_PLV_SHIFT          2
+#define  CSR_TLBLO1_PLV_WIDTH          2
+#define  CSR_TLBLO1_PLV                        (_ULCAST_(0x3) << CSR_TLBLO1_PLV_SHIFT)
+#define  CSR_TLBLO1_WE_SHIFT           1
+#define  CSR_TLBLO1_WE                 (_ULCAST_(0x1) << CSR_TLBLO1_WE_SHIFT)
+#define  CSR_TLBLO1_V_SHIFT            0
+#define  CSR_TLBLO1_V                  (_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT)
+
+#define LOONGARCH_CSR_GTLBC            0x15    /* Guest TLB control */
+#define  CSR_GTLBC_RID_SHIFT           16
+#define  CSR_GTLBC_RID_WIDTH           8
+#define  CSR_GTLBC_RID                 (_ULCAST_(0xff) << CSR_GTLBC_RID_SHIFT)
+#define  CSR_GTLBC_TOTI_SHIFT          13
+#define  CSR_GTLBC_TOTI                        (_ULCAST_(0x1) << CSR_GTLBC_TOTI_SHIFT)
+#define  CSR_GTLBC_USERID_SHIFT                12
+#define  CSR_GTLBC_USERID              (_ULCAST_(0x1) << CSR_GTLBC_USERID_SHIFT)
+#define  CSR_GTLBC_GMTLBSZ_SHIFT       0
+#define  CSR_GTLBC_GMTLBSZ_WIDTH       6
+#define  CSR_GTLBC_GMTLBSZ             (_ULCAST_(0x3f) << CSR_GTLBC_GMTLBSZ_SHIFT)
+
+#define LOONGARCH_CSR_TRGP             0x16    /* TLBR read guest info */
+#define  CSR_TRGP_RID_SHIFT            16
+#define  CSR_TRGP_RID_WIDTH            8
+#define  CSR_TRGP_RID                  (_ULCAST_(0xff) << CSR_TRGP_RID_SHIFT)
+#define  CSR_TRGP_GTLB_SHIFT           0
+#define  CSR_TRGP_GTLB                 (1 << CSR_TRGP_GTLB_SHIFT)
+
+#define LOONGARCH_CSR_ASID             0x18    /* ASID */
+#define  CSR_ASID_BIT_SHIFT            16      /* ASIDBits */
+#define  CSR_ASID_BIT_WIDTH            8
+#define  CSR_ASID_BIT                  (_ULCAST_(0xff) << CSR_ASID_BIT_SHIFT)
+#define  CSR_ASID_ASID_SHIFT           0
+#define  CSR_ASID_ASID_WIDTH           10
+#define  CSR_ASID_ASID                 (_ULCAST_(0x3ff) << CSR_ASID_ASID_SHIFT)
+
+#define LOONGARCH_CSR_PGDL             0x19    /* Page table base address when VA[47] = 0 */
+
+#define LOONGARCH_CSR_PGDH             0x1a    /* Page table base address when VA[47] = 1 */
+
+#define LOONGARCH_CSR_PGD              0x1b    /* Page table base */
+
+#define LOONGARCH_CSR_PWCTL0           0x1c    /* PWCtl0 */
+#define  CSR_PWCTL0_PTEW_SHIFT         30
+#define  CSR_PWCTL0_PTEW_WIDTH         2
+#define  CSR_PWCTL0_PTEW               (_ULCAST_(0x3) << CSR_PWCTL0_PTEW_SHIFT)
+#define  CSR_PWCTL0_DIR1WIDTH_SHIFT    25
+#define  CSR_PWCTL0_DIR1WIDTH_WIDTH    5
+#define  CSR_PWCTL0_DIR1WIDTH          (_ULCAST_(0x1f) << CSR_PWCTL0_DIR1WIDTH_SHIFT)
+#define  CSR_PWCTL0_DIR1BASE_SHIFT     20
+#define  CSR_PWCTL0_DIR1BASE_WIDTH     5
+#define  CSR_PWCTL0_DIR1BASE           (_ULCAST_(0x1f) << CSR_PWCTL0_DIR1BASE_SHIFT)
+#define  CSR_PWCTL0_DIR0WIDTH_SHIFT    15
+#define  CSR_PWCTL0_DIR0WIDTH_WIDTH    5
+#define  CSR_PWCTL0_DIR0WIDTH          (_ULCAST_(0x1f) << CSR_PWCTL0_DIR0WIDTH_SHIFT)
+#define  CSR_PWCTL0_DIR0BASE_SHIFT     10
+#define  CSR_PWCTL0_DIR0BASE_WIDTH     5
+#define  CSR_PWCTL0_DIR0BASE           (_ULCAST_(0x1f) << CSR_PWCTL0_DIR0BASE_SHIFT)
+#define  CSR_PWCTL0_PTWIDTH_SHIFT      5
+#define  CSR_PWCTL0_PTWIDTH_WIDTH      5
+#define  CSR_PWCTL0_PTWIDTH            (_ULCAST_(0x1f) << CSR_PWCTL0_PTWIDTH_SHIFT)
+#define  CSR_PWCTL0_PTBASE_SHIFT       0
+#define  CSR_PWCTL0_PTBASE_WIDTH       5
+#define  CSR_PWCTL0_PTBASE             (_ULCAST_(0x1f) << CSR_PWCTL0_PTBASE_SHIFT)
+
+#define LOONGARCH_CSR_PWCTL1           0x1d    /* PWCtl1 */
+#define  CSR_PWCTL1_DIR3WIDTH_SHIFT    18
+#define  CSR_PWCTL1_DIR3WIDTH_WIDTH    5
+#define  CSR_PWCTL1_DIR3WIDTH          (_ULCAST_(0x1f) << CSR_PWCTL1_DIR3WIDTH_SHIFT)
+#define  CSR_PWCTL1_DIR3BASE_SHIFT     12
+#define  CSR_PWCTL1_DIR3BASE_WIDTH     5
+#define  CSR_PWCTL1_DIR3BASE           (_ULCAST_(0x1f) << CSR_PWCTL1_DIR3BASE_SHIFT)
+#define  CSR_PWCTL1_DIR2WIDTH_SHIFT    6
+#define  CSR_PWCTL1_DIR2WIDTH_WIDTH    5
+#define  CSR_PWCTL1_DIR2WIDTH          (_ULCAST_(0x1f) << CSR_PWCTL1_DIR2WIDTH_SHIFT)
+#define  CSR_PWCTL1_DIR2BASE_SHIFT     0
+#define  CSR_PWCTL1_DIR2BASE_WIDTH     5
+#define  CSR_PWCTL1_DIR2BASE           (_ULCAST_(0x1f) << CSR_PWCTL1_DIR2BASE_SHIFT)
+
+#define LOONGARCH_CSR_STLBPGSIZE       0x1e
+#define  CSR_STLBPGSIZE_PS_WIDTH       6
+#define  CSR_STLBPGSIZE_PS             (_ULCAST_(0x3f))
+
+#define LOONGARCH_CSR_RVACFG           0x1f
+#define  CSR_RVACFG_RDVA_WIDTH         4
+#define  CSR_RVACFG_RDVA               (_ULCAST_(0xf))
+
+/* Config CSR registers */
+#define LOONGARCH_CSR_CPUID            0x20    /* CPU core id */
+#define  CSR_CPUID_COREID_WIDTH                9
+#define  CSR_CPUID_COREID              _ULCAST_(0x1ff)
+
+#define LOONGARCH_CSR_PRCFG1           0x21    /* Config1 */
+#define  CSR_CONF1_VSMAX_SHIFT         12
+#define  CSR_CONF1_VSMAX_WIDTH         3
+#define  CSR_CONF1_VSMAX               (_ULCAST_(7) << CSR_CONF1_VSMAX_SHIFT)
+#define  CSR_CONF1_TMRBITS_SHIFT       4
+#define  CSR_CONF1_TMRBITS_WIDTH       8
+#define  CSR_CONF1_TMRBITS             (_ULCAST_(0xff) << CSR_CONF1_TMRBITS_SHIFT)
+#define  CSR_CONF1_KSNUM_WIDTH         4
+#define  CSR_CONF1_KSNUM               _ULCAST_(0xf)
+
+#define LOONGARCH_CSR_PRCFG2           0x22    /* Config2 */
+#define  CSR_CONF2_PGMASK_SUPP         0x3ffff000
+
+#define LOONGARCH_CSR_PRCFG3           0x23    /* Config3 */
+#define  CSR_CONF3_STLBIDX_SHIFT       20
+#define  CSR_CONF3_STLBIDX_WIDTH       6
+#define  CSR_CONF3_STLBIDX             (_ULCAST_(0x3f) << CSR_CONF3_STLBIDX_SHIFT)
+#define  CSR_CONF3_STLBWAYS_SHIFT      12
+#define  CSR_CONF3_STLBWAYS_WIDTH      8
+#define  CSR_CONF3_STLBWAYS            (_ULCAST_(0xff) << CSR_CONF3_STLBWAYS_SHIFT)
+#define  CSR_CONF3_MTLBSIZE_SHIFT      4
+#define  CSR_CONF3_MTLBSIZE_WIDTH      8
+#define  CSR_CONF3_MTLBSIZE            (_ULCAST_(0xff) << CSR_CONF3_MTLBSIZE_SHIFT)
+#define  CSR_CONF3_TLBTYPE_SHIFT       0
+#define  CSR_CONF3_TLBTYPE_WIDTH       4
+#define  CSR_CONF3_TLBTYPE             (_ULCAST_(0xf) << CSR_CONF3_TLBTYPE_SHIFT)
+
+/* KSave registers */
+#define LOONGARCH_CSR_KS0              0x30
+#define LOONGARCH_CSR_KS1              0x31
+#define LOONGARCH_CSR_KS2              0x32
+#define LOONGARCH_CSR_KS3              0x33
+#define LOONGARCH_CSR_KS4              0x34
+#define LOONGARCH_CSR_KS5              0x35
+#define LOONGARCH_CSR_KS6              0x36
+#define LOONGARCH_CSR_KS7              0x37
+#define LOONGARCH_CSR_KS8              0x38
+
+/* KS0, KS1 and KS2 are statically allocated for exception handling */
+#define EXCEPTION_KS0                  LOONGARCH_CSR_KS0
+#define EXCEPTION_KS1                  LOONGARCH_CSR_KS1
+#define EXCEPTION_KS2                  LOONGARCH_CSR_KS2
+#define EXC_KSAVE_MASK                 (1 << 0 | 1 << 1 | 1 << 2)
+
+/* KS3 is statically allocated for the percpu-data base */
+#define PERCPU_BASE_KS                 LOONGARCH_CSR_KS3
+#define PERCPU_KSAVE_MASK              (1 << 3)
+
+/* KS4 and KS5 are statically allocated for KVM */
+#define KVM_VCPU_KS                    LOONGARCH_CSR_KS4
+#define KVM_TEMP_KS                    LOONGARCH_CSR_KS5
+#define KVM_KSAVE_MASK                 (1 << 4 | 1 << 5)
+
+/* Timer registers */
+#define LOONGARCH_CSR_TMID             0x40    /* Timer ID */
+
+#define LOONGARCH_CSR_TCFG             0x41    /* Timer config */
+#define  CSR_TCFG_VAL_SHIFT            2
+#define         CSR_TCFG_VAL_WIDTH             48
+#define  CSR_TCFG_VAL                  (_ULCAST_(0x3fffffffffff) << CSR_TCFG_VAL_SHIFT)
+#define  CSR_TCFG_PERIOD_SHIFT         1
+#define  CSR_TCFG_PERIOD               (_ULCAST_(0x1) << CSR_TCFG_PERIOD_SHIFT)
+#define  CSR_TCFG_EN                   (_ULCAST_(0x1))
+
+#define LOONGARCH_CSR_TVAL             0x42    /* Timer value */
+
+#define LOONGARCH_CSR_CNTC             0x43    /* Timer offset */
+
+#define LOONGARCH_CSR_TINTCLR          0x44    /* Timer interrupt clear */
+#define  CSR_TINTCLR_TI_SHIFT          0
+#define  CSR_TINTCLR_TI                        (1 << CSR_TINTCLR_TI_SHIFT)
+
+/* Guest registers */
+#define LOONGARCH_CSR_GSTAT            0x50    /* Guest status */
+#define  CSR_GSTAT_GID_SHIFT           16
+#define  CSR_GSTAT_GID_WIDTH           8
+#define  CSR_GSTAT_GID                 (_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT)
+#define  CSR_GSTAT_GIDBIT_SHIFT                4
+#define  CSR_GSTAT_GIDBIT_WIDTH                6
+#define  CSR_GSTAT_GIDBIT              (_ULCAST_(0x3f) << CSR_GSTAT_GIDBIT_SHIFT)
+#define  CSR_GSTAT_PVM_SHIFT           1
+#define  CSR_GSTAT_PVM                 (_ULCAST_(0x1) << CSR_GSTAT_PVM_SHIFT)
+#define  CSR_GSTAT_VM_SHIFT            0
+#define  CSR_GSTAT_VM                  (_ULCAST_(0x1) << CSR_GSTAT_VM_SHIFT)
+
+#define LOONGARCH_CSR_GCFG             0x51    /* Guest config */
+#define  CSR_GCFG_GPERF_SHIFT          24
+#define  CSR_GCFG_GPERF_WIDTH          3
+#define  CSR_GCFG_GPERF                        (_ULCAST_(0x7) << CSR_GCFG_GPERF_SHIFT)
+#define  CSR_GCFG_GCI_SHIFT            20
+#define  CSR_GCFG_GCI_WIDTH            2
+#define  CSR_GCFG_GCI                  (_ULCAST_(0x3) << CSR_GCFG_GCI_SHIFT)
+#define  CSR_GCFG_GCI_ALL              (_ULCAST_(0x0) << CSR_GCFG_GCI_SHIFT)
+#define  CSR_GCFG_GCI_HIT              (_ULCAST_(0x1) << CSR_GCFG_GCI_SHIFT)
+#define  CSR_GCFG_GCI_SECURE           (_ULCAST_(0x2) << CSR_GCFG_GCI_SHIFT)
+#define  CSR_GCFG_GCIP_SHIFT           16
+#define  CSR_GCFG_GCIP                 (_ULCAST_(0xf) << CSR_GCFG_GCIP_SHIFT)
+#define  CSR_GCFG_GCIP_ALL             (_ULCAST_(0x1) << CSR_GCFG_GCIP_SHIFT)
+#define  CSR_GCFG_GCIP_HIT             (_ULCAST_(0x1) << (CSR_GCFG_GCIP_SHIFT + 1))
+#define  CSR_GCFG_GCIP_SECURE          (_ULCAST_(0x1) << (CSR_GCFG_GCIP_SHIFT + 2))
+#define  CSR_GCFG_TORU_SHIFT           15
+#define  CSR_GCFG_TORU                 (_ULCAST_(0x1) << CSR_GCFG_TORU_SHIFT)
+#define  CSR_GCFG_TORUP_SHIFT          14
+#define  CSR_GCFG_TORUP                        (_ULCAST_(0x1) << CSR_GCFG_TORUP_SHIFT)
+#define  CSR_GCFG_TOP_SHIFT            13
+#define  CSR_GCFG_TOP                  (_ULCAST_(0x1) << CSR_GCFG_TOP_SHIFT)
+#define  CSR_GCFG_TOPP_SHIFT           12
+#define  CSR_GCFG_TOPP                 (_ULCAST_(0x1) << CSR_GCFG_TOPP_SHIFT)
+#define  CSR_GCFG_TOE_SHIFT            11
+#define  CSR_GCFG_TOE                  (_ULCAST_(0x1) << CSR_GCFG_TOE_SHIFT)
+#define  CSR_GCFG_TOEP_SHIFT           10
+#define  CSR_GCFG_TOEP                 (_ULCAST_(0x1) << CSR_GCFG_TOEP_SHIFT)
+#define  CSR_GCFG_TIT_SHIFT            9
+#define  CSR_GCFG_TIT                  (_ULCAST_(0x1) << CSR_GCFG_TIT_SHIFT)
+#define  CSR_GCFG_TITP_SHIFT           8
+#define  CSR_GCFG_TITP                 (_ULCAST_(0x1) << CSR_GCFG_TITP_SHIFT)
+#define  CSR_GCFG_SIT_SHIFT            7
+#define  CSR_GCFG_SIT                  (_ULCAST_(0x1) << CSR_GCFG_SIT_SHIFT)
+#define  CSR_GCFG_SITP_SHIFT           6
+#define  CSR_GCFG_SITP                 (_ULCAST_(0x1) << CSR_GCFG_SITP_SHIFT)
+#define  CSR_GCFG_MATC_SHIFT           4
+#define  CSR_GCFG_MATC_WIDTH           2
+#define  CSR_GCFG_MATC_MASK            (_ULCAST_(0x3) << CSR_GCFG_MATC_SHIFT)
+#define  CSR_GCFG_MATC_GUEST           (_ULCAST_(0x0) << CSR_GCFG_MATC_SHIFT)
+#define  CSR_GCFG_MATC_ROOT            (_ULCAST_(0x1) << CSR_GCFG_MATC_SHIFT)
+#define  CSR_GCFG_MATC_NEST            (_ULCAST_(0x2) << CSR_GCFG_MATC_SHIFT)
+
+#define LOONGARCH_CSR_GINTC            0x52    /* Guest interrupt control */
+#define  CSR_GINTC_HC_SHIFT            16
+#define  CSR_GINTC_HC_WIDTH            8
+#define  CSR_GINTC_HC                  (_ULCAST_(0xff) << CSR_GINTC_HC_SHIFT)
+#define  CSR_GINTC_PIP_SHIFT           8
+#define  CSR_GINTC_PIP_WIDTH           8
+#define  CSR_GINTC_PIP                 (_ULCAST_(0xff) << CSR_GINTC_PIP_SHIFT)
+#define  CSR_GINTC_VIP_SHIFT           0
+#define  CSR_GINTC_VIP_WIDTH           8
+#define  CSR_GINTC_VIP                 (_ULCAST_(0xff))
+
+#define LOONGARCH_CSR_GCNTC            0x53    /* Guest timer offset */
+
+/* LLBCTL register */
+#define LOONGARCH_CSR_LLBCTL           0x60    /* LLBit control */
+#define  CSR_LLBCTL_ROLLB_SHIFT                0
+#define  CSR_LLBCTL_ROLLB              (_ULCAST_(1) << CSR_LLBCTL_ROLLB_SHIFT)
+#define  CSR_LLBCTL_WCLLB_SHIFT                1
+#define  CSR_LLBCTL_WCLLB              (_ULCAST_(1) << CSR_LLBCTL_WCLLB_SHIFT)
+#define  CSR_LLBCTL_KLO_SHIFT          2
+#define  CSR_LLBCTL_KLO                        (_ULCAST_(1) << CSR_LLBCTL_KLO_SHIFT)
+
+/* Implementation-dependent registers */
+#define LOONGARCH_CSR_IMPCTL1          0x80    /* Loongson config1 */
+#define  CSR_MISPEC_SHIFT              20
+#define  CSR_MISPEC_WIDTH              8
+#define  CSR_MISPEC                    (_ULCAST_(0xff) << CSR_MISPEC_SHIFT)
+#define  CSR_SSEN_SHIFT                        18
+#define  CSR_SSEN                      (_ULCAST_(1) << CSR_SSEN_SHIFT)
+#define  CSR_SCRAND_SHIFT              17
+#define  CSR_SCRAND                    (_ULCAST_(1) << CSR_SCRAND_SHIFT)
+#define  CSR_LLEXCL_SHIFT              16
+#define  CSR_LLEXCL                    (_ULCAST_(1) << CSR_LLEXCL_SHIFT)
+#define  CSR_DISVC_SHIFT               15
+#define  CSR_DISVC                     (_ULCAST_(1) << CSR_DISVC_SHIFT)
+#define  CSR_VCLRU_SHIFT               14
+#define  CSR_VCLRU                     (_ULCAST_(1) << CSR_VCLRU_SHIFT)
+#define  CSR_DCLRU_SHIFT               13
+#define  CSR_DCLRU                     (_ULCAST_(1) << CSR_DCLRU_SHIFT)
+#define  CSR_FASTLDQ_SHIFT             12
+#define  CSR_FASTLDQ                   (_ULCAST_(1) << CSR_FASTLDQ_SHIFT)
+#define  CSR_USERCAC_SHIFT             11
+#define  CSR_USERCAC                   (_ULCAST_(1) << CSR_USERCAC_SHIFT)
+#define  CSR_ANTI_MISPEC_SHIFT         10
+#define  CSR_ANTI_MISPEC               (_ULCAST_(1) << CSR_ANTI_MISPEC_SHIFT)
+#define  CSR_AUTO_FLUSHSFB_SHIFT       9
+#define  CSR_AUTO_FLUSHSFB             (_ULCAST_(1) << CSR_AUTO_FLUSHSFB_SHIFT)
+#define  CSR_STFILL_SHIFT              8
+#define  CSR_STFILL                    (_ULCAST_(1) << CSR_STFILL_SHIFT)
+#define  CSR_LIFEP_SHIFT               7
+#define  CSR_LIFEP                     (_ULCAST_(1) << CSR_LIFEP_SHIFT)
+#define  CSR_LLSYNC_SHIFT              6
+#define  CSR_LLSYNC                    (_ULCAST_(1) << CSR_LLSYNC_SHIFT)
+#define  CSR_BRBTDIS_SHIFT             5
+#define  CSR_BRBTDIS                   (_ULCAST_(1) << CSR_BRBTDIS_SHIFT)
+#define  CSR_RASDIS_SHIFT              4
+#define  CSR_RASDIS                    (_ULCAST_(1) << CSR_RASDIS_SHIFT)
+#define  CSR_STPRE_SHIFT               2
+#define  CSR_STPRE_WIDTH               2
+#define  CSR_STPRE                     (_ULCAST_(3) << CSR_STPRE_SHIFT)
+#define  CSR_INSTPRE_SHIFT             1
+#define  CSR_INSTPRE                   (_ULCAST_(1) << CSR_INSTPRE_SHIFT)
+#define  CSR_DATAPRE_SHIFT             0
+#define  CSR_DATAPRE                   (_ULCAST_(1) << CSR_DATAPRE_SHIFT)
+
+#define LOONGARCH_CSR_IMPCTL2          0x81    /* Loongson config2 */
+#define  CSR_FLUSH_MTLB_SHIFT          0
+#define  CSR_FLUSH_MTLB                        (_ULCAST_(1) << CSR_FLUSH_MTLB_SHIFT)
+#define  CSR_FLUSH_STLB_SHIFT          1
+#define  CSR_FLUSH_STLB                        (_ULCAST_(1) << CSR_FLUSH_STLB_SHIFT)
+#define  CSR_FLUSH_DTLB_SHIFT          2
+#define  CSR_FLUSH_DTLB                        (_ULCAST_(1) << CSR_FLUSH_DTLB_SHIFT)
+#define  CSR_FLUSH_ITLB_SHIFT          3
+#define  CSR_FLUSH_ITLB                        (_ULCAST_(1) << CSR_FLUSH_ITLB_SHIFT)
+#define  CSR_FLUSH_BTAC_SHIFT          4
+#define  CSR_FLUSH_BTAC                        (_ULCAST_(1) << CSR_FLUSH_BTAC_SHIFT)
+
+#define LOONGARCH_CSR_GNMI             0x82
+
+/* TLB Refill registers */
+#define LOONGARCH_CSR_TLBRENTRY                0x88    /* TLB refill exception entry */
+#define LOONGARCH_CSR_TLBRBADV         0x89    /* TLB refill badvaddr */
+#define LOONGARCH_CSR_TLBRERA          0x8a    /* TLB refill ERA */
+#define LOONGARCH_CSR_TLBRSAVE         0x8b    /* KSave for TLB refill exception */
+#define LOONGARCH_CSR_TLBRELO0         0x8c    /* TLB refill entrylo0 */
+#define LOONGARCH_CSR_TLBRELO1         0x8d    /* TLB refill entrylo1 */
+#define LOONGARCH_CSR_TLBREHI          0x8e    /* TLB refill entryhi */
+#define  CSR_TLBREHI_PS_SHIFT          0
+#define  CSR_TLBREHI_PS                        (_ULCAST_(0x3f) << CSR_TLBREHI_PS_SHIFT)
+#define LOONGARCH_CSR_TLBRPRMD         0x8f    /* TLB refill mode info */
+
+/* Machine Error registers */
+#define LOONGARCH_CSR_MERRCTL          0x90    /* MERRCTL */
+#define LOONGARCH_CSR_MERRINFO1                0x91    /* MError info1 */
+#define LOONGARCH_CSR_MERRINFO2                0x92    /* MError info2 */
+#define LOONGARCH_CSR_MERRENTRY                0x93    /* MError exception entry */
+#define LOONGARCH_CSR_MERRERA          0x94    /* MError exception ERA */
+#define LOONGARCH_CSR_MERRSAVE         0x95    /* KSave for machine error exception */
+
+#define LOONGARCH_CSR_CTAG             0x98    /* TagLo + TagHi */
+
+#define LOONGARCH_CSR_PRID             0xc0
+
+/* Shadow MCSR : 0xc0 ~ 0xff */
+#define LOONGARCH_CSR_MCSR0            0xc0    /* CPUCFG0 and CPUCFG1 */
+#define  MCSR0_INT_IMPL_SHIFT          58
+#define  MCSR0_INT_IMPL                        0
+#define  MCSR0_IOCSR_BRD_SHIFT         57
+#define  MCSR0_IOCSR_BRD               (_ULCAST_(1) << MCSR0_IOCSR_BRD_SHIFT)
+#define  MCSR0_HUGEPG_SHIFT            56
+#define  MCSR0_HUGEPG                  (_ULCAST_(1) << MCSR0_HUGEPG_SHIFT)
+#define  MCSR0_RPLMTLB_SHIFT           55
+#define  MCSR0_RPLMTLB                 (_ULCAST_(1) << MCSR0_RPLMTLB_SHIFT)
+#define  MCSR0_EP_SHIFT                        54
+#define  MCSR0_EP                      (_ULCAST_(1) << MCSR0_EP_SHIFT)
+#define  MCSR0_RI_SHIFT                        53
+#define  MCSR0_RI                      (_ULCAST_(1) << MCSR0_RI_SHIFT)
+#define  MCSR0_UAL_SHIFT               52
+#define  MCSR0_UAL                     (_ULCAST_(1) << MCSR0_UAL_SHIFT)
+#define  MCSR0_VABIT_SHIFT             44
+#define  MCSR0_VABIT_WIDTH             8
+#define  MCSR0_VABIT                   (_ULCAST_(0xff) << MCSR0_VABIT_SHIFT)
+#define  VABIT_DEFAULT                 0x2f
+#define  MCSR0_PABIT_SHIFT             36
+#define  MCSR0_PABIT_WIDTH             8
+#define  MCSR0_PABIT                   (_ULCAST_(0xff) << MCSR0_PABIT_SHIFT)
+#define  PABIT_DEFAULT                 0x2f
+#define  MCSR0_IOCSR_SHIFT             35
+#define  MCSR0_IOCSR                   (_ULCAST_(1) << MCSR0_IOCSR_SHIFT)
+#define  MCSR0_PAGING_SHIFT            34
+#define  MCSR0_PAGING                  (_ULCAST_(1) << MCSR0_PAGING_SHIFT)
+#define  MCSR0_GR64_SHIFT              33
+#define  MCSR0_GR64                    (_ULCAST_(1) << MCSR0_GR64_SHIFT)
+#define  GR64_DEFAULT                  1
+#define  MCSR0_GR32_SHIFT              32
+#define  MCSR0_GR32                    (_ULCAST_(1) << MCSR0_GR32_SHIFT)
+#define  GR32_DEFAULT                  0
+#define  MCSR0_PRID_WIDTH              32
+#define  MCSR0_PRID                    0x14C010
+
+#define LOONGARCH_CSR_MCSR1            0xc1    /* CPUCFG2 and CPUCFG3 */
+#define  MCSR1_HPFOLD_SHIFT            43
+#define  MCSR1_HPFOLD                  (_ULCAST_(1) << MCSR1_HPFOLD_SHIFT)
+#define  MCSR1_SPW_LVL_SHIFT           40
+#define  MCSR1_SPW_LVL_WIDTH           3
+#define  MCSR1_SPW_LVL                 (_ULCAST_(7) << MCSR1_SPW_LVL_SHIFT)
+#define  MCSR1_ICACHET_SHIFT           39
+#define  MCSR1_ICACHET                 (_ULCAST_(1) << MCSR1_ICACHET_SHIFT)
+#define  MCSR1_ITLBT_SHIFT             38
+#define  MCSR1_ITLBT                   (_ULCAST_(1) << MCSR1_ITLBT_SHIFT)
+#define  MCSR1_LLDBAR_SHIFT            37
+#define  MCSR1_LLDBAR                  (_ULCAST_(1) << MCSR1_LLDBAR_SHIFT)
+#define  MCSR1_SCDLY_SHIFT             36
+#define  MCSR1_SCDLY                   (_ULCAST_(1) << MCSR1_SCDLY_SHIFT)
+#define  MCSR1_LLEXC_SHIFT             35
+#define  MCSR1_LLEXC                   (_ULCAST_(1) << MCSR1_LLEXC_SHIFT)
+#define  MCSR1_UCACC_SHIFT             34
+#define  MCSR1_UCACC                   (_ULCAST_(1) << MCSR1_UCACC_SHIFT)
+#define  MCSR1_SFB_SHIFT               33
+#define  MCSR1_SFB                     (_ULCAST_(1) << MCSR1_SFB_SHIFT)
+#define  MCSR1_CCDMA_SHIFT             32
+#define  MCSR1_CCDMA                   (_ULCAST_(1) << MCSR1_CCDMA_SHIFT)
+#define  MCSR1_LAMO_SHIFT              22
+#define  MCSR1_LAMO                    (_ULCAST_(1) << MCSR1_LAMO_SHIFT)
+#define  MCSR1_LSPW_SHIFT              21
+#define  MCSR1_LSPW                    (_ULCAST_(1) << MCSR1_LSPW_SHIFT)
+#define  MCSR1_MIPSBT_SHIFT            20
+#define  MCSR1_MIPSBT                  (_ULCAST_(1) << MCSR1_MIPSBT_SHIFT)
+#define  MCSR1_ARMBT_SHIFT             19
+#define  MCSR1_ARMBT                   (_ULCAST_(1) << MCSR1_ARMBT_SHIFT)
+#define  MCSR1_X86BT_SHIFT             18
+#define  MCSR1_X86BT                   (_ULCAST_(1) << MCSR1_X86BT_SHIFT)
+#define  MCSR1_LLFTPVERS_SHIFT         15
+#define  MCSR1_LLFTPVERS_WIDTH         3
+#define  MCSR1_LLFTPVERS               (_ULCAST_(7) << MCSR1_LLFTPVERS_SHIFT)
+#define  MCSR1_LLFTP_SHIFT             14
+#define  MCSR1_LLFTP                   (_ULCAST_(1) << MCSR1_LLFTP_SHIFT)
+#define  MCSR1_VZVERS_SHIFT            11
+#define  MCSR1_VZVERS_WIDTH            3
+#define  MCSR1_VZVERS                  (_ULCAST_(7) << MCSR1_VZVERS_SHIFT)
+#define  MCSR1_VZ_SHIFT                        10
+#define  MCSR1_VZ                      (_ULCAST_(1) << MCSR1_VZ_SHIFT)
+#define  MCSR1_CRYPTO_SHIFT            9
+#define  MCSR1_CRYPTO                  (_ULCAST_(1) << MCSR1_CRYPTO_SHIFT)
+#define  MCSR1_COMPLEX_SHIFT           8
+#define  MCSR1_COMPLEX                 (_ULCAST_(1) << MCSR1_COMPLEX_SHIFT)
+#define  MCSR1_LASX_SHIFT              7
+#define  MCSR1_LASX                    (_ULCAST_(1) << MCSR1_LASX_SHIFT)
+#define  MCSR1_LSX_SHIFT               6
+#define  MCSR1_LSX                     (_ULCAST_(1) << MCSR1_LSX_SHIFT)
+#define  MCSR1_FPVERS_SHIFT            3
+#define  MCSR1_FPVERS_WIDTH            3
+#define  MCSR1_FPVERS                  (_ULCAST_(7) << MCSR1_FPVERS_SHIFT)
+#define  MCSR1_FPDP_SHIFT              2
+#define  MCSR1_FPDP                    (_ULCAST_(1) << MCSR1_FPDP_SHIFT)
+#define  MCSR1_FPSP_SHIFT              1
+#define  MCSR1_FPSP                    (_ULCAST_(1) << MCSR1_FPSP_SHIFT)
+#define  MCSR1_FP_SHIFT                        0
+#define  MCSR1_FP                      (_ULCAST_(1) << MCSR1_FP_SHIFT)
+
+#define LOONGARCH_CSR_MCSR2            0xc2    /* CPUCFG4 and CPUCFG5 */
+#define  MCSR2_CCDIV_SHIFT             48
+#define  MCSR2_CCDIV_WIDTH             16
+#define  MCSR2_CCDIV                   (_ULCAST_(0xffff) << MCSR2_CCDIV_SHIFT)
+#define  MCSR2_CCMUL_SHIFT             32
+#define  MCSR2_CCMUL_WIDTH             16
+#define  MCSR2_CCMUL                   (_ULCAST_(0xffff) << MCSR2_CCMUL_SHIFT)
+#define  MCSR2_CCFREQ_WIDTH            32
+#define  MCSR2_CCFREQ                  (_ULCAST_(0xffffffff))
+#define  CCFREQ_DEFAULT                        0x5f5e100       /* 100MHz */
+
+#define LOONGARCH_CSR_MCSR3            0xc3    /* CPUCFG6 */
+#define  MCSR3_UPM_SHIFT               14
+#define  MCSR3_UPM                     (_ULCAST_(1) << MCSR3_UPM_SHIFT)
+#define  MCSR3_PMBITS_SHIFT            8
+#define  MCSR3_PMBITS_WIDTH            6
+#define  MCSR3_PMBITS                  (_ULCAST_(0x3f) << MCSR3_PMBITS_SHIFT)
+#define  PMBITS_DEFAULT                        0x40
+#define  MCSR3_PMNUM_SHIFT             4
+#define  MCSR3_PMNUM_WIDTH             4
+#define  MCSR3_PMNUM                   (_ULCAST_(0xf) << MCSR3_PMNUM_SHIFT)
+#define  MCSR3_PAMVER_SHIFT            1
+#define  MCSR3_PAMVER_WIDTH            3
+#define  MCSR3_PAMVER                  (_ULCAST_(0x7) << MCSR3_PAMVER_SHIFT)
+#define  MCSR3_PMP_SHIFT               0
+#define  MCSR3_PMP                     (_ULCAST_(1) << MCSR3_PMP_SHIFT)
+
+#define LOONGARCH_CSR_MCSR8            0xc8    /* CPUCFG16 and CPUCFG17 */
+#define  MCSR8_L1I_SIZE_SHIFT          56
+#define  MCSR8_L1I_SIZE_WIDTH          7
+#define  MCSR8_L1I_SIZE                        (_ULCAST_(0x7f) << MCSR8_L1I_SIZE_SHIFT)
+#define  MCSR8_L1I_IDX_SHIFT           48
+#define  MCSR8_L1I_IDX_WIDTH           8
+#define  MCSR8_L1I_IDX                 (_ULCAST_(0xff) << MCSR8_L1I_IDX_SHIFT)
+#define  MCSR8_L1I_WAY_SHIFT           32
+#define  MCSR8_L1I_WAY_WIDTH           16
+#define  MCSR8_L1I_WAY                 (_ULCAST_(0xffff) << MCSR8_L1I_WAY_SHIFT)
+#define  MCSR8_L3DINCL_SHIFT           16
+#define  MCSR8_L3DINCL                 (_ULCAST_(1) << MCSR8_L3DINCL_SHIFT)
+#define  MCSR8_L3DPRIV_SHIFT           15
+#define  MCSR8_L3DPRIV                 (_ULCAST_(1) << MCSR8_L3DPRIV_SHIFT)
+#define  MCSR8_L3DPRE_SHIFT            14
+#define  MCSR8_L3DPRE                  (_ULCAST_(1) << MCSR8_L3DPRE_SHIFT)
+#define  MCSR8_L3IUINCL_SHIFT          13
+#define  MCSR8_L3IUINCL                        (_ULCAST_(1) << MCSR8_L3IUINCL_SHIFT)
+#define  MCSR8_L3IUPRIV_SHIFT          12
+#define  MCSR8_L3IUPRIV                        (_ULCAST_(1) << MCSR8_L3IUPRIV_SHIFT)
+#define  MCSR8_L3IUUNIFY_SHIFT         11
+#define  MCSR8_L3IUUNIFY               (_ULCAST_(1) << MCSR8_L3IUUNIFY_SHIFT)
+#define  MCSR8_L3IUPRE_SHIFT           10
+#define  MCSR8_L3IUPRE                 (_ULCAST_(1) << MCSR8_L3IUPRE_SHIFT)
+#define  MCSR8_L2DINCL_SHIFT           9
+#define  MCSR8_L2DINCL                 (_ULCAST_(1) << MCSR8_L2DINCL_SHIFT)
+#define  MCSR8_L2DPRIV_SHIFT           8
+#define  MCSR8_L2DPRIV                 (_ULCAST_(1) << MCSR8_L2DPRIV_SHIFT)
+#define  MCSR8_L2DPRE_SHIFT            7
+#define  MCSR8_L2DPRE                  (_ULCAST_(1) << MCSR8_L2DPRE_SHIFT)
+#define  MCSR8_L2IUINCL_SHIFT          6
+#define  MCSR8_L2IUINCL                        (_ULCAST_(1) << MCSR8_L2IUINCL_SHIFT)
+#define  MCSR8_L2IUPRIV_SHIFT          5
+#define  MCSR8_L2IUPRIV                        (_ULCAST_(1) << MCSR8_L2IUPRIV_SHIFT)
+#define  MCSR8_L2IUUNIFY_SHIFT         4
+#define  MCSR8_L2IUUNIFY               (_ULCAST_(1) << MCSR8_L2IUUNIFY_SHIFT)
+#define  MCSR8_L2IUPRE_SHIFT           3
+#define  MCSR8_L2IUPRE                 (_ULCAST_(1) << MCSR8_L2IUPRE_SHIFT)
+#define  MCSR8_L1DPRE_SHIFT            2
+#define  MCSR8_L1DPRE                  (_ULCAST_(1) << MCSR8_L1DPRE_SHIFT)
+#define  MCSR8_L1IUUNIFY_SHIFT         1
+#define  MCSR8_L1IUUNIFY               (_ULCAST_(1) << MCSR8_L1IUUNIFY_SHIFT)
+#define  MCSR8_L1IUPRE_SHIFT           0
+#define  MCSR8_L1IUPRE                 (_ULCAST_(1) << MCSR8_L1IUPRE_SHIFT)
+
+#define LOONGARCH_CSR_MCSR9            0xc9    /* CPUCFG18 and CPUCFG19 */
+#define  MCSR9_L2U_SIZE_SHIFT          56
+#define  MCSR9_L2U_SIZE_WIDTH          7
+#define  MCSR9_L2U_SIZE                        (_ULCAST_(0x7f) << MCSR9_L2U_SIZE_SHIFT)
+#define  MCSR9_L2U_IDX_SHIFT           48
+#define  MCSR9_L2U_IDX_WIDTH           8
+#define  MCSR9_L2U_IDX                 (_ULCAST_(0xff) << MCSR9_L2U_IDX_SHIFT)
+#define  MCSR9_L2U_WAY_SHIFT           32
+#define  MCSR9_L2U_WAY_WIDTH           16
+#define  MCSR9_L2U_WAY                 (_ULCAST_(0xffff) << MCSR9_L2U_WAY_SHIFT)
+#define  MCSR9_L1D_SIZE_SHIFT          24
+#define  MCSR9_L1D_SIZE_WIDTH          7
+#define  MCSR9_L1D_SIZE                        (_ULCAST_(0x7f) << MCSR9_L1D_SIZE_SHIFT)
+#define  MCSR9_L1D_IDX_SHIFT           16
+#define  MCSR9_L1D_IDX_WIDTH           8
+#define  MCSR9_L1D_IDX                 (_ULCAST_(0xff) << MCSR9_L1D_IDX_SHIFT)
+#define  MCSR9_L1D_WAY_SHIFT           0
+#define  MCSR9_L1D_WAY_WIDTH           16
+#define  MCSR9_L1D_WAY                 (_ULCAST_(0xffff) << MCSR9_L1D_WAY_SHIFT)
+
+#define LOONGARCH_CSR_MCSR10           0xca    /* CPUCFG20 */
+#define  MCSR10_L3U_SIZE_SHIFT         24
+#define  MCSR10_L3U_SIZE_WIDTH         7
+#define  MCSR10_L3U_SIZE               (_ULCAST_(0x7f) << MCSR10_L3U_SIZE_SHIFT)
+#define  MCSR10_L3U_IDX_SHIFT          16
+#define  MCSR10_L3U_IDX_WIDTH          8
+#define  MCSR10_L3U_IDX                        (_ULCAST_(0xff) << MCSR10_L3U_IDX_SHIFT)
+#define  MCSR10_L3U_WAY_SHIFT          0
+#define  MCSR10_L3U_WAY_WIDTH          16
+#define  MCSR10_L3U_WAY                        (_ULCAST_(0xffff) << MCSR10_L3U_WAY_SHIFT)
+
+#define LOONGARCH_CSR_MCSR24           0xf0    /* cpucfg48 */
+#define  MCSR24_RAMCG_SHIFT            3
+#define  MCSR24_RAMCG                  (_ULCAST_(1) << MCSR24_RAMCG_SHIFT)
+#define  MCSR24_VFPUCG_SHIFT           2
+#define  MCSR24_VFPUCG                 (_ULCAST_(1) << MCSR24_VFPUCG_SHIFT)
+#define  MCSR24_NAPEN_SHIFT            1
+#define  MCSR24_NAPEN                  (_ULCAST_(1) << MCSR24_NAPEN_SHIFT)
+#define  MCSR24_MCSRLOCK_SHIFT         0
+#define  MCSR24_MCSRLOCK               (_ULCAST_(1) << MCSR24_MCSRLOCK_SHIFT)
+
+/* Uncached accelerate windows registers */
+#define LOONGARCH_CSR_UCAWIN           0x100
+#define LOONGARCH_CSR_UCAWIN0_LO       0x102
+#define LOONGARCH_CSR_UCAWIN0_HI       0x103
+#define LOONGARCH_CSR_UCAWIN1_LO       0x104
+#define LOONGARCH_CSR_UCAWIN1_HI       0x105
+#define LOONGARCH_CSR_UCAWIN2_LO       0x106
+#define LOONGARCH_CSR_UCAWIN2_HI       0x107
+#define LOONGARCH_CSR_UCAWIN3_LO       0x108
+#define LOONGARCH_CSR_UCAWIN3_HI       0x109
+
+/* Direct Map windows registers */
+#define LOONGARCH_CSR_DMWIN0           0x180   /* 64 direct map win0: MEM & IF */
+#define LOONGARCH_CSR_DMWIN1           0x181   /* 64 direct map win1: MEM & IF */
+#define LOONGARCH_CSR_DMWIN2           0x182   /* 64 direct map win2: MEM */
+#define LOONGARCH_CSR_DMWIN3           0x183   /* 64 direct map win3: MEM */
+
+/* Direct Map window 0/1 */
+#define CSR_DMW0_PLV0          _CONST64_(1 << 0)
+#define CSR_DMW0_VSEG          _CONST64_(0x8000)
+#define CSR_DMW0_BASE          (CSR_DMW0_VSEG << DMW_PABITS)
+#define CSR_DMW0_INIT          (CSR_DMW0_BASE | CSR_DMW0_PLV0)
+
+#define CSR_DMW1_PLV0          _CONST64_(1 << 0)
+#define CSR_DMW1_MAT           _CONST64_(1 << 4)
+#define CSR_DMW1_VSEG          _CONST64_(0x9000)
+#define CSR_DMW1_BASE          (CSR_DMW1_VSEG << DMW_PABITS)
+#define CSR_DMW1_INIT          (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0)
+
+/* Performance Counter registers */
+#define LOONGARCH_CSR_PERFCTRL0                0x200   /* 32 perf event 0 config */
+#define LOONGARCH_CSR_PERFCNTR0                0x201   /* 64 perf event 0 count value */
+#define LOONGARCH_CSR_PERFCTRL1                0x202   /* 32 perf event 1 config */
+#define LOONGARCH_CSR_PERFCNTR1                0x203   /* 64 perf event 1 count value */
+#define LOONGARCH_CSR_PERFCTRL2                0x204   /* 32 perf event 2 config */
+#define LOONGARCH_CSR_PERFCNTR2                0x205   /* 64 perf event 2 count value */
+#define LOONGARCH_CSR_PERFCTRL3                0x206   /* 32 perf event 3 config */
+#define LOONGARCH_CSR_PERFCNTR3                0x207   /* 64 perf event 3 count value */
+#define  CSR_PERFCTRL_PLV0             (_ULCAST_(1) << 16)
+#define  CSR_PERFCTRL_PLV1             (_ULCAST_(1) << 17)
+#define  CSR_PERFCTRL_PLV2             (_ULCAST_(1) << 18)
+#define  CSR_PERFCTRL_PLV3             (_ULCAST_(1) << 19)
+#define  CSR_PERFCTRL_IE               (_ULCAST_(1) << 20)
+#define  CSR_PERFCTRL_EVENT            0x3ff
+
+/* Debug registers */
+#define LOONGARCH_CSR_MWPC             0x300   /* data breakpoint config */
+#define LOONGARCH_CSR_MWPS             0x301   /* data breakpoint status */
+
+#define LOONGARCH_CSR_DB0ADDR          0x310   /* data breakpoint 0 address */
+#define LOONGARCH_CSR_DB0MASK          0x311   /* data breakpoint 0 mask */
+#define LOONGARCH_CSR_DB0CTL           0x312   /* data breakpoint 0 control */
+#define LOONGARCH_CSR_DB0ASID          0x313   /* data breakpoint 0 asid */
+
+#define LOONGARCH_CSR_DB1ADDR          0x318   /* data breakpoint 1 address */
+#define LOONGARCH_CSR_DB1MASK          0x319   /* data breakpoint 1 mask */
+#define LOONGARCH_CSR_DB1CTL           0x31a   /* data breakpoint 1 control */
+#define LOONGARCH_CSR_DB1ASID          0x31b   /* data breakpoint 1 asid */
+
+#define LOONGARCH_CSR_DB2ADDR          0x320   /* data breakpoint 2 address */
+#define LOONGARCH_CSR_DB2MASK          0x321   /* data breakpoint 2 mask */
+#define LOONGARCH_CSR_DB2CTL           0x322   /* data breakpoint 2 control */
+#define LOONGARCH_CSR_DB2ASID          0x323   /* data breakpoint 2 asid */
+
+#define LOONGARCH_CSR_DB3ADDR          0x328   /* data breakpoint 3 address */
+#define LOONGARCH_CSR_DB3MASK          0x329   /* data breakpoint 3 mask */
+#define LOONGARCH_CSR_DB3CTL           0x32a   /* data breakpoint 3 control */
+#define LOONGARCH_CSR_DB3ASID          0x32b   /* data breakpoint 3 asid */
+
+#define LOONGARCH_CSR_DB4ADDR          0x330   /* data breakpoint 4 address */
+#define LOONGARCH_CSR_DB4MASK          0x331   /* data breakpoint 4 mask */
+#define LOONGARCH_CSR_DB4CTL           0x332   /* data breakpoint 4 control */
+#define LOONGARCH_CSR_DB4ASID          0x333   /* data breakpoint 4 asid */
+
+#define LOONGARCH_CSR_DB5ADDR          0x338   /* data breakpoint 5 address */
+#define LOONGARCH_CSR_DB5MASK          0x339   /* data breakpoint 5 mask */
+#define LOONGARCH_CSR_DB5CTL           0x33a   /* data breakpoint 5 control */
+#define LOONGARCH_CSR_DB5ASID          0x33b   /* data breakpoint 5 asid */
+
+#define LOONGARCH_CSR_DB6ADDR          0x340   /* data breakpoint 6 address */
+#define LOONGARCH_CSR_DB6MASK          0x341   /* data breakpoint 6 mask */
+#define LOONGARCH_CSR_DB6CTL           0x342   /* data breakpoint 6 control */
+#define LOONGARCH_CSR_DB6ASID          0x343   /* data breakpoint 6 asid */
+
+#define LOONGARCH_CSR_DB7ADDR          0x348   /* data breakpoint 7 address */
+#define LOONGARCH_CSR_DB7MASK          0x349   /* data breakpoint 7 mask */
+#define LOONGARCH_CSR_DB7CTL           0x34a   /* data breakpoint 7 control */
+#define LOONGARCH_CSR_DB7ASID          0x34b   /* data breakpoint 7 asid */
+
+#define LOONGARCH_CSR_FWPC             0x380   /* instruction breakpoint config */
+#define LOONGARCH_CSR_FWPS             0x381   /* instruction breakpoint status */
+
+#define LOONGARCH_CSR_IB0ADDR          0x390   /* inst breakpoint 0 address */
+#define LOONGARCH_CSR_IB0MASK          0x391   /* inst breakpoint 0 mask */
+#define LOONGARCH_CSR_IB0CTL           0x392   /* inst breakpoint 0 control */
+#define LOONGARCH_CSR_IB0ASID          0x393   /* inst breakpoint 0 asid */
+
+#define LOONGARCH_CSR_IB1ADDR          0x398   /* inst breakpoint 1 address */
+#define LOONGARCH_CSR_IB1MASK          0x399   /* inst breakpoint 1 mask */
+#define LOONGARCH_CSR_IB1CTL           0x39a   /* inst breakpoint 1 control */
+#define LOONGARCH_CSR_IB1ASID          0x39b   /* inst breakpoint 1 asid */
+
+#define LOONGARCH_CSR_IB2ADDR          0x3a0   /* inst breakpoint 2 address */
+#define LOONGARCH_CSR_IB2MASK          0x3a1   /* inst breakpoint 2 mask */
+#define LOONGARCH_CSR_IB2CTL           0x3a2   /* inst breakpoint 2 control */
+#define LOONGARCH_CSR_IB2ASID          0x3a3   /* inst breakpoint 2 asid */
+
+#define LOONGARCH_CSR_IB3ADDR          0x3a8   /* inst breakpoint 3 address */
+#define LOONGARCH_CSR_IB3MASK          0x3a9   /* inst breakpoint 3 mask */
+#define LOONGARCH_CSR_IB3CTL           0x3aa   /* inst breakpoint 3 control */
+#define LOONGARCH_CSR_IB3ASID          0x3ab   /* inst breakpoint 3 asid */
+
+#define LOONGARCH_CSR_IB4ADDR          0x3b0   /* inst breakpoint 4 address */
+#define LOONGARCH_CSR_IB4MASK          0x3b1   /* inst breakpoint 4 mask */
+#define LOONGARCH_CSR_IB4CTL           0x3b2   /* inst breakpoint 4 control */
+#define LOONGARCH_CSR_IB4ASID          0x3b3   /* inst breakpoint 4 asid */
+
+#define LOONGARCH_CSR_IB5ADDR          0x3b8   /* inst breakpoint 5 address */
+#define LOONGARCH_CSR_IB5MASK          0x3b9   /* inst breakpoint 5 mask */
+#define LOONGARCH_CSR_IB5CTL           0x3ba   /* inst breakpoint 5 control */
+#define LOONGARCH_CSR_IB5ASID          0x3bb   /* inst breakpoint 5 asid */
+
+#define LOONGARCH_CSR_IB6ADDR          0x3c0   /* inst breakpoint 6 address */
+#define LOONGARCH_CSR_IB6MASK          0x3c1   /* inst breakpoint 6 mask */
+#define LOONGARCH_CSR_IB6CTL           0x3c2   /* inst breakpoint 6 control */
+#define LOONGARCH_CSR_IB6ASID          0x3c3   /* inst breakpoint 6 asid */
+
+#define LOONGARCH_CSR_IB7ADDR          0x3c8   /* inst breakpoint 7 address */
+#define LOONGARCH_CSR_IB7MASK          0x3c9   /* inst breakpoint 7 mask */
+#define LOONGARCH_CSR_IB7CTL           0x3ca   /* inst breakpoint 7 control */
+#define LOONGARCH_CSR_IB7ASID          0x3cb   /* inst breakpoint 7 asid */
+
+#define LOONGARCH_CSR_DEBUG            0x500   /* debug config */
+#define LOONGARCH_CSR_DERA             0x501   /* debug era */
+#define LOONGARCH_CSR_DESAVE           0x502   /* debug save */
+
+/*
+ * CSR_ECFG IM
+ */
+#define ECFG0_IM               0x00001fff
+#define ECFGB_SIP0             0
+#define ECFGF_SIP0             (_ULCAST_(1) << ECFGB_SIP0)
+#define ECFGB_SIP1             1
+#define ECFGF_SIP1             (_ULCAST_(1) << ECFGB_SIP1)
+#define ECFGB_IP0              2
+#define ECFGF_IP0              (_ULCAST_(1) << ECFGB_IP0)
+#define ECFGB_IP1              3
+#define ECFGF_IP1              (_ULCAST_(1) << ECFGB_IP1)
+#define ECFGB_IP2              4
+#define ECFGF_IP2              (_ULCAST_(1) << ECFGB_IP2)
+#define ECFGB_IP3              5
+#define ECFGF_IP3              (_ULCAST_(1) << ECFGB_IP3)
+#define ECFGB_IP4              6
+#define ECFGF_IP4              (_ULCAST_(1) << ECFGB_IP4)
+#define ECFGB_IP5              7
+#define ECFGF_IP5              (_ULCAST_(1) << ECFGB_IP5)
+#define ECFGB_IP6              8
+#define ECFGF_IP6              (_ULCAST_(1) << ECFGB_IP6)
+#define ECFGB_IP7              9
+#define ECFGF_IP7              (_ULCAST_(1) << ECFGB_IP7)
+#define ECFGB_PMC              10
+#define ECFGF_PMC              (_ULCAST_(1) << ECFGB_PMC)
+#define ECFGB_TIMER            11
+#define ECFGF_TIMER            (_ULCAST_(1) << ECFGB_TIMER)
+#define ECFGB_IPI              12
+#define ECFGF_IPI              (_ULCAST_(1) << ECFGB_IPI)
+#define ECFGF(hwirq)           (_ULCAST_(1) << hwirq)
+
+#define ESTATF_IP              0x00001fff
+
+#define LOONGARCH_IOCSR_FEATURES       0x8
+#define  IOCSRF_TEMP                   BIT_ULL(0)
+#define  IOCSRF_NODECNT                        BIT_ULL(1)
+#define  IOCSRF_MSI                    BIT_ULL(2)
+#define  IOCSRF_EXTIOI                 BIT_ULL(3)
+#define  IOCSRF_CSRIPI                 BIT_ULL(4)
+#define  IOCSRF_FREQCSR                        BIT_ULL(5)
+#define  IOCSRF_FREQSCALE              BIT_ULL(6)
+#define  IOCSRF_DVFSV1                 BIT_ULL(7)
+#define  IOCSRF_EIODECODE              BIT_ULL(9)
+#define  IOCSRF_FLATMODE               BIT_ULL(10)
+#define  IOCSRF_VM                     BIT_ULL(11)
+
+#define LOONGARCH_IOCSR_VENDOR         0x10
+
+#define LOONGARCH_IOCSR_CPUNAME                0x20
+
+#define LOONGARCH_IOCSR_NODECNT                0x408
+
+#define LOONGARCH_IOCSR_MISC_FUNC      0x420
+#define  IOCSR_MISC_FUNC_TIMER_RESET   BIT_ULL(21)
+#define  IOCSR_MISC_FUNC_EXT_IOI_EN    BIT_ULL(48)
+
+#define LOONGARCH_IOCSR_CPUTEMP                0x428
+
+/* Per-core CSRs, only accessible by the local core */
+#define LOONGARCH_IOCSR_IPI_STATUS     0x1000
+#define LOONGARCH_IOCSR_IPI_EN         0x1004
+#define LOONGARCH_IOCSR_IPI_SET                0x1008
+#define LOONGARCH_IOCSR_IPI_CLEAR      0x100c
+#define LOONGARCH_IOCSR_MBUF0          0x1020
+#define LOONGARCH_IOCSR_MBUF1          0x1028
+#define LOONGARCH_IOCSR_MBUF2          0x1030
+#define LOONGARCH_IOCSR_MBUF3          0x1038
+
+#define LOONGARCH_IOCSR_IPI_SEND       0x1040
+#define  IOCSR_IPI_SEND_IP_SHIFT       0
+#define  IOCSR_IPI_SEND_CPU_SHIFT      16
+#define  IOCSR_IPI_SEND_BLOCKING       BIT(31)
+
+#define LOONGARCH_IOCSR_MBUF_SEND      0x1048
+#define  IOCSR_MBUF_SEND_BLOCKING      BIT_ULL(31)
+#define  IOCSR_MBUF_SEND_BOX_SHIFT     2
+#define  IOCSR_MBUF_SEND_BOX_LO(box)   (box << 1)
+#define  IOCSR_MBUF_SEND_BOX_HI(box)   ((box << 1) + 1)
+#define  IOCSR_MBUF_SEND_CPU_SHIFT     16
+#define  IOCSR_MBUF_SEND_BUF_SHIFT     32
+#define  IOCSR_MBUF_SEND_H32_MASK      0xFFFFFFFF00000000ULL
+
+#define LOONGARCH_IOCSR_ANY_SEND       0x1158
+#define  IOCSR_ANY_SEND_BLOCKING       BIT_ULL(31)
+#define  IOCSR_ANY_SEND_CPU_SHIFT      16
+#define  IOCSR_ANY_SEND_MASK_SHIFT     27
+#define  IOCSR_ANY_SEND_BUF_SHIFT      32
+#define  IOCSR_ANY_SEND_H32_MASK       0xFFFFFFFF00000000ULL
+
+/* Register offset and bit definition for CSR access */
+#define LOONGARCH_IOCSR_TIMER_CFG       0x1060
+#define LOONGARCH_IOCSR_TIMER_TICK      0x1070
+#define  IOCSR_TIMER_CFG_RESERVED       (_ULCAST_(1) << 63)
+#define  IOCSR_TIMER_CFG_PERIODIC       (_ULCAST_(1) << 62)
+#define  IOCSR_TIMER_CFG_EN             (_ULCAST_(1) << 61)
+#define  IOCSR_TIMER_MASK              0x0ffffffffffffULL
+#define  IOCSR_TIMER_INITVAL_RST        (_ULCAST_(0xffff) << 48)
+
+#define LOONGARCH_IOCSR_EXTIOI_NODEMAP_BASE    0x14a0
+#define LOONGARCH_IOCSR_EXTIOI_IPMAP_BASE      0x14c0
+#define LOONGARCH_IOCSR_EXTIOI_EN_BASE         0x1600
+#define LOONGARCH_IOCSR_EXTIOI_BOUNCE_BASE     0x1680
+#define LOONGARCH_IOCSR_EXTIOI_ISR_BASE                0x1800
+#define LOONGARCH_IOCSR_EXTIOI_ROUTE_BASE      0x1c00
+#define IOCSR_EXTIOI_VECTOR_NUM                        256
+
+#ifndef __ASSEMBLY__
+
+static inline u64 drdtime(void)
+{
+       int rID = 0;
+       u64 val = 0;
+
+       __asm__ __volatile__(
+               "rdtime.d %0, %1 \n\t"
+               : "=r"(val), "=r"(rID)
+               :
+               );
+       return val;
+}
+
+static inline unsigned int get_csr_cpuid(void)
+{
+       return csr_read32(LOONGARCH_CSR_CPUID);
+}
+
+static inline void csr_any_send(unsigned int addr, unsigned int data,
+                               unsigned int data_mask, unsigned int cpu)
+{
+       uint64_t val = 0;
+
+       val = IOCSR_ANY_SEND_BLOCKING | addr;
+       val |= (cpu << IOCSR_ANY_SEND_CPU_SHIFT);
+       val |= (data_mask << IOCSR_ANY_SEND_MASK_SHIFT);
+       val |= ((uint64_t)data << IOCSR_ANY_SEND_BUF_SHIFT);
+       iocsr_write64(val, LOONGARCH_IOCSR_ANY_SEND);
+}
+
+static inline unsigned int read_csr_excode(void)
+{
+       return (csr_read32(LOONGARCH_CSR_ESTAT) & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
+}
+
+static inline void write_csr_index(unsigned int idx)
+{
+       csr_xchg32(idx, CSR_TLBIDX_IDXM, LOONGARCH_CSR_TLBIDX);
+}
+
+static inline unsigned int read_csr_pagesize(void)
+{
+       return (csr_read32(LOONGARCH_CSR_TLBIDX) & CSR_TLBIDX_SIZEM) >> CSR_TLBIDX_SIZE;
+}
+
+static inline void write_csr_pagesize(unsigned int size)
+{
+       csr_xchg32(size << CSR_TLBIDX_SIZE, CSR_TLBIDX_SIZEM, LOONGARCH_CSR_TLBIDX);
+}
+
+static inline unsigned int read_csr_tlbrefill_pagesize(void)
+{
+       return (csr_read64(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREHI_PS_SHIFT;
+}
+
+static inline void write_csr_tlbrefill_pagesize(unsigned int size)
+{
+       csr_xchg64(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TLBREHI);
+}
+
+#define read_csr_asid()                        csr_read32(LOONGARCH_CSR_ASID)
+#define write_csr_asid(val)            csr_write32(val, LOONGARCH_CSR_ASID)
+#define read_csr_entryhi()             csr_read64(LOONGARCH_CSR_TLBEHI)
+#define write_csr_entryhi(val)         csr_write64(val, LOONGARCH_CSR_TLBEHI)
+#define read_csr_entrylo0()            csr_read64(LOONGARCH_CSR_TLBELO0)
+#define write_csr_entrylo0(val)                csr_write64(val, LOONGARCH_CSR_TLBELO0)
+#define read_csr_entrylo1()            csr_read64(LOONGARCH_CSR_TLBELO1)
+#define write_csr_entrylo1(val)                csr_write64(val, LOONGARCH_CSR_TLBELO1)
+#define read_csr_ecfg()                        csr_read32(LOONGARCH_CSR_ECFG)
+#define write_csr_ecfg(val)            csr_write32(val, LOONGARCH_CSR_ECFG)
+#define read_csr_estat()               csr_read32(LOONGARCH_CSR_ESTAT)
+#define write_csr_estat(val)           csr_write32(val, LOONGARCH_CSR_ESTAT)
+#define read_csr_tlbidx()              csr_read32(LOONGARCH_CSR_TLBIDX)
+#define write_csr_tlbidx(val)          csr_write32(val, LOONGARCH_CSR_TLBIDX)
+#define read_csr_euen()                        csr_read32(LOONGARCH_CSR_EUEN)
+#define write_csr_euen(val)            csr_write32(val, LOONGARCH_CSR_EUEN)
+#define read_csr_cpuid()               csr_read32(LOONGARCH_CSR_CPUID)
+#define read_csr_prcfg1()              csr_read64(LOONGARCH_CSR_PRCFG1)
+#define write_csr_prcfg1(val)          csr_write64(val, LOONGARCH_CSR_PRCFG1)
+#define read_csr_prcfg2()              csr_read64(LOONGARCH_CSR_PRCFG2)
+#define write_csr_prcfg2(val)          csr_write64(val, LOONGARCH_CSR_PRCFG2)
+#define read_csr_prcfg3()              csr_read64(LOONGARCH_CSR_PRCFG3)
+#define write_csr_prcfg3(val)          csr_write64(val, LOONGARCH_CSR_PRCFG3)
+#define read_csr_stlbpgsize()          csr_read32(LOONGARCH_CSR_STLBPGSIZE)
+#define write_csr_stlbpgsize(val)      csr_write32(val, LOONGARCH_CSR_STLBPGSIZE)
+#define read_csr_rvacfg()              csr_read32(LOONGARCH_CSR_RVACFG)
+#define write_csr_rvacfg(val)          csr_write32(val, LOONGARCH_CSR_RVACFG)
+#define write_csr_tintclear(val)       csr_write32(val, LOONGARCH_CSR_TINTCLR)
+#define read_csr_impctl1()             csr_read64(LOONGARCH_CSR_IMPCTL1)
+#define write_csr_impctl1(val)         csr_write64(val, LOONGARCH_CSR_IMPCTL1)
+#define write_csr_impctl2(val)         csr_write64(val, LOONGARCH_CSR_IMPCTL2)
+
+#define read_csr_perfctrl0()           csr_read64(LOONGARCH_CSR_PERFCTRL0)
+#define read_csr_perfcntr0()           csr_read64(LOONGARCH_CSR_PERFCNTR0)
+#define read_csr_perfctrl1()           csr_read64(LOONGARCH_CSR_PERFCTRL1)
+#define read_csr_perfcntr1()           csr_read64(LOONGARCH_CSR_PERFCNTR1)
+#define read_csr_perfctrl2()           csr_read64(LOONGARCH_CSR_PERFCTRL2)
+#define read_csr_perfcntr2()           csr_read64(LOONGARCH_CSR_PERFCNTR2)
+#define read_csr_perfctrl3()           csr_read64(LOONGARCH_CSR_PERFCTRL3)
+#define read_csr_perfcntr3()           csr_read64(LOONGARCH_CSR_PERFCNTR3)
+#define write_csr_perfctrl0(val)       csr_write64(val, LOONGARCH_CSR_PERFCTRL0)
+#define write_csr_perfcntr0(val)       csr_write64(val, LOONGARCH_CSR_PERFCNTR0)
+#define write_csr_perfctrl1(val)       csr_write64(val, LOONGARCH_CSR_PERFCTRL1)
+#define write_csr_perfcntr1(val)       csr_write64(val, LOONGARCH_CSR_PERFCNTR1)
+#define write_csr_perfctrl2(val)       csr_write64(val, LOONGARCH_CSR_PERFCTRL2)
+#define write_csr_perfcntr2(val)       csr_write64(val, LOONGARCH_CSR_PERFCNTR2)
+#define write_csr_perfctrl3(val)       csr_write64(val, LOONGARCH_CSR_PERFCTRL3)
+#define write_csr_perfcntr3(val)       csr_write64(val, LOONGARCH_CSR_PERFCNTR3)
+
+/*
+ * Manipulate bits in a register.
+ */
+#define __BUILD_CSR_COMMON(name)                               \
+static inline unsigned long                                    \
+set_##name(unsigned long set)                                  \
+{                                                              \
+       unsigned long res, new;                                 \
+                                                               \
+       res = read_##name();                                    \
+       new = res | set;                                        \
+       write_##name(new);                                      \
+                                                               \
+       return res;                                             \
+}                                                              \
+                                                               \
+static inline unsigned long                                    \
+clear_##name(unsigned long clear)                              \
+{                                                              \
+       unsigned long res, new;                                 \
+                                                               \
+       res = read_##name();                                    \
+       new = res & ~clear;                                     \
+       write_##name(new);                                      \
+                                                               \
+       return res;                                             \
+}                                                              \
+                                                               \
+static inline unsigned long                                    \
+change_##name(unsigned long change, unsigned long val)         \
+{                                                              \
+       unsigned long res, new;                                 \
+                                                               \
+       res = read_##name();                                    \
+       new = res & ~change;                                    \
+       new |= (val & change);                                  \
+       write_##name(new);                                      \
+                                                               \
+       return res;                                             \
+}
+
+#define __BUILD_CSR_OP(name)   __BUILD_CSR_COMMON(csr_##name)
+
+__BUILD_CSR_OP(euen)
+__BUILD_CSR_OP(ecfg)
+__BUILD_CSR_OP(tlbidx)
+
+#define set_csr_estat(val)     \
+       csr_xchg32(val, val, LOONGARCH_CSR_ESTAT)
+#define clear_csr_estat(val)   \
+       csr_xchg32(~(val), val, LOONGARCH_CSR_ESTAT)
+
+#endif /* __ASSEMBLY__ */
+
+/* Generic EntryLo bit definitions */
+#define ENTRYLO_V              (_ULCAST_(1) << 0)
+#define ENTRYLO_D              (_ULCAST_(1) << 1)
+#define ENTRYLO_PLV_SHIFT      2
+#define ENTRYLO_PLV            (_ULCAST_(3) << ENTRYLO_PLV_SHIFT)
+#define ENTRYLO_C_SHIFT                4
+#define ENTRYLO_C              (_ULCAST_(3) << ENTRYLO_C_SHIFT)
+#define ENTRYLO_G              (_ULCAST_(1) << 6)
+#define ENTRYLO_NR             (_ULCAST_(1) << 61)
+#define ENTRYLO_NX             (_ULCAST_(1) << 62)
+
+/* Values for PageSize register */
+#define PS_4K          0x0000000c
+#define PS_8K          0x0000000d
+#define PS_16K         0x0000000e
+#define PS_32K         0x0000000f
+#define PS_64K         0x00000010
+#define PS_128K                0x00000011
+#define PS_256K                0x00000012
+#define PS_512K                0x00000013
+#define PS_1M          0x00000014
+#define PS_2M          0x00000015
+#define PS_4M          0x00000016
+#define PS_8M          0x00000017
+#define PS_16M         0x00000018
+#define PS_32M         0x00000019
+#define PS_64M         0x0000001a
+#define PS_128M                0x0000001b
+#define PS_256M                0x0000001c
+#define PS_512M                0x0000001d
+#define PS_1G          0x0000001e
+
+/* Default page size for a given kernel configuration */
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PS_DEFAULT_SIZE PS_4K
+#elif defined(CONFIG_PAGE_SIZE_16KB)
+#define PS_DEFAULT_SIZE PS_16K
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PS_DEFAULT_SIZE PS_64K
+#else
+#error Bad page size configuration!
+#endif
+
+/* Default huge TLB page size for a given kernel configuration */
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PS_HUGE_SIZE   PS_1M
+#elif defined(CONFIG_PAGE_SIZE_16KB)
+#define PS_HUGE_SIZE   PS_16M
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PS_HUGE_SIZE   PS_256M
+#else
+#error Bad page size configuration for hugetlbfs!
+#endif
+
+/* ExStatus.ExcCode */
+#define EXCCODE_RSV            0       /* Reserved */
+#define EXCCODE_TLBL           1       /* TLB miss on a load */
+#define EXCCODE_TLBS           2       /* TLB miss on a store */
+#define EXCCODE_TLBI           3       /* TLB miss on an ifetch */
+#define EXCCODE_TLBM           4       /* TLB modified fault */
+#define EXCCODE_TLBNR          5       /* TLB Read-Inhibit exception */
+#define EXCCODE_TLBNX          6       /* TLB Execution-Inhibit exception */
+#define EXCCODE_TLBPE          7       /* TLB Privilege Error */
+#define EXCCODE_ADE            8       /* Address Error */
+       #define EXSUBCODE_ADEF          0       /* Fetch Instruction */
+       #define EXSUBCODE_ADEM          1       /* Access Memory */
+#define EXCCODE_ALE            9       /* Unaligned Access */
+#define EXCCODE_OOB            10      /* Out of bounds */
+#define EXCCODE_SYS            11      /* System call */
+#define EXCCODE_BP             12      /* Breakpoint */
+#define EXCCODE_INE            13      /* Inst. Not Exist */
+#define EXCCODE_IPE            14      /* Inst. Privileged Error */
+#define EXCCODE_FPDIS          15      /* FPU Disabled */
+#define EXCCODE_LSXDIS         16      /* LSX Disabled */
+#define EXCCODE_LASXDIS                17      /* LASX Disabled */
+#define EXCCODE_FPE            18      /* Floating Point Exception */
+       #define EXCSUBCODE_FPE          0       /* Floating Point Exception */
+       #define EXCSUBCODE_VFPE         1       /* Vector Exception */
+#define EXCCODE_WATCH          19      /* Watch address reference */
+#define EXCCODE_BTDIS          20      /* Binary Trans. Disabled */
+#define EXCCODE_BTE            21      /* Binary Trans. Exception */
+#define EXCCODE_PSI            22      /* Guest Privileged Error */
+#define EXCCODE_HYP            23      /* Hypercall */
+#define EXCCODE_GCM            24      /* Guest CSR modified */
+       #define EXCSUBCODE_GCSC         0       /* Software caused */
+       #define EXCSUBCODE_GCHC         1       /* Hardware caused */
+#define EXCCODE_SE             25      /* Security */
+
+#define EXCCODE_INT_START   64
+#define EXCCODE_SIP0        64
+#define EXCCODE_SIP1        65
+#define EXCCODE_IP0         66
+#define EXCCODE_IP1         67
+#define EXCCODE_IP2         68
+#define EXCCODE_IP3         69
+#define EXCCODE_IP4         70
+#define EXCCODE_IP5         71
+#define EXCCODE_IP6         72
+#define EXCCODE_IP7         73
+#define EXCCODE_PMC         74 /* Performance Counter */
+#define EXCCODE_TIMER       75
+#define EXCCODE_IPI         76
+#define EXCCODE_NMI         77
+#define EXCCODE_INT_END     78
+#define EXCCODE_INT_NUM            (EXCCODE_INT_END - EXCCODE_INT_START)
+
+/* FPU register names */
+#define LOONGARCH_FCSR0        $r0
+#define LOONGARCH_FCSR1        $r1
+#define LOONGARCH_FCSR2        $r2
+#define LOONGARCH_FCSR3        $r3
+
+/* FPU Status Register Values */
+#define FPU_CSR_RSVD   0xe0e0fce0
+
+/*
+ * X the exception cause indicator
+ * E the exception enable
+ * S the sticky/flag bit
+ */
+#define FPU_CSR_ALL_X  0x1f000000
+#define FPU_CSR_INV_X  0x10000000
+#define FPU_CSR_DIV_X  0x08000000
+#define FPU_CSR_OVF_X  0x04000000
+#define FPU_CSR_UDF_X  0x02000000
+#define FPU_CSR_INE_X  0x01000000
+
+#define FPU_CSR_ALL_S  0x001f0000
+#define FPU_CSR_INV_S  0x00100000
+#define FPU_CSR_DIV_S  0x00080000
+#define FPU_CSR_OVF_S  0x00040000
+#define FPU_CSR_UDF_S  0x00020000
+#define FPU_CSR_INE_S  0x00010000
+
+#define FPU_CSR_ALL_E  0x0000001f
+#define FPU_CSR_INV_E  0x00000010
+#define FPU_CSR_DIV_E  0x00000008
+#define FPU_CSR_OVF_E  0x00000004
+#define FPU_CSR_UDF_E  0x00000002
+#define FPU_CSR_INE_E  0x00000001
+
+/* Bits 8 and 9 of FPU Status Register specify the rounding mode */
+#define FPU_CSR_RM     0x300
+#define FPU_CSR_RN     0x000   /* nearest */
+#define FPU_CSR_RZ     0x100   /* towards zero */
+#define FPU_CSR_RU     0x200   /* towards +Infinity */
+#define FPU_CSR_RD     0x300   /* towards -Infinity */
+
+#define read_fcsr(source)      \
+({     \
+       unsigned int __res;     \
+\
+       __asm__ __volatile__(   \
+       "       movfcsr2gr      %0, "__stringify(source)" \n"   \
+       : "=r" (__res));        \
+       __res;  \
+})
+
+#define write_fcsr(dest, val) \
+do {   \
+       __asm__ __volatile__(   \
+       "       movgr2fcsr      %0, "__stringify(dest)" \n"     \
+       : : "r" (val)); \
+} while (0)
+
+#endif /* _ASM_LOONGARCH_H */
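The __BUILD_CSR_OP() block above generates set_/clear_/change_ helpers on top of the csr_read/csr_write accessors, each returning the previous register value. A minimal usage sketch, assuming CSR_EUEN_FPEN is defined earlier in this header (the function name below is illustrative, not from the tree):

	/* Enable the FPU via the generated read-modify-write helper. */
	static inline void sketch_enable_fpu(void)
	{
		unsigned long old_euen;

		old_euen = set_csr_euen(CSR_EUEN_FPEN);	/* OR the enable bit into LOONGARCH_CSR_EUEN */
		(void)old_euen;				/* previous value, handy for save/restore */
	}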
diff --git a/arch/loongarch/include/asm/loongson.h b/arch/loongarch/include/asm/loongson.h
new file mode 100644 (file)
index 0000000..6a80387
--- /dev/null
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGSON_H
+#define __ASM_LOONGSON_H
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+
+extern const struct plat_smp_ops loongson3_smp_ops;
+
+#define LOONGSON_REG(x) \
+       (*(volatile u32 *)((char *)TO_UNCACHE(LOONGSON_REG_BASE) + (x)))
+
+#define LOONGSON_LIO_BASE      0x18000000
+#define LOONGSON_LIO_SIZE      0x00100000      /* 1M */
+#define LOONGSON_LIO_TOP       (LOONGSON_LIO_BASE+LOONGSON_LIO_SIZE-1)
+
+#define LOONGSON_BOOT_BASE     0x1c000000
+#define LOONGSON_BOOT_SIZE     0x02000000      /* 32M */
+#define LOONGSON_BOOT_TOP      (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1)
+
+#define LOONGSON_REG_BASE      0x1fe00000
+#define LOONGSON_REG_SIZE      0x00100000      /* 1M */
+#define LOONGSON_REG_TOP       (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1)
+
+/* GPIO Regs - r/w */
+
+#define LOONGSON_GPIODATA              LOONGSON_REG(0x11c)
+#define LOONGSON_GPIOIE                        LOONGSON_REG(0x120)
+#define LOONGSON_REG_GPIO_BASE          (LOONGSON_REG_BASE + 0x11c)
+
+#define MAX_PACKAGES 16
+
+/* Chip Config register of each physical cpu package */
+extern u64 loongson_chipcfg[MAX_PACKAGES];
+#define LOONGSON_CHIPCFG(id) (*(volatile u32 *)(loongson_chipcfg[id]))
+
+/* Chip Temperature register of each physical cpu package */
+extern u64 loongson_chiptemp[MAX_PACKAGES];
+#define LOONGSON_CHIPTEMP(id) (*(volatile u32 *)(loongson_chiptemp[id]))
+
+/* Freq Control register of each physical cpu package */
+extern u64 loongson_freqctrl[MAX_PACKAGES];
+#define LOONGSON_FREQCTRL(id) (*(volatile u32 *)(loongson_freqctrl[id]))
+
+#define xconf_readl(addr) readl(addr)
+#define xconf_readq(addr) readq(addr)
+
+static inline void xconf_writel(u32 val, volatile void __iomem *addr)
+{
+       asm volatile (
+       "       st.w    %[v], %[hw], 0  \n"
+       "       ld.b    $r0, %[hw], 0   \n"
+       :
+       : [hw] "r" (addr), [v] "r" (val)
+       );
+}
+
+static inline void xconf_writeq(u64 val64, volatile void __iomem *addr)
+{
+       asm volatile (
+       "       st.d    %[v], %[hw], 0  \n"
+       "       ld.b    $r0, %[hw], 0   \n"
+       :
+       : [hw] "r" (addr),  [v] "r" (val64)
+       );
+}
+
+/* ============== LS7A registers =============== */
+#define LS7A_PCH_REG_BASE              0x10000000UL
+/* LPC regs */
+#define LS7A_LPC_REG_BASE              (LS7A_PCH_REG_BASE + 0x00002000)
+/* CHIPCFG regs */
+#define LS7A_CHIPCFG_REG_BASE          (LS7A_PCH_REG_BASE + 0x00010000)
+/* MISC reg base */
+#define LS7A_MISC_REG_BASE             (LS7A_PCH_REG_BASE + 0x00080000)
+/* ACPI regs */
+#define LS7A_ACPI_REG_BASE             (LS7A_MISC_REG_BASE + 0x00050000)
+/* RTC regs */
+#define LS7A_RTC_REG_BASE              (LS7A_MISC_REG_BASE + 0x00050100)
+
+#define LS7A_DMA_CFG                   (volatile void *)TO_UNCACHE(LS7A_CHIPCFG_REG_BASE + 0x041c)
+#define LS7A_DMA_NODE_SHF              8
+#define LS7A_DMA_NODE_MASK             0x1F00
+
+#define LS7A_INT_MASK_REG              (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x020)
+#define LS7A_INT_EDGE_REG              (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x060)
+#define LS7A_INT_CLEAR_REG             (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x080)
+#define LS7A_INT_HTMSI_EN_REG          (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x040)
+#define LS7A_INT_ROUTE_ENTRY_REG       (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x100)
+#define LS7A_INT_HTMSI_VEC_REG         (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x200)
+#define LS7A_INT_STATUS_REG            (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x3a0)
+#define LS7A_INT_POL_REG               (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x3e0)
+#define LS7A_LPC_INT_CTL               (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2000)
+#define LS7A_LPC_INT_ENA               (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2004)
+#define LS7A_LPC_INT_STS               (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2008)
+#define LS7A_LPC_INT_CLR               (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x200c)
+#define LS7A_LPC_INT_POL               (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2010)
+
+#define LS7A_PMCON_SOC_REG             (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x000)
+#define LS7A_PMCON_RESUME_REG          (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x004)
+#define LS7A_PMCON_RTC_REG             (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x008)
+#define LS7A_PM1_EVT_REG               (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x00c)
+#define LS7A_PM1_ENA_REG               (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x010)
+#define LS7A_PM1_CNT_REG               (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x014)
+#define LS7A_PM1_TMR_REG               (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x018)
+#define LS7A_P_CNT_REG                 (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x01c)
+#define LS7A_GPE0_STS_REG              (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x028)
+#define LS7A_GPE0_ENA_REG              (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x02c)
+#define LS7A_RST_CNT_REG               (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x030)
+#define LS7A_WD_SET_REG                        (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x034)
+#define LS7A_WD_TIMER_REG              (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x038)
+#define LS7A_THSENS_CNT_REG            (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x04c)
+#define LS7A_GEN_RTC_1_REG             (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x050)
+#define LS7A_GEN_RTC_2_REG             (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x054)
+#define LS7A_DPM_CFG_REG               (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x400)
+#define LS7A_DPM_STS_REG               (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x404)
+#define LS7A_DPM_CNT_REG               (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x408)
+
+typedef enum {
+       ACPI_PCI_HOTPLUG_STATUS = 1 << 1,
+       ACPI_CPU_HOTPLUG_STATUS = 1 << 2,
+       ACPI_MEM_HOTPLUG_STATUS = 1 << 3,
+       ACPI_POWERBUTTON_STATUS = 1 << 8,
+       ACPI_RTC_WAKE_STATUS    = 1 << 10,
+       ACPI_PCI_WAKE_STATUS    = 1 << 14,
+       ACPI_ANY_WAKE_STATUS    = 1 << 15,
+} AcpiEventStatusBits;
+
+#define HT1LO_OFFSET           0xe0000000000UL
+
+/* PCI Configuration Space Base */
+#define MCFG_EXT_PCICFG_BASE           0xefe00000000UL
+
+/* REG ACCESS */
+#define ls7a_readb(addr)       (*(volatile unsigned char  *)TO_UNCACHE(addr))
+#define ls7a_readw(addr)       (*(volatile unsigned short *)TO_UNCACHE(addr))
+#define ls7a_readl(addr)       (*(volatile unsigned int   *)TO_UNCACHE(addr))
+#define ls7a_readq(addr)       (*(volatile unsigned long  *)TO_UNCACHE(addr))
+#define ls7a_writeb(val, addr) *(volatile unsigned char  *)TO_UNCACHE(addr) = (val)
+#define ls7a_writew(val, addr) *(volatile unsigned short *)TO_UNCACHE(addr) = (val)
+#define ls7a_writel(val, addr) *(volatile unsigned int   *)TO_UNCACHE(addr) = (val)
+#define ls7a_writeq(val, addr) *(volatile unsigned long  *)TO_UNCACHE(addr) = (val)
+
+#endif /* __ASM_LOONGSON_H */
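The ls7a_* accessors at the end of this file take a physical LS7A register address and route every access through the uncached window via TO_UNCACHE(). A hedged sketch (the register offset matches the LS7A_PM1_EVT_REG definition above; the function name is illustrative):

	/* Read the LS7A PM1 event/status register through the uncached window. */
	static inline unsigned int sketch_read_pm1_event(void)
	{
		return ls7a_readl(LS7A_ACPI_REG_BASE + 0x00c);	/* same offset as LS7A_PM1_EVT_REG */
	}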
diff --git a/arch/loongarch/include/asm/mmu.h b/arch/loongarch/include/asm/mmu.h
new file mode 100644 (file)
index 0000000..0cc2d08
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_MMU_H
+#define __ASM_MMU_H
+
+#include <linux/atomic.h>
+#include <linux/spinlock.h>
+
+typedef struct {
+       u64 asid[NR_CPUS];
+       void *vdso;
+} mm_context_t;
+
+#endif /* __ASM_MMU_H */
diff --git a/arch/loongarch/include/asm/mmu_context.h b/arch/loongarch/include/asm/mmu_context.h
new file mode 100644 (file)
index 0000000..9f97c34
--- /dev/null
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Switch an MMU context.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_MMU_CONTEXT_H
+#define _ASM_MMU_CONTEXT_H
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm_types.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm-generic/mm_hooks.h>
+
+/*
+ * All upper bits unused by the hardware are treated
+ * as a software ASID extension (version number).
+ */
+static inline u64 asid_version_mask(unsigned int cpu)
+{
+       return ~(u64)(cpu_asid_mask(&cpu_data[cpu]));
+}
+
+static inline u64 asid_first_version(unsigned int cpu)
+{
+       return cpu_asid_mask(&cpu_data[cpu]) + 1;
+}
+
+#define cpu_context(cpu, mm)   ((mm)->context.asid[cpu])
+#define asid_cache(cpu)                (cpu_data[cpu].asid_cache)
+#define cpu_asid(cpu, mm)      (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
+
+static inline int asid_valid(struct mm_struct *mm, unsigned int cpu)
+{
+       if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))
+               return 0;
+
+       return 1;
+}
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+/* Normal, classic get_new_mmu_context */
+static inline void
+get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
+{
+       u64 asid = asid_cache(cpu);
+
+       if (!((++asid) & cpu_asid_mask(&cpu_data[cpu])))
+               local_flush_tlb_user(); /* start new asid cycle */
+
+       cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
+
+/*
+ * Initialize the context-related info for a new mm_struct
+ * instance.
+ */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+       int i;
+
+       for_each_possible_cpu(i)
+               cpu_context(i, mm) = 0;
+
+       return 0;
+}
+
+static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+                                     struct task_struct *tsk)
+{
+       unsigned int cpu = smp_processor_id();
+
+       /* Check if our ASID is of an older version and thus invalid */
+       if (!asid_valid(next, cpu))
+               get_new_mmu_context(next, cpu);
+
+       write_csr_asid(cpu_asid(cpu, next));
+
+       if (next != &init_mm)
+               csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL);
+       else
+               csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
+
+       /*
+        * Record that this CPU now uses the new mm, so that
+        * IPI TLB flush routines know to target it.
+        */
+       cpumask_set_cpu(cpu, mm_cpumask(next));
+}
+
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+                            struct task_struct *tsk)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       switch_mm_irqs_off(prev, next, tsk);
+       local_irq_restore(flags);
+}
+
+/*
+ * Destroy context-related info for an mm_struct that is about
+ * to be put to rest.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
+
+#define activate_mm(prev, next)        switch_mm(prev, next, current)
+#define deactivate_mm(task, mm)        do { } while (0)
+
+/*
+ * If mm is currently active, we can't really drop it.
+ * Instead, we get a new ASID for it.
+ */
+static inline void
+drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
+{
+       int asid;
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data);
+
+       if (asid == cpu_asid(cpu, mm)) {
+               if (!current->mm || (current->mm == mm)) {
+                       get_new_mmu_context(mm, cpu);
+                       write_csr_asid(cpu_asid(cpu, mm));
+                       goto out;
+               }
+       }
+
+       /* Will get a new context next time */
+       cpu_context(cpu, mm) = 0;
+       cpumask_clear_cpu(cpu, mm_cpumask(mm));
+out:
+       local_irq_restore(flags);
+}
+
+#endif /* _ASM_MMU_CONTEXT_H */
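The ASID scheme above keeps the hardware ASID in the low bits of asid_cache(cpu) and uses all remaining upper bits as a software generation counter: asid_valid() fails as soon as the generations differ, which forces get_new_mmu_context() to hand out a fresh ASID. A standalone arithmetic sketch, assuming a hypothetical 10-bit hardware ASID (the mask width is illustrative only):

	unsigned long asid_mask  = 0x3ff;		/* hypothetical 10-bit hardware ASID */
	unsigned long asid_cache = 0x800 | 0x12;	/* generation 2, ASID 0x12 */
	unsigned long mm_context = 0x400 | 0x12;	/* generation 1, same ASID value */

	/* mirrors asid_valid(): any difference in the generation bits marks the context stale */
	int valid = !((mm_context ^ asid_cache) & ~asid_mask);	/* evaluates to 0 here */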
diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h
new file mode 100644 (file)
index 0000000..fe67d0b
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen (chenhuacai@loongson.cn)
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_MMZONE_H_
+#define _ASM_MMZONE_H_
+
+#include <asm/page.h>
+#include <asm/numa.h>
+
+extern struct pglist_data *node_data[];
+
+#define NODE_DATA(nid) (node_data[(nid)])
+
+extern void setup_zero_pages(void);
+
+#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/loongarch/include/asm/module.h b/arch/loongarch/include/asm/module.h
new file mode 100644 (file)
index 0000000..9f6718d
--- /dev/null
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_MODULE_H
+#define _ASM_MODULE_H
+
+#include <asm/inst.h>
+#include <asm-generic/module.h>
+
+#define RELA_STACK_DEPTH 16
+
+struct mod_section {
+       Elf_Shdr *shdr;
+       int num_entries;
+       int max_entries;
+};
+
+struct mod_arch_specific {
+       struct mod_section plt;
+       struct mod_section plt_idx;
+};
+
+struct plt_entry {
+       u32 inst_lu12iw;
+       u32 inst_lu32id;
+       u32 inst_lu52id;
+       u32 inst_jirl;
+};
+
+struct plt_idx_entry {
+       unsigned long symbol_addr;
+};
+
+Elf_Addr module_emit_plt_entry(struct module *mod, unsigned long val);
+
+static inline struct plt_entry emit_plt_entry(unsigned long val)
+{
+       u32 lu12iw, lu32id, lu52id, jirl;
+
+       lu12iw = (lu12iw_op << 25 | (((val >> 12) & 0xfffff) << 5) | LOONGARCH_GPR_T1);
+       lu32id = larch_insn_gen_lu32id(LOONGARCH_GPR_T1, ADDR_IMM(val, LU32ID));
+       lu52id = larch_insn_gen_lu52id(LOONGARCH_GPR_T1, LOONGARCH_GPR_T1, ADDR_IMM(val, LU52ID));
+       jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, 0, (val & 0xfff));
+
+       return (struct plt_entry) { lu12iw, lu32id, lu52id, jirl };
+}
+
+static inline struct plt_idx_entry emit_plt_idx_entry(unsigned long val)
+{
+       return (struct plt_idx_entry) { val };
+}
+
+static inline int get_plt_idx(unsigned long val, const struct mod_section *sec)
+{
+       int i;
+       struct plt_idx_entry *plt_idx = (struct plt_idx_entry *)sec->shdr->sh_addr;
+
+       for (i = 0; i < sec->num_entries; i++) {
+               if (plt_idx[i].symbol_addr == val)
+                       return i;
+       }
+
+       return -1;
+}
+
+static inline struct plt_entry *get_plt_entry(unsigned long val,
+                                     const struct mod_section *sec_plt,
+                                     const struct mod_section *sec_plt_idx)
+{
+       int plt_idx = get_plt_idx(val, sec_plt_idx);
+       struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr;
+
+       if (plt_idx < 0)
+               return NULL;
+
+       return plt + plt_idx;
+}
+
+#endif /* _ASM_MODULE_H */
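emit_plt_entry() materializes a full 64-bit target address in $t1 by splitting it across four instructions and then jumping through it. A hedged sketch of that split (field boundaries follow the masks and ADDR_IMM() selectors used above; this is not the in-tree helper itself):

	static inline void sketch_plt_split(unsigned long val, unsigned long *lo12,
					    unsigned long *mid20, unsigned long *hi20,
					    unsigned long *top12)
	{
		*lo12  = val & 0xfff;			/* jirl displacement */
		*mid20 = (val >> 12) & 0xfffff;		/* lu12i.w immediate */
		*hi20  = (val >> 32) & 0xfffff;		/* lu32i.d immediate */
		*top12 = (val >> 52) & 0xfff;		/* lu52i.d immediate */
	}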
diff --git a/arch/loongarch/include/asm/module.lds.h b/arch/loongarch/include/asm/module.lds.h
new file mode 100644 (file)
index 0000000..31c1c0d
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */
+SECTIONS {
+       . = ALIGN(4);
+       .plt : { BYTE(0) }
+       .plt.idx : { BYTE(0) }
+}
diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h
new file mode 100644 (file)
index 0000000..27f319b
--- /dev/null
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_NUMA_H
+#define _ASM_LOONGARCH_NUMA_H
+
+#include <linux/nodemask.h>
+
+#define NODE_ADDRSPACE_SHIFT 44
+
+#define pa_to_nid(addr)                (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
+#define nid_to_addrbase(nid)   (_ULCAST_(nid) << NODE_ADDRSPACE_SHIFT)
+
+#ifdef CONFIG_NUMA
+
+extern int numa_off;
+extern s16 __cpuid_to_node[CONFIG_NR_CPUS];
+extern nodemask_t numa_nodes_parsed __initdata;
+
+struct numa_memblk {
+       u64                     start;
+       u64                     end;
+       int                     nid;
+};
+
+#define NR_NODE_MEMBLKS                (MAX_NUMNODES*2)
+struct numa_meminfo {
+       int                     nr_blks;
+       struct numa_memblk      blk[NR_NODE_MEMBLKS];
+};
+
+extern int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+
+extern void __init early_numa_add_cpu(int cpuid, s16 node);
+extern void numa_add_cpu(unsigned int cpu);
+extern void numa_remove_cpu(unsigned int cpu);
+
+static inline void numa_clear_node(int cpu)
+{
+}
+
+static inline void set_cpuid_to_node(int cpuid, s16 node)
+{
+       __cpuid_to_node[cpuid] = node;
+}
+
+extern int early_cpu_to_node(int cpu);
+
+#else
+
+static inline void early_numa_add_cpu(int cpuid, s16 node)     { }
+static inline void numa_add_cpu(unsigned int cpu)              { }
+static inline void numa_remove_cpu(unsigned int cpu)           { }
+
+static inline int early_cpu_to_node(int cpu)
+{
+       return 0;
+}
+
+#endif /* CONFIG_NUMA */
+
+#endif /* _ASM_LOONGARCH_NUMA_H */
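With NODE_ADDRSPACE_SHIFT at 44, pa_to_nid() and nid_to_addrbase() simply move the node ID in and out of physical address bits 44-47. A small worked sketch (the address value is made up for illustration):

	unsigned long addr = 0x100000002000UL;			/* some address on node 1 */
	unsigned long nid  = (addr & 0xf00000000000UL) >> 44;	/* pa_to_nid(addr) == 1 */
	unsigned long base = nid << 44;				/* nid_to_addrbase(1) == 0x100000000000 */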
diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
new file mode 100644 (file)
index 0000000..3dba498
--- /dev/null
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PAGE_H
+#define _ASM_PAGE_H
+
+#include <linux/const.h>
+
+/*
+ * PAGE_SHIFT determines the page size
+ */
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PAGE_SHIFT     12
+#endif
+#ifdef CONFIG_PAGE_SIZE_16KB
+#define PAGE_SHIFT     14
+#endif
+#ifdef CONFIG_PAGE_SIZE_64KB
+#define PAGE_SHIFT     16
+#endif
+#define PAGE_SIZE      (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK      (~(PAGE_SIZE - 1))
+
+#define HPAGE_SHIFT    (PAGE_SHIFT + PAGE_SHIFT - 3)
+#define HPAGE_SIZE     (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK     (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/kernel.h>
+#include <linux/pfn.h>
+
+#define MAX_DMA32_PFN  (1UL << (32 - PAGE_SHIFT))
+
+/*
+ * It's normally defined only for FLATMEM config but it's
+ * used in our early mem init code for all memory models.
+ * So always define it.
+ */
+#define ARCH_PFN_OFFSET        PFN_UP(PHYS_OFFSET)
+
+extern void clear_page(void *page);
+extern void copy_page(void *to, void *from);
+
+#define clear_user_page(page, vaddr, pg)       clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
+
+extern unsigned long shm_align_mask;
+
+struct page;
+struct vm_area_struct;
+void copy_user_highpage(struct page *to, struct page *from,
+             unsigned long vaddr, struct vm_area_struct *vma);
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+typedef struct { unsigned long pte; } pte_t;
+#define pte_val(x)     ((x).pte)
+#define __pte(x)       ((pte_t) { (x) })
+typedef struct page *pgtable_t;
+
+typedef struct { unsigned long pgd; } pgd_t;
+#define pgd_val(x)     ((x).pgd)
+#define __pgd(x)       ((pgd_t) { (x) })
+
+/*
+ * Manipulate page protection bits
+ */
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define pgprot_val(x)  ((x).pgprot)
+#define __pgprot(x)    ((pgprot_t) { (x) })
+#define pte_pgprot(x)  __pgprot(pte_val(x) & ~_PFN_MASK)
+
+#define ptep_buddy(x)  ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
+
+/*
+ * __pa()/__va() should be used only during mem init.
+ */
+#define __pa(x)                PHYSADDR(x)
+#define __va(x)                ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+
+#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
+
+#ifdef CONFIG_FLATMEM
+
+static inline int pfn_valid(unsigned long pfn)
+{
+       /* avoid <linux/mm.h> include hell */
+       extern unsigned long max_mapnr;
+       unsigned long pfn_offset = ARCH_PFN_OFFSET;
+
+       return pfn >= pfn_offset && pfn < max_mapnr;
+}
+
+#endif
+
+#define virt_to_pfn(kaddr)     PFN_DOWN(virt_to_phys((void *)(kaddr)))
+#define virt_to_page(kaddr)    pfn_to_page(virt_to_pfn(kaddr))
+
+extern int __virt_addr_valid(volatile void *kaddr);
+#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))
+
+#define VM_DATA_DEFAULT_FLAGS \
+       (VM_READ | VM_WRITE | \
+        ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+        VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_PAGE_H */
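HPAGE_SHIFT above follows from a last-level table page holding PAGE_SIZE / sizeof(pte_t) = 1 << (PAGE_SHIFT - 3) entries, so one PMD entry (one huge page) covers PAGE_SHIFT + PAGE_SHIFT - 3 address bits. Worked out per configuration (and, on my reading, PS_HUGE_SIZE in loongarch.h sits one step below these sizes because a huge page occupies a buddy pair of TLB entries):

	4KB  pages: HPAGE_SHIFT = 12 + 9  = 21  ->   2MB huge pages
	16KB pages: HPAGE_SHIFT = 14 + 11 = 25  ->  32MB huge pages
	64KB pages: HPAGE_SHIFT = 16 + 13 = 29  -> 512MB huge pages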
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
new file mode 100644 (file)
index 0000000..34f15a6
--- /dev/null
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_PERCPU_H
+#define __ASM_PERCPU_H
+
+#include <asm/cmpxchg.h>
+
+/* Use r21 for fast access */
+register unsigned long __my_cpu_offset __asm__("$r21");
+
+static inline void set_my_cpu_offset(unsigned long off)
+{
+       __my_cpu_offset = off;
+       csr_write64(off, PERCPU_BASE_KS);
+}
+#define __my_cpu_offset __my_cpu_offset
+
+#define PERCPU_OP(op, asm_op, c_op)                                    \
+static inline unsigned long __percpu_##op(void *ptr,                   \
+                       unsigned long val, int size)                    \
+{                                                                      \
+       unsigned long ret;                                              \
+                                                                       \
+       switch (size) {                                                 \
+       case 4:                                                         \
+               __asm__ __volatile__(                                   \
+               "am"#asm_op".w" " %[ret], %[val], %[ptr]        \n"             \
+               : [ret] "=&r" (ret), [ptr] "+ZB"(*(u32 *)ptr)           \
+               : [val] "r" (val));                                     \
+               break;                                                  \
+       case 8:                                                         \
+               __asm__ __volatile__(                                   \
+               "am"#asm_op".d" " %[ret], %[val], %[ptr]        \n"             \
+               : [ret] "=&r" (ret), [ptr] "+ZB"(*(u64 *)ptr)           \
+               : [val] "r" (val));                                     \
+               break;                                                  \
+       default:                                                        \
+               ret = 0;                                                \
+               BUILD_BUG();                                            \
+       }                                                               \
+                                                                       \
+       return ret c_op val;                                            \
+}
+
+PERCPU_OP(add, add, +)
+PERCPU_OP(and, and, &)
+PERCPU_OP(or, or, |)
+#undef PERCPU_OP
+
+static inline unsigned long __percpu_read(void *ptr, int size)
+{
+       unsigned long ret;
+
+       switch (size) {
+       case 1:
+               __asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr]       \n"
+               : [ret] "=&r"(ret)
+               : [ptr] "r"(ptr)
+               : "memory");
+               break;
+       case 2:
+               __asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr]       \n"
+               : [ret] "=&r"(ret)
+               : [ptr] "r"(ptr)
+               : "memory");
+               break;
+       case 4:
+               __asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr]       \n"
+               : [ret] "=&r"(ret)
+               : [ptr] "r"(ptr)
+               : "memory");
+               break;
+       case 8:
+               __asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr]       \n"
+               : [ret] "=&r"(ret)
+               : [ptr] "r"(ptr)
+               : "memory");
+               break;
+       default:
+               ret = 0;
+               BUILD_BUG();
+       }
+
+       return ret;
+}
+
+static inline void __percpu_write(void *ptr, unsigned long val, int size)
+{
+       switch (size) {
+       case 1:
+               __asm__ __volatile__("stx.b %[val], $r21, %[ptr]        \n"
+               :
+               : [val] "r" (val), [ptr] "r" (ptr)
+               : "memory");
+               break;
+       case 2:
+               __asm__ __volatile__("stx.h %[val], $r21, %[ptr]        \n"
+               :
+               : [val] "r" (val), [ptr] "r" (ptr)
+               : "memory");
+               break;
+       case 4:
+               __asm__ __volatile__("stx.w %[val], $r21, %[ptr]        \n"
+               :
+               : [val] "r" (val), [ptr] "r" (ptr)
+               : "memory");
+               break;
+       case 8:
+               __asm__ __volatile__("stx.d %[val], $r21, %[ptr]        \n"
+               :
+               : [val] "r" (val), [ptr] "r" (ptr)
+               : "memory");
+               break;
+       default:
+               BUILD_BUG();
+       }
+}
+
+static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+                                               int size)
+{
+       switch (size) {
+       case 4:
+               return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
+
+       case 8:
+               return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val);
+
+       default:
+               BUILD_BUG();
+       }
+
+       return 0;
+}
+
+/* this_cpu_cmpxchg */
+#define _protect_cmpxchg_local(pcp, o, n)                      \
+({                                                             \
+       typeof(*raw_cpu_ptr(&(pcp))) __ret;                     \
+       preempt_disable_notrace();                              \
+       __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);       \
+       preempt_enable_notrace();                               \
+       __ret;                                                  \
+})
+
+#define _percpu_read(pcp)                                              \
+({                                                                     \
+       typeof(pcp) __retval;                                           \
+       __retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp));     \
+       __retval;                                                       \
+})
+
+#define _percpu_write(pcp, val)                                                \
+do {                                                                   \
+       __percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp));      \
+} while (0)
+
+#define _pcp_protect(operation, pcp, val)                      \
+({                                                             \
+       typeof(pcp) __retval;                                   \
+       preempt_disable_notrace();                              \
+       __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),  \
+                                         (val), sizeof(pcp));  \
+       preempt_enable_notrace();                               \
+       __retval;                                               \
+})
+
+#define _percpu_add(pcp, val) \
+       _pcp_protect(__percpu_add, pcp, val)
+
+#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
+
+#define _percpu_and(pcp, val) \
+       _pcp_protect(__percpu_and, pcp, val)
+
+#define _percpu_or(pcp, val) \
+       _pcp_protect(__percpu_or, pcp, val)
+
+#define _percpu_xchg(pcp, val) ((typeof(pcp)) \
+       _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)))
+
+#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
+#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)
+
+#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
+#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)
+
+#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
+#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)
+
+#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
+#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
+
+#define this_cpu_read_1(pcp) _percpu_read(pcp)
+#define this_cpu_read_2(pcp) _percpu_read(pcp)
+#define this_cpu_read_4(pcp) _percpu_read(pcp)
+#define this_cpu_read_8(pcp) _percpu_read(pcp)
+
+#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
+
+#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
+#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
+
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ASM_PERCPU_H */
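The accessors in this file pin the current CPU's per-CPU offset in $r21 (also saved in the PERCPU_BASE_KS scratch CSR), so plain reads and writes become single r21-relative ldx/stx instructions and the RMW ops map to AM* atomics. A hedged usage sketch (the variable and function names are illustrative; the generic this_cpu_* wrappers dispatch to the size-suffixed macros defined above):

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, sketch_counter);

	static unsigned long sketch_bump(void)
	{
		this_cpu_add(sketch_counter, 1);	/* amadd.d on this CPU's copy */
		return this_cpu_read(sketch_counter);	/* ldx.d with $r21 as base */
	}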
diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h
new file mode 100644 (file)
index 0000000..dcb3b17
--- /dev/null
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LOONGARCH_PERF_EVENT_H__
+#define __LOONGARCH_PERF_EVENT_H__
+/* Nothing to show here; the file is required by linux/perf_event.h. */
+#endif /* __LOONGARCH_PERF_EVENT_H__ */
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
new file mode 100644 (file)
index 0000000..b0a57b2
--- /dev/null
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PGALLOC_H
+#define _ASM_PGALLOC_H
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#define __HAVE_ARCH_PMD_ALLOC_ONE
+#define __HAVE_ARCH_PUD_ALLOC_ONE
+#include <asm-generic/pgalloc.h>
+
+static inline void pmd_populate_kernel(struct mm_struct *mm,
+                                      pmd_t *pmd, pte_t *pte)
+{
+       set_pmd(pmd, __pmd((unsigned long)pte));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
+{
+       set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
+}
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+       set_pud(pud, __pud((unsigned long)pmd));
+}
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
+{
+       set_p4d(p4d, __p4d((unsigned long)pud));
+}
+
+#endif /* __PAGETABLE_PUD_FOLDED */
+
+extern void pagetable_init(void);
+
+/*
+ * Initialize a new pmd table with invalid pointers.
+ */
+extern void pmd_init(unsigned long page, unsigned long pagetable);
+
+/*
+ * Initialize a new pgd / pmd table with invalid pointers.
+ */
+extern void pgd_init(unsigned long page);
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+
+#define __pte_free_tlb(tlb, pte, address)                      \
+do {                                                   \
+       pgtable_pte_page_dtor(pte);                     \
+       tlb_remove_page((tlb), pte);                    \
+} while (0)
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+       pmd_t *pmd;
+       struct page *pg;
+
+       pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER);
+       if (!pg)
+               return NULL;
+
+       if (!pgtable_pmd_page_ctor(pg)) {
+               __free_pages(pg, PMD_ORDER);
+               return NULL;
+       }
+
+       pmd = (pmd_t *)page_address(pg);
+       pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
+       return pmd;
+}
+
+#define __pmd_free_tlb(tlb, x, addr)   pmd_free((tlb)->mm, x)
+
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+       pud_t *pud;
+
+       pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER);
+       if (pud)
+               pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
+       return pud;
+}
+
+#define __pud_free_tlb(tlb, x, addr)   pud_free((tlb)->mm, x)
+
+#endif /* __PAGETABLE_PUD_FOLDED */
+
+#endif /* _ASM_PGALLOC_H */
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
new file mode 100644 (file)
index 0000000..3badd11
--- /dev/null
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PGTABLE_BITS_H
+#define _ASM_PGTABLE_BITS_H
+
+/* Page table bits */
+#define        _PAGE_VALID_SHIFT       0
+#define        _PAGE_ACCESSED_SHIFT    0  /* Reuse Valid for Accessed */
+#define        _PAGE_DIRTY_SHIFT       1
+#define        _PAGE_PLV_SHIFT         2  /* 2~3, two bits */
+#define        _CACHE_SHIFT            4  /* 4~5, two bits */
+#define        _PAGE_GLOBAL_SHIFT      6
+#define        _PAGE_HUGE_SHIFT        6  /* HUGE is a PMD bit */
+#define        _PAGE_PRESENT_SHIFT     7
+#define        _PAGE_WRITE_SHIFT       8
+#define        _PAGE_MODIFIED_SHIFT    9
+#define        _PAGE_PROTNONE_SHIFT    10
+#define        _PAGE_SPECIAL_SHIFT     11
+#define        _PAGE_HGLOBAL_SHIFT     12 /* HGlobal is a PMD bit */
+#define        _PAGE_PFN_SHIFT         12
+#define        _PAGE_PFN_END_SHIFT     48
+#define        _PAGE_NO_READ_SHIFT     61
+#define        _PAGE_NO_EXEC_SHIFT     62
+#define        _PAGE_RPLV_SHIFT        63
+
+/* Used by software */
+#define _PAGE_PRESENT          (_ULCAST_(1) << _PAGE_PRESENT_SHIFT)
+#define _PAGE_WRITE            (_ULCAST_(1) << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED         (_ULCAST_(1) << _PAGE_ACCESSED_SHIFT)
+#define _PAGE_MODIFIED         (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
+#define _PAGE_PROTNONE         (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
+#define _PAGE_SPECIAL          (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)
+
+/* Used by TLB hardware (placed in EntryLo*) */
+#define _PAGE_VALID            (_ULCAST_(1) << _PAGE_VALID_SHIFT)
+#define _PAGE_DIRTY            (_ULCAST_(1) << _PAGE_DIRTY_SHIFT)
+#define _PAGE_PLV              (_ULCAST_(3) << _PAGE_PLV_SHIFT)
+#define _PAGE_GLOBAL           (_ULCAST_(1) << _PAGE_GLOBAL_SHIFT)
+#define _PAGE_HUGE             (_ULCAST_(1) << _PAGE_HUGE_SHIFT)
+#define _PAGE_HGLOBAL          (_ULCAST_(1) << _PAGE_HGLOBAL_SHIFT)
+#define _PAGE_NO_READ          (_ULCAST_(1) << _PAGE_NO_READ_SHIFT)
+#define _PAGE_NO_EXEC          (_ULCAST_(1) << _PAGE_NO_EXEC_SHIFT)
+#define _PAGE_RPLV             (_ULCAST_(1) << _PAGE_RPLV_SHIFT)
+#define _CACHE_MASK            (_ULCAST_(3) << _CACHE_SHIFT)
+#define _PFN_SHIFT             (PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT)
+
+#define _PAGE_USER     (PLV_USER << _PAGE_PLV_SHIFT)
+#define _PAGE_KERN     (PLV_KERN << _PAGE_PLV_SHIFT)
+
+#define _PFN_MASK (~((_ULCAST_(1) << (_PFN_SHIFT)) - 1) & \
+                 ((_ULCAST_(1) << (_PAGE_PFN_END_SHIFT)) - 1))
+
+/*
+ * Cache attributes
+ */
+#ifndef _CACHE_SUC
+#define _CACHE_SUC                     (0<<_CACHE_SHIFT) /* Strong-ordered UnCached */
+#endif
+#ifndef _CACHE_CC
+#define _CACHE_CC                      (1<<_CACHE_SHIFT) /* Coherent Cached */
+#endif
+#ifndef _CACHE_WUC
+#define _CACHE_WUC                     (2<<_CACHE_SHIFT) /* Weak-ordered UnCached */
+#endif
+
+#define __READABLE     (_PAGE_VALID)
+#define __WRITEABLE    (_PAGE_DIRTY | _PAGE_WRITE)
+
+#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
+#define _HPAGE_CHG_MASK        (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
+
+#define PAGE_NONE      __pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \
+                                _PAGE_USER | _CACHE_CC)
+#define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
+                                _PAGE_USER | _CACHE_CC)
+#define PAGE_READONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _CACHE_CC)
+
+#define PAGE_KERNEL    __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+                                _PAGE_GLOBAL | _PAGE_KERN | _CACHE_CC)
+#define PAGE_KERNEL_SUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+                                _PAGE_GLOBAL | _PAGE_KERN |  _CACHE_SUC)
+#define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+                                _PAGE_GLOBAL | _PAGE_KERN |  _CACHE_WUC)
+
+#define __P000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ)
+#define __P001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
+#define __P010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
+#define __P011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
+#define __P100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+#define __P101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+#define __P110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+#define __P111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+
+#define __S000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ)
+#define __S001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
+#define __S010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE)
+#define __S011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE)
+#define __S100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+#define __S101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
+#define __S110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE)
+#define __S111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE)
+
+#ifndef __ASSEMBLY__
+
+#define pgprot_noncached pgprot_noncached
+
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+       unsigned long prot = pgprot_val(_prot);
+
+       prot = (prot & ~_CACHE_MASK) | _CACHE_SUC;
+
+       return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+       unsigned long prot = pgprot_val(_prot);
+
+       prot = (prot & ~_CACHE_MASK) | _CACHE_WUC;
+
+       return __pgprot(prot);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_PGTABLE_BITS_H */
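
pgprot_noncached() and pgprot_writecombine() above only swap the two-bit
cache-attribute field (bits 4-5) of a protection value to SUC or WUC; every
other bit is preserved. A small sketch of what that means for a caller (the
helper name is illustrative, not part of the patch):

    #include <linux/pgtable.h>

    /* Illustrative helper, not defined by this patch. */
    static pgprot_t framebuffer_prot(void)
    {
            /* Normal cached kernel mapping: cache field is _CACHE_CC (1 << 4). */
            pgprot_t prot = PAGE_KERNEL;

            /*
             * Replace only the cache field with _CACHE_WUC (2 << 4); the
             * present/global/PLV bits survive because pgprot_writecombine()
             * clears just _CACHE_MASK (3 << 4) before OR-ing in the new value.
             */
            return pgprot_writecombine(prot);
    }
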
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
new file mode 100644 (file)
index 0000000..5dc84d8
--- /dev/null
@@ -0,0 +1,565 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_PGTABLE_H
+#define _ASM_PGTABLE_H
+
+#include <linux/compiler.h>
+#include <asm/addrspace.h>
+#include <asm/pgtable-bits.h>
+
+#if CONFIG_PGTABLE_LEVELS == 2
+#include <asm-generic/pgtable-nopmd.h>
+#elif CONFIG_PGTABLE_LEVELS == 3
+#include <asm-generic/pgtable-nopud.h>
+#else
+#include <asm-generic/pgtable-nop4d.h>
+#endif
+
+#define PGD_ORDER              0
+#define PUD_ORDER              0
+#define PMD_ORDER              0
+#define PTE_ORDER              0
+
+#if CONFIG_PGTABLE_LEVELS == 2
+#define PGDIR_SHIFT    (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#elif CONFIG_PGTABLE_LEVELS == 3
+#define PMD_SHIFT      (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PMD_SIZE       (1UL << PMD_SHIFT)
+#define PMD_MASK       (~(PMD_SIZE-1))
+#define PGDIR_SHIFT    (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#elif CONFIG_PGTABLE_LEVELS == 4
+#define PMD_SHIFT      (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PMD_SIZE       (1UL << PMD_SHIFT)
+#define PMD_MASK       (~(PMD_SIZE-1))
+#define PUD_SHIFT      (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#define PUD_SIZE       (1UL << PUD_SHIFT)
+#define PUD_MASK       (~(PUD_SIZE-1))
+#define PGDIR_SHIFT    (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
+#endif
+
+#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK     (~(PGDIR_SIZE-1))
+
+#define VA_BITS                (PGDIR_SHIFT + (PAGE_SHIFT + PGD_ORDER - 3))
+
+#define PTRS_PER_PGD   ((PAGE_SIZE << PGD_ORDER) >> 3)
+#if CONFIG_PGTABLE_LEVELS > 3
+#define PTRS_PER_PUD   ((PAGE_SIZE << PUD_ORDER) >> 3)
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+#define PTRS_PER_PMD   ((PAGE_SIZE << PMD_ORDER) >> 3)
+#endif
+#define PTRS_PER_PTE   ((PAGE_SIZE << PTE_ORDER) >> 3)
+
+#define USER_PTRS_PER_PGD       ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/mm_types.h>
+#include <linux/mmzone.h>
+#include <asm/fixmap.h>
+#include <asm/io.h>
+
+struct mm_struct;
+struct vm_area_struct;
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero; used
+ * for zero-mapped memory areas etc..
+ */
+
+extern unsigned long empty_zero_page;
+extern unsigned long zero_page_mask;
+
+#define ZERO_PAGE(vaddr) \
+       (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
+#define __HAVE_COLOR_ZERO_PAGE
+
+/*
+ * TLB refill handlers may also map the vmalloc area into xkvrange.
+ * Avoid the first couple of pages so NULL pointer dereferences will
+ * still reliably trap.
+ */
+#define MODULES_VADDR  (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
+#define MODULES_END    (MODULES_VADDR + SZ_256M)
+
+#define VMALLOC_START  MODULES_END
+#define VMALLOC_END    \
+       (vm_map_base +  \
+        min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE)
+
+#define pte_ERROR(e) \
+       pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pmd_ERROR(e) \
+       pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#endif
+#ifndef __PAGETABLE_PUD_FOLDED
+#define pud_ERROR(e) \
+       pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+#endif
+#define pgd_ERROR(e) \
+       pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+extern pte_t invalid_pte_table[PTRS_PER_PTE];
+
+#ifndef __PAGETABLE_PUD_FOLDED
+
+typedef struct { unsigned long pud; } pud_t;
+#define pud_val(x)     ((x).pud)
+#define __pud(x)       ((pud_t) { (x) })
+
+extern pud_t invalid_pud_table[PTRS_PER_PUD];
+
+/*
+ * Empty pgd/p4d entries point to the invalid_pud_table.
+ */
+static inline int p4d_none(p4d_t p4d)
+{
+       return p4d_val(p4d) == (unsigned long)invalid_pud_table;
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+       return p4d_val(p4d) & ~PAGE_MASK;
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+       return p4d_val(p4d) != (unsigned long)invalid_pud_table;
+}
+
+static inline void p4d_clear(p4d_t *p4dp)
+{
+       p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
+}
+
+static inline pud_t *p4d_pgtable(p4d_t p4d)
+{
+       return (pud_t *)p4d_val(p4d);
+}
+
+static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
+{
+       *p4d = p4dval;
+}
+
+#define p4d_phys(p4d)          virt_to_phys((void *)p4d_val(p4d))
+#define p4d_page(p4d)          (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
+
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x)     ((x).pmd)
+#define __pmd(x)       ((pmd_t) { (x) })
+
+extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
+
+/*
+ * Empty pud entries point to the invalid_pmd_table.
+ */
+static inline int pud_none(pud_t pud)
+{
+       return pud_val(pud) == (unsigned long)invalid_pmd_table;
+}
+
+static inline int pud_bad(pud_t pud)
+{
+       return pud_val(pud) & ~PAGE_MASK;
+}
+
+static inline int pud_present(pud_t pud)
+{
+       return pud_val(pud) != (unsigned long)invalid_pmd_table;
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+       pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
+}
+
+static inline pmd_t *pud_pgtable(pud_t pud)
+{
+       return (pmd_t *)pud_val(pud);
+}
+
+#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
+
+#define pud_phys(pud)          virt_to_phys((void *)pud_val(pud))
+#define pud_page(pud)          (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
+
+#endif
+
+/*
+ * Empty pmd entries point to the invalid_pte_table.
+ */
+static inline int pmd_none(pmd_t pmd)
+{
+       return pmd_val(pmd) == (unsigned long)invalid_pte_table;
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+       return (pmd_val(pmd) & ~PAGE_MASK);
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+       if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+               return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE));
+
+       return pmd_val(pmd) != (unsigned long)invalid_pte_table;
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+       pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
+}
+
+#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
+
+#define pmd_phys(pmd)          virt_to_phys((void *)pmd_val(pmd))
+
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_page(pmd)          (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */
+
+#define pmd_page_vaddr(pmd)    pmd_val(pmd)
+
+extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
+extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);
+
+#define pte_page(x)            pfn_to_page(pte_pfn(x))
+#define pte_pfn(x)             ((unsigned long)(((x).pte & _PFN_MASK) >> _PFN_SHIFT))
+#define pfn_pte(pfn, prot)     __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)     __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+
+/*
+ * Initialize a new pgd / pud / pmd table with invalid pointers.
+ */
+extern void pgd_init(unsigned long page);
+extern void pud_init(unsigned long page, unsigned long pagetable);
+extern void pmd_init(unsigned long page, unsigned long pagetable);
+
+/*
+ * Non-present pages:  high 40 bits are offset, next 8 bits type,
+ * low 16 bits zero.
+ */
+static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
+{ pte_t pte; pte_val(pte) = (type << 16) | (offset << 24); return pte; }
+
+#define __swp_type(x)          (((x).val >> 16) & 0xff)
+#define __swp_offset(x)                ((x).val >> 24)
+#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
+#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
+#define __swp_entry_to_pmd(x)  ((pmd_t) { (x).val | _PAGE_HUGE })
+
+extern void paging_init(void);
+
+#define pte_none(pte)          (!(pte_val(pte) & ~_PAGE_GLOBAL))
+#define pte_present(pte)       (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_no_exec(pte)       (pte_val(pte) & _PAGE_NO_EXEC)
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+       *ptep = pteval;
+       if (pte_val(pteval) & _PAGE_GLOBAL) {
+               pte_t *buddy = ptep_buddy(ptep);
+               /*
+                * Make sure the buddy is global too (if it's !none,
+                * it better already be global)
+                */
+#ifdef CONFIG_SMP
+               /*
+                * For SMP, multiple CPUs can race, so we need to do
+                * this atomically.
+                */
+               unsigned long page_global = _PAGE_GLOBAL;
+               unsigned long tmp;
+
+               __asm__ __volatile__ (
+               "1:"    __LL    "%[tmp], %[buddy]               \n"
+               "       bnez    %[tmp], 2f                      \n"
+               "        or     %[tmp], %[tmp], %[global]       \n"
+                       __SC    "%[tmp], %[buddy]               \n"
+               "       beqz    %[tmp], 1b                      \n"
+               "       nop                                     \n"
+               "2:                                             \n"
+               __WEAK_LLSC_MB
+               : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
+               : [global] "r" (page_global));
+#else /* !CONFIG_SMP */
+               if (pte_none(*buddy))
+                       pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
+       }
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+                             pte_t *ptep, pte_t pteval)
+{
+       set_pte(ptep, pteval);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+       /* Preserve global status for the pair */
+       if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
+               set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
+       else
+               set_pte_at(mm, addr, ptep, __pte(0));
+}
+
+#define PGD_T_LOG2     (__builtin_ffs(sizeof(pgd_t)) - 1)
+#define PMD_T_LOG2     (__builtin_ffs(sizeof(pmd_t)) - 1)
+#define PTE_T_LOG2     (__builtin_ffs(sizeof(pte_t)) - 1)
+
+extern pgd_t swapper_pg_dir[];
+extern pgd_t invalid_pg_dir[];
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+       pte_val(pte) &= ~_PAGE_ACCESSED;
+       return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+       pte_val(pte) |= _PAGE_ACCESSED;
+       return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+       pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
+       return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+       pte_val(pte) |= (_PAGE_DIRTY | _PAGE_MODIFIED);
+       return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+       pte_val(pte) |= (_PAGE_WRITE | _PAGE_DIRTY);
+       return pte;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+       pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
+       return pte;
+}
+
+static inline int pte_huge(pte_t pte)  { return pte_val(pte) & _PAGE_HUGE; }
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+       pte_val(pte) |= _PAGE_HUGE;
+       return pte;
+}
+
+#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
+static inline int pte_special(pte_t pte)       { return pte_val(pte) & _PAGE_SPECIAL; }
+static inline pte_t pte_mkspecial(pte_t pte)   { pte_val(pte) |= _PAGE_SPECIAL; return pte; }
+#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
+
+#define pte_accessible pte_accessible
+static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
+{
+       if (pte_val(a) & _PAGE_PRESENT)
+               return true;
+
+       if ((pte_val(a) & _PAGE_PROTNONE) &&
+                       atomic_read(&mm->tlb_flush_pending))
+               return true;
+
+       return false;
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+       return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+                    (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
+}
+
+extern void __update_tlb(struct vm_area_struct *vma,
+                       unsigned long address, pte_t *ptep);
+
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+                       unsigned long address, pte_t *ptep)
+{
+       __update_tlb(vma, address, ptep);
+}
+
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+                       unsigned long address, pmd_t *pmdp)
+{
+       __update_tlb(vma, address, (pte_t *)pmdp);
+}
+
+#define kern_addr_valid(addr)  (1)
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+/* We don't have hardware dirty/accessed bits; generic_pmdp_establish is fine. */
+#define pmdp_establish generic_pmdp_establish
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+       return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+       pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
+               ((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
+       pmd_val(pmd) |= _PAGE_HUGE;
+
+       return pmd;
+}
+
+#define pmd_write pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+       return !!(pmd_val(pmd) & _PAGE_WRITE);
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+       pmd_val(pmd) |= (_PAGE_WRITE | _PAGE_DIRTY);
+       return pmd;
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+       pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
+       return pmd;
+}
+
+static inline int pmd_dirty(pmd_t pmd)
+{
+       return !!(pmd_val(pmd) & _PAGE_MODIFIED);
+}
+
+static inline pmd_t pmd_mkclean(pmd_t pmd)
+{
+       pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
+       return pmd;
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+       pmd_val(pmd) |= (_PAGE_DIRTY | _PAGE_MODIFIED);
+       return pmd;
+}
+
+static inline int pmd_young(pmd_t pmd)
+{
+       return !!(pmd_val(pmd) & _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+       pmd_val(pmd) &= ~_PAGE_ACCESSED;
+       return pmd;
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+       pmd_val(pmd) |= _PAGE_ACCESSED;
+       return pmd;
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+       return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
+}
+
+static inline struct page *pmd_page(pmd_t pmd)
+{
+       if (pmd_trans_huge(pmd))
+               return pfn_to_page(pmd_pfn(pmd));
+
+       return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
+}
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+       pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
+                               (pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
+       return pmd;
+}
+
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
+{
+       pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);
+
+       return pmd;
+}
+
+/*
+ * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
+ * different prototype.
+ */
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+                                           unsigned long address, pmd_t *pmdp)
+{
+       pmd_t old = *pmdp;
+
+       pmd_clear(pmdp);
+
+       return old;
+}
+
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifdef CONFIG_NUMA_BALANCING
+static inline long pte_protnone(pte_t pte)
+{
+       return (pte_val(pte) & _PAGE_PROTNONE);
+}
+
+static inline long pmd_protnone(pmd_t pmd)
+{
+       return (pmd_val(pmd) & _PAGE_PROTNONE);
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+/*
+ * We provide our own get_unmapped_area() to cope with the virtual aliasing
+ * constraints placed on us by the cache architecture.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_PGTABLE_H */
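
The swap-entry macros above implement the layout described in their comment:
the type sits in bits 16-23, the offset from bit 24 upward, and the low 16 bits
stay clear so the resulting PTE is neither present nor global. A worked example
of the packing (a sketch, not part of the patch):

    #include <linux/swapops.h>
    #include <linux/bug.h>

    static void __maybe_unused swap_pte_layout_example(void)
    {
            /*
             * __swp_entry(5, 0x1234) builds:
             *     (5      << 16) = 0x0000000000050000   type   in bits 16..23
             *   | (0x1234 << 24) = 0x0000001234000000   offset in bits 24..63
             *                      ------------------
             *                      0x0000001234050000
             */
            swp_entry_t entry = __swp_entry(5, 0x1234);

            BUG_ON(__swp_type(entry)   != 5);       /* (val >> 16) & 0xff */
            BUG_ON(__swp_offset(entry) != 0x1234);  /* val >> 24          */

            /* The low 16 bits are zero, so the PTE is neither present nor global. */
            BUG_ON(pte_present(__swp_entry_to_pte(entry)));
    }
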
diff --git a/arch/loongarch/include/asm/prefetch.h b/arch/loongarch/include/asm/prefetch.h
new file mode 100644 (file)
index 0000000..1672262
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_PREFETCH_H
+#define __ASM_PREFETCH_H
+
+#define Pref_Load      0
+#define Pref_Store     8
+
+#ifdef __ASSEMBLY__
+
+       .macro  __pref hint addr
+#ifdef CONFIG_CPU_HAS_PREFETCH
+       preld   \hint, \addr, 0
+#endif
+       .endm
+
+       .macro  pref_load addr
+       __pref  Pref_Load, \addr
+       .endm
+
+       .macro  pref_store addr
+       __pref  Pref_Store, \addr
+       .endm
+
+#endif
+
+#endif /* __ASM_PREFETCH_H */
diff --git a/arch/loongarch/include/asm/processor.h b/arch/loongarch/include/asm/processor.h
new file mode 100644 (file)
index 0000000..1d63c93
--- /dev/null
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PROCESSOR_H
+#define _ASM_PROCESSOR_H
+
+#include <linux/atomic.h>
+#include <linux/cpumask.h>
+#include <linux/sizes.h>
+
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/loongarch.h>
+#include <asm/vdso/processor.h>
+#include <uapi/asm/ptrace.h>
+#include <uapi/asm/sigcontext.h>
+
+#ifdef CONFIG_32BIT
+
+#define TASK_SIZE      0x80000000UL
+#define TASK_SIZE_MIN  TASK_SIZE
+#define STACK_TOP_MAX  TASK_SIZE
+
+#define TASK_IS_32BIT_ADDR 1
+
+#endif
+
+#ifdef CONFIG_64BIT
+
+#define TASK_SIZE32    0x100000000UL
+#define TASK_SIZE64     (0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits))
+
+#define TASK_SIZE      (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
+#define TASK_SIZE_MIN  TASK_SIZE32
+#define STACK_TOP_MAX  TASK_SIZE64
+
+#define TASK_SIZE_OF(tsk)                                              \
+       (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
+
+#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)
+
+#endif
+
+#define VDSO_RANDOMIZE_SIZE    (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)
+
+unsigned long stack_top(void);
+#define STACK_TOP stack_top()
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
+
+#define FPU_REG_WIDTH          256
+#define FPU_ALIGN              __attribute__((aligned(32)))
+
+union fpureg {
+       __u32   val32[FPU_REG_WIDTH / 32];
+       __u64   val64[FPU_REG_WIDTH / 64];
+};
+
+#define FPR_IDX(width, idx)    (idx)
+
+#define BUILD_FPR_ACCESS(width) \
+static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx) \
+{                                                                      \
+       return fpr->val##width[FPR_IDX(width, idx)];                    \
+}                                                                      \
+                                                                       \
+static inline void set_fpr##width(union fpureg *fpr, unsigned int idx, \
+                                 u##width val)                         \
+{                                                                      \
+       fpr->val##width[FPR_IDX(width, idx)] = val;                     \
+}
+
+BUILD_FPR_ACCESS(32)
+BUILD_FPR_ACCESS(64)
+
+struct loongarch_fpu {
+       unsigned int    fcsr;
+       unsigned int    vcsr;
+       uint64_t        fcc;    /* 8x8 */
+       union fpureg    fpr[NUM_FPU_REGS];
+};
+
+#define INIT_CPUMASK { \
+       {0,} \
+}
+
+#define ARCH_MIN_TASKALIGN     32
+
+struct loongarch_vdso_info;
+
+/*
+ * If you change thread_struct, remember to change the #defines below too!
+ */
+struct thread_struct {
+       /* Main processor registers. */
+       unsigned long reg01, reg03, reg22; /* ra sp fp */
+       unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */
+       unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */
+
+       /* CSR registers */
+       unsigned long csr_prmd;
+       unsigned long csr_crmd;
+       unsigned long csr_euen;
+       unsigned long csr_ecfg;
+       unsigned long csr_badvaddr;     /* Last user fault */
+
+       /* Scratch registers */
+       unsigned long scr0;
+       unsigned long scr1;
+       unsigned long scr2;
+       unsigned long scr3;
+
+       /* Eflags register */
+       unsigned long eflags;
+
+       /* Other stuff associated with the thread. */
+       unsigned long trap_nr;
+       unsigned long error_code;
+       struct loongarch_vdso_info *vdso;
+
+       /*
+        * FPU & vector registers, must be last because
+        * they are conditionally copied at fork().
+        */
+       struct loongarch_fpu fpu FPU_ALIGN;
+};
+
+#define INIT_THREAD  {                                         \
+       /*                                                      \
+        * Main processor registers                             \
+        */                                                     \
+       .reg01                  = 0,                            \
+       .reg03                  = 0,                            \
+       .reg22                  = 0,                            \
+       .reg23                  = 0,                            \
+       .reg24                  = 0,                            \
+       .reg25                  = 0,                            \
+       .reg26                  = 0,                            \
+       .reg27                  = 0,                            \
+       .reg28                  = 0,                            \
+       .reg29                  = 0,                            \
+       .reg30                  = 0,                            \
+       .reg31                  = 0,                            \
+       .csr_crmd               = 0,                            \
+       .csr_prmd               = 0,                            \
+       .csr_euen               = 0,                            \
+       .csr_ecfg               = 0,                            \
+       .csr_badvaddr           = 0,                            \
+       /*                                                      \
+        * Other stuff associated with the process              \
+        */                                                     \
+       .trap_nr                = 0,                            \
+       .error_code             = 0,                            \
+       /*                                                      \
+        * FPU & vector registers                               \
+        */                                                     \
+       .fpu                    = {                             \
+               .fcsr           = 0,                            \
+               .vcsr           = 0,                            \
+               .fcc            = 0,                            \
+               .fpr            = {{{0,},},},                   \
+       },                                                      \
+}
+
+struct task_struct;
+
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while (0)
+
+enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT, IDLE_POLL};
+
+extern unsigned long           boot_option_idle_override;
+/*
+ * Do necessary setup to start up a newly executed thread.
+ */
+extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);
+
+static inline void flush_thread(void)
+{
+}
+
+unsigned long __get_wchan(struct task_struct *p);
+
+#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
+                        THREAD_SIZE - 32 - sizeof(struct pt_regs))
+#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->csr_era)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[3])
+#define KSTK_EUEN(tsk) (task_pt_regs(tsk)->csr_euen)
+#define KSTK_ECFG(tsk) (task_pt_regs(tsk)->csr_ecfg)
+
+#define return_address() ({__asm__ __volatile__("":::"$1"); __builtin_return_address(0);})
+
+#ifdef CONFIG_CPU_HAS_PREFETCH
+
+#define ARCH_HAS_PREFETCH
+#define prefetch(x) __builtin_prefetch((x), 0, 1)
+
+#define ARCH_HAS_PREFETCHW
+#define prefetchw(x) __builtin_prefetch((x), 1, 1)
+
+#endif
+
+#endif /* _ASM_PROCESSOR_H */
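
BUILD_FPR_ACCESS() above expands to get_fpr32()/get_fpr64() and
set_fpr32()/set_fpr64(), which select a 32- or 64-bit lane of a 256-bit fpureg.
A small sketch of reading a saved FP register from a task's thread state (the
helper name is illustrative, not part of the patch):

    #include <linux/sched.h>
    #include <asm/processor.h>

    /* Illustrative helper: low 64-bit lane (index 0) of register $f0. */
    static u64 task_fpr0(struct task_struct *tsk)
    {
            return get_fpr64(&tsk->thread.fpu.fpr[0], 0);
    }
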
diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
new file mode 100644 (file)
index 0000000..17838c6
--- /dev/null
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PTRACE_H
+#define _ASM_PTRACE_H
+
+#include <asm/page.h>
+#include <asm/thread_info.h>
+#include <uapi/asm/ptrace.h>
+
+/*
+ * This struct defines the way the registers are stored on the stack during
+ * a system call/exception. If you add a register here, please also add it to
+ * regoffset_table[] in arch/loongarch/kernel/ptrace.c.
+ */
+struct pt_regs {
+       /* Main processor registers. */
+       unsigned long regs[32];
+
+       /* Original syscall arg0. */
+       unsigned long orig_a0;
+
+       /* Special CSR registers. */
+       unsigned long csr_era;
+       unsigned long csr_badvaddr;
+       unsigned long csr_crmd;
+       unsigned long csr_prmd;
+       unsigned long csr_euen;
+       unsigned long csr_ecfg;
+       unsigned long csr_estat;
+       unsigned long __last[0];
+} __aligned(8);
+
+static inline int regs_irqs_disabled(struct pt_regs *regs)
+{
+       return arch_irqs_disabled_flags(regs->csr_prmd);
+}
+
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+       return regs->regs[3];
+}
+
+/*
+ * Don't use asm-generic/ptrace.h; it defines FP accessors that don't make
+ * sense on LoongArch. We would rather get an error if they are invoked.
+ */
+
+static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long val)
+{
+       regs->csr_era = val;
+}
+
+/* Query offset/name of register from its name/offset */
+extern int regs_query_register_offset(const char *name);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs:       pt_regs from which the register value is read.
+ * @offset:     offset number of the register.
+ *
+ * regs_get_register() returns the value of a register. @offset is the
+ * offset of the register within the struct pt_regs pointed to by @regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
+{
+       if (unlikely(offset > MAX_REG_OFFSET))
+               return 0;
+
+       return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs:       pt_regs which contains kernel stack pointer.
+ * @addr:       address which is checked.
+ *
+ * regs_within_kernel_stack() checks whether @addr is within the kernel stack
+ * page(s). It returns true if @addr is within the kernel stack, false otherwise.
+ */
+static inline int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+       return ((addr & ~(THREAD_SIZE - 1))  ==
+               (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs:       pt_regs which contains kernel stack pointer.
+ * @n:          stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
+ * specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+       unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+       addr += n;
+       if (regs_within_kernel_stack(regs, (unsigned long)addr))
+               return *addr;
+       else
+               return 0;
+}
+
+struct task_struct;
+
+/*
+ * Does the process account for user or for system time?
+ */
+#define user_mode(regs) (((regs)->csr_prmd & PLV_MASK) == PLV_USER)
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       return regs->regs[4];
+}
+
+#define instruction_pointer(regs) ((regs)->csr_era)
+#define profile_pc(regs) instruction_pointer(regs)
+
+extern void die(const char *, struct pt_regs *) __noreturn;
+
+static inline void die_if_kernel(const char *str, struct pt_regs *regs)
+{
+       if (unlikely(!user_mode(regs)))
+               die(str, regs);
+}
+
+#define current_pt_regs()                                              \
+({                                                                     \
+       unsigned long sp = (unsigned long)__builtin_frame_address(0);   \
+       (struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1 - 32) - 1;      \
+})
+
+/* Helpers for working with the user stack pointer */
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+       return regs->regs[3];
+}
+
+static inline void user_stack_pointer_set(struct pt_regs *regs,
+       unsigned long val)
+{
+       regs->regs[3] = val;
+}
+
+#endif /* _ASM_PTRACE_H */
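
regs_query_register_offset() and regs_get_register() above form the name-based
register lookup used by kprobes and the tracing code. A minimal sketch; the
register name string is an assumption about regoffset_table[] in
arch/loongarch/kernel/ptrace.c, not something shown in this patch:

    #include <asm/ptrace.h>

    static unsigned long read_stack_pointer(struct pt_regs *regs)
    {
            /* "r3" is assumed to be the name registered for the stack pointer. */
            int offset = regs_query_register_offset("r3");

            if (offset < 0)         /* unknown register name */
                    return 0;

            return regs_get_register(regs, offset);
    }
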
diff --git a/arch/loongarch/include/asm/reboot.h b/arch/loongarch/include/asm/reboot.h
new file mode 100644 (file)
index 0000000..5115174
--- /dev/null
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_REBOOT_H
+#define _ASM_REBOOT_H
+
+extern void (*pm_restart)(void);
+
+#endif /* _ASM_REBOOT_H */
diff --git a/arch/loongarch/include/asm/regdef.h b/arch/loongarch/include/asm/regdef.h
new file mode 100644 (file)
index 0000000..49a374c
--- /dev/null
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_REGDEF_H
+#define _ASM_REGDEF_H
+
+#define zero   $r0     /* wired zero */
+#define ra     $r1     /* return address */
+#define tp     $r2
+#define sp     $r3     /* stack pointer */
+#define a0     $r4     /* argument registers, a0/a1 reused as v0/v1 for return value */
+#define a1     $r5
+#define a2     $r6
+#define a3     $r7
+#define a4     $r8
+#define a5     $r9
+#define a6     $r10
+#define a7     $r11
+#define t0     $r12    /* caller saved */
+#define t1     $r13
+#define t2     $r14
+#define t3     $r15
+#define t4     $r16
+#define t5     $r17
+#define t6     $r18
+#define t7     $r19
+#define t8     $r20
+#define u0     $r21
+#define fp     $r22    /* frame pointer */
+#define s0     $r23    /* callee saved */
+#define s1     $r24
+#define s2     $r25
+#define s3     $r26
+#define s4     $r27
+#define s5     $r28
+#define s6     $r29
+#define s7     $r30
+#define s8     $r31
+
+#endif /* _ASM_REGDEF_H */
diff --git a/arch/loongarch/include/asm/seccomp.h b/arch/loongarch/include/asm/seccomp.h
new file mode 100644 (file)
index 0000000..31d6ab4
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm/unistd.h>
+
+#include <asm-generic/seccomp.h>
+
+#ifdef CONFIG_32BIT
+# define SECCOMP_ARCH_NATIVE           AUDIT_ARCH_LOONGARCH32
+# define SECCOMP_ARCH_NATIVE_NR                NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME      "loongarch32"
+#else
+# define SECCOMP_ARCH_NATIVE           AUDIT_ARCH_LOONGARCH64
+# define SECCOMP_ARCH_NATIVE_NR                NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME      "loongarch64"
+#endif
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/loongarch/include/asm/serial.h b/arch/loongarch/include/asm/serial.h
new file mode 100644 (file)
index 0000000..3fb550e
--- /dev/null
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM__SERIAL_H
+#define __ASM__SERIAL_H
+
+#define BASE_BAUD 0
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
+
+#endif /* __ASM__SERIAL_H */
diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h
new file mode 100644 (file)
index 0000000..6d7d2a3
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _LOONGARCH_SETUP_H
+#define _LOONGARCH_SETUP_H
+
+#include <linux/types.h>
+#include <uapi/asm/setup.h>
+
+#define VECSIZE 0x200
+
+extern unsigned long eentry;
+extern unsigned long tlbrentry;
+extern void cpu_cache_init(void);
+extern void per_cpu_trap_init(int cpu);
+extern void set_handler(unsigned long offset, void *addr, unsigned long len);
+extern void set_merr_handler(unsigned long offset, void *addr, unsigned long len);
+
+#endif /* _LOONGARCH_SETUP_H */
diff --git a/arch/loongarch/include/asm/shmparam.h b/arch/loongarch/include/asm/shmparam.h
new file mode 100644 (file)
index 0000000..c9554f4
--- /dev/null
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_SHMPARAM_H
+#define _ASM_SHMPARAM_H
+
+#define __ARCH_FORCE_SHMLBA    1
+
+#define        SHMLBA  SZ_64K           /* attach addr a multiple of this */
+
+#endif /* _ASM_SHMPARAM_H */
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
new file mode 100644 (file)
index 0000000..551e1f3
--- /dev/null
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/linkage.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+
+void loongson3_smp_setup(void);
+void loongson3_prepare_cpus(unsigned int max_cpus);
+void loongson3_boot_secondary(int cpu, struct task_struct *idle);
+void loongson3_init_secondary(void);
+void loongson3_smp_finish(void);
+void loongson3_send_ipi_single(int cpu, unsigned int action);
+void loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action);
+#ifdef CONFIG_HOTPLUG_CPU
+int loongson3_cpu_disable(void);
+void loongson3_cpu_die(unsigned int cpu);
+#endif
+
+#ifdef CONFIG_SMP
+
+static inline void plat_smp_setup(void)
+{
+       loongson3_smp_setup();
+}
+
+#else /* !CONFIG_SMP */
+
+static inline void plat_smp_setup(void) { }
+
+#endif /* !CONFIG_SMP */
+
+extern int smp_num_siblings;
+extern int num_processors;
+extern int disabled_cpus;
+extern cpumask_t cpu_sibling_map[];
+extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map[];
+
+static inline int raw_smp_processor_id(void)
+{
+#if defined(__VDSO__)
+       extern int vdso_smp_processor_id(void)
+               __compiletime_error("VDSO should not call smp_processor_id()");
+       return vdso_smp_processor_id();
+#else
+       return current_thread_info()->cpu;
+#endif
+}
+#define raw_smp_processor_id raw_smp_processor_id
+
+/* Map from cpu id to sequential logical cpu number.  This mapping is the
+ * identity unless some cpus failed to come on-line. */
+extern int __cpu_number_map[NR_CPUS];
+#define cpu_number_map(cpu)  __cpu_number_map[cpu]
+
+/* The reverse map from sequential logical cpu number to cpu id.  */
+extern int __cpu_logical_map[NR_CPUS];
+#define cpu_logical_map(cpu)  __cpu_logical_map[cpu]
+
+#define cpu_physical_id(cpu)   cpu_logical_map(cpu)
+
+#define SMP_BOOT_CPU           0x1
+#define SMP_RESCHEDULE         0x2
+#define SMP_CALL_FUNCTION      0x4
+
+struct secondary_data {
+       unsigned long stack;
+       unsigned long thread_info;
+};
+extern struct secondary_data cpuboot_data;
+
+extern asmlinkage void smpboot_entry(void);
+
+extern void calculate_cpu_foreign_map(void);
+
+/*
+ * Generate IPI list text
+ */
+extern void show_ipi_list(struct seq_file *p, int prec);
+
+/*
+ * This function sends a 'reschedule' IPI to another CPU.
+ * It goes straight through and wastes no time serializing
+ * anything. The worst case is that we lose a reschedule ...
+ */
+static inline void smp_send_reschedule(int cpu)
+{
+       loongson3_send_ipi_single(cpu, SMP_RESCHEDULE);
+}
+
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+       loongson3_send_ipi_single(cpu, SMP_CALL_FUNCTION);
+}
+
+static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+       loongson3_send_ipi_mask(mask, SMP_CALL_FUNCTION);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static inline int __cpu_disable(void)
+{
+       return loongson3_cpu_disable();
+}
+
+static inline void __cpu_die(unsigned int cpu)
+{
+       loongson3_cpu_die(cpu);
+}
+
+extern void play_dead(void);
+#endif
+
+#endif /* __ASM_SMP_H */
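
The arch_send_call_function_*() helpers above are invoked only by the generic
cross-call code; callers use the generic API they back. A minimal sketch of
that path (the function names here are illustrative, not part of the patch):

    #include <linux/smp.h>
    #include <linux/printk.h>

    static void remote_work(void *info)
    {
            /* Runs on the target CPU, in IPI context. */
            pr_info("hello from CPU %d\n", smp_processor_id());
    }

    static void poke_cpu(int cpu)
    {
            /*
             * Queues remote_work() on @cpu and waits for it (last argument 1).
             * The IPI itself is delivered through loongson3_send_ipi_single(cpu,
             * SMP_CALL_FUNCTION) via arch_send_call_function_single_ipi() above.
             */
            smp_call_function_single(cpu, remote_work, NULL, 1);
    }
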
diff --git a/arch/loongarch/include/asm/sparsemem.h b/arch/loongarch/include/asm/sparsemem.h
new file mode 100644 (file)
index 0000000..3d18cdf
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LOONGARCH_SPARSEMEM_H
+#define _LOONGARCH_SPARSEMEM_H
+
+#ifdef CONFIG_SPARSEMEM
+
+/*
+ * SECTION_SIZE_BITS           2^N: how big each section will be
+ * MAX_PHYSMEM_BITS            2^N: how much memory we can have in that space
+ */
+#define SECTION_SIZE_BITS      29 /* 2^29 = Largest Huge Page Size */
+#define MAX_PHYSMEM_BITS       48
+
+#endif /* CONFIG_SPARSEMEM */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int memory_add_physaddr_to_nid(u64 addr);
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+#endif
+
+#define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS)
+
+#endif /* _LOONGARCH_SPARSEMEM_H */
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
new file mode 100644 (file)
index 0000000..4ca9530
--- /dev/null
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_STACKFRAME_H
+#define _ASM_STACKFRAME_H
+
+#include <linux/threads.h>
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/asm-offsets.h>
+#include <asm/loongarch.h>
+#include <asm/thread_info.h>
+
+/* Make the addition of cfi info a little easier. */
+       .macro cfi_rel_offset reg offset=0 docfi=0
+       .if \docfi
+       .cfi_rel_offset \reg, \offset
+       .endif
+       .endm
+
+       .macro cfi_st reg offset=0 docfi=0
+       cfi_rel_offset \reg, \offset, \docfi
+       LONG_S  \reg, sp, \offset
+       .endm
+
+       .macro cfi_restore reg offset=0 docfi=0
+       .if \docfi
+       .cfi_restore \reg
+       .endif
+       .endm
+
+       .macro cfi_ld reg offset=0 docfi=0
+       LONG_L  \reg, sp, \offset
+       cfi_restore \reg \offset \docfi
+       .endm
+
+       .macro BACKUP_T0T1
+       csrwr   t0, EXCEPTION_KS0
+       csrwr   t1, EXCEPTION_KS1
+       .endm
+
+       .macro RELOAD_T0T1
+       csrrd   t0, EXCEPTION_KS0
+       csrrd   t1, EXCEPTION_KS1
+       .endm
+
+       .macro  SAVE_TEMP docfi=0
+       RELOAD_T0T1
+       cfi_st  t0, PT_R12, \docfi
+       cfi_st  t1, PT_R13, \docfi
+       cfi_st  t2, PT_R14, \docfi
+       cfi_st  t3, PT_R15, \docfi
+       cfi_st  t4, PT_R16, \docfi
+       cfi_st  t5, PT_R17, \docfi
+       cfi_st  t6, PT_R18, \docfi
+       cfi_st  t7, PT_R19, \docfi
+       cfi_st  t8, PT_R20, \docfi
+       .endm
+
+       .macro  SAVE_STATIC docfi=0
+       cfi_st  s0, PT_R23, \docfi
+       cfi_st  s1, PT_R24, \docfi
+       cfi_st  s2, PT_R25, \docfi
+       cfi_st  s3, PT_R26, \docfi
+       cfi_st  s4, PT_R27, \docfi
+       cfi_st  s5, PT_R28, \docfi
+       cfi_st  s6, PT_R29, \docfi
+       cfi_st  s7, PT_R30, \docfi
+       cfi_st  s8, PT_R31, \docfi
+       .endm
+
+/*
+ * get_saved_sp returns the SP for the current CPU by looking in the
+ * kernelsp array for it. It stores the current sp in t0 and loads the
+ * new value in sp.
+ */
+       .macro  get_saved_sp docfi=0
+       la.abs    t1, kernelsp
+#ifdef CONFIG_SMP
+       csrrd     t0, PERCPU_BASE_KS
+       LONG_ADD  t1, t1, t0
+#endif
+       move      t0, sp
+       .if \docfi
+       .cfi_register sp, t0
+       .endif
+       LONG_L    sp, t1, 0
+       .endm
+
+       .macro  set_saved_sp stackp temp temp2
+       la.abs    \temp, kernelsp
+#ifdef CONFIG_SMP
+       LONG_ADD  \temp, \temp, u0
+#endif
+       LONG_S    \stackp, \temp, 0
+       .endm
+
+       .macro  SAVE_SOME docfi=0
+       csrrd   t1, LOONGARCH_CSR_PRMD
+       andi    t1, t1, 0x3     /* extract pplv bit */
+       move    t0, sp
+       beqz    t1, 8f
+       /* Called from user mode, new stack. */
+       get_saved_sp docfi=\docfi
+8:
+       PTR_ADDI sp, sp, -PT_SIZE
+       .if \docfi
+       .cfi_def_cfa sp, 0
+       .endif
+       cfi_st  t0, PT_R3, \docfi
+       cfi_rel_offset  sp, PT_R3, \docfi
+       LONG_S  zero, sp, PT_R0
+       csrrd   t0, LOONGARCH_CSR_PRMD
+       LONG_S  t0, sp, PT_PRMD
+       csrrd   t0, LOONGARCH_CSR_CRMD
+       LONG_S  t0, sp, PT_CRMD
+       csrrd   t0, LOONGARCH_CSR_EUEN
+       LONG_S  t0, sp, PT_EUEN
+       csrrd   t0, LOONGARCH_CSR_ECFG
+       LONG_S  t0, sp, PT_ECFG
+       csrrd   t0, LOONGARCH_CSR_ESTAT
+       PTR_S   t0, sp, PT_ESTAT
+       cfi_st  ra, PT_R1, \docfi
+       cfi_st  a0, PT_R4, \docfi
+       cfi_st  a1, PT_R5, \docfi
+       cfi_st  a2, PT_R6, \docfi
+       cfi_st  a3, PT_R7, \docfi
+       cfi_st  a4, PT_R8, \docfi
+       cfi_st  a5, PT_R9, \docfi
+       cfi_st  a6, PT_R10, \docfi
+       cfi_st  a7, PT_R11, \docfi
+       csrrd   ra, LOONGARCH_CSR_ERA
+       LONG_S  ra, sp, PT_ERA
+       .if \docfi
+       .cfi_rel_offset ra, PT_ERA
+       .endif
+       cfi_st  tp, PT_R2, \docfi
+       cfi_st  fp, PT_R22, \docfi
+
+       /* Set thread_info if we're coming from user mode */
+       csrrd   t0, LOONGARCH_CSR_PRMD
+       andi    t0, t0, 0x3     /* extract pplv bit */
+       beqz    t0, 9f
+
+       li.d    tp, ~_THREAD_MASK
+       and     tp, tp, sp
+       cfi_st  u0, PT_R21, \docfi
+       csrrd   u0, PERCPU_BASE_KS
+9:
+       .endm
+
+       .macro  SAVE_ALL docfi=0
+       SAVE_SOME \docfi
+       SAVE_TEMP \docfi
+       SAVE_STATIC \docfi
+       .endm
+
+       .macro  RESTORE_TEMP docfi=0
+       cfi_ld  t0, PT_R12, \docfi
+       cfi_ld  t1, PT_R13, \docfi
+       cfi_ld  t2, PT_R14, \docfi
+       cfi_ld  t3, PT_R15, \docfi
+       cfi_ld  t4, PT_R16, \docfi
+       cfi_ld  t5, PT_R17, \docfi
+       cfi_ld  t6, PT_R18, \docfi
+       cfi_ld  t7, PT_R19, \docfi
+       cfi_ld  t8, PT_R20, \docfi
+       .endm
+
+       .macro  RESTORE_STATIC docfi=0
+       cfi_ld  s0, PT_R23, \docfi
+       cfi_ld  s1, PT_R24, \docfi
+       cfi_ld  s2, PT_R25, \docfi
+       cfi_ld  s3, PT_R26, \docfi
+       cfi_ld  s4, PT_R27, \docfi
+       cfi_ld  s5, PT_R28, \docfi
+       cfi_ld  s6, PT_R29, \docfi
+       cfi_ld  s7, PT_R30, \docfi
+       cfi_ld  s8, PT_R31, \docfi
+       .endm
+
+       .macro  RESTORE_SOME docfi=0
+       LONG_L  a0, sp, PT_PRMD
+       andi    a0, a0, 0x3     /* extract pplv bit */
+       beqz    a0, 8f
+       cfi_ld  u0, PT_R21, \docfi
+8:
+       LONG_L  a0, sp, PT_ERA
+       csrwr   a0, LOONGARCH_CSR_ERA
+       LONG_L  a0, sp, PT_PRMD
+       csrwr   a0, LOONGARCH_CSR_PRMD
+       cfi_ld  ra, PT_R1, \docfi
+       cfi_ld  a0, PT_R4, \docfi
+       cfi_ld  a1, PT_R5, \docfi
+       cfi_ld  a2, PT_R6, \docfi
+       cfi_ld  a3, PT_R7, \docfi
+       cfi_ld  a4, PT_R8, \docfi
+       cfi_ld  a5, PT_R9, \docfi
+       cfi_ld  a6, PT_R10, \docfi
+       cfi_ld  a7, PT_R11, \docfi
+       cfi_ld  tp, PT_R2, \docfi
+       cfi_ld  fp, PT_R22, \docfi
+       .endm
+
+       .macro  RESTORE_SP_AND_RET docfi=0
+       cfi_ld  sp, PT_R3, \docfi
+       ertn
+       .endm
+
+       .macro  RESTORE_ALL_AND_RET docfi=0
+       RESTORE_STATIC \docfi
+       RESTORE_TEMP \docfi
+       RESTORE_SOME \docfi
+       RESTORE_SP_AND_RET \docfi
+       .endm
+
+#endif /* _ASM_STACKFRAME_H */
diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h
new file mode 100644 (file)
index 0000000..26483e3
--- /dev/null
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_STACKTRACE_H
+#define _ASM_STACKTRACE_H
+
+#include <asm/asm.h>
+#include <asm/ptrace.h>
+#include <asm/loongarch.h>
+#include <linux/stringify.h>
+
+#define STR_LONG_L    __stringify(LONG_L)
+#define STR_LONG_S    __stringify(LONG_S)
+#define STR_LONGSIZE  __stringify(LONGSIZE)
+
+#define STORE_ONE_REG(r) \
+    STR_LONG_S   " $r" __stringify(r)", %1, "STR_LONGSIZE"*"__stringify(r)"\n\t"
+
+#define CSRRD_ONE_REG(reg) \
+    __stringify(csrrd) " %0, "__stringify(reg)"\n\t"
+
+static __always_inline void prepare_frametrace(struct pt_regs *regs)
+{
+       __asm__ __volatile__(
+               /* Save $r1 */
+               STORE_ONE_REG(1)
+               /* Use $r1 to save PC */
+               "pcaddi $r1, 0\n\t"
+               STR_LONG_S " $r1, %0\n\t"
+               /* Restore $r1 */
+               STR_LONG_L " $r1, %1, "STR_LONGSIZE"\n\t"
+               STORE_ONE_REG(2)
+               STORE_ONE_REG(3)
+               STORE_ONE_REG(4)
+               STORE_ONE_REG(5)
+               STORE_ONE_REG(6)
+               STORE_ONE_REG(7)
+               STORE_ONE_REG(8)
+               STORE_ONE_REG(9)
+               STORE_ONE_REG(10)
+               STORE_ONE_REG(11)
+               STORE_ONE_REG(12)
+               STORE_ONE_REG(13)
+               STORE_ONE_REG(14)
+               STORE_ONE_REG(15)
+               STORE_ONE_REG(16)
+               STORE_ONE_REG(17)
+               STORE_ONE_REG(18)
+               STORE_ONE_REG(19)
+               STORE_ONE_REG(20)
+               STORE_ONE_REG(21)
+               STORE_ONE_REG(22)
+               STORE_ONE_REG(23)
+               STORE_ONE_REG(24)
+               STORE_ONE_REG(25)
+               STORE_ONE_REG(26)
+               STORE_ONE_REG(27)
+               STORE_ONE_REG(28)
+               STORE_ONE_REG(29)
+               STORE_ONE_REG(30)
+               STORE_ONE_REG(31)
+               : "=m" (regs->csr_era)
+               : "r" (regs->regs)
+               : "memory");
+       __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_BADV) : "=r" (regs->csr_badvaddr));
+       __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_CRMD) : "=r" (regs->csr_crmd));
+       __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_PRMD) : "=r" (regs->csr_prmd));
+       __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_EUEN) : "=r" (regs->csr_euen));
+       __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_ECFG) : "=r" (regs->csr_ecfg));
+       __asm__ __volatile__(CSRRD_ONE_REG(LOONGARCH_CSR_ESTAT) : "=r" (regs->csr_estat));
+}
+
+#endif /* _ASM_STACKTRACE_H */
diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/asm/string.h
new file mode 100644 (file)
index 0000000..b07e60d
--- /dev/null
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_STRING_H
+#define _ASM_STRING_H
+
+extern void *memset(void *__s, int __c, size_t __count);
+extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+
+#endif /* _ASM_STRING_H */
diff --git a/arch/loongarch/include/asm/switch_to.h b/arch/loongarch/include/asm/switch_to.h
new file mode 100644 (file)
index 0000000..2a8d043
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_SWITCH_TO_H
+#define _ASM_SWITCH_TO_H
+
+#include <asm/cpu-features.h>
+#include <asm/fpu.h>
+
+struct task_struct;
+
+/**
+ * __switch_to - switch execution of a task
+ * @prev:      The task previously executed.
+ * @next:      The task to begin executing.
+ * @next_ti:   task_thread_info(next).
+ *
+ * This function is used whilst scheduling to save the context of prev & load
+ * the context of next. Returns prev.
+ */
+extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev,
+                       struct task_struct *next, struct thread_info *next_ti);
+
+/*
+ * For newly created kernel threads, switch_to() will return to
+ * ret_from_kernel_thread; for newly created user threads, to ret_from_fork.
+ * That is, everything following __switch_to() will be skipped for new threads.
+ * So everything that matters to new threads should be placed before __switch_to().
+ */
+#define switch_to(prev, next, last)                                    \
+do {                                                                   \
+       lose_fpu_inatomic(1, prev);                                     \
+       (last) = __switch_to(prev, next, task_thread_info(next));       \
+} while (0)
+
+#endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/loongarch/include/asm/syscall.h b/arch/loongarch/include/asm/syscall.h
new file mode 100644 (file)
index 0000000..e286dc5
--- /dev/null
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_SYSCALL_H
+#define __ASM_LOONGARCH_SYSCALL_H
+
+#include <linux/compiler.h>
+#include <uapi/linux/audit.h>
+#include <linux/elf-em.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/unistd.h>
+
+extern void *sys_call_table[];
+
+static inline long syscall_get_nr(struct task_struct *task,
+                                 struct pt_regs *regs)
+{
+       return regs->regs[11];
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+                                   struct pt_regs *regs)
+{
+       regs->regs[4] = regs->orig_a0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+                                    struct pt_regs *regs)
+{
+       unsigned long error = regs->regs[4];
+
+       return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+                                           struct pt_regs *regs)
+{
+       return regs->regs[4];
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+                                           struct pt_regs *regs,
+                                           int error, long val)
+{
+       regs->regs[4] = (long) error ? error : val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+                                        struct pt_regs *regs,
+                                        unsigned long *args)
+{
+       args[0] = regs->orig_a0;
+       memcpy(&args[1], &regs->regs[5], 5 * sizeof(long));
+}
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+       return AUDIT_ARCH_LOONGARCH64;
+}
+
+static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
+{
+       return false;
+}
+
+#endif /* __ASM_LOONGARCH_SYSCALL_H */
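Illustrative only, not part of this patch: a sketch of how these accessors compose in a hypothetical kernel-side tracing hook, following the convention the header encodes (a7/regs[11] carries the syscall number, orig_a0 plus a1-a5 the six arguments, a0/regs[4] the return value). The helper name is made up for the example.

#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/syscall.h>

/* Hypothetical helper: dump the number, arguments and result of a syscall. */
static void example_dump_syscall(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];
	long nr = syscall_get_nr(task, regs);		/* a7 (regs[11]) */

	syscall_get_arguments(task, regs, args);	/* orig_a0, then a1..a5 */
	pr_info("syscall %ld(%lx, %lx, %lx, %lx, %lx, %lx) -> %ld\n",
		nr, args[0], args[1], args[2], args[3], args[4], args[5],
		syscall_get_return_value(task, regs));
}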
diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h
new file mode 100644 (file)
index 0000000..99beb11
--- /dev/null
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * thread_info.h: LoongArch low-level thread information
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <asm/processor.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ *   must also be changed
+ */
+struct thread_info {
+       struct task_struct      *task;          /* main task structure */
+       unsigned long           flags;          /* low level flags */
+       unsigned long           tp_value;       /* thread pointer */
+       __u32                   cpu;            /* current CPU */
+       int                     preempt_count;  /* 0 => preemptible, <0 => BUG */
+       struct pt_regs          *regs;
+       unsigned long           syscall;        /* syscall number */
+       unsigned long           syscall_work;   /* SYSCALL_WORK_ flags */
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+#define INIT_THREAD_INFO(tsk)                  \
+{                                              \
+       .task           = &tsk,                 \
+       .flags          = 0,                    \
+       .cpu            = 0,                    \
+       .preempt_count  = INIT_PREEMPT_COUNT,   \
+}
+
+/* How to get the thread information struct from C. */
+register struct thread_info *__current_thread_info __asm__("$r2");
+
+static inline struct thread_info *current_thread_info(void)
+{
+       return __current_thread_info;
+}
+
+register unsigned long current_stack_pointer __asm__("$r3");
+
+#endif /* !__ASSEMBLY__ */
+
+/* thread information allocation */
+#define THREAD_SIZE            SZ_16K
+#define THREAD_MASK            (THREAD_SIZE - 1UL)
+#define THREAD_SIZE_ORDER      ilog2(THREAD_SIZE / PAGE_SIZE)
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ *   access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SIGPENDING         1       /* signal pending */
+#define TIF_NEED_RESCHED       2       /* rescheduling necessary */
+#define TIF_NOTIFY_RESUME      3       /* callback before returning to user */
+#define TIF_NOTIFY_SIGNAL      4       /* signal notifications exist */
+#define TIF_RESTORE_SIGMASK    5       /* restore signal mask in do_signal() */
+#define TIF_NOHZ               6       /* in adaptive nohz mode */
+#define TIF_UPROBE             7       /* breakpointed or singlestepping */
+#define TIF_USEDFPU            8       /* FPU was used by this task this quantum (SMP) */
+#define TIF_USEDSIMD           9       /* SIMD has been used this quantum */
+#define TIF_MEMDIE             10      /* is terminating due to OOM killer */
+#define TIF_FIXADE             11      /* Fix address errors in software */
+#define TIF_LOGADE             12      /* Log address errors to syslog */
+#define TIF_32BIT_REGS         13      /* 32-bit general purpose registers */
+#define TIF_32BIT_ADDR         14      /* 32-bit address space */
+#define TIF_LOAD_WATCH         15      /* If set, load watch registers */
+#define TIF_SINGLESTEP         16      /* Single Step */
+#define TIF_LSX_CTX_LIVE       17      /* LSX context must be preserved */
+#define TIF_LASX_CTX_LIVE      18      /* LASX context must be preserved */
+
+#define _TIF_SIGPENDING                (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME     (1<<TIF_NOTIFY_RESUME)
+#define _TIF_NOTIFY_SIGNAL     (1<<TIF_NOTIFY_SIGNAL)
+#define _TIF_NOHZ              (1<<TIF_NOHZ)
+#define _TIF_UPROBE            (1<<TIF_UPROBE)
+#define _TIF_USEDFPU           (1<<TIF_USEDFPU)
+#define _TIF_USEDSIMD          (1<<TIF_USEDSIMD)
+#define _TIF_FIXADE            (1<<TIF_FIXADE)
+#define _TIF_LOGADE            (1<<TIF_LOGADE)
+#define _TIF_32BIT_REGS                (1<<TIF_32BIT_REGS)
+#define _TIF_32BIT_ADDR                (1<<TIF_32BIT_ADDR)
+#define _TIF_LOAD_WATCH                (1<<TIF_LOAD_WATCH)
+#define _TIF_SINGLESTEP                (1<<TIF_SINGLESTEP)
+#define _TIF_LSX_CTX_LIVE      (1<<TIF_LSX_CTX_LIVE)
+#define _TIF_LASX_CTX_LIVE     (1<<TIF_LASX_CTX_LIVE)
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_THREAD_INFO_H */
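A minimal sketch of how the TIF_* bits above are typically consumed on the way back to userspace. The work mask below is an assumption chosen for illustration; it is not the mask the LoongArch entry code actually uses.

#include <linux/compiler.h>
#include <linux/thread_info.h>

/* Illustrative work mask; the real exit-to-user mask lives in the entry code. */
#define EXAMPLE_EXIT_WORK \
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL | _TIF_NEED_RESCHED)

static inline bool example_has_exit_work(void)
{
	/* current_thread_info() resolves through $r2 (tp), as defined above */
	return READ_ONCE(current_thread_info()->flags) & EXAMPLE_EXIT_WORK;
}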
diff --git a/arch/loongarch/include/asm/time.h b/arch/loongarch/include/asm/time.h
new file mode 100644 (file)
index 0000000..2eae219
--- /dev/null
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_TIME_H
+#define _ASM_TIME_H
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <asm/loongarch.h>
+
+extern u64 cpu_clock_freq;
+extern u64 const_clock_freq;
+
+extern void sync_counter(void);
+
+static inline unsigned int calc_const_freq(void)
+{
+       unsigned int res;
+       unsigned int base_freq;
+       unsigned int cfm, cfd;
+
+       res = read_cpucfg(LOONGARCH_CPUCFG2);
+       if (!(res & CPUCFG2_LLFTP))
+               return 0;
+
+       base_freq = read_cpucfg(LOONGARCH_CPUCFG4);
+       res = read_cpucfg(LOONGARCH_CPUCFG5);
+       cfm = res & 0xffff;
+       cfd = (res >> 16) & 0xffff;
+
+       if (!base_freq || !cfm || !cfd)
+               return 0;
+
+       return (base_freq * cfm / cfd);
+}
+
+/*
+ * Initialize the calling CPU's timer interrupt as clockevent device
+ */
+extern int constant_clockevent_init(void);
+extern int constant_clocksource_init(void);
+
+static inline void clockevent_set_clock(struct clock_event_device *cd,
+                                       unsigned int clock)
+{
+       clockevents_calc_mult_shift(cd, clock, 4);
+}
+
+#endif /* _ASM_TIME_H */
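To make the scaling in calc_const_freq() concrete, here is a standalone userspace sketch with made-up CPUCFG readings: a 100 MHz base clock with cfm = 1 and cfd = 1 gives 100000000 * 1 / 1 = 100 MHz. The values are hypothetical, not taken from real hardware.

#include <stdio.h>

/* Mirrors the arithmetic of calc_const_freq() with caller-supplied values. */
static unsigned int example_const_freq(unsigned int base_freq,
				       unsigned int cfm, unsigned int cfd)
{
	if (!base_freq || !cfm || !cfd)
		return 0;
	return base_freq * cfm / cfd;
}

int main(void)
{
	/* Hypothetical CPUCFG4/CPUCFG5 readings for illustration */
	printf("constant timer frequency: %u Hz\n",
	       example_const_freq(100000000, 1, 1));
	return 0;
}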
diff --git a/arch/loongarch/include/asm/timex.h b/arch/loongarch/include/asm/timex.h
new file mode 100644 (file)
index 0000000..d3ed99a
--- /dev/null
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_TIMEX_H
+#define _ASM_TIMEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+
+/*
+ * Standard way to access the cycle counter.
+ * Currently only used on SMP for scheduling.
+ *
+ * We know that all SMP capable CPUs have cycle counters.
+ */
+
+typedef unsigned long cycles_t;
+
+#define get_cycles get_cycles
+
+static inline cycles_t get_cycles(void)
+{
+       return drdtime();
+}
+
+#endif /* __KERNEL__ */
+
+#endif /*  _ASM_TIMEX_H */
diff --git a/arch/loongarch/include/asm/tlb.h b/arch/loongarch/include/asm/tlb.h
new file mode 100644 (file)
index 0000000..4f629ae
--- /dev/null
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_TLB_H
+#define __ASM_TLB_H
+
+#include <linux/mm_types.h>
+#include <asm/cpu-features.h>
+#include <asm/loongarch.h>
+
+/*
+ * TLB Invalidate Flush
+ */
+static inline void tlbclr(void)
+{
+       __asm__ __volatile__("tlbclr");
+}
+
+static inline void tlbflush(void)
+{
+       __asm__ __volatile__("tlbflush");
+}
+
+/*
+ * TLB R/W operations.
+ */
+static inline void tlb_probe(void)
+{
+       __asm__ __volatile__("tlbsrch");
+}
+
+static inline void tlb_read(void)
+{
+       __asm__ __volatile__("tlbrd");
+}
+
+static inline void tlb_write_indexed(void)
+{
+       __asm__ __volatile__("tlbwr");
+}
+
+static inline void tlb_write_random(void)
+{
+       __asm__ __volatile__("tlbfill");
+}
+
+enum invtlb_ops {
+       /* Invalidate all TLB entries */
+       INVTLB_ALL = 0x0,
+       /* Invalidate all entries in the current TLB */
+       INVTLB_CURRENT_ALL = 0x1,
+       /* Invalidate all global=1 lines in the current TLB */
+       INVTLB_CURRENT_GTRUE = 0x2,
+       /* Invalidate all global=0 lines in the current TLB */
+       INVTLB_CURRENT_GFALSE = 0x3,
+       /* Invalidate global=0 lines with matched ASID in the current TLB */
+       INVTLB_GFALSE_AND_ASID = 0x4,
+       /* Invalidate addr with global=0 and matched ASID in the current TLB */
+       INVTLB_ADDR_GFALSE_AND_ASID = 0x5,
+       /* Invalidate addr with global=1 or matched ASID in the current TLB */
+       INVTLB_ADDR_GTRUE_OR_ASID = 0x6,
+       /* Invalidate lines with matched GID in the guest TLB */
+       INVGTLB_GID = 0x9,
+       /* Invalidate global=1 lines with matched GID in the guest TLB */
+       INVGTLB_GID_GTRUE = 0xa,
+       /* Invalidate global=0 lines with matched GID in the guest TLB */
+       INVGTLB_GID_GFALSE = 0xb,
+       /* Invalidate global=0 lines with matched GID and ASID in the guest TLB */
+       INVGTLB_GID_GFALSE_ASID = 0xc,
+       /* Invalidate global=0 lines with matched GID, ASID and addr in the guest TLB */
+       INVGTLB_GID_GFALSE_ASID_ADDR = 0xd,
+       /* Invalidate global=1 lines with matched GID, ASID and addr in the guest TLB */
+       INVGTLB_GID_GTRUE_ASID_ADDR = 0xe,
+       /* Invalidate all gva-->gpa guest TLB lines for all GIDs */
+       INVGTLB_ALLGID_GVA_TO_GPA = 0x10,
+       /* Invalidate all gpa-->hpa TLB lines for all GIDs */
+       INVTLB_ALLGID_GPA_TO_HPA = 0x11,
+       /* Invalidate all TLB lines for all GIDs, both gva-->gpa and gpa-->hpa */
+       INVTLB_ALLGID = 0x12,
+       /* Invalidate gva-->gpa guest TLB lines with matched GID */
+       INVGTLB_GID_GVA_TO_GPA = 0x13,
+       /* Invalidate gpa-->hpa TLB lines with matched GID */
+       INVTLB_GID_GPA_TO_HPA = 0x14,
+       /* Invalidate TLB lines with matched GID, both gva-->gpa and gpa-->hpa */
+       INVTLB_GID_ALL = 0x15,
+       /* Invalidate gpa-->hpa TLB lines with matched GID and addr */
+       INVTLB_GID_ADDR = 0x16,
+};
+
+/*
+ * invtlb op, info, addr
+ * Instruction word: (0x1 << 26) | (0x24 << 20) | (0x13 << 15) |
+ *                   (addr << 10) | (info << 5) | op
+ */
+static inline void invtlb(u32 op, u32 info, u64 addr)
+{
+       __asm__ __volatile__(
+               "parse_r addr,%0\n\t"
+               "parse_r info,%1\n\t"
+               ".word ((0x6498000) | (addr << 10) | (info << 5) | %2)\n\t"
+               :
+               : "r"(addr), "r"(info), "i"(op)
+               :
+               );
+}
+
+static inline void invtlb_addr(u32 op, u32 info, u64 addr)
+{
+       __asm__ __volatile__(
+               "parse_r addr,%0\n\t"
+               ".word ((0x6498000) | (addr << 10) | (0 << 5) | %1)\n\t"
+               :
+               : "r"(addr), "i"(op)
+               :
+               );
+}
+
+static inline void invtlb_info(u32 op, u32 info, u64 addr)
+{
+       __asm__ __volatile__(
+               "parse_r info,%0\n\t"
+               ".word ((0x6498000) | (0 << 10) | (info << 5) | %1)\n\t"
+               :
+               : "r"(info), "i"(op)
+               :
+               );
+}
+
+static inline void invtlb_all(u32 op, u32 info, u64 addr)
+{
+       __asm__ __volatile__(
+               ".word ((0x6498000) | (0 << 10) | (0 << 5) | %0)\n\t"
+               :
+               : "i"(op)
+               :
+               );
+}
+
+/*
+ * LoongArch doesn't need any special per-pte or per-vma handling, except
+ * that we need to flush the cache for the area being unmapped.
+ */
+#define tlb_start_vma(tlb, vma)                                        \
+       do {                                                    \
+               if (!(tlb)->fullmm)                             \
+                       flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+       }  while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+
+static void tlb_flush(struct mmu_gather *tlb);
+
+#define tlb_flush tlb_flush
+#include <asm-generic/tlb.h>
+
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+       struct vm_area_struct vma;
+
+       vma.vm_mm = tlb->mm;
+       vma.vm_flags = 0;
+       if (tlb->fullmm) {
+               flush_tlb_mm(tlb->mm);
+               return;
+       }
+
+       flush_tlb_range(&vma, tlb->start, tlb->end);
+}
+
+extern void handle_tlb_load(void);
+extern void handle_tlb_store(void);
+extern void handle_tlb_modify(void);
+extern void handle_tlb_refill(void);
+extern void handle_tlb_protect(void);
+
+extern void dump_tlb_all(void);
+extern void dump_tlb_regs(void);
+
+#endif /* __ASM_TLB_H */
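A quick standalone check (illustrative, outside the kernel) that the 0x6498000 literal in the invtlb helpers really is (0x1 << 26) | (0x24 << 20) | (0x13 << 15), with the addr register number landing in bits 10-14, the info register number in bits 5-9 and the op in bits 0-4, exactly as the inline asm composes it.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t base = (0x1u << 26) | (0x24u << 20) | (0x13u << 15);
	/* e.g. addr in $r4, info in $r5, op = INVTLB_ADDR_GFALSE_AND_ASID (0x5) */
	uint32_t word = base | (4u << 10) | (5u << 5) | 0x5u;

	assert(base == 0x6498000u);	/* matches the literal in the header */
	printf("encoded invtlb word: 0x%08x\n", word);
	return 0;
}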
diff --git a/arch/loongarch/include/asm/tlbflush.h b/arch/loongarch/include/asm/tlbflush.h
new file mode 100644 (file)
index 0000000..a0785e5
--- /dev/null
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_TLBFLUSH_H
+#define __ASM_TLBFLUSH_H
+
+#include <linux/mm.h>
+
+/*
+ * TLB flushing:
+ *
+ *  - flush_tlb_all() flushes all processes TLB entries
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB entries
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ */
+extern void local_flush_tlb_all(void);
+extern void local_flush_tlb_user(void);
+extern void local_flush_tlb_kernel(void);
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void local_flush_tlb_one(unsigned long vaddr);
+
+#ifdef CONFIG_SMP
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long, unsigned long);
+extern void flush_tlb_kernel_range(unsigned long, unsigned long);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_one(unsigned long vaddr);
+
+#else /* CONFIG_SMP */
+
+#define flush_tlb_all()                        local_flush_tlb_all()
+#define flush_tlb_mm(mm)               local_flush_tlb_mm(mm)
+#define flush_tlb_range(vma, vmaddr, end)      local_flush_tlb_range(vma, vmaddr, end)
+#define flush_tlb_kernel_range(vmaddr, end)    local_flush_tlb_kernel_range(vmaddr, end)
+#define flush_tlb_page(vma, page)      local_flush_tlb_page(vma, page)
+#define flush_tlb_one(vaddr)           local_flush_tlb_one(vaddr)
+
+#endif /* CONFIG_SMP */
+
+#endif /* __ASM_TLBFLUSH_H */
diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h
new file mode 100644 (file)
index 0000000..66128de
--- /dev/null
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_TOPOLOGY_H
+#define __ASM_TOPOLOGY_H
+
+#include <linux/smp.h>
+
+#ifdef CONFIG_NUMA
+
+extern cpumask_t cpus_on_node[];
+
+#define cpumask_of_node(node)  (&cpus_on_node[node])
+
+struct pci_bus;
+extern int pcibus_to_node(struct pci_bus *);
+
+#define cpumask_of_pcibus(bus) (cpu_online_mask)
+
+extern unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];
+
+void numa_set_distance(int from, int to, int distance);
+
+#define node_distance(from, to)        (node_distances[(from)][(to)])
+
+#else
+#define pcibus_to_node(bus)    0
+#endif
+
+#ifdef CONFIG_SMP
+#define topology_physical_package_id(cpu)      (cpu_data[cpu].package)
+#define topology_core_id(cpu)                  (cpu_data[cpu].core)
+#define topology_core_cpumask(cpu)             (&cpu_core_map[cpu])
+#define topology_sibling_cpumask(cpu)          (&cpu_sibling_map[cpu])
+#endif
+
+#include <asm-generic/topology.h>
+
+static inline void arch_fix_phys_package_id(int num, u32 slot) { }
+#endif /* __ASM_TOPOLOGY_H */
diff --git a/arch/loongarch/include/asm/types.h b/arch/loongarch/include/asm/types.h
new file mode 100644 (file)
index 0000000..baf15a0
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_TYPES_H
+#define _ASM_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
+
+#ifdef __ASSEMBLY__
+#define _ULCAST_
+#define _U64CAST_
+#else
+#define _ULCAST_ (unsigned long)
+#define _U64CAST_ (u64)
+#endif
+
+#endif /* _ASM_TYPES_H */
diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h
new file mode 100644 (file)
index 0000000..217c6a3
--- /dev/null
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ */
+#ifndef _ASM_UACCESS_H
+#define _ASM_UACCESS_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/extable.h>
+#include <asm/pgtable.h>
+#include <asm-generic/extable.h>
+#include <asm-generic/access_ok.h>
+
+extern u64 __ua_limit;
+
+#define __UA_ADDR      ".dword"
+#define __UA_LA                "la.abs"
+#define __UA_LIMIT     __ua_limit
+
+/*
+ * get_user: - Get a simple variable from user space.
+ * @x:  Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_user(x, ptr) \
+({                                                                     \
+       const __typeof__(*(ptr)) __user *__p = (ptr);                   \
+                                                                       \
+       might_fault();                                                  \
+       access_ok(__p, sizeof(*__p)) ? __get_user((x), __p) :           \
+                                      ((x) = 0, -EFAULT);              \
+})
+
+/*
+ * put_user: - Write a simple value into user space.
+ * @x:  Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user(x, ptr) \
+({                                                                     \
+       __typeof__(*(ptr)) __user *__p = (ptr);                         \
+                                                                       \
+       might_fault();                                                  \
+       access_ok(__p, sizeof(*__p)) ? __put_user((x), __p) : -EFAULT;  \
+})
+
+/*
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x:  Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define __get_user(x, ptr) \
+({                                                                     \
+       int __gu_err = 0;                                               \
+                                                                       \
+       __chk_user_ptr(ptr);                                            \
+       __get_user_common((x), sizeof(*(ptr)), ptr);                    \
+       __gu_err;                                                       \
+})
+
+/*
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x:  Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define __put_user(x, ptr) \
+({                                                                     \
+       int __pu_err = 0;                                               \
+       __typeof__(*(ptr)) __pu_val;                                    \
+                                                                       \
+       __pu_val = (x);                                                 \
+       __chk_user_ptr(ptr);                                            \
+       __put_user_common(ptr, sizeof(*(ptr)));                         \
+       __pu_err;                                                       \
+})
+
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct __user *)(x))
+
+#define __get_user_common(val, size, ptr)                              \
+do {                                                                   \
+       switch (size) {                                                 \
+       case 1: __get_data_asm(val, "ld.b", ptr); break;                \
+       case 2: __get_data_asm(val, "ld.h", ptr); break;                \
+       case 4: __get_data_asm(val, "ld.w", ptr); break;                \
+       case 8: __get_data_asm(val, "ld.d", ptr); break;                \
+       default: BUILD_BUG(); break;                                    \
+       }                                                               \
+} while (0)
+
+#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
+
+#define __get_data_asm(val, insn, ptr)                                 \
+{                                                                      \
+       long __gu_tmp;                                                  \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       "1:     " insn "        %1, %2                          \n"     \
+       "2:                                                     \n"     \
+       "       .section .fixup,\"ax\"                          \n"     \
+       "3:     li.w    %0, %3                                  \n"     \
+       "       or      %1, $r0, $r0                            \n"     \
+       "       b       2b                                      \n"     \
+       "       .previous                                       \n"     \
+       "       .section __ex_table,\"a\"                       \n"     \
+       "       "__UA_ADDR "\t1b, 3b                            \n"     \
+       "       .previous                                       \n"     \
+       : "+r" (__gu_err), "=r" (__gu_tmp)                              \
+       : "m" (__m(ptr)), "i" (-EFAULT));                               \
+                                                                       \
+       (val) = (__typeof__(*(ptr))) __gu_tmp;                          \
+}
+
+#define __put_user_common(ptr, size)                                   \
+do {                                                                   \
+       switch (size) {                                                 \
+       case 1: __put_data_asm("st.b", ptr); break;                     \
+       case 2: __put_data_asm("st.h", ptr); break;                     \
+       case 4: __put_data_asm("st.w", ptr); break;                     \
+       case 8: __put_data_asm("st.d", ptr); break;                     \
+       default: BUILD_BUG(); break;                                    \
+       }                                                               \
+} while (0)
+
+#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
+
+#define __put_data_asm(insn, ptr)                                      \
+{                                                                      \
+       __asm__ __volatile__(                                           \
+       "1:     " insn "        %z2, %1         # __put_user_asm\n"     \
+       "2:                                                     \n"     \
+       "       .section        .fixup,\"ax\"                   \n"     \
+       "3:     li.w    %0, %3                                  \n"     \
+       "       b       2b                                      \n"     \
+       "       .previous                                       \n"     \
+       "       .section        __ex_table,\"a\"                \n"     \
+       "       " __UA_ADDR "   1b, 3b                          \n"     \
+       "       .previous                                       \n"     \
+       : "+r" (__pu_err), "=m" (__m(ptr))                              \
+       : "Jr" (__pu_val), "i" (-EFAULT));                              \
+}
+
+#define __get_kernel_nofault(dst, src, type, err_label)                        \
+do {                                                                   \
+       int __gu_err = 0;                                               \
+                                                                       \
+       __get_kernel_common(*((type *)(dst)), sizeof(type),             \
+                           (__force type *)(src));                     \
+       if (unlikely(__gu_err))                                         \
+               goto err_label;                                         \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label)                        \
+do {                                                                   \
+       type __pu_val;                                                  \
+       int __pu_err = 0;                                               \
+                                                                       \
+       __pu_val = *(__force type *)(src);                              \
+       __put_kernel_common(((type *)(dst)), sizeof(type));             \
+       if (unlikely(__pu_err))                                         \
+               goto err_label;                                         \
+} while (0)
+
+extern unsigned long __copy_user(void *to, const void *from, __kernel_size_t n);
+
+static inline unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       return __copy_user(to, from, n);
+}
+
+static inline unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       return __copy_user(to, from, n);
+}
+
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+
+/*
+ * __clear_user: - Zero a block of memory in user space, with less checking.
+ * @addr: Destination address, in user space.
+ * @size: Number of bytes to zero.
+ *
+ * Zero a block of memory in user space.  Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be cleared.
+ * On success, this will be zero.
+ */
+extern unsigned long __clear_user(void __user *addr, __kernel_size_t size);
+
+#define clear_user(addr, n)                                            \
+({                                                                     \
+       void __user *__cl_addr = (addr);                                \
+       unsigned long __cl_size = (n);                                  \
+       if (__cl_size && access_ok(__cl_addr, __cl_size))               \
+               __cl_size = __clear_user(__cl_addr, __cl_size);         \
+       __cl_size;                                                      \
+})
+
+extern long strncpy_from_user(char *to, const char __user *from, long n);
+extern long strnlen_user(const char __user *str, long n);
+
+#endif /* _ASM_UACCESS_H */
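Usage sketch for the accessors documented above, framed as a hypothetical driver helper rather than anything in this patch: get_user()/put_user() return 0 or -EFAULT, so errors can simply be propagated.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical helper: read a u32 from userspace, double it, write it back. */
static long example_double_u32(u32 __user *uptr)
{
	u32 val;

	if (get_user(val, uptr))	/* -EFAULT on a bad pointer, val forced to 0 */
		return -EFAULT;

	return put_user(val * 2, uptr);	/* 0 on success, -EFAULT on fault */
}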
diff --git a/arch/loongarch/include/asm/unistd.h b/arch/loongarch/include/asm/unistd.h
new file mode 100644 (file)
index 0000000..cfddb01
--- /dev/null
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <uapi/asm/unistd.h>
+
+#define NR_syscalls (__NR_syscalls)
diff --git a/arch/loongarch/include/asm/vdso.h b/arch/loongarch/include/asm/vdso.h
new file mode 100644 (file)
index 0000000..8f8a0f9
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_VDSO_H
+#define __ASM_VDSO_H
+
+#include <linux/mm_types.h>
+#include <vdso/datapage.h>
+
+#include <asm/barrier.h>
+
+/*
+ * struct loongarch_vdso_info - Details of a VDSO image.
+ * @vdso: Pointer to VDSO image (page-aligned).
+ * @size: Size of the VDSO image (page-aligned).
+ * @offset_sigreturn: Offset of the rt_sigreturn() trampoline.
+ * @code_mapping: Special mapping structure for vdso code.
+ * @data_mapping: Special mapping structure for vdso data.
+ *
+ * This structure contains details of a VDSO image, including the image data
+ * and offsets of certain symbols required by the kernel. It is generated as
+ * part of the VDSO build process, aside from the mapping page array, which is
+ * populated at runtime.
+ */
+struct loongarch_vdso_info {
+       void *vdso;
+       unsigned long size;
+       unsigned long offset_sigreturn;
+       struct vm_special_mapping code_mapping;
+       struct vm_special_mapping data_mapping;
+};
+
+extern struct loongarch_vdso_info vdso_info;
+
+#endif /* __ASM_VDSO_H */
diff --git a/arch/loongarch/include/asm/vdso/clocksource.h b/arch/loongarch/include/asm/vdso/clocksource.h
new file mode 100644 (file)
index 0000000..13cd580
--- /dev/null
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES   \
+       VDSO_CLOCKMODE_CPU
+
+#endif /* __ASM_VDSOCLOCKSOURCE_H */
diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h
new file mode 100644 (file)
index 0000000..7b2cd37
--- /dev/null
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/unistd.h>
+#include <asm/vdso/vdso.h>
+
+#define VDSO_HAS_CLOCK_GETRES          1
+
+static __always_inline long gettimeofday_fallback(
+                               struct __kernel_old_timeval *_tv,
+                               struct timezone *_tz)
+{
+       register struct __kernel_old_timeval *tv asm("a0") = _tv;
+       register struct timezone *tz asm("a1") = _tz;
+       register long nr asm("a7") = __NR_gettimeofday;
+       register long ret asm("a0");
+
+       asm volatile(
+       "       syscall 0\n"
+       : "+r" (ret)
+       : "r" (nr), "r" (tv), "r" (tz)
+       : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
+         "$t8", "memory");
+
+       return ret;
+}
+
+static __always_inline long clock_gettime_fallback(
+                                       clockid_t _clkid,
+                                       struct __kernel_timespec *_ts)
+{
+       register clockid_t clkid asm("a0") = _clkid;
+       register struct __kernel_timespec *ts asm("a1") = _ts;
+       register long nr asm("a7") = __NR_clock_gettime;
+       register long ret asm("a0");
+
+       asm volatile(
+       "       syscall 0\n"
+       : "+r" (ret)
+       : "r" (nr), "r" (clkid), "r" (ts)
+       : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
+         "$t8", "memory");
+
+       return ret;
+}
+
+static __always_inline int clock_getres_fallback(
+                                       clockid_t _clkid,
+                                       struct __kernel_timespec *_ts)
+{
+       register clockid_t clkid asm("a0") = _clkid;
+       register struct __kernel_timespec *ts asm("a1") = _ts;
+       register long nr asm("a7") = __NR_clock_getres;
+       register long ret asm("a0");
+
+       asm volatile(
+       "       syscall 0\n"
+       : "+r" (ret)
+       : "r" (nr), "r" (clkid), "r" (ts)
+       : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
+         "$t8", "memory");
+
+       return ret;
+}
+
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+                                                const struct vdso_data *vd)
+{
+       uint64_t count;
+
+       __asm__ __volatile__(
+       "       rdtime.d %0, $zero\n"
+       : "=r" (count));
+
+       return count;
+}
+
+static inline bool loongarch_vdso_hres_capable(void)
+{
+       return true;
+}
+#define __arch_vdso_hres_capable loongarch_vdso_hres_capable
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+       return get_vdso_data();
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
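The fallbacks above spell out the LoongArch syscall convention: number in a7, arguments from a0 upward, result back in a0, temporaries clobbered. A userspace sketch of the same convention follows (LoongArch-only and illustrative; real code should go through the vDSO or libc).

#include <stdio.h>
#include <sys/syscall.h>

/* Raw one-argument syscall using the same register convention as above. */
static long example_raw_syscall1(long nr, long arg0)
{
	register long a0 asm("a0") = arg0;
	register long a7 asm("a7") = nr;

	asm volatile("syscall 0"
		     : "+r" (a0)
		     : "r" (a7)
		     : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
		       "$t8", "memory");
	return a0;
}

int main(void)
{
	printf("getpid() via raw syscall: %ld\n", example_raw_syscall1(SYS_getpid, 0));
	return 0;
}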
diff --git a/arch/loongarch/include/asm/vdso/processor.h b/arch/loongarch/include/asm/vdso/processor.h
new file mode 100644 (file)
index 0000000..ef5770b
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#define cpu_relax()    barrier()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h
new file mode 100644 (file)
index 0000000..5a01643
--- /dev/null
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <asm/asm.h>
+#include <asm/page.h>
+
+static inline unsigned long get_vdso_base(void)
+{
+       unsigned long addr;
+
+       __asm__(
+       " la.pcrel %0, _start\n"
+       : "=r" (addr)
+       :
+       :);
+
+       return addr;
+}
+
+static inline const struct vdso_data *get_vdso_data(void)
+{
+       return (const struct vdso_data *)(get_vdso_base() - PAGE_SIZE);
+}
+
+#endif /* __ASSEMBLY__ */
diff --git a/arch/loongarch/include/asm/vdso/vsyscall.h b/arch/loongarch/include/asm/vdso/vsyscall.h
new file mode 100644 (file)
index 0000000..5de6153
--- /dev/null
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+
+extern struct vdso_data *vdso_data;
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline
+struct vdso_data *__loongarch_get_k_vdso_data(void)
+{
+       return vdso_data;
+}
+#define __arch_get_k_vdso_data __loongarch_get_k_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/loongarch/include/asm/vermagic.h b/arch/loongarch/include/asm/vermagic.h
new file mode 100644 (file)
index 0000000..8b47ccf
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#define MODULE_PROC_FAMILY "LOONGARCH "
+
+#ifdef CONFIG_32BIT
+#define MODULE_KERNEL_TYPE "32BIT "
+#elif defined CONFIG_64BIT
+#define MODULE_KERNEL_TYPE "64BIT "
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+       MODULE_PROC_FAMILY MODULE_KERNEL_TYPE
+
+#endif /* _ASM_VERMAGIC_H */
diff --git a/arch/loongarch/include/asm/vmalloc.h b/arch/loongarch/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..965a0d4
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_LOONGARCH_VMALLOC_H
+#define _ASM_LOONGARCH_VMALLOC_H
+
+#endif /* _ASM_LOONGARCH_VMALLOC_H */
similarity index 59%
rename from drivers/staging/vme/Makefile
rename to arch/loongarch/include/uapi/asm/Kbuild
index cf2f686..4aa680c 100644 (file)
@@ -1,2 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-y                          += devices/
+generic-y += kvm_para.h
diff --git a/arch/loongarch/include/uapi/asm/auxvec.h b/arch/loongarch/include/uapi/asm/auxvec.h
new file mode 100644 (file)
index 0000000..922d9e6
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_AUXVEC_H
+#define __ASM_AUXVEC_H
+
+/* Location of VDSO image. */
+#define AT_SYSINFO_EHDR                33
+
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
+#endif /* __ASM_AUXVEC_H */
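For context, AT_SYSINFO_EHDR is the entry userspace reads back from the aux vector to locate its vDSO image; a small portable glibc example:

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* The kernel publishes the vDSO ELF header address under AT_SYSINFO_EHDR. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("vDSO ELF header mapped at %#lx\n", vdso);
	else
		printf("no vDSO reported in the aux vector\n");
	return 0;
}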
diff --git a/arch/loongarch/include/uapi/asm/bitsperlong.h b/arch/loongarch/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..00b4ba1
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_LOONGARCH_BITSPERLONG_H
+#define __ASM_LOONGARCH_BITSPERLONG_H
+
+#define __BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_LOONGARCH_BITSPERLONG_H */
diff --git a/arch/loongarch/include/uapi/asm/break.h b/arch/loongarch/include/uapi/asm/break.h
new file mode 100644 (file)
index 0000000..bb9b82b
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __UAPI_ASM_BREAK_H
+#define __UAPI_ASM_BREAK_H
+
+#define BRK_DEFAULT            0       /* Used as default */
+#define BRK_BUG                        1       /* Used by BUG() */
+#define BRK_KDB                        2       /* Used in KDB_ENTER() */
+#define BRK_MATHEMU            3       /* Used by FPU emulator */
+#define BRK_USERBP             4       /* User bp (used by debuggers) */
+#define BRK_SSTEPBP            5       /* User bp (used by debuggers) */
+#define BRK_OVERFLOW           6       /* Overflow check */
+#define BRK_DIVZERO            7       /* Divide by zero check */
+#define BRK_RANGE              8       /* Range error check */
+#define BRK_MULOVFL            9       /* Multiply overflow */
+#define BRK_KPROBE_BP          10      /* Kprobe break */
+#define BRK_KPROBE_SSTEPBP     11      /* Kprobe single step break */
+#define BRK_UPROBE_BP          12      /* See <asm/uprobes.h> */
+#define BRK_UPROBE_XOLBP       13      /* See <asm/uprobes.h> */
+
+#endif /* __UAPI_ASM_BREAK_H */
diff --git a/arch/loongarch/include/uapi/asm/byteorder.h b/arch/loongarch/include/uapi/asm/byteorder.h
new file mode 100644 (file)
index 0000000..b1722d8
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_BYTEORDER_H
+#define _ASM_BYTEORDER_H
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _ASM_BYTEORDER_H */
diff --git a/arch/loongarch/include/uapi/asm/hwcap.h b/arch/loongarch/include/uapi/asm/hwcap.h
new file mode 100644 (file)
index 0000000..8840b72
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_HWCAP_H
+#define _UAPI_ASM_HWCAP_H
+
+/* HWCAP flags */
+#define HWCAP_LOONGARCH_CPUCFG         (1 << 0)
+#define HWCAP_LOONGARCH_LAM            (1 << 1)
+#define HWCAP_LOONGARCH_UAL            (1 << 2)
+#define HWCAP_LOONGARCH_FPU            (1 << 3)
+#define HWCAP_LOONGARCH_LSX            (1 << 4)
+#define HWCAP_LOONGARCH_LASX           (1 << 5)
+#define HWCAP_LOONGARCH_CRC32          (1 << 6)
+#define HWCAP_LOONGARCH_COMPLEX                (1 << 7)
+#define HWCAP_LOONGARCH_CRYPTO         (1 << 8)
+#define HWCAP_LOONGARCH_LVZ            (1 << 9)
+#define HWCAP_LOONGARCH_LBT_X86                (1 << 10)
+#define HWCAP_LOONGARCH_LBT_ARM                (1 << 11)
+#define HWCAP_LOONGARCH_LBT_MIPS       (1 << 12)
+
+#endif /* _UAPI_ASM_HWCAP_H */
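These bits are surfaced to userspace through AT_HWCAP, so a feature probe reduces to a mask test. A small illustrative example, with the two constants mirrored locally so it stands alone:

#include <stdio.h>
#include <sys/auxv.h>

/* Mirrors the definitions above for a self-contained example. */
#define HWCAP_LOONGARCH_FPU	(1 << 3)
#define HWCAP_LOONGARCH_LSX	(1 << 4)

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("FPU: %s, LSX: %s\n",
	       (hwcap & HWCAP_LOONGARCH_FPU) ? "yes" : "no",
	       (hwcap & HWCAP_LOONGARCH_LSX) ? "yes" : "no");
	return 0;
}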
diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h
new file mode 100644 (file)
index 0000000..083193f
--- /dev/null
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _UAPI_ASM_PTRACE_H
+#define _UAPI_ASM_PTRACE_H
+
+#include <linux/types.h>
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#endif
+
+/*
+ * For PTRACE_{POKE,PEEK}USR. 0 - 31 are GPRs,
+ * 32 is syscall's original ARG0, 33 is PC, 34 is BADVADDR.
+ */
+#define GPR_BASE       0
+#define GPR_NUM                32
+#define GPR_END                (GPR_BASE + GPR_NUM - 1)
+#define ARG0           (GPR_END + 1)
+#define PC             (GPR_END + 2)
+#define BADVADDR       (GPR_END + 3)
+
+#define NUM_FPU_REGS   32
+
+struct user_pt_regs {
+       /* Main processor registers. */
+       unsigned long regs[32];
+
+       /* Original syscall arg0. */
+       unsigned long orig_a0;
+
+       /* Special CSR registers. */
+       unsigned long csr_era;
+       unsigned long csr_badv;
+       unsigned long reserved[10];
+} __attribute__((aligned(8)));
+
+struct user_fp_state {
+       uint64_t    fpr[32];
+       uint64_t    fcc;
+       uint32_t    fcsr;
+};
+
+#define PTRACE_SYSEMU                  0x1f
+#define PTRACE_SYSEMU_SINGLESTEP       0x20
+
+#endif /* _UAPI_ASM_PTRACE_H */
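A debugger-side sketch of consuming this register layout with PTRACE_GETREGSET and NT_PRSTATUS. Error handling is trimmed, the structure is mirrored locally so the example stands alone, and pid is assumed to be a stopped tracee.

#include <elf.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct example_user_pt_regs {	/* mirrors struct user_pt_regs above */
	unsigned long regs[32];
	unsigned long orig_a0;
	unsigned long csr_era;
	unsigned long csr_badv;
	unsigned long reserved[10];
};

void example_dump_pc(pid_t pid)
{
	struct example_user_pt_regs uregs;
	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
		printf("pc=%#lx a0=%#lx\n", uregs.csr_era, uregs.regs[4]);
}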
diff --git a/arch/loongarch/include/uapi/asm/reg.h b/arch/loongarch/include/uapi/asm/reg.h
new file mode 100644 (file)
index 0000000..90ad910
--- /dev/null
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Various register offset definitions for debuggers, core file
+ * examiners and whatnot.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef __UAPI_ASM_LOONGARCH_REG_H
+#define __UAPI_ASM_LOONGARCH_REG_H
+
+#define LOONGARCH_EF_R0                0
+#define LOONGARCH_EF_R1                1
+#define LOONGARCH_EF_R2                2
+#define LOONGARCH_EF_R3                3
+#define LOONGARCH_EF_R4                4
+#define LOONGARCH_EF_R5                5
+#define LOONGARCH_EF_R6                6
+#define LOONGARCH_EF_R7                7
+#define LOONGARCH_EF_R8                8
+#define LOONGARCH_EF_R9                9
+#define LOONGARCH_EF_R10       10
+#define LOONGARCH_EF_R11       11
+#define LOONGARCH_EF_R12       12
+#define LOONGARCH_EF_R13       13
+#define LOONGARCH_EF_R14       14
+#define LOONGARCH_EF_R15       15
+#define LOONGARCH_EF_R16       16
+#define LOONGARCH_EF_R17       17
+#define LOONGARCH_EF_R18       18
+#define LOONGARCH_EF_R19       19
+#define LOONGARCH_EF_R20       20
+#define LOONGARCH_EF_R21       21
+#define LOONGARCH_EF_R22       22
+#define LOONGARCH_EF_R23       23
+#define LOONGARCH_EF_R24       24
+#define LOONGARCH_EF_R25       25
+#define LOONGARCH_EF_R26       26
+#define LOONGARCH_EF_R27       27
+#define LOONGARCH_EF_R28       28
+#define LOONGARCH_EF_R29       29
+#define LOONGARCH_EF_R30       30
+#define LOONGARCH_EF_R31       31
+
+/*
+ * Saved special registers
+ */
+#define LOONGARCH_EF_ORIG_A0   32
+#define LOONGARCH_EF_CSR_ERA   33
+#define LOONGARCH_EF_CSR_BADV  34
+#define LOONGARCH_EF_CSR_CRMD  35
+#define LOONGARCH_EF_CSR_PRMD  36
+#define LOONGARCH_EF_CSR_EUEN  37
+#define LOONGARCH_EF_CSR_ECFG  38
+#define LOONGARCH_EF_CSR_ESTAT 39
+
+#define LOONGARCH_EF_SIZE      320     /* size in bytes */
+
+#endif /* __UAPI_ASM_LOONGARCH_REG_H */
diff --git a/arch/loongarch/include/uapi/asm/sigcontext.h b/arch/loongarch/include/uapi/asm/sigcontext.h
new file mode 100644 (file)
index 0000000..52e49b8
--- /dev/null
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _UAPI_ASM_SIGCONTEXT_H
+#define _UAPI_ASM_SIGCONTEXT_H
+
+#include <linux/types.h>
+#include <linux/posix_types.h>
+
+/* FP context was used */
+#define SC_USED_FP             (1 << 0)
+/* Address error was due to memory load */
+#define SC_ADDRERR_RD          (1 << 30)
+/* Address error was due to memory store */
+#define SC_ADDRERR_WR          (1 << 31)
+
+struct sigcontext {
+       __u64   sc_pc;
+       __u64   sc_regs[32];
+       __u32   sc_flags;
+       __u64   sc_extcontext[0] __attribute__((__aligned__(16)));
+};
+
+#define CONTEXT_INFO_ALIGN     16
+struct sctx_info {
+       __u32   magic;
+       __u32   size;
+       __u64   padding;        /* padding to 16 bytes */
+};
+
+/* FPU context */
+#define FPU_CTX_MAGIC          0x46505501
+#define FPU_CTX_ALIGN          8
+struct fpu_context {
+       __u64   regs[32];
+       __u64   fcc;
+       __u32   fcsr;
+};
+
+#endif /* _UAPI_ASM_SIGCONTEXT_H */
diff --git a/arch/loongarch/include/uapi/asm/signal.h b/arch/loongarch/include/uapi/asm/signal.h
new file mode 100644 (file)
index 0000000..992d965
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _UAPI_ASM_SIGNAL_H
+#define _UAPI_ASM_SIGNAL_H
+
+#define MINSIGSTKSZ 4096
+#define SIGSTKSZ    16384
+
+#include <asm-generic/signal.h>
+
+#endif
diff --git a/arch/loongarch/include/uapi/asm/ucontext.h b/arch/loongarch/include/uapi/asm/ucontext.h
new file mode 100644 (file)
index 0000000..12577e2
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __LOONGARCH_UAPI_ASM_UCONTEXT_H
+#define __LOONGARCH_UAPI_ASM_UCONTEXT_H
+
+/**
+ * struct ucontext - user context structure
+ * @uc_flags:          context flags
+ * @uc_link:           pointer to the context to resume when this one returns
+ * @uc_stack:          stack used while this context runs
+ * @uc_mcontext:       holds basic processor state
+ * @uc_sigmask:        set of signals blocked while this context runs
+ * @uc_extcontext:     holds extended processor state
+ */
+struct ucontext {
+       unsigned long           uc_flags;
+       struct ucontext         *uc_link;
+       stack_t                 uc_stack;
+       sigset_t                uc_sigmask;
+       /* There's some padding here to allow sigset_t to be expanded in the
+        * future.  Though this is unlikely, other architectures put uc_sigmask
+        * at the end of this structure and explicitly state it can be
+        * expanded, so we didn't want to box ourselves in here. */
+       __u8              __unused[1024 / 8 - sizeof(sigset_t)];
+       /* We can't put uc_sigmask at the end of this structure because we need
+        * to be able to expand sigcontext in the future.  For example, the
+        * vector ISA extension will almost certainly add ISA state.  We want
+        * to ensure all user-visible ISA state can be saved and restored via a
+        * ucontext, so we're putting this at the end in order to allow for
+        * infinite extensibility.  Since we know this will be extended and we
+        * assume sigset_t won't be extended an extreme amount, we're
+        * prioritizing this. */
+       struct sigcontext       uc_mcontext;
+};
+
+#endif /* __LOONGARCH_UAPI_ASM_UCONTEXT_H */
diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/include/uapi/asm/unistd.h
new file mode 100644 (file)
index 0000000..fcb6689
--- /dev/null
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
+
+#include <asm-generic/unistd.h>
diff --git a/arch/loongarch/kernel/.gitignore b/arch/loongarch/kernel/.gitignore
new file mode 100644 (file)
index 0000000..bbb90f9
--- /dev/null
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vmlinux.lds
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
new file mode 100644 (file)
index 0000000..940de91
--- /dev/null
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux/LoongArch kernel.
+#
+
+extra-y                := head.o vmlinux.lds
+
+obj-y          += cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
+                  traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
+                  elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o
+
+obj-$(CONFIG_ACPI)             += acpi.o
+obj-$(CONFIG_EFI)              += efi.o
+
+obj-$(CONFIG_CPU_HAS_FPU)      += fpu.o
+
+obj-$(CONFIG_MODULES)          += module.o module-sections.o
+
+obj-$(CONFIG_PROC_FS)          += proc.o
+
+obj-$(CONFIG_SMP)              += smp.o
+
+obj-$(CONFIG_NUMA)             += numa.o
+
+CPPFLAGS_vmlinux.lds           := $(KBUILD_CFLAGS)
diff --git a/arch/loongarch/kernel/access-helper.h b/arch/loongarch/kernel/access-helper.h
new file mode 100644 (file)
index 0000000..4a35ca8
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/uaccess.h>
+
+static inline int __get_inst(u32 *i, u32 *p, bool user)
+{
+       return user ? get_user(*i, (u32 __user *)p) : get_kernel_nofault(*i, p);
+}
+
+static inline int __get_addr(unsigned long *a, unsigned long *p, bool user)
+{
+       return user ? get_user(*a, (unsigned long __user *)p) : get_kernel_nofault(*a, p);
+}
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
new file mode 100644 (file)
index 0000000..b16c3de
--- /dev/null
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
+ *
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/memblock.h>
+#include <linux/serial_core.h>
+#include <asm/io.h>
+#include <asm/numa.h>
+#include <asm/loongson.h>
+
+int acpi_disabled;
+EXPORT_SYMBOL(acpi_disabled);
+int acpi_noirq;
+int acpi_pci_disabled;
+EXPORT_SYMBOL(acpi_pci_disabled);
+int acpi_strict = 1; /* We have no workarounds on LoongArch */
+int num_processors;
+int disabled_cpus;
+enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;
+
+u64 acpi_saved_sp;
+
+#define MAX_CORE_PIC 256
+
+#define PREFIX                 "ACPI: "
+
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
+{
+       int irq;
+
+       irq = acpi_register_gsi(NULL, gsi, -1, -1);
+       if (irq < 0)
+               return -EINVAL;
+       if (irqp)
+               *irqp = irq;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
+
+int acpi_isa_irq_to_gsi(unsigned int isa_irq, u32 *gsi)
+{
+       if (gsi)
+               *gsi = isa_irq;
+       return 0;
+}
+
+/*
+ * success: return IRQ number (>=0)
+ * failure: return < 0
+ */
+int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
+{
+       struct irq_fwspec fwspec;
+
+       switch (gsi) {
+       case GSI_MIN_CPU_IRQ ... GSI_MAX_CPU_IRQ:
+               fwspec.fwnode = liointc_domain->fwnode;
+               fwspec.param[0] = gsi - GSI_MIN_CPU_IRQ;
+               fwspec.param_count = 1;
+
+               return irq_create_fwspec_mapping(&fwspec);
+
+       case GSI_MIN_LPC_IRQ ... GSI_MAX_LPC_IRQ:
+               if (!pch_lpc_domain)
+                       return -EINVAL;
+
+               fwspec.fwnode = pch_lpc_domain->fwnode;
+               fwspec.param[0] = gsi - GSI_MIN_LPC_IRQ;
+               fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
+               fwspec.param_count = 2;
+
+               return irq_create_fwspec_mapping(&fwspec);
+
+       case GSI_MIN_PCH_IRQ ... GSI_MAX_PCH_IRQ:
+               if (!pch_pic_domain[0])
+                       return -EINVAL;
+
+               fwspec.fwnode = pch_pic_domain[0]->fwnode;
+               fwspec.param[0] = gsi - GSI_MIN_PCH_IRQ;
+               fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
+               fwspec.param_count = 2;
+
+               return irq_create_fwspec_mapping(&fwspec);
+       }
+
+       return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(acpi_register_gsi);
+
+void acpi_unregister_gsi(u32 gsi)
+{
+
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
+
+void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
+{
+       if (!phys || !size)
+               return NULL;
+
+       return early_memremap(phys, size);
+}
+void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
+{
+       if (!map || !size)
+               return;
+
+       early_memunmap(map, size);
+}
+
+void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+{
+       if (!memblock_is_memory(phys))
+               return ioremap(phys, size);
+       else
+               return ioremap_cache(phys, size);
+}
+
+void __init acpi_boot_table_init(void)
+{
+       /*
+        * If acpi_disabled, bail out
+        */
+       if (acpi_disabled)
+               return;
+
+       /*
+        * Initialize the ACPI boot-time table parser.
+        */
+       if (acpi_table_init()) {
+               disable_acpi();
+               return;
+       }
+}
+
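+/*
+ * Map an MADT physical CPU id to a logical CPU number. The boot CPU is
+ * always logical CPU 0; other enabled CPUs take the next free slot in
+ * cpu_present_mask.
+ */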
+static int set_processor_mask(u32 id, u32 flags)
+{
+       int cpu, cpuid = id;
+
+       if (num_processors >= nr_cpu_ids) {
+               pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached. processor 0x%x ignored.\n",
+                       nr_cpu_ids, cpuid);
+               return -ENODEV;
+       }
+       if (cpuid == loongson_sysconf.boot_cpu_id)
+               cpu = 0;
+       else
+               cpu = cpumask_next_zero(-1, cpu_present_mask);
+
+       if (flags & ACPI_MADT_ENABLED) {
+               num_processors++;
+               set_cpu_possible(cpu, true);
+               set_cpu_present(cpu, true);
+               __cpu_number_map[cpuid] = cpu;
+               __cpu_logical_map[cpu] = cpuid;
+       } else
+               disabled_cpus++;
+
+       return cpu;
+}
+
+static void __init acpi_process_madt(void)
+{
+       int i;
+
+       for (i = 0; i < NR_CPUS; i++) {
+               __cpu_number_map[i] = -1;
+               __cpu_logical_map[i] = -1;
+       }
+
+       loongson_sysconf.nr_cpus = num_processors;
+}
+
+int __init acpi_boot_init(void)
+{
+       /*
+        * If acpi_disabled, bail out
+        */
+       if (acpi_disabled)
+               return -1;
+
+       loongson_sysconf.boot_cpu_id = read_csr_cpuid();
+
+       /*
+        * Process the Multiple APIC Description Table (MADT), if present
+        */
+       acpi_process_madt();
+
+       /* Do not enable ACPI SPCR console by default */
+       acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
+
+       return 0;
+}
+
+#ifdef CONFIG_ACPI_NUMA
+
+static __init int setup_node(int pxm)
+{
+       return acpi_map_pxm_to_node(pxm);
+}
+
+/*
+ * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
+ * I/O localities since SRAT does not list them.  I/O localities are
+ * not supported at this point.
+ */
+unsigned int numa_distance_cnt;
+
+static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
+{
+       return slit->locality_count;
+}
+
+void __init numa_set_distance(int from, int to, int distance)
+{
+       if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
+               pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
+                               from, to, distance);
+               return;
+       }
+
+       node_distances[from][to] = distance;
+}
+
+/* Callback for Proximity Domain -> CPUID mapping */
+void __init
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
+{
+       int pxm, node;
+
+       if (srat_disabled())
+               return;
+       if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
+               bad_srat();
+               return;
+       }
+       if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
+               return;
+       pxm = pa->proximity_domain_lo;
+       if (acpi_srat_revision >= 2) {
+               pxm |= (pa->proximity_domain_hi[0] << 8);
+               pxm |= (pa->proximity_domain_hi[1] << 16);
+               pxm |= (pa->proximity_domain_hi[2] << 24);
+       }
+       node = setup_node(pxm);
+       if (node < 0) {
+               pr_err("SRAT: Too many proximity domains %x\n", pxm);
+               bad_srat();
+               return;
+       }
+
+       if (pa->apic_id >= CONFIG_NR_CPUS) {
+               pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
+                               pxm, pa->apic_id, node);
+               return;
+       }
+
+       early_numa_add_cpu(pa->apic_id, node);
+
+       set_cpuid_to_node(pa->apic_id, node);
+       node_set(node, numa_nodes_parsed);
+       pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
+}
+
+void __init acpi_numa_arch_fixup(void) {}
+#endif
+
+void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
+{
+       memblock_reserve(addr, size);
+}
+
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+
+#include <acpi/processor.h>
+
+static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+{
+#ifdef CONFIG_ACPI_NUMA
+       int nid;
+
+       nid = acpi_get_node(handle);
+       if (nid != NUMA_NO_NODE) {
+               set_cpuid_to_node(physid, nid);
+               node_set(nid, numa_nodes_parsed);
+               set_cpu_numa_node(cpu, nid);
+               cpumask_set_cpu(cpu, cpumask_of_node(nid));
+       }
+#endif
+       return 0;
+}
+
+int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
+{
+       int cpu;
+
+       cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
+       if (cpu < 0) {
+               pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+               return cpu;
+       }
+
+       acpi_map_cpu2node(handle, cpu, physid);
+
+       *pcpu = cpu;
+
+       return 0;
+}
+EXPORT_SYMBOL(acpi_map_cpu);
+
+int acpi_unmap_cpu(int cpu)
+{
+#ifdef CONFIG_ACPI_NUMA
+       set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
+#endif
+       set_cpu_present(cpu, false);
+       num_processors--;
+
+       pr_info("cpu%d hot remove!\n", cpu);
+
+       return 0;
+}
+EXPORT_SYMBOL(acpi_unmap_cpu);
+
+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c
new file mode 100644 (file)
index 0000000..bfb65eb
--- /dev/null
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * asm-offsets.c: Calculate pt_regs and task_struct offsets.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kbuild.h>
+#include <linux/suspend.h>
+#include <asm/cpu-info.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+
+void output_ptreg_defines(void)
+{
+       COMMENT("LoongArch pt_regs offsets.");
+       OFFSET(PT_R0, pt_regs, regs[0]);
+       OFFSET(PT_R1, pt_regs, regs[1]);
+       OFFSET(PT_R2, pt_regs, regs[2]);
+       OFFSET(PT_R3, pt_regs, regs[3]);
+       OFFSET(PT_R4, pt_regs, regs[4]);
+       OFFSET(PT_R5, pt_regs, regs[5]);
+       OFFSET(PT_R6, pt_regs, regs[6]);
+       OFFSET(PT_R7, pt_regs, regs[7]);
+       OFFSET(PT_R8, pt_regs, regs[8]);
+       OFFSET(PT_R9, pt_regs, regs[9]);
+       OFFSET(PT_R10, pt_regs, regs[10]);
+       OFFSET(PT_R11, pt_regs, regs[11]);
+       OFFSET(PT_R12, pt_regs, regs[12]);
+       OFFSET(PT_R13, pt_regs, regs[13]);
+       OFFSET(PT_R14, pt_regs, regs[14]);
+       OFFSET(PT_R15, pt_regs, regs[15]);
+       OFFSET(PT_R16, pt_regs, regs[16]);
+       OFFSET(PT_R17, pt_regs, regs[17]);
+       OFFSET(PT_R18, pt_regs, regs[18]);
+       OFFSET(PT_R19, pt_regs, regs[19]);
+       OFFSET(PT_R20, pt_regs, regs[20]);
+       OFFSET(PT_R21, pt_regs, regs[21]);
+       OFFSET(PT_R22, pt_regs, regs[22]);
+       OFFSET(PT_R23, pt_regs, regs[23]);
+       OFFSET(PT_R24, pt_regs, regs[24]);
+       OFFSET(PT_R25, pt_regs, regs[25]);
+       OFFSET(PT_R26, pt_regs, regs[26]);
+       OFFSET(PT_R27, pt_regs, regs[27]);
+       OFFSET(PT_R28, pt_regs, regs[28]);
+       OFFSET(PT_R29, pt_regs, regs[29]);
+       OFFSET(PT_R30, pt_regs, regs[30]);
+       OFFSET(PT_R31, pt_regs, regs[31]);
+       OFFSET(PT_CRMD, pt_regs, csr_crmd);
+       OFFSET(PT_PRMD, pt_regs, csr_prmd);
+       OFFSET(PT_EUEN, pt_regs, csr_euen);
+       OFFSET(PT_ECFG, pt_regs, csr_ecfg);
+       OFFSET(PT_ESTAT, pt_regs, csr_estat);
+       OFFSET(PT_ERA, pt_regs, csr_era);
+       OFFSET(PT_BVADDR, pt_regs, csr_badvaddr);
+       OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
+       DEFINE(PT_SIZE, sizeof(struct pt_regs));
+       BLANK();
+}
+
+void output_task_defines(void)
+{
+       COMMENT("LoongArch task_struct offsets.");
+       OFFSET(TASK_STATE, task_struct, __state);
+       OFFSET(TASK_THREAD_INFO, task_struct, stack);
+       OFFSET(TASK_FLAGS, task_struct, flags);
+       OFFSET(TASK_MM, task_struct, mm);
+       OFFSET(TASK_PID, task_struct, pid);
+       DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
+       BLANK();
+}
+
+void output_thread_info_defines(void)
+{
+       COMMENT("LoongArch thread_info offsets.");
+       OFFSET(TI_TASK, thread_info, task);
+       OFFSET(TI_FLAGS, thread_info, flags);
+       OFFSET(TI_TP_VALUE, thread_info, tp_value);
+       OFFSET(TI_CPU, thread_info, cpu);
+       OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+       OFFSET(TI_REGS, thread_info, regs);
+       DEFINE(_THREAD_SIZE, THREAD_SIZE);
+       DEFINE(_THREAD_MASK, THREAD_MASK);
+       DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+       DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
+       BLANK();
+}
+
+void output_thread_defines(void)
+{
+       COMMENT("LoongArch specific thread_struct offsets.");
+       OFFSET(THREAD_REG01, task_struct, thread.reg01);
+       OFFSET(THREAD_REG03, task_struct, thread.reg03);
+       OFFSET(THREAD_REG22, task_struct, thread.reg22);
+       OFFSET(THREAD_REG23, task_struct, thread.reg23);
+       OFFSET(THREAD_REG24, task_struct, thread.reg24);
+       OFFSET(THREAD_REG25, task_struct, thread.reg25);
+       OFFSET(THREAD_REG26, task_struct, thread.reg26);
+       OFFSET(THREAD_REG27, task_struct, thread.reg27);
+       OFFSET(THREAD_REG28, task_struct, thread.reg28);
+       OFFSET(THREAD_REG29, task_struct, thread.reg29);
+       OFFSET(THREAD_REG30, task_struct, thread.reg30);
+       OFFSET(THREAD_REG31, task_struct, thread.reg31);
+       OFFSET(THREAD_CSRCRMD, task_struct, thread.csr_crmd);
+       OFFSET(THREAD_CSRPRMD, task_struct, thread.csr_prmd);
+       OFFSET(THREAD_CSREUEN, task_struct, thread.csr_euen);
+       OFFSET(THREAD_CSRECFG, task_struct, thread.csr_ecfg);
+
+       OFFSET(THREAD_SCR0, task_struct, thread.scr0);
+       OFFSET(THREAD_SCR1, task_struct, thread.scr1);
+       OFFSET(THREAD_SCR2, task_struct, thread.scr2);
+       OFFSET(THREAD_SCR3, task_struct, thread.scr3);
+
+       OFFSET(THREAD_EFLAGS, task_struct, thread.eflags);
+
+       OFFSET(THREAD_FPU, task_struct, thread.fpu);
+
+       OFFSET(THREAD_BVADDR, task_struct, thread.csr_badvaddr);
+       OFFSET(THREAD_ECODE, task_struct, thread.error_code);
+       OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr);
+       BLANK();
+}
+
+void output_thread_fpu_defines(void)
+{
+       OFFSET(THREAD_FPR0, loongarch_fpu, fpr[0]);
+       OFFSET(THREAD_FPR1, loongarch_fpu, fpr[1]);
+       OFFSET(THREAD_FPR2, loongarch_fpu, fpr[2]);
+       OFFSET(THREAD_FPR3, loongarch_fpu, fpr[3]);
+       OFFSET(THREAD_FPR4, loongarch_fpu, fpr[4]);
+       OFFSET(THREAD_FPR5, loongarch_fpu, fpr[5]);
+       OFFSET(THREAD_FPR6, loongarch_fpu, fpr[6]);
+       OFFSET(THREAD_FPR7, loongarch_fpu, fpr[7]);
+       OFFSET(THREAD_FPR8, loongarch_fpu, fpr[8]);
+       OFFSET(THREAD_FPR9, loongarch_fpu, fpr[9]);
+       OFFSET(THREAD_FPR10, loongarch_fpu, fpr[10]);
+       OFFSET(THREAD_FPR11, loongarch_fpu, fpr[11]);
+       OFFSET(THREAD_FPR12, loongarch_fpu, fpr[12]);
+       OFFSET(THREAD_FPR13, loongarch_fpu, fpr[13]);
+       OFFSET(THREAD_FPR14, loongarch_fpu, fpr[14]);
+       OFFSET(THREAD_FPR15, loongarch_fpu, fpr[15]);
+       OFFSET(THREAD_FPR16, loongarch_fpu, fpr[16]);
+       OFFSET(THREAD_FPR17, loongarch_fpu, fpr[17]);
+       OFFSET(THREAD_FPR18, loongarch_fpu, fpr[18]);
+       OFFSET(THREAD_FPR19, loongarch_fpu, fpr[19]);
+       OFFSET(THREAD_FPR20, loongarch_fpu, fpr[20]);
+       OFFSET(THREAD_FPR21, loongarch_fpu, fpr[21]);
+       OFFSET(THREAD_FPR22, loongarch_fpu, fpr[22]);
+       OFFSET(THREAD_FPR23, loongarch_fpu, fpr[23]);
+       OFFSET(THREAD_FPR24, loongarch_fpu, fpr[24]);
+       OFFSET(THREAD_FPR25, loongarch_fpu, fpr[25]);
+       OFFSET(THREAD_FPR26, loongarch_fpu, fpr[26]);
+       OFFSET(THREAD_FPR27, loongarch_fpu, fpr[27]);
+       OFFSET(THREAD_FPR28, loongarch_fpu, fpr[28]);
+       OFFSET(THREAD_FPR29, loongarch_fpu, fpr[29]);
+       OFFSET(THREAD_FPR30, loongarch_fpu, fpr[30]);
+       OFFSET(THREAD_FPR31, loongarch_fpu, fpr[31]);
+
+       OFFSET(THREAD_FCSR, loongarch_fpu, fcsr);
+       OFFSET(THREAD_FCC,  loongarch_fpu, fcc);
+       OFFSET(THREAD_VCSR, loongarch_fpu, vcsr);
+       BLANK();
+}
+
+void output_mm_defines(void)
+{
+       COMMENT("Size of struct page");
+       DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
+       BLANK();
+       COMMENT("Linux mm_struct offsets.");
+       OFFSET(MM_USERS, mm_struct, mm_users);
+       OFFSET(MM_PGD, mm_struct, pgd);
+       OFFSET(MM_CONTEXT, mm_struct, context);
+       BLANK();
+       DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
+       DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
+       DEFINE(_PTE_T_SIZE, sizeof(pte_t));
+       BLANK();
+       DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
+#ifndef __PAGETABLE_PMD_FOLDED
+       DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
+#endif
+       DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
+       BLANK();
+       DEFINE(_PGD_ORDER, PGD_ORDER);
+#ifndef __PAGETABLE_PMD_FOLDED
+       DEFINE(_PMD_ORDER, PMD_ORDER);
+#endif
+       DEFINE(_PTE_ORDER, PTE_ORDER);
+       BLANK();
+       DEFINE(_PMD_SHIFT, PMD_SHIFT);
+       DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
+       BLANK();
+       DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
+       DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
+       DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
+       BLANK();
+       DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
+       DEFINE(_PAGE_SIZE, PAGE_SIZE);
+       BLANK();
+}
+
+void output_sc_defines(void)
+{
+       COMMENT("Linux sigcontext offsets.");
+       OFFSET(SC_REGS, sigcontext, sc_regs);
+       OFFSET(SC_PC, sigcontext, sc_pc);
+       BLANK();
+}
+
+void output_signal_defines(void)
+{
+       COMMENT("Linux signal numbers.");
+       DEFINE(_SIGHUP, SIGHUP);
+       DEFINE(_SIGINT, SIGINT);
+       DEFINE(_SIGQUIT, SIGQUIT);
+       DEFINE(_SIGILL, SIGILL);
+       DEFINE(_SIGTRAP, SIGTRAP);
+       DEFINE(_SIGIOT, SIGIOT);
+       DEFINE(_SIGABRT, SIGABRT);
+       DEFINE(_SIGFPE, SIGFPE);
+       DEFINE(_SIGKILL, SIGKILL);
+       DEFINE(_SIGBUS, SIGBUS);
+       DEFINE(_SIGSEGV, SIGSEGV);
+       DEFINE(_SIGSYS, SIGSYS);
+       DEFINE(_SIGPIPE, SIGPIPE);
+       DEFINE(_SIGALRM, SIGALRM);
+       DEFINE(_SIGTERM, SIGTERM);
+       DEFINE(_SIGUSR1, SIGUSR1);
+       DEFINE(_SIGUSR2, SIGUSR2);
+       DEFINE(_SIGCHLD, SIGCHLD);
+       DEFINE(_SIGPWR, SIGPWR);
+       DEFINE(_SIGWINCH, SIGWINCH);
+       DEFINE(_SIGURG, SIGURG);
+       DEFINE(_SIGIO, SIGIO);
+       DEFINE(_SIGSTOP, SIGSTOP);
+       DEFINE(_SIGTSTP, SIGTSTP);
+       DEFINE(_SIGCONT, SIGCONT);
+       DEFINE(_SIGTTIN, SIGTTIN);
+       DEFINE(_SIGTTOU, SIGTTOU);
+       DEFINE(_SIGVTALRM, SIGVTALRM);
+       DEFINE(_SIGPROF, SIGPROF);
+       DEFINE(_SIGXCPU, SIGXCPU);
+       DEFINE(_SIGXFSZ, SIGXFSZ);
+       BLANK();
+}
+
+#ifdef CONFIG_SMP
+void output_smpboot_defines(void)
+{
+       COMMENT("Linux smp cpu boot offsets.");
+       OFFSET(CPU_BOOT_STACK, secondary_data, stack);
+       OFFSET(CPU_BOOT_TINFO, secondary_data, thread_info);
+       BLANK();
+}
+#endif
diff --git a/arch/loongarch/kernel/cacheinfo.c b/arch/loongarch/kernel/cacheinfo.c
new file mode 100644 (file)
index 0000000..8c9fe29
--- /dev/null
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LoongArch cacheinfo support
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/cacheinfo.h>
+
+/* Populates leaf and increments to next leaf */
+#define populate_cache(cache, leaf, c_level, c_type)           \
+do {                                                           \
+       leaf->type = c_type;                                    \
+       leaf->level = c_level;                                  \
+       leaf->coherency_line_size = c->cache.linesz;            \
+       leaf->number_of_sets = c->cache.sets;                   \
+       leaf->ways_of_associativity = c->cache.ways;            \
+       leaf->size = c->cache.linesz * c->cache.sets *          \
+               c->cache.ways;                                  \
+       leaf++;                                                 \
+} while (0)
+
+int init_cache_level(unsigned int cpu)
+{
+       struct cpuinfo_loongarch *c = &current_cpu_data;
+       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+       int levels = 0, leaves = 0;
+
+       /*
+        * If Dcache is not set, we assume the cache structures
+        * are not properly initialized.
+        */
+       if (c->dcache.waysize)
+               levels += 1;
+       else
+               return -ENOENT;
+
+       leaves += (c->icache.waysize) ? 2 : 1;
+
+       if (c->vcache.waysize) {
+               levels++;
+               leaves++;
+       }
+
+       if (c->scache.waysize) {
+               levels++;
+               leaves++;
+       }
+
+       if (c->tcache.waysize) {
+               levels++;
+               leaves++;
+       }
+
+       this_cpu_ci->num_levels = levels;
+       this_cpu_ci->num_leaves = leaves;
+       return 0;
+}
+
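+/*
+ * L1 and L2 caches are treated as private to each core; only higher-level
+ * caches (L3 and beyond) are considered shared between CPUs here.
+ */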
+static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
+                                          struct cacheinfo *sib_leaf)
+{
+       return !((this_leaf->level == 1) || (this_leaf->level == 2));
+}
+
+static void cache_cpumap_setup(unsigned int cpu)
+{
+       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+       struct cacheinfo *this_leaf, *sib_leaf;
+       unsigned int index;
+
+       for (index = 0; index < this_cpu_ci->num_leaves; index++) {
+               unsigned int i;
+
+               this_leaf = this_cpu_ci->info_list + index;
+               /* skip if shared_cpu_map is already populated */
+               if (!cpumask_empty(&this_leaf->shared_cpu_map))
+                       continue;
+
+               cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+               for_each_online_cpu(i) {
+                       struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
+
+                       if (i == cpu || !sib_cpu_ci->info_list)
+                               continue;/* skip if itself or no cacheinfo */
+                       sib_leaf = sib_cpu_ci->info_list + index;
+                       if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+                               cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+                               cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+                       }
+               }
+       }
+}
+
+int populate_cache_leaves(unsigned int cpu)
+{
+       int level = 1;
+       struct cpuinfo_loongarch *c = &current_cpu_data;
+       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+       struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+
+       if (c->icache.waysize) {
+               populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
+               populate_cache(icache, this_leaf, level++, CACHE_TYPE_INST);
+       } else {
+               populate_cache(dcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
+       }
+
+       if (c->vcache.waysize)
+               populate_cache(vcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
+
+       if (c->scache.waysize)
+               populate_cache(scache, this_leaf, level++, CACHE_TYPE_UNIFIED);
+
+       if (c->tcache.waysize)
+               populate_cache(tcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
+
+       cache_cpumap_setup(cpu);
+       this_cpu_ci->cpu_map_populated = true;
+
+       return 0;
+}
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
new file mode 100644 (file)
index 0000000..6c87ea3
--- /dev/null
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Processor capabilities determination functions.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/export.h>
+#include <linux/printk.h>
+#include <linux/uaccess.h>
+
+#include <asm/cpu-features.h>
+#include <asm/elf.h>
+#include <asm/fpu.h>
+#include <asm/loongarch.h>
+#include <asm/pgtable-bits.h>
+#include <asm/setup.h>
+
+/* Hardware capabilities */
+unsigned int elf_hwcap __read_mostly;
+EXPORT_SYMBOL_GPL(elf_hwcap);
+
+/*
+ * Determine the FCSR mask for FPU hardware.
+ */
+static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_loongarch *c)
+{
+       unsigned long sr, mask, fcsr, fcsr0, fcsr1;
+
+       fcsr = c->fpu_csr0;
+       mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM;
+
+       sr = read_csr_euen();
+       enable_fpu();
+
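+       /*
+        * Write two FCSR patterns that differ in every bit outside the
+        * architecturally defined fields; bits that read back unchanged in
+        * both cases are recorded as read-only in fpu_mask.
+        */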
+       fcsr0 = fcsr & mask;
+       write_fcsr(LOONGARCH_FCSR0, fcsr0);
+       fcsr0 = read_fcsr(LOONGARCH_FCSR0);
+
+       fcsr1 = fcsr | ~mask;
+       write_fcsr(LOONGARCH_FCSR0, fcsr1);
+       fcsr1 = read_fcsr(LOONGARCH_FCSR0);
+
+       write_fcsr(LOONGARCH_FCSR0, fcsr);
+
+       write_csr_euen(sr);
+
+       c->fpu_mask = ~(fcsr0 ^ fcsr1) & ~mask;
+}
+
+static inline void set_elf_platform(int cpu, const char *plat)
+{
+       if (cpu == 0)
+               __elf_platform = plat;
+}
+
+/* MAP BASE */
+unsigned long vm_map_base;
+EXPORT_SYMBOL_GPL(vm_map_base);
+
+static void cpu_probe_addrbits(struct cpuinfo_loongarch *c)
+{
+#ifdef __NEED_ADDRBITS_PROBE
+       c->pabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_PABITS) >> 4;
+       c->vabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_VABITS) >> 12;
+       vm_map_base = 0UL - (1UL << c->vabits);
+#endif
+}
+
+static void set_isa(struct cpuinfo_loongarch *c, unsigned int isa)
+{
+       switch (isa) {
+       case LOONGARCH_CPU_ISA_LA64:
+               c->isa_level |= LOONGARCH_CPU_ISA_LA64;
+               fallthrough;
+       case LOONGARCH_CPU_ISA_LA32S:
+               c->isa_level |= LOONGARCH_CPU_ISA_LA32S;
+               fallthrough;
+       case LOONGARCH_CPU_ISA_LA32R:
+               c->isa_level |= LOONGARCH_CPU_ISA_LA32R;
+               break;
+       }
+}
+
+static void cpu_probe_common(struct cpuinfo_loongarch *c)
+{
+       unsigned int config;
+       unsigned long asid_mask;
+
+       c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
+                    LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
+
+       elf_hwcap |= HWCAP_LOONGARCH_CRC32;
+
+       config = read_cpucfg(LOONGARCH_CPUCFG1);
+       if (config & CPUCFG1_UAL) {
+               c->options |= LOONGARCH_CPU_UAL;
+               elf_hwcap |= HWCAP_LOONGARCH_UAL;
+       }
+
+       config = read_cpucfg(LOONGARCH_CPUCFG2);
+       if (config & CPUCFG2_LAM) {
+               c->options |= LOONGARCH_CPU_LAM;
+               elf_hwcap |= HWCAP_LOONGARCH_LAM;
+       }
+       if (config & CPUCFG2_FP) {
+               c->options |= LOONGARCH_CPU_FPU;
+               elf_hwcap |= HWCAP_LOONGARCH_FPU;
+       }
+       if (config & CPUCFG2_COMPLEX) {
+               c->options |= LOONGARCH_CPU_COMPLEX;
+               elf_hwcap |= HWCAP_LOONGARCH_COMPLEX;
+       }
+       if (config & CPUCFG2_CRYPTO) {
+               c->options |= LOONGARCH_CPU_CRYPTO;
+               elf_hwcap |= HWCAP_LOONGARCH_CRYPTO;
+       }
+       if (config & CPUCFG2_LVZP) {
+               c->options |= LOONGARCH_CPU_LVZ;
+               elf_hwcap |= HWCAP_LOONGARCH_LVZ;
+       }
+
+       config = read_cpucfg(LOONGARCH_CPUCFG6);
+       if (config & CPUCFG6_PMP)
+               c->options |= LOONGARCH_CPU_PMP;
+
+       config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
+       if (config & IOCSRF_CSRIPI)
+               c->options |= LOONGARCH_CPU_CSRIPI;
+       if (config & IOCSRF_EXTIOI)
+               c->options |= LOONGARCH_CPU_EXTIOI;
+       if (config & IOCSRF_FREQSCALE)
+               c->options |= LOONGARCH_CPU_SCALEFREQ;
+       if (config & IOCSRF_FLATMODE)
+               c->options |= LOONGARCH_CPU_FLATMODE;
+       if (config & IOCSRF_EIODECODE)
+               c->options |= LOONGARCH_CPU_EIODECODE;
+       if (config & IOCSRF_VM)
+               c->options |= LOONGARCH_CPU_HYPERVISOR;
+
+       config = csr_read32(LOONGARCH_CSR_ASID);
+       config = (config & CSR_ASID_BIT) >> CSR_ASID_BIT_SHIFT;
+       asid_mask = GENMASK(config - 1, 0);
+       set_cpu_asid_mask(c, asid_mask);
+
+       config = read_csr_prcfg1();
+       c->ksave_mask = GENMASK((config & CSR_CONF1_KSNUM) - 1, 0);
+       c->ksave_mask &= ~(EXC_KSAVE_MASK | PERCPU_KSAVE_MASK | KVM_KSAVE_MASK);
+
+       config = read_csr_prcfg3();
+       switch (config & CSR_CONF3_TLBTYPE) {
+       case 0:
+               c->tlbsizemtlb = 0;
+               c->tlbsizestlbsets = 0;
+               c->tlbsizestlbways = 0;
+               c->tlbsize = 0;
+               break;
+       case 1:
+               c->tlbsizemtlb = ((config & CSR_CONF3_MTLBSIZE) >> CSR_CONF3_MTLBSIZE_SHIFT) + 1;
+               c->tlbsizestlbsets = 0;
+               c->tlbsizestlbways = 0;
+               c->tlbsize = c->tlbsizemtlb + c->tlbsizestlbsets * c->tlbsizestlbways;
+               break;
+       case 2:
+               c->tlbsizemtlb = ((config & CSR_CONF3_MTLBSIZE) >> CSR_CONF3_MTLBSIZE_SHIFT) + 1;
+               c->tlbsizestlbsets = 1 << ((config & CSR_CONF3_STLBIDX) >> CSR_CONF3_STLBIDX_SHIFT);
+               c->tlbsizestlbways = ((config & CSR_CONF3_STLBWAYS) >> CSR_CONF3_STLBWAYS_SHIFT) + 1;
+               c->tlbsize = c->tlbsizemtlb + c->tlbsizestlbsets * c->tlbsizestlbways;
+               break;
+       default:
+               pr_warn("Warning: unknown TLB type\n");
+       }
+}
+
+#define MAX_NAME_LEN   32
+#define VENDOR_OFFSET  0
+#define CPUNAME_OFFSET 9
+
+static char cpu_full_name[MAX_NAME_LEN] = "        -        ";
+
+static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int cpu)
+{
+       uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]);
+       uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]);
+
+       __cpu_full_name[cpu] = cpu_full_name;
+       *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
+       *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
+
+       switch (c->processor_id & PRID_SERIES_MASK) {
+       case PRID_SERIES_LA132:
+               c->cputype = CPU_LOONGSON32;
+               set_isa(c, LOONGARCH_CPU_ISA_LA32S);
+               __cpu_family[cpu] = "Loongson-32bit";
+               pr_info("32-bit Loongson Processor probed (LA132 Core)\n");
+               break;
+       case PRID_SERIES_LA264:
+               c->cputype = CPU_LOONGSON64;
+               set_isa(c, LOONGARCH_CPU_ISA_LA64);
+               __cpu_family[cpu] = "Loongson-64bit";
+               pr_info("64-bit Loongson Processor probed (LA264 Core)\n");
+               break;
+       case PRID_SERIES_LA364:
+               c->cputype = CPU_LOONGSON64;
+               set_isa(c, LOONGARCH_CPU_ISA_LA64);
+               __cpu_family[cpu] = "Loongson-64bit";
+               pr_info("64-bit Loongson Processor probed (LA364 Core)\n");
+               break;
+       case PRID_SERIES_LA464:
+               c->cputype = CPU_LOONGSON64;
+               set_isa(c, LOONGARCH_CPU_ISA_LA64);
+               __cpu_family[cpu] = "Loongson-64bit";
+               pr_info("64-bit Loongson Processor probed (LA464 Core)\n");
+               break;
+       case PRID_SERIES_LA664:
+               c->cputype = CPU_LOONGSON64;
+               set_isa(c, LOONGARCH_CPU_ISA_LA64);
+               __cpu_family[cpu] = "Loongson-64bit";
+               pr_info("64-bit Loongson Processor probed (LA664 Core)\n");
+               break;
+       default: /* Default to 64 bit */
+               c->cputype = CPU_LOONGSON64;
+               set_isa(c, LOONGARCH_CPU_ISA_LA64);
+               __cpu_family[cpu] = "Loongson-64bit";
+               pr_info("64-bit Loongson Processor probed (Unknown Core)\n");
+       }
+}
+
+#ifdef CONFIG_64BIT
+/* For use by uaccess.h */
+u64 __ua_limit;
+EXPORT_SYMBOL(__ua_limit);
+#endif
+
+const char *__cpu_family[NR_CPUS];
+const char *__cpu_full_name[NR_CPUS];
+const char *__elf_platform;
+
+static void cpu_report(void)
+{
+       struct cpuinfo_loongarch *c = &current_cpu_data;
+
+       pr_info("CPU%d revision is: %08x (%s)\n",
+               smp_processor_id(), c->processor_id, cpu_family_string());
+       if (c->options & LOONGARCH_CPU_FPU)
+               pr_info("FPU%d revision is: %08x\n", smp_processor_id(), c->fpu_vers);
+}
+
+void cpu_probe(void)
+{
+       unsigned int cpu = smp_processor_id();
+       struct cpuinfo_loongarch *c = &current_cpu_data;
+
+       /*
+        * Set a default ELF platform; the CPU probe may later
+        * overwrite it with a more precise value.
+        */
+       set_elf_platform(cpu, "loongarch");
+
+       c->cputype      = CPU_UNKNOWN;
+       c->processor_id = read_cpucfg(LOONGARCH_CPUCFG0);
+       c->fpu_vers     = (read_cpucfg(LOONGARCH_CPUCFG2) >> 3) & 0x3;
+
+       c->fpu_csr0     = FPU_CSR_RN;
+       c->fpu_mask     = FPU_CSR_RSVD;
+
+       cpu_probe_common(c);
+
+       per_cpu_trap_init(cpu);
+
+       switch (c->processor_id & PRID_COMP_MASK) {
+       case PRID_COMP_LOONGSON:
+               cpu_probe_loongson(c, cpu);
+               break;
+       }
+
+       BUG_ON(!__cpu_family[cpu]);
+       BUG_ON(c->cputype == CPU_UNKNOWN);
+
+       cpu_probe_addrbits(c);
+
+#ifdef CONFIG_64BIT
+       if (cpu == 0)
+               __ua_limit = ~((1ull << cpu_vabits) - 1);
+#endif
+
+       cpu_report();
+}
diff --git a/arch/loongarch/kernel/dma.c b/arch/loongarch/kernel/dma.c
new file mode 100644 (file)
index 0000000..8c9b531
--- /dev/null
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
+#include <linux/swiotlb.h>
+
+#include <asm/bootinfo.h>
+#include <asm/dma.h>
+#include <asm/loongson.h>
+
+/*
+ * We extract the 4-bit node id (bits 44-47) from Loongson-3's 48-bit
+ * physical address space and embed it into the 40-bit DMA address space.
+ */
+
+static int node_id_offset;
+
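+/*
+ * Illustrative example (assuming node_id_offset == 36): the node-1 physical
+ * address 0x1000_2000_0000 carries its node id in bits 44-47; phys_to_dma()
+ * clears those bits and re-inserts the node id at bit 36, giving the DMA
+ * address 0x10_2000_0000. dma_to_phys() applies the inverse mapping.
+ */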
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+       long nid = (paddr >> 44) & 0xf;
+
+       return ((nid << 44) ^ paddr) | (nid << node_id_offset);
+}
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+       long nid = (daddr >> node_id_offset) & 0xf;
+
+       return ((nid << node_id_offset) ^ daddr) | (nid << 44);
+}
+
+void __init plat_swiotlb_setup(void)
+{
+       swiotlb_init(true, SWIOTLB_VERBOSE);
+       node_id_offset = ((readl(LS7A_DMA_CFG) & LS7A_DMA_NODE_MASK) >> LS7A_DMA_NODE_SHF) + 36;
+}
diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c
new file mode 100644 (file)
index 0000000..a50b60c
--- /dev/null
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * EFI initialization
+ *
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/efi-bgrt.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kobject.h>
+#include <linux/memblock.h>
+#include <linux/reboot.h>
+#include <linux/uaccess.h>
+
+#include <asm/early_ioremap.h>
+#include <asm/efi.h>
+#include <asm/loongson.h>
+
+static unsigned long efi_nr_tables;
+static unsigned long efi_config_table;
+
+static efi_system_table_t *efi_systab;
+static efi_config_table_type_t arch_tables[] __initdata = {{},};
+
+void __init efi_runtime_init(void)
+{
+       if (!efi_enabled(EFI_BOOT))
+               return;
+
+       if (efi_runtime_disabled()) {
+               pr_info("EFI runtime services will be disabled.\n");
+               return;
+       }
+
+       efi.runtime = (efi_runtime_services_t *)efi_systab->runtime;
+       efi.runtime_version = (unsigned int)efi.runtime->hdr.revision;
+
+       efi_native_runtime_setup();
+       set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+}
+
+void __init efi_init(void)
+{
+       int size;
+       void *config_tables;
+
+       if (!efi_system_table)
+               return;
+
+       efi_systab = (efi_system_table_t *)early_memremap_ro(efi_system_table, sizeof(*efi_systab));
+       if (!efi_systab) {
+               pr_err("Can't find EFI system table.\n");
+               return;
+       }
+
+       set_bit(EFI_64BIT, &efi.flags);
+       efi_nr_tables    = efi_systab->nr_tables;
+       efi_config_table = (unsigned long)efi_systab->tables;
+
+       size = sizeof(efi_config_table_t);
+       config_tables = early_memremap(efi_config_table, efi_nr_tables * size);
+       efi_config_parse_tables(config_tables, efi_systab->nr_tables, arch_tables);
+       early_memunmap(config_tables, efi_nr_tables * size);
+}
diff --git a/arch/loongarch/kernel/elf.c b/arch/loongarch/kernel/elf.c
new file mode 100644 (file)
index 0000000..183e94f
--- /dev/null
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+
+#include <asm/cpu-features.h>
+#include <asm/cpu-info.h>
+
+int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
+                    bool is_interp, struct arch_elf_state *state)
+{
+       return 0;
+}
+
+int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
+                  struct arch_elf_state *state)
+{
+       return 0;
+}
+
+void loongarch_set_personality_fcsr(struct arch_elf_state *state)
+{
+       current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
+}
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
new file mode 100644 (file)
index 0000000..d5b3dbc
--- /dev/null
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+       .text
+       .cfi_sections   .debug_frame
+       .align  5
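+/*
+ * Syscall entry: switch from the user stack to this CPU's kernel stack,
+ * build a pt_regs frame, set tp to the thread_info at the base of the
+ * kernel stack, then call do_syscall().
+ */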
+SYM_FUNC_START(handle_syscall)
+       csrrd   t0, PERCPU_BASE_KS
+       la.abs  t1, kernelsp
+       add.d   t1, t1, t0
+       move    t2, sp
+       ld.d    sp, t1, 0
+
+       addi.d  sp, sp, -PT_SIZE
+       cfi_st  t2, PT_R3
+       cfi_rel_offset  sp, PT_R3
+       st.d    zero, sp, PT_R0
+       csrrd   t2, LOONGARCH_CSR_PRMD
+       st.d    t2, sp, PT_PRMD
+       csrrd   t2, LOONGARCH_CSR_CRMD
+       st.d    t2, sp, PT_CRMD
+       csrrd   t2, LOONGARCH_CSR_EUEN
+       st.d    t2, sp, PT_EUEN
+       csrrd   t2, LOONGARCH_CSR_ECFG
+       st.d    t2, sp, PT_ECFG
+       csrrd   t2, LOONGARCH_CSR_ESTAT
+       st.d    t2, sp, PT_ESTAT
+       cfi_st  ra, PT_R1
+       cfi_st  a0, PT_R4
+       cfi_st  a1, PT_R5
+       cfi_st  a2, PT_R6
+       cfi_st  a3, PT_R7
+       cfi_st  a4, PT_R8
+       cfi_st  a5, PT_R9
+       cfi_st  a6, PT_R10
+       cfi_st  a7, PT_R11
+       csrrd   ra, LOONGARCH_CSR_ERA
+       st.d    ra, sp, PT_ERA
+       cfi_rel_offset ra, PT_ERA
+
+       cfi_st  tp, PT_R2
+       cfi_st  u0, PT_R21
+       cfi_st  fp, PT_R22
+
+       SAVE_STATIC
+
+       move    u0, t0
+       li.d    tp, ~_THREAD_MASK
+       and     tp, tp, sp
+
+       move    a0, sp
+       bl      do_syscall
+
+       RESTORE_ALL_AND_RET
+SYM_FUNC_END(handle_syscall)
+
+SYM_CODE_START(ret_from_fork)
+       bl      schedule_tail           # a0 = struct task_struct *prev
+       move    a0, sp
+       bl      syscall_exit_to_user_mode
+       RESTORE_STATIC
+       RESTORE_SOME
+       RESTORE_SP_AND_RET
+SYM_CODE_END(ret_from_fork)
+
+SYM_CODE_START(ret_from_kernel_thread)
+       bl      schedule_tail           # a0 = struct task_struct *prev
+       move    a0, s1
+       jirl    ra, s0, 0
+       move    a0, sp
+       bl      syscall_exit_to_user_mode
+       RESTORE_STATIC
+       RESTORE_SOME
+       RESTORE_SP_AND_RET
+SYM_CODE_END(ret_from_kernel_thread)
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
new file mode 100644 (file)
index 0000000..467946e
--- /dev/null
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+#include <asm/early_ioremap.h>
+#include <asm/bootinfo.h>
+#include <asm/loongson.h>
+
+u64 efi_system_table;
+struct loongson_system_configuration loongson_sysconf;
+EXPORT_SYMBOL(loongson_sysconf);
+
+u64 loongson_chipcfg[MAX_PACKAGES];
+u64 loongson_chiptemp[MAX_PACKAGES];
+u64 loongson_freqctrl[MAX_PACKAGES];
+unsigned long long smp_group[MAX_PACKAGES];
+
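+/*
+ * Generate one per-package register address by placing the package/node id
+ * in bits 44-47 of the shared base address.
+ */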
+static void __init register_addrs_set(u64 *registers, const u64 addr, int num)
+{
+       u64 i;
+
+       for (i = 0; i < num; i++) {
+               *registers = (i << 44) | addr;
+               registers++;
+       }
+}
+
+void __init init_environ(void)
+{
+       int efi_boot = fw_arg0;
+       struct efi_memory_map_data data;
+       void *fdt_ptr = early_memremap_ro(fw_arg1, SZ_64K);
+
+       if (efi_boot)
+               set_bit(EFI_BOOT, &efi.flags);
+       else
+               clear_bit(EFI_BOOT, &efi.flags);
+
+       early_init_dt_scan(fdt_ptr);
+       early_init_fdt_reserve_self();
+       efi_system_table = efi_get_fdt_params(&data);
+
+       efi_memmap_init_early(&data);
+       memblock_reserve(data.phys_map & PAGE_MASK,
+                        PAGE_ALIGN(data.size + (data.phys_map & ~PAGE_MASK)));
+
+       register_addrs_set(smp_group, TO_UNCACHE(0x1fe01000), 16);
+       register_addrs_set(loongson_chipcfg, TO_UNCACHE(0x1fe00180), 16);
+       register_addrs_set(loongson_chiptemp, TO_UNCACHE(0x1fe0019c), 16);
+       register_addrs_set(loongson_freqctrl, TO_UNCACHE(0x1fe001d0), 16);
+}
+
+static int __init init_cpu_fullname(void)
+{
+       int cpu;
+
+       if (loongson_sysconf.cpuname && !strncmp(loongson_sysconf.cpuname, "Loongson", 8)) {
+               for (cpu = 0; cpu < NR_CPUS; cpu++)
+                       __cpu_full_name[cpu] = loongson_sysconf.cpuname;
+       }
+       return 0;
+}
+arch_initcall(init_cpu_fullname);
+
+static ssize_t boardinfo_show(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       return sprintf(buf,
+               "BIOS Information\n"
+               "Vendor\t\t\t: %s\n"
+               "Version\t\t\t: %s\n"
+               "ROM Size\t\t: %d KB\n"
+               "Release Date\t\t: %s\n\n"
+               "Board Information\n"
+               "Manufacturer\t\t: %s\n"
+               "Board Name\t\t: %s\n"
+               "Family\t\t\t: LOONGSON64\n\n",
+               b_info.bios_vendor, b_info.bios_version,
+               b_info.bios_size, b_info.bios_release_date,
+               b_info.board_vendor, b_info.board_name);
+}
+
+static struct kobj_attribute boardinfo_attr = __ATTR(boardinfo, 0444,
+                                                    boardinfo_show, NULL);
+
+static int __init boardinfo_init(void)
+{
+       struct kobject *loongson_kobj;
+
+       loongson_kobj = kobject_create_and_add("loongson", firmware_kobj);
+       loongson_kobj = kobject_create_and_add("loongson", firmware_kobj);
+       if (!loongson_kobj)
+               return -ENOMEM;
+
+       return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr);
+late_initcall(boardinfo_init);
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
new file mode 100644 (file)
index 0000000..75c6ce0
--- /dev/null
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Lu Zeng <zenglu@loongson.cn>
+ *         Pei Huang <huangpei@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/asm-offsets.h>
+#include <asm/errno.h>
+#include <asm/export.h>
+#include <asm/fpregdef.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+
+#define FPU_REG_WIDTH          8
+#define LSX_REG_WIDTH          16
+#define LASX_REG_WIDTH         32
+
+       .macro  EX insn, reg, src, offs
+.ex\@: \insn   \reg, \src, \offs
+       .section __ex_table,"a"
+       PTR     .ex\@, fault
+       .previous
+       .endm
+
+       .macro sc_save_fp base
+       EX      fst.d $f0,  \base, (0 * FPU_REG_WIDTH)
+       EX      fst.d $f1,  \base, (1 * FPU_REG_WIDTH)
+       EX      fst.d $f2,  \base, (2 * FPU_REG_WIDTH)
+       EX      fst.d $f3,  \base, (3 * FPU_REG_WIDTH)
+       EX      fst.d $f4,  \base, (4 * FPU_REG_WIDTH)
+       EX      fst.d $f5,  \base, (5 * FPU_REG_WIDTH)
+       EX      fst.d $f6,  \base, (6 * FPU_REG_WIDTH)
+       EX      fst.d $f7,  \base, (7 * FPU_REG_WIDTH)
+       EX      fst.d $f8,  \base, (8 * FPU_REG_WIDTH)
+       EX      fst.d $f9,  \base, (9 * FPU_REG_WIDTH)
+       EX      fst.d $f10, \base, (10 * FPU_REG_WIDTH)
+       EX      fst.d $f11, \base, (11 * FPU_REG_WIDTH)
+       EX      fst.d $f12, \base, (12 * FPU_REG_WIDTH)
+       EX      fst.d $f13, \base, (13 * FPU_REG_WIDTH)
+       EX      fst.d $f14, \base, (14 * FPU_REG_WIDTH)
+       EX      fst.d $f15, \base, (15 * FPU_REG_WIDTH)
+       EX      fst.d $f16, \base, (16 * FPU_REG_WIDTH)
+       EX      fst.d $f17, \base, (17 * FPU_REG_WIDTH)
+       EX      fst.d $f18, \base, (18 * FPU_REG_WIDTH)
+       EX      fst.d $f19, \base, (19 * FPU_REG_WIDTH)
+       EX      fst.d $f20, \base, (20 * FPU_REG_WIDTH)
+       EX      fst.d $f21, \base, (21 * FPU_REG_WIDTH)
+       EX      fst.d $f22, \base, (22 * FPU_REG_WIDTH)
+       EX      fst.d $f23, \base, (23 * FPU_REG_WIDTH)
+       EX      fst.d $f24, \base, (24 * FPU_REG_WIDTH)
+       EX      fst.d $f25, \base, (25 * FPU_REG_WIDTH)
+       EX      fst.d $f26, \base, (26 * FPU_REG_WIDTH)
+       EX      fst.d $f27, \base, (27 * FPU_REG_WIDTH)
+       EX      fst.d $f28, \base, (28 * FPU_REG_WIDTH)
+       EX      fst.d $f29, \base, (29 * FPU_REG_WIDTH)
+       EX      fst.d $f30, \base, (30 * FPU_REG_WIDTH)
+       EX      fst.d $f31, \base, (31 * FPU_REG_WIDTH)
+       .endm
+
+       .macro sc_restore_fp base
+       EX      fld.d $f0,  \base, (0 * FPU_REG_WIDTH)
+       EX      fld.d $f1,  \base, (1 * FPU_REG_WIDTH)
+       EX      fld.d $f2,  \base, (2 * FPU_REG_WIDTH)
+       EX      fld.d $f3,  \base, (3 * FPU_REG_WIDTH)
+       EX      fld.d $f4,  \base, (4 * FPU_REG_WIDTH)
+       EX      fld.d $f5,  \base, (5 * FPU_REG_WIDTH)
+       EX      fld.d $f6,  \base, (6 * FPU_REG_WIDTH)
+       EX      fld.d $f7,  \base, (7 * FPU_REG_WIDTH)
+       EX      fld.d $f8,  \base, (8 * FPU_REG_WIDTH)
+       EX      fld.d $f9,  \base, (9 * FPU_REG_WIDTH)
+       EX      fld.d $f10, \base, (10 * FPU_REG_WIDTH)
+       EX      fld.d $f11, \base, (11 * FPU_REG_WIDTH)
+       EX      fld.d $f12, \base, (12 * FPU_REG_WIDTH)
+       EX      fld.d $f13, \base, (13 * FPU_REG_WIDTH)
+       EX      fld.d $f14, \base, (14 * FPU_REG_WIDTH)
+       EX      fld.d $f15, \base, (15 * FPU_REG_WIDTH)
+       EX      fld.d $f16, \base, (16 * FPU_REG_WIDTH)
+       EX      fld.d $f17, \base, (17 * FPU_REG_WIDTH)
+       EX      fld.d $f18, \base, (18 * FPU_REG_WIDTH)
+       EX      fld.d $f19, \base, (19 * FPU_REG_WIDTH)
+       EX      fld.d $f20, \base, (20 * FPU_REG_WIDTH)
+       EX      fld.d $f21, \base, (21 * FPU_REG_WIDTH)
+       EX      fld.d $f22, \base, (22 * FPU_REG_WIDTH)
+       EX      fld.d $f23, \base, (23 * FPU_REG_WIDTH)
+       EX      fld.d $f24, \base, (24 * FPU_REG_WIDTH)
+       EX      fld.d $f25, \base, (25 * FPU_REG_WIDTH)
+       EX      fld.d $f26, \base, (26 * FPU_REG_WIDTH)
+       EX      fld.d $f27, \base, (27 * FPU_REG_WIDTH)
+       EX      fld.d $f28, \base, (28 * FPU_REG_WIDTH)
+       EX      fld.d $f29, \base, (29 * FPU_REG_WIDTH)
+       EX      fld.d $f30, \base, (30 * FPU_REG_WIDTH)
+       EX      fld.d $f31, \base, (31 * FPU_REG_WIDTH)
+       .endm
+
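+       /*
+        * sc_save_fcc packs the eight 1-bit condition flags $fcc0..$fcc7
+        * into a single 64-bit word, one byte per flag, and stores it at
+        * \base; sc_restore_fcc performs the reverse unpacking.
+        */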
+       .macro sc_save_fcc base, tmp0, tmp1
+       movcf2gr        \tmp0, $fcc0
+       move    \tmp1, \tmp0
+       movcf2gr        \tmp0, $fcc1
+       bstrins.d       \tmp1, \tmp0, 15, 8
+       movcf2gr        \tmp0, $fcc2
+       bstrins.d       \tmp1, \tmp0, 23, 16
+       movcf2gr        \tmp0, $fcc3
+       bstrins.d       \tmp1, \tmp0, 31, 24
+       movcf2gr        \tmp0, $fcc4
+       bstrins.d       \tmp1, \tmp0, 39, 32
+       movcf2gr        \tmp0, $fcc5
+       bstrins.d       \tmp1, \tmp0, 47, 40
+       movcf2gr        \tmp0, $fcc6
+       bstrins.d       \tmp1, \tmp0, 55, 48
+       movcf2gr        \tmp0, $fcc7
+       bstrins.d       \tmp1, \tmp0, 63, 56
+       EX      st.d \tmp1, \base, 0
+       .endm
+
+       .macro sc_restore_fcc base, tmp0, tmp1
+       EX      ld.d \tmp0, \base, 0
+       bstrpick.d      \tmp1, \tmp0, 7, 0
+       movgr2cf        $fcc0, \tmp1
+       bstrpick.d      \tmp1, \tmp0, 15, 8
+       movgr2cf        $fcc1, \tmp1
+       bstrpick.d      \tmp1, \tmp0, 23, 16
+       movgr2cf        $fcc2, \tmp1
+       bstrpick.d      \tmp1, \tmp0, 31, 24
+       movgr2cf        $fcc3, \tmp1
+       bstrpick.d      \tmp1, \tmp0, 39, 32
+       movgr2cf        $fcc4, \tmp1
+       bstrpick.d      \tmp1, \tmp0, 47, 40
+       movgr2cf        $fcc5, \tmp1
+       bstrpick.d      \tmp1, \tmp0, 55, 48
+       movgr2cf        $fcc6, \tmp1
+       bstrpick.d      \tmp1, \tmp0, 63, 56
+       movgr2cf        $fcc7, \tmp1
+       .endm
+
+       .macro sc_save_fcsr base, tmp0
+       movfcsr2gr      \tmp0, fcsr0
+       EX      st.w \tmp0, \base, 0
+       .endm
+
+       .macro sc_restore_fcsr base, tmp0
+       EX      ld.w \tmp0, \base, 0
+       movgr2fcsr      fcsr0, \tmp0
+       .endm
+
+       .macro sc_save_vcsr base, tmp0
+       movfcsr2gr      \tmp0, vcsr16
+       EX      st.w \tmp0, \base, 0
+       .endm
+
+       .macro sc_restore_vcsr base, tmp0
+       EX      ld.w \tmp0, \base, 0
+       movgr2fcsr      vcsr16, \tmp0
+       .endm
+
+/*
+ * Save a thread's fp context.
+ */
+SYM_FUNC_START(_save_fp)
+       fpu_save_csr    a0 t1
+       fpu_save_double a0 t1                   # clobbers t1
+       fpu_save_cc     a0 t1 t2                # clobbers t1, t2
+       jirl zero, ra, 0
+SYM_FUNC_END(_save_fp)
+EXPORT_SYMBOL(_save_fp)
+
+/*
+ * Restore a thread's fp context.
+ */
+SYM_FUNC_START(_restore_fp)
+       fpu_restore_double a0 t1                # clobbers t1
+       fpu_restore_csr a0 t1
+       fpu_restore_cc  a0 t1 t2                # clobbers t1, t2
+       jirl zero, ra, 0
+SYM_FUNC_END(_restore_fp)
+
+/*
+ * Load the FPU with signaling NaNs.  The bit pattern we use has the
+ * property that it represents a signaling NaN no matter whether it is
+ * interpreted as single or double precision.
+ *
+ * The value used to initialize fcsr0 comes in $a0.
+ */
+
+SYM_FUNC_START(_init_fpu)
+       li.w    t1, CSR_EUEN_FPEN
+       csrxchg t1, t1, LOONGARCH_CSR_EUEN
+
+       movgr2fcsr      fcsr0, a0
+
+       li.w    t1, -1                          # SNaN
+
+       movgr2fr.d      $f0, t1
+       movgr2fr.d      $f1, t1
+       movgr2fr.d      $f2, t1
+       movgr2fr.d      $f3, t1
+       movgr2fr.d      $f4, t1
+       movgr2fr.d      $f5, t1
+       movgr2fr.d      $f6, t1
+       movgr2fr.d      $f7, t1
+       movgr2fr.d      $f8, t1
+       movgr2fr.d      $f9, t1
+       movgr2fr.d      $f10, t1
+       movgr2fr.d      $f11, t1
+       movgr2fr.d      $f12, t1
+       movgr2fr.d      $f13, t1
+       movgr2fr.d      $f14, t1
+       movgr2fr.d      $f15, t1
+       movgr2fr.d      $f16, t1
+       movgr2fr.d      $f17, t1
+       movgr2fr.d      $f18, t1
+       movgr2fr.d      $f19, t1
+       movgr2fr.d      $f20, t1
+       movgr2fr.d      $f21, t1
+       movgr2fr.d      $f22, t1
+       movgr2fr.d      $f23, t1
+       movgr2fr.d      $f24, t1
+       movgr2fr.d      $f25, t1
+       movgr2fr.d      $f26, t1
+       movgr2fr.d      $f27, t1
+       movgr2fr.d      $f28, t1
+       movgr2fr.d      $f29, t1
+       movgr2fr.d      $f30, t1
+       movgr2fr.d      $f31, t1
+
+       jirl zero, ra, 0
+SYM_FUNC_END(_init_fpu)
+
+/*
+ * a0: fpregs
+ * a1: fcc
+ * a2: fcsr
+ */
+SYM_FUNC_START(_save_fp_context)
+       sc_save_fcc a1 t1 t2
+       sc_save_fcsr a2 t1
+       sc_save_fp a0
+       li.w    a0, 0                                   # success
+       jirl zero, ra, 0
+SYM_FUNC_END(_save_fp_context)
+
+/*
+ * a0: fpregs
+ * a1: fcc
+ * a2: fcsr
+ */
+SYM_FUNC_START(_restore_fp_context)
+       sc_restore_fp a0
+       sc_restore_fcc a1 t1 t2
+       sc_restore_fcsr a2 t1
+       li.w    a0, 0                                   # success
+       jirl zero, ra, 0
+SYM_FUNC_END(_restore_fp_context)
+
+SYM_FUNC_START(fault)
+       li.w    a0, -EFAULT                             # failure
+       jirl zero, ra, 0
+SYM_FUNC_END(fault)
diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
new file mode 100644 (file)
index 0000000..9349685
--- /dev/null
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2002, 2007  Maciej W. Rozycki
+ * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+       .align  5
+SYM_FUNC_START(__arch_cpu_idle)
+       /* start of rollback region */
+       LONG_L  t0, tp, TI_FLAGS
+       nop
+       andi    t0, t0, _TIF_NEED_RESCHED
+       bnez    t0, 1f
+       nop
+       nop
+       nop
+       idle    0
+       /* end of rollback region */
+1:     jirl    zero, ra, 0
+SYM_FUNC_END(__arch_cpu_idle)
+
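+/*
+ * Vectored interrupt entry. If the interrupt arrived inside the 32-byte
+ * rollback region of __arch_cpu_idle, the saved ERA is rolled back to the
+ * start of that region so the need_resched check runs again before idling.
+ */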
+SYM_FUNC_START(handle_vint)
+       BACKUP_T0T1
+       SAVE_ALL
+       la.abs  t1, __arch_cpu_idle
+       LONG_L  t0, sp, PT_ERA
+       /* 32 byte rollback region */
+       ori     t0, t0, 0x1f
+       xori    t0, t0, 0x1f
+       bne     t0, t1, 1f
+       LONG_S  t0, sp, PT_ERA
+1:     move    a0, sp
+       move    a1, sp
+       la.abs  t0, do_vint
+       jirl    ra, t0, 0
+       RESTORE_ALL_AND_RET
+SYM_FUNC_END(handle_vint)
+
+SYM_FUNC_START(except_vec_cex)
+       b       cache_parity_error
+SYM_FUNC_END(except_vec_cex)
+
+       .macro  build_prep_badv
+       csrrd   t0, LOONGARCH_CSR_BADV
+       PTR_S   t0, sp, PT_BVADDR
+       .endm
+
+       .macro  build_prep_fcsr
+       movfcsr2gr      a1, fcsr0
+       .endm
+
+       .macro  build_prep_none
+       .endm
+
+       .macro  BUILD_HANDLER exception handler prep
+       .align  5
+       SYM_FUNC_START(handle_\exception)
+       BACKUP_T0T1
+       SAVE_ALL
+       build_prep_\prep
+       move    a0, sp
+       la.abs  t0, do_\handler
+       jirl    ra, t0, 0
+       RESTORE_ALL_AND_RET
+       SYM_FUNC_END(handle_\exception)
+       .endm
+
+       BUILD_HANDLER ade ade badv
+       BUILD_HANDLER ale ale badv
+       BUILD_HANDLER bp bp none
+       BUILD_HANDLER fpe fpe fcsr
+       BUILD_HANDLER fpu fpu none
+       BUILD_HANDLER lsx lsx none
+       BUILD_HANDLER lasx lasx none
+       BUILD_HANDLER lbt lbt none
+       BUILD_HANDLER ri ri none
+       BUILD_HANDLER watch watch none
+       BUILD_HANDLER reserved reserved none    /* others */
+
+SYM_FUNC_START(handle_sys)
+       la.abs  t0, handle_syscall
+       jirl    zero, t0, 0
+SYM_FUNC_END(handle_sys)
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
new file mode 100644 (file)
index 0000000..e596dfc
--- /dev/null
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/loongarch.h>
+#include <asm/stackframe.h>
+
+       __REF
+
+SYM_ENTRY(_stext, SYM_L_GLOBAL, SYM_A_NONE)
+
+SYM_CODE_START(kernel_entry)                   # kernel entry point
+
+       /* Config direct window and set PG */
+       li.d            t0, CSR_DMW0_INIT       # UC, PLV0, 0x8000 xxxx xxxx xxxx
+       csrwr           t0, LOONGARCH_CSR_DMWIN0
+       li.d            t0, CSR_DMW1_INIT       # CA, PLV0, 0x9000 xxxx xxxx xxxx
+       csrwr           t0, LOONGARCH_CSR_DMWIN1
+       /* Enable PG */
+       li.w            t0, 0xb0                # PLV=0, IE=0, PG=1
+       csrwr           t0, LOONGARCH_CSR_CRMD
+       li.w            t0, 0x04                # PLV=0, PIE=1, PWE=0
+       csrwr           t0, LOONGARCH_CSR_PRMD
+       li.w            t0, 0x00                # FPE=0, SXE=0, ASXE=0, BTE=0
+       csrwr           t0, LOONGARCH_CSR_EUEN
+
+       /* We might not get launched at the address the kernel is linked to,
+          so we jump there.  */
+       la.abs          t0, 0f
+       jirl            zero, t0, 0
+0:
+       la              t0, __bss_start         # clear .bss
+       st.d            zero, t0, 0
+       la              t1, __bss_stop - LONGSIZE
+1:
+       addi.d          t0, t0, LONGSIZE
+       st.d            zero, t0, 0
+       bne             t0, t1, 1b
+
+       la              t0, fw_arg0
+       st.d            a0, t0, 0               # firmware arguments
+       la              t0, fw_arg1
+       st.d            a1, t0, 0
+
+       /* KSave3 used for percpu base, initialized as 0 */
+       csrwr           zero, PERCPU_BASE_KS
+       /* GPR21 used for percpu base (runtime), initialized as 0 */
+       or              u0, zero, zero
+
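+       /*
+        * Point tp at init_thread_union and build the initial kernel stack:
+        * leave room for a pt_regs frame near the top, record the result as
+        * the saved kernel sp, then reserve four argument save slots for the
+        * call into C code.
+        */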
+       la              tp, init_thread_union
+       /* Set the SP after an empty pt_regs.  */
+       PTR_LI          sp, (_THREAD_SIZE - 32 - PT_SIZE)
+       PTR_ADD         sp, sp, tp
+       set_saved_sp    sp, t0, t1
+       PTR_ADDI        sp, sp, -4 * SZREG      # init stack pointer
+
+       bl              start_kernel
+
+SYM_CODE_END(kernel_entry)
+
+#ifdef CONFIG_SMP
+
+/*
+ * SMP secondary CPU entry point. The boot CPU fills in cpuboot_data (boot
+ * stack and thread_info) before waking a secondary CPU, which loads them here.
+ */
+SYM_CODE_START(smpboot_entry)
+       li.d            t0, CSR_DMW0_INIT       # UC, PLV0
+       csrwr           t0, LOONGARCH_CSR_DMWIN0
+       li.d            t0, CSR_DMW1_INIT       # CA, PLV0
+       csrwr           t0, LOONGARCH_CSR_DMWIN1
+       li.w            t0, 0xb0                # PLV=0, IE=0, PG=1
+       csrwr           t0, LOONGARCH_CSR_CRMD
+       li.w            t0, 0x04                # PLV=0, PIE=1, PWE=0
+       csrwr           t0, LOONGARCH_CSR_PRMD
+       li.w            t0, 0x00                # FPE=0, SXE=0, ASXE=0, BTE=0
+       csrwr           t0, LOONGARCH_CSR_EUEN
+
+       la.abs          t0, cpuboot_data
+       ld.d            sp, t0, CPU_BOOT_STACK
+       ld.d            tp, t0, CPU_BOOT_TINFO
+
+       la.abs  t0, 0f
+       jirl    zero, t0, 0
+0:
+       bl              start_secondary
+SYM_CODE_END(smpboot_entry)
+
+#endif /* CONFIG_SMP */
+
+SYM_ENTRY(kernel_entry_end, SYM_L_GLOBAL, SYM_A_NONE)
diff --git a/arch/loongarch/kernel/idle.c b/arch/loongarch/kernel/idle.c
new file mode 100644 (file)
index 0000000..1a65d05
--- /dev/null
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LoongArch idle loop support.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/cpu.h>
+#include <linux/irqflags.h>
+#include <asm/cpu.h>
+#include <asm/idle.h>
+
+void __cpuidle arch_cpu_idle(void)
+{
+       raw_local_irq_enable();
+       __arch_cpu_idle(); /* idle instruction needs irq enabled */
+}
diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
new file mode 100644 (file)
index 0000000..b1df0ec
--- /dev/null
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <asm/inst.h>
+
+u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm)
+{
+       union loongarch_instruction insn;
+
+       insn.reg1i20_format.opcode = lu32id_op;
+       insn.reg1i20_format.rd = rd;
+       insn.reg1i20_format.immediate = imm;
+
+       return insn.word;
+}
+
+u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
+{
+       union loongarch_instruction insn;
+
+       insn.reg2i12_format.opcode = lu52id_op;
+       insn.reg2i12_format.rd = rd;
+       insn.reg2i12_format.rj = rj;
+       insn.reg2i12_format.immediate = imm;
+
+       return insn.word;
+}
+
+u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest)
+{
+       union loongarch_instruction insn;
+
+       insn.reg2i16_format.opcode = jirl_op;
+       insn.reg2i16_format.rd = rd;
+       insn.reg2i16_format.rj = rj;
+       insn.reg2i16_format.immediate = (dest - pc) >> 2;
+
+       return insn.word;
+}
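
larch_insn_gen_jirl() above encodes its target as a PC-relative offset counted in 4-byte instruction slots, so the immediate field receives the low 16 bits of (dest - pc) >> 2. A standalone sketch of that computation, with a hypothetical helper and addresses (not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Derive the 16-bit jirl immediate: a PC-relative offset in 4-byte units. */
static uint16_t jirl_imm16(unsigned long pc, unsigned long dest)
{
        long delta = (long)(dest - pc);

        assert((delta & 3) == 0);                            /* instructions are 4-byte aligned */
        assert(delta >= -(1L << 17) && delta < (1L << 17));  /* must fit in 16 bits after >> 2 */
        return (uint16_t)((delta >> 2) & 0xffff);
}

int main(void)
{
        /* Jump 64 bytes forward: 16 instruction slots, encoded as 0x0010. */
        printf("imm16 = 0x%04x\n", jirl_imm16(0x1000, 0x1040));
        return 0;
}
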
diff --git a/arch/loongarch/kernel/io.c b/arch/loongarch/kernel/io.c
new file mode 100644 (file)
index 0000000..cb85bda
--- /dev/null
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ */
+void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
+{
+       while (count && !IS_ALIGNED((unsigned long)from, 8)) {
+               *(u8 *)to = __raw_readb(from);
+               from++;
+               to++;
+               count--;
+       }
+
+       while (count >= 8) {
+               *(u64 *)to = __raw_readq(from);
+               from += 8;
+               to += 8;
+               count -= 8;
+       }
+
+       while (count) {
+               *(u8 *)to = __raw_readb(from);
+               from++;
+               to++;
+               count--;
+       }
+}
+EXPORT_SYMBOL(__memcpy_fromio);
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ */
+void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
+{
+       while (count && !IS_ALIGNED((unsigned long)to, 8)) {
+               __raw_writeb(*(u8 *)from, to);
+               from++;
+               to++;
+               count--;
+       }
+
+       while (count >= 8) {
+               __raw_writeq(*(u64 *)from, to);
+               from += 8;
+               to += 8;
+               count -= 8;
+       }
+
+       while (count) {
+               __raw_writeb(*(u8 *)from, to);
+               from++;
+               to++;
+               count--;
+       }
+}
+EXPORT_SYMBOL(__memcpy_toio);
+
+/*
+ * "memset" on IO memory space.
+ */
+void __memset_io(volatile void __iomem *dst, int c, size_t count)
+{
+       u64 qc = (u8)c;
+
+       qc |= qc << 8;
+       qc |= qc << 16;
+       qc |= qc << 32;
+
+       while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
+               __raw_writeb(c, dst);
+               dst++;
+               count--;
+       }
+
+       while (count >= 8) {
+               __raw_writeq(qc, dst);
+               dst += 8;
+               count -= 8;
+       }
+
+       while (count) {
+               __raw_writeb(c, dst);
+               dst++;
+               count--;
+       }
+}
+EXPORT_SYMBOL(__memset_io);
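
__memset_io() above replicates the fill byte across a 64-bit word and then runs in three phases: a byte-wise head until the destination is 8-byte aligned, 64-bit stores for the bulk, and a byte-wise tail. A standalone sketch of the same pattern on ordinary memory (fill_bytes is a hypothetical illustration, not the kernel helper, which must go through __raw_writeb/__raw_writeq for MMIO):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void fill_bytes(uint8_t *dst, int c, size_t count)
{
        uint64_t qc = (uint8_t)c;

        qc |= qc << 8;
        qc |= qc << 16;
        qc |= qc << 32;                           /* 0xab -> 0xabababababababab */

        while (count && ((uintptr_t)dst & 7)) {   /* unaligned head, one byte at a time */
                *dst++ = (uint8_t)c;
                count--;
        }
        while (count >= 8) {                      /* aligned body, 8 bytes per store */
                memcpy(dst, &qc, 8);
                dst += 8;
                count -= 8;
        }
        while (count--)                           /* tail */
                *dst++ = (uint8_t)c;
}

int main(void)
{
        uint8_t buf[32];

        fill_bytes(buf + 1, 0xab, 30);
        printf("%02x %02x %02x\n", buf[1], buf[16], buf[30]);
        return 0;
}
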
diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c
new file mode 100644 (file)
index 0000000..4b671d3
--- /dev/null
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/kernel_stat.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+
+#include <asm/irq.h>
+#include <asm/loongson.h>
+#include <asm/setup.h>
+
+DEFINE_PER_CPU(unsigned long, irq_stack);
+
+struct irq_domain *cpu_domain;
+struct irq_domain *liointc_domain;
+struct irq_domain *pch_lpc_domain;
+struct irq_domain *pch_msi_domain[MAX_IO_PICS];
+struct irq_domain *pch_pic_domain[MAX_IO_PICS];
+
+/*
+ * 'What should we do if we get a hw irq event on an illegal vector?'
+ * Each architecture has to answer this themselves.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+       pr_warn("Unexpected IRQ # %d\n", irq);
+}
+
+atomic_t irq_err_count;
+
+asmlinkage void spurious_interrupt(void)
+{
+       atomic_inc(&irq_err_count);
+}
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+#ifdef CONFIG_SMP
+       show_ipi_list(p, prec);
+#endif
+       seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+       return 0;
+}
+
+void __init init_IRQ(void)
+{
+       int i, r, ipi_irq;
+       static int ipi_dummy_dev;
+       unsigned int order = get_order(IRQ_STACK_SIZE);
+       struct page *page;
+
+       clear_csr_ecfg(ECFG0_IM);
+       clear_csr_estat(ESTATF_IP);
+
+       irqchip_init();
+#ifdef CONFIG_SMP
+       ipi_irq = EXCCODE_IPI - EXCCODE_INT_START;
+       irq_set_percpu_devid(ipi_irq);
+       r = request_percpu_irq(ipi_irq, loongson3_ipi_interrupt, "IPI", &ipi_dummy_dev);
+       if (r < 0)
+               panic("IPI IRQ request failed\n");
+#endif
+
+       for (i = 0; i < NR_IRQS; i++)
+               irq_set_noprobe(i);
+
+       for_each_possible_cpu(i) {
+               page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);
+
+               per_cpu(irq_stack, i) = (unsigned long)page_address(page);
+               pr_debug("CPU%d IRQ stack at 0x%lx - 0x%lx\n", i,
+                       per_cpu(irq_stack, i), per_cpu(irq_stack, i) + IRQ_STACK_SIZE);
+       }
+
+       set_csr_ecfg(ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC);
+}
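
init_IRQ() above allocates one IRQ stack per possible CPU, rounding IRQ_STACK_SIZE up to a whole number of pages via get_order(). A standalone sketch of that page-order computation, assuming 16 KiB pages (the LoongArch default); order_for and the 64 KiB example are illustrative, not the kernel's actual constants:

#include <stdio.h>

#define PAGE_SHIFT 14
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest page order whose span covers 'size', i.e. what get_order() returns. */
static unsigned int order_for(unsigned long size)
{
        unsigned int order = 0;

        while ((PAGE_SIZE << order) < size)
                order++;
        return order;
}

int main(void)
{
        printf("order for a 64 KiB stack: %u\n", order_for(0x10000));   /* prints 2 */
        return 0;
}
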
diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c
new file mode 100644 (file)
index 0000000..7423361
--- /dev/null
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/efi.h>
+#include <linux/initrd.h>
+#include <linux/memblock.h>
+
+#include <asm/bootinfo.h>
+#include <asm/loongson.h>
+#include <asm/sections.h>
+
+void __init memblock_init(void)
+{
+       u32 mem_type;
+       u64 mem_start, mem_end, mem_size;
+       efi_memory_desc_t *md;
+
+       /* Parse memory information */
+       for_each_efi_memory_desc(md) {
+               mem_type = md->type;
+               mem_start = md->phys_addr;
+               mem_size = md->num_pages << EFI_PAGE_SHIFT;
+               mem_end = mem_start + mem_size;
+
+               switch (mem_type) {
+               case EFI_LOADER_CODE:
+               case EFI_LOADER_DATA:
+               case EFI_BOOT_SERVICES_CODE:
+               case EFI_BOOT_SERVICES_DATA:
+               case EFI_PERSISTENT_MEMORY:
+               case EFI_CONVENTIONAL_MEMORY:
+                       memblock_add(mem_start, mem_size);
+                       if (max_low_pfn < (mem_end >> PAGE_SHIFT))
+                               max_low_pfn = mem_end >> PAGE_SHIFT;
+                       break;
+               case EFI_PAL_CODE:
+               case EFI_UNUSABLE_MEMORY:
+               case EFI_ACPI_RECLAIM_MEMORY:
+                       memblock_add(mem_start, mem_size);
+                       fallthrough;
+               case EFI_RESERVED_TYPE:
+               case EFI_RUNTIME_SERVICES_CODE:
+               case EFI_RUNTIME_SERVICES_DATA:
+               case EFI_MEMORY_MAPPED_IO:
+               case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+                       memblock_reserve(mem_start, mem_size);
+                       break;
+               }
+       }
+
+       memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+       memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
+
+       /* Reserve the first 2MB */
+       memblock_reserve(PHYS_OFFSET, 0x200000);
+
+       /* Reserve the kernel text/data/bss */
+       memblock_reserve(__pa_symbol(&_text),
+                        __pa_symbol(&_end) - __pa_symbol(&_text));
+
+       /* Reserve the initrd */
+       reserve_initrd_mem();
+}
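
memblock_init() above sizes each EFI descriptor as num_pages << EFI_PAGE_SHIFT; EFI always counts 4 KiB pages regardless of the kernel page size. A standalone sketch of that conversion with a made-up descriptor (struct efi_desc is an illustrative subset of efi_memory_desc_t, not the real type):

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12

struct efi_desc {                    /* illustrative subset of efi_memory_desc_t */
        uint32_t type;
        uint64_t phys_addr;
        uint64_t num_pages;
};

int main(void)
{
        struct efi_desc md = { .type = 7 /* EFI_CONVENTIONAL_MEMORY */,
                               .phys_addr = 0x200000, .num_pages = 0x8000 };
        uint64_t size = md.num_pages << EFI_PAGE_SHIFT;

        printf("region [0x%llx, 0x%llx) -> %llu MiB\n",
               (unsigned long long)md.phys_addr,
               (unsigned long long)(md.phys_addr + size),
               (unsigned long long)(size >> 20));
        return 0;
}
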
diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c
new file mode 100644 (file)
index 0000000..6d49828
--- /dev/null
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+Elf_Addr module_emit_plt_entry(struct module *mod, unsigned long val)
+{
+       int nr;
+       struct mod_section *plt_sec = &mod->arch.plt;
+       struct mod_section *plt_idx_sec = &mod->arch.plt_idx;
+       struct plt_entry *plt = get_plt_entry(val, plt_sec, plt_idx_sec);
+       struct plt_idx_entry *plt_idx;
+
+       if (plt)
+               return (Elf_Addr)plt;
+
+       nr = plt_sec->num_entries;
+
+       /* There is no duplicate entry, so create a new one */
+       plt = (struct plt_entry *)plt_sec->shdr->sh_addr;
+       plt[nr] = emit_plt_entry(val);
+       plt_idx = (struct plt_idx_entry *)plt_idx_sec->shdr->sh_addr;
+       plt_idx[nr] = emit_plt_idx_entry(val);
+
+       plt_sec->num_entries++;
+       plt_idx_sec->num_entries++;
+       BUG_ON(plt_sec->num_entries > plt_sec->max_entries);
+
+       return (Elf_Addr)&plt[nr];
+}
+
+static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
+{
+       return x->r_info == y->r_info && x->r_addend == y->r_addend;
+}
+
+static bool duplicate_rela(const Elf_Rela *rela, int idx)
+{
+       int i;
+
+       for (i = 0; i < idx; i++) {
+               if (is_rela_equal(&rela[i], &rela[idx]))
+                       return true;
+       }
+
+       return false;
+}
+
+static void count_max_entries(Elf_Rela *relas, int num, unsigned int *plts)
+{
+       unsigned int i, type;
+
+       for (i = 0; i < num; i++) {
+               type = ELF_R_TYPE(relas[i].r_info);
+               if (type == R_LARCH_SOP_PUSH_PLT_PCREL) {
+                       if (!duplicate_rela(relas, i))
+                               (*plts)++;
+               }
+       }
+}
+
+int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+                             char *secstrings, struct module *mod)
+{
+       unsigned int i, num_plts = 0;
+
+       /*
+        * Find the empty .plt sections.
+        */
+       for (i = 0; i < ehdr->e_shnum; i++) {
+               if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
+                       mod->arch.plt.shdr = sechdrs + i;
+               else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt.idx"))
+                       mod->arch.plt_idx.shdr = sechdrs + i;
+       }
+
+       if (!mod->arch.plt.shdr) {
+               pr_err("%s: module PLT section(s) missing\n", mod->name);
+               return -ENOEXEC;
+       }
+       if (!mod->arch.plt_idx.shdr) {
+               pr_err("%s: module PLT.IDX section(s) missing\n", mod->name);
+               return -ENOEXEC;
+       }
+
+       /* Calculate the maximum number of entries */
+       for (i = 0; i < ehdr->e_shnum; i++) {
+               int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela);
+               Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
+               Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;
+
+               if (sechdrs[i].sh_type != SHT_RELA)
+                       continue;
+
+               /* ignore relocations that operate on non-exec sections */
+               if (!(dst_sec->sh_flags & SHF_EXECINSTR))
+                       continue;
+
+               count_max_entries(relas, num_rela, &num_plts);
+       }
+
+       mod->arch.plt.shdr->sh_type = SHT_NOBITS;
+       mod->arch.plt.shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+       mod->arch.plt.shdr->sh_addralign = L1_CACHE_BYTES;
+       mod->arch.plt.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_entry);
+       mod->arch.plt.num_entries = 0;
+       mod->arch.plt.max_entries = num_plts;
+
+       mod->arch.plt_idx.shdr->sh_type = SHT_NOBITS;
+       mod->arch.plt_idx.shdr->sh_flags = SHF_ALLOC;
+       mod->arch.plt_idx.shdr->sh_addralign = L1_CACHE_BYTES;
+       mod->arch.plt_idx.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_idx_entry);
+       mod->arch.plt_idx.num_entries = 0;
+       mod->arch.plt_idx.max_entries = num_plts;
+
+       return 0;
+}
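
module_frob_arch_sections() above pre-sizes .plt by counting R_LARCH_SOP_PUSH_PLT_PCREL relocations, with entries sharing the same r_info and r_addend collapsed into one PLT slot by duplicate_rela(). A standalone sketch of that de-duplicating count (struct rela and the sample values are hypothetical; the kernel version additionally filters by relocation type and destination section):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rela { uint64_t r_info; int64_t r_addend; };

static bool duplicate(const struct rela *r, int idx)
{
        for (int i = 0; i < idx; i++)
                if (r[i].r_info == r[idx].r_info && r[i].r_addend == r[idx].r_addend)
                        return true;
        return false;
}

int main(void)
{
        struct rela relas[] = {
                { 0x100, 0 }, { 0x200, 8 }, { 0x100, 0 }, { 0x200, 16 },
        };
        unsigned int plts = 0;

        for (int i = 0; i < 4; i++)
                if (!duplicate(relas, i))       /* one PLT slot per unique target */
                        plts++;
        printf("%u PLT entries needed\n", plts);        /* prints 3 */
        return 0;
}
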
diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
new file mode 100644 (file)
index 0000000..638427f
--- /dev/null
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#define pr_fmt(fmt) "kmod: " fmt
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/numa.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+static inline bool signed_imm_check(long val, unsigned int bit)
+{
+       return -(1L << (bit - 1)) <= val && val < (1L << (bit - 1));
+}
+
+static inline bool unsigned_imm_check(unsigned long val, unsigned int bit)
+{
+       return val < (1UL << bit);
+}
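
The two range checks above reject relocation results that cannot fit the target instruction's immediate field. A standalone illustration of the 12-bit case (the main() driver is hypothetical and not part of the module loader):

#include <stdbool.h>
#include <stdio.h>

static bool signed_imm_check(long val, unsigned int bit)
{
        return -(1L << (bit - 1)) <= val && val < (1L << (bit - 1));
}

static bool unsigned_imm_check(unsigned long val, unsigned int bit)
{
        return val < (1UL << bit);
}

int main(void)
{
        /* A 12-bit field holds [-2048, 2047] signed and [0, 4095] unsigned. */
        printf("%d %d\n", signed_imm_check(-2048, 12), signed_imm_check(2048, 12));     /* 1 0 */
        printf("%d %d\n", unsigned_imm_check(4095, 12), unsigned_imm_check(4096, 12));  /* 1 0 */
        return 0;
}
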
+
+static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
+{
+       if (*rela_stack_top >= RELA_STACK_DEPTH)
+               return -ENOEXEC;
+
+       rela_stack[(*rela_stack_top)++] = stack_value;
+       pr_debug("%s stack_value = 0x%llx\n", __func__, stack_value);
+
+       return 0;
+}
+
+static int rela_stack_pop(s64 *stack_value, s64 *rela_stack, size_t *rela_stack_top)
+{
+       if (*rela_stack_top == 0)
+               return -ENOEXEC;
+
+       *stack_value = rela_stack[--(*rela_stack_top)];
+       pr_debug("%s stack_value = 0x%llx\n", __func__, *stack_value);
+
+       return 0;
+}
+
+static int apply_r_larch_none(struct module *mod, u32 *location, Elf_Addr v,
+                       s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+       return 0;
+}
+
+static int apply_r_larch_error(struct module *me, u32 *location, Elf_Addr v,
+                       s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+       pr_err("%s: Unsupported relocation type %u, please add support for it.\n", me->name, type);
+       return -EINVAL;
+}
+
+static int apply_r_larch_32(struct module *mod, u32 *location, Elf_Addr v,
+                       s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+       *location = v;
+       return 0;
+}
+
+static int apply_r_larch_64(struct module *mod, u32 *location, Elf_Addr v,
+                       s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+       *(Elf_Addr *)location = v;
+       return 0;
+}
+
+static int apply_r_larch_sop_push_pcrel(struct module *mod, u32 *location, Elf_Addr v,
+                       s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+       return rela_stack_push(v - (u64)location, rela_stack, rela_stack_top);
+}
+
+static int apply_r_larch_sop_push_absolute(struct module *mod, u32 *location, Elf_Addr v,
+                       s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+       return rela_stack_push(v, rela_stack, rela_stack_top);
+}
+
+static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Addr v,
+                       s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+       int err = 0;
+       s64 opr1;
+
+       err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
+       if (err)
+               return err;
+       err = rela_stack_push(opr1, rela_stack, rela_stack_top);
+       if (err)
+               return err;
+       err = rela_stack_push(opr1, rela_stack, rela_stack_top);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+static int apply_r_larch_sop_push_plt_pcrel(struct module *mod, u32 *location, Elf_Addr v,
+                       s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+       ptrdiff_t offset = (void *)v - (void *)location;
+
+       if (offset >= SZ_128M)
+               v = module_emit_plt_entry(mod, v);
+
+       if (offset < -SZ_128M)
+               v = module_emit_plt_entry(mod, v);
+
+       return apply_r_larch_sop_push_pcrel(mod, location, v, rela_stack, rela_stack_top, type);
+}
+
+static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v,
+                       s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+       int err = 0;
+       s64 opr1, opr2, opr3;
+
+       if (type == R_LARCH_SOP_IF_ELSE) {
+               err = rela_stack_pop(&opr3, rela_stack, rela_stack_top);
+               if (err)
+                       return err;
+       }
+
+       err = rela_stack_pop(&opr2, rela_stack, rela_stack_top);
+       if (err)
+               return err;
+       err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
+       if (err)
+               return err;
+
+       switch (type) {
+       case R_LARCH_SOP_AND:
+               err = rela_stack_push(opr1 & opr2, rela_stack, rela_stack_top);
+               break;
+       case R_LARCH_SOP_ADD:
+               err = rela_stack_push(opr1 + opr2, rela_stack, rela_stack_top);
+               break;
+       case R_LARCH_SOP_SUB:
+               err = rela_stack_push(opr1 - opr2, rela_stack, rela_stack_top);
+               break;
+       case R_LARCH_SOP_SL:
+               err = rela_stack_push(opr1 << opr2, rela_stack, rela_stack_top);
+               break;
+       case R_LARCH_SOP_SR:
+               err = rela_stack_push(opr1 >> opr2, rela_stack, rela_stack_top);
+               break;
+       case R_LARCH_SOP_IF_ELSE:
+               err = rela_stack_push(opr1 ? opr2 : opr3, rela_stack, rela_stack_top);
+               break;
+       default:
+               pr_err("%s: Unsupported relocation type %u\n", mod->name, type);
+               return -EINVAL;
+       }
+
+       return err;
+}
+
+static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Addr v,
+                       s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+       int err = 0;
+       s64 opr1;
+       union loongarch_instruction *insn = (union loongarch_instruction *)location;
+
+       err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
+       if (err)
+               return err;
+
+       switch (type) {
+       case R_LARCH_SOP_POP_32_U_10_12:
+               if (!unsigned_imm_check(opr1, 12))
+                       goto overflow;
+
+               /* (*(uint32_t *) PC) [21 ... 10] = opr [11 ... 0] */
+               insn->reg2i12_format.immediate = opr1 & 0xfff;
+               return 0;
+       case R_LARCH_SOP_POP_32_S_10_12:
+               if (!signed_imm_check(opr1, 12))
+                       goto overflow;
+
+               insn->reg2i12_format.immediate = opr1 & 0xfff;
+               return 0;
+       case R_LARCH_SOP_POP_32_S_10_16:
+               if (!signed_imm_check(opr1, 16))
+                       goto overflow;
+
+               insn->reg2i16_format.immediate = opr1 & 0xffff;
+               return 0;
+       case R_LARCH_SOP_POP_32_S_10_16_S2:
+               if (opr1 % 4)
+                       goto unaligned;
+
+               if (!signed_imm_check(opr1, 18))
+                       goto overflow;
+
+               insn->reg2i16_format.immediate = (opr1 >> 2) & 0xffff;
+               return 0;
+       case R_LARCH_SOP_POP_32_S_5_20:
+               if (!signed_imm_check(opr1, 20))
+                       goto overflow;
+
+               insn->reg1i20_format.immediate = (opr1) & 0xfffff;
+               return 0;
+       case R_LARCH_SOP_POP_32_S_0_5_10_16_S2:
+               if (opr1 % 4)
+                       goto unaligned;
+
+               if (!signed_imm_check(opr1, 23))
+                       goto overflow;
+
+               opr1 >>= 2;
+               insn->reg1i21_format.immediate_l = opr1 & 0xffff;
+               insn->reg1i21_format.immediate_h = (opr1 >> 16) & 0x1f;
+               return 0;
+       case R_LARCH_SOP_POP_32_S_0_10_10_16_S2:
+               if (opr1 % 4)
+                       goto unaligned;
+
+               if (!signed_imm_check(opr1, 28))
+                       goto overflow;
+
+               opr1 >>= 2;
+               insn->reg0i26_format.immediate_l = opr1 & 0xffff;
+               insn->reg0i26_format.immediate_h = (opr1 >> 16) & 0x3ff;
+               return 0;
+       case R_LARCH_SOP_POP_32_U:
+               if (!unsigned_imm_check(opr1, 32))
+                       goto overflow;
+
+               /* (*(uint32_t *) PC) = opr */
+               *location = (u32)opr1;
+               return 0;
+       default:
+               pr_err("%s: Unsupported relocation type %u\n", mod->name, type);
+               return -EINVAL;
+       }
+
+overflow:
+       pr_err("module %s: opr1 = 0x%llx overflow! dangerous %s (%u) relocation\n",
+               mod->name, opr1, __func__, type);
+       return -ENOEXEC;
+
+unaligned:
+       pr_err("module %s: opr1 = 0x%llx unaligned! dangerous %s (%u) relocation\n",
+               mod->name, opr1, __func__, type);
+       return -ENOEXEC;
+}
+
+static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v,
+                       s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+       switch (type) {
+       case R_LARCH_ADD32:
+               *(s32 *)location += v;
+               return 0;
+       case R_LARCH_ADD64:
+               *(s64 *)location += v;
+               return 0;
+       case R_LARCH_SUB32:
+               *(s32 *)location -= v;
+               return 0;
+       case R_LARCH_SUB64:
+               *(s64 *)location -= v;
+               return 0;
+       default:
+               pr_err("%s: Unsupported relocation type %u\n", mod->name, type);
+               return -EINVAL;
+       }
+}
+
+/*
+ * reloc_rela_handler() - Apply a particular relocation to a module
+ * @mod: the module to apply the reloc to
+ * @location: the address at which the reloc is to be applied
+ * @v: the value of the reloc, with addend for RELA-style
+ * @rela_stack: the stack used to store relocation info, LOCAL to THIS module
+ * @rela_stack_top: index of the stack top, where push/pop operations apply
+ *
+ * Return: 0 upon success, else -ERRNO
+ */
+typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
+                       s64 *rela_stack, size_t *rela_stack_top, unsigned int type);
+
+/* The handlers for known reloc types */
+static reloc_rela_handler reloc_rela_handlers[] = {
+       [R_LARCH_NONE ... R_LARCH_SUB64]                     = apply_r_larch_error,
+
+       [R_LARCH_NONE]                                       = apply_r_larch_none,
+       [R_LARCH_32]                                         = apply_r_larch_32,
+       [R_LARCH_64]                                         = apply_r_larch_64,
+       [R_LARCH_MARK_LA]                                    = apply_r_larch_none,
+       [R_LARCH_MARK_PCREL]                                 = apply_r_larch_none,
+       [R_LARCH_SOP_PUSH_PCREL]                             = apply_r_larch_sop_push_pcrel,
+       [R_LARCH_SOP_PUSH_ABSOLUTE]                          = apply_r_larch_sop_push_absolute,
+       [R_LARCH_SOP_PUSH_DUP]                               = apply_r_larch_sop_push_dup,
+       [R_LARCH_SOP_PUSH_PLT_PCREL]                         = apply_r_larch_sop_push_plt_pcrel,
+       [R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE]            = apply_r_larch_sop,
+       [R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field,
+       [R_LARCH_ADD32 ... R_LARCH_SUB64]                    = apply_r_larch_add_sub,
+};
+
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+                      unsigned int symindex, unsigned int relsec,
+                      struct module *mod)
+{
+       int i, err;
+       unsigned int type;
+       s64 rela_stack[RELA_STACK_DEPTH];
+       size_t rela_stack_top = 0;
+       reloc_rela_handler handler;
+       void *location;
+       Elf_Addr v;
+       Elf_Sym *sym;
+       Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+
+       pr_debug("%s: Applying relocate section %u to %u\n", __func__, relsec,
+              sechdrs[relsec].sh_info);
+
+       rela_stack_top = 0;
+       for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+               /* This is where to make the change */
+               location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
+               /* This is the symbol it is referring to */
+               sym = (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_R_SYM(rel[i].r_info);
+               if (IS_ERR_VALUE(sym->st_value)) {
+                       /* Ignore unresolved weak symbol */
+                       if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
+                               continue;
+                       pr_warn("%s: Unknown symbol %s\n", mod->name, strtab + sym->st_name);
+                       return -ENOENT;
+               }
+
+               type = ELF_R_TYPE(rel[i].r_info);
+
+               if (type < ARRAY_SIZE(reloc_rela_handlers))
+                       handler = reloc_rela_handlers[type];
+               else
+                       handler = NULL;
+
+               if (!handler) {
+                       pr_err("%s: Unknown relocation type %u\n", mod->name, type);
+                       return -EINVAL;
+               }
+
+               pr_debug("type %d st_value %llx r_addend %llx loc %llx\n",
+                      (int)ELF_R_TYPE(rel[i].r_info),
+                      sym->st_value, rel[i].r_addend, (u64)location);
+
+               v = sym->st_value + rel[i].r_addend;
+               err = handler(mod, location, v, rela_stack, &rela_stack_top, type);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+void *module_alloc(unsigned long size)
+{
+       return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+                       GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0));
+}
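
Taken together, the SOP relocations above implement a small stack machine: the PUSH_* types push operands, the arithmetic SOP_* types pop and combine them, and the POP_32_* types range-check the result and pack it into an instruction's immediate field. A minimal standalone sketch of that model, covering only push, add, and pop (not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

#define RELA_STACK_DEPTH 16

static int64_t rela_stack[RELA_STACK_DEPTH];
static size_t rela_top;

static int push(int64_t v)
{
        if (rela_top >= RELA_STACK_DEPTH)
                return -1;
        rela_stack[rela_top++] = v;
        return 0;
}

static int pop(int64_t *v)
{
        if (rela_top == 0)
                return -1;
        *v = rela_stack[--rela_top];
        return 0;
}

int main(void)
{
        int64_t a, b, imm;

        push(0x1000);           /* e.g. R_LARCH_SOP_PUSH_PCREL */
        push(0x234);            /* e.g. R_LARCH_SOP_PUSH_ABSOLUTE */
        pop(&b);
        pop(&a);
        push(a + b);            /* R_LARCH_SOP_ADD */
        pop(&imm);              /* a POP_32_* relocation would range-check and
                                 * pack this into the instruction word */
        printf("imm = 0x%llx\n", (long long)imm);       /* prints 0x1234 */
        return 0;
}
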
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
new file mode 100644 (file)
index 0000000..a76f547
--- /dev/null
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author:  Xiang Gao <gaoxiang@loongson.cn>
+ *          Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/export.h>
+#include <linux/nodemask.h>
+#include <linux/swap.h>
+#include <linux/memblock.h>
+#include <linux/pfn.h>
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <asm/bootinfo.h>
+#include <asm/loongson.h>
+#include <asm/numa.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+#include <asm/time.h>
+
+int numa_off;
+struct pglist_data *node_data[MAX_NUMNODES];
+unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];
+
+EXPORT_SYMBOL(node_data);
+EXPORT_SYMBOL(node_distances);
+
+static struct numa_meminfo numa_meminfo;
+cpumask_t cpus_on_node[MAX_NUMNODES];
+cpumask_t phys_cpus_on_node[MAX_NUMNODES];
+EXPORT_SYMBOL(cpus_on_node);
+
+/*
+ * apicid, cpu, node mappings
+ */
+s16 __cpuid_to_node[CONFIG_NR_CPUS] = {
+       [0 ... CONFIG_NR_CPUS - 1] = NUMA_NO_NODE
+};
+EXPORT_SYMBOL(__cpuid_to_node);
+
+nodemask_t numa_nodes_parsed __initdata;
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init pcpu_cpu_to_node(int cpu)
+{
+       return early_cpu_to_node(cpu);
+}
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+       if (early_cpu_to_node(from) == early_cpu_to_node(to))
+               return LOCAL_DISTANCE;
+       else
+               return REMOTE_DISTANCE;
+}
+
+void __init pcpu_populate_pte(unsigned long addr)
+{
+       pgd_t *pgd = pgd_offset_k(addr);
+       p4d_t *p4d = p4d_offset(pgd, addr);
+       pud_t *pud;
+       pmd_t *pmd;
+
+       if (p4d_none(*p4d)) {
+               pud_t *new;
+
+               new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+               pgd_populate(&init_mm, pgd, new);
+#ifndef __PAGETABLE_PUD_FOLDED
+               pud_init((unsigned long)new, (unsigned long)invalid_pmd_table);
+#endif
+       }
+
+       pud = pud_offset(p4d, addr);
+       if (pud_none(*pud)) {
+               pmd_t *new;
+
+               new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+               pud_populate(&init_mm, pud, new);
+#ifndef __PAGETABLE_PMD_FOLDED
+               pmd_init((unsigned long)new, (unsigned long)invalid_pte_table);
+#endif
+       }
+
+       pmd = pmd_offset(pud, addr);
+       if (!pmd_present(*pmd)) {
+               pte_t *new;
+
+               new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+               pmd_populate_kernel(&init_mm, pmd, new);
+       }
+}
+
+void __init setup_per_cpu_areas(void)
+{
+       unsigned long delta;
+       unsigned int cpu;
+       int rc = -EINVAL;
+
+       if (pcpu_chosen_fc == PCPU_FC_AUTO) {
+               if (nr_node_ids >= 8)
+                       pcpu_chosen_fc = PCPU_FC_PAGE;
+               else
+                       pcpu_chosen_fc = PCPU_FC_EMBED;
+       }
+
+       /*
+        * Always reserve area for module percpu variables.  That's
+        * what the legacy allocator did.
+        */
+       if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+               rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+                                           PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
+                                           pcpu_cpu_distance, pcpu_cpu_to_node);
+               if (rc < 0)
+                       pr_warn("%s allocator failed (%d), falling back to page size\n",
+                               pcpu_fc_names[pcpu_chosen_fc], rc);
+       }
+       if (rc < 0)
+               rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_cpu_to_node);
+       if (rc < 0)
+               panic("cannot initialize percpu area (err=%d)", rc);
+
+       delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+       for_each_possible_cpu(cpu)
+               __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
+/*
+ * Get the node id by logical cpu number.
+ * __cpuid_to_node maps a physical cpu id to its node, so we
+ * use cpu_logical_map(cpu) to index it.
+ *
+ * This routine is only used during the early boot phase; once
+ * setup_per_cpu_areas() has run and numa_node is initialized,
+ * cpu_to_node() should be used instead.
+ */
+int early_cpu_to_node(int cpu)
+{
+       int physid = cpu_logical_map(cpu);
+
+       if (physid < 0)
+               return NUMA_NO_NODE;
+
+       return __cpuid_to_node[physid];
+}
+
+void __init early_numa_add_cpu(int cpuid, s16 node)
+{
+       int cpu = __cpu_number_map[cpuid];
+
+       if (cpu < 0)
+               return;
+
+       cpumask_set_cpu(cpu, &cpus_on_node[node]);
+       cpumask_set_cpu(cpuid, &phys_cpus_on_node[node]);
+}
+
+void numa_add_cpu(unsigned int cpu)
+{
+       int nid = cpu_to_node(cpu);
+       cpumask_set_cpu(cpu, &cpus_on_node[nid]);
+}
+
+void numa_remove_cpu(unsigned int cpu)
+{
+       int nid = cpu_to_node(cpu);
+       cpumask_clear_cpu(cpu, &cpus_on_node[nid]);
+}
+
+static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
+                                    struct numa_meminfo *mi)
+{
+       /* ignore zero length blks */
+       if (start == end)
+               return 0;
+
+       /* whine about and ignore invalid blks */
+       if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
+               pr_warn("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
+                          nid, start, end - 1);
+               return 0;
+       }
+
+       if (mi->nr_blks >= NR_NODE_MEMBLKS) {
+               pr_err("NUMA: too many memblk ranges\n");
+               return -EINVAL;
+       }
+
+       mi->blk[mi->nr_blks].start = PFN_ALIGN(start);
+       mi->blk[mi->nr_blks].end = PFN_ALIGN(end - PAGE_SIZE + 1);
+       mi->blk[mi->nr_blks].nid = nid;
+       mi->nr_blks++;
+       return 0;
+}
+
+/**
+ * numa_add_memblk - Add one numa_memblk to numa_meminfo
+ * @nid: NUMA node ID of the new memblk
+ * @start: Start address of the new memblk
+ * @end: End address of the new memblk
+ *
+ * Add a new memblk to the default numa_meminfo.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init numa_add_memblk(int nid, u64 start, u64 end)
+{
+       return numa_add_memblk_to(nid, start, end, &numa_meminfo);
+}
+
+static void __init alloc_node_data(int nid)
+{
+       void *nd;
+       unsigned long nd_pa;
+       size_t nd_sz = roundup(sizeof(pg_data_t), PAGE_SIZE);
+
+       nd_pa = memblock_phys_alloc_try_nid(nd_sz, SMP_CACHE_BYTES, nid);
+       if (!nd_pa) {
+               pr_err("Cannot allocate %zu bytes for node_data (initial node: %d)\n", nd_sz, nid);
+               return;
+       }
+
+       nd = __va(nd_pa);
+
+       node_data[nid] = nd;
+       memset(nd, 0, sizeof(pg_data_t));
+}
+
+static void __init node_mem_init(unsigned int node)
+{
+       unsigned long start_pfn, end_pfn;
+       unsigned long node_addrspace_offset;
+
+       node_addrspace_offset = nid_to_addrbase(node);
+       pr_info("Node%d's addrspace_offset is 0x%lx\n",
+                       node, node_addrspace_offset);
+
+       get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
+       pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n",
+               node, start_pfn, end_pfn);
+
+       alloc_node_data(node);
+}
+
+#ifdef CONFIG_ACPI_NUMA
+
+/*
+ * Sanity check to catch more bad NUMA configurations (they are amazingly
+ * common).  Make sure the nodes cover all memory.
+ */
+static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
+{
+       int i;
+       u64 numaram, biosram;
+
+       numaram = 0;
+       for (i = 0; i < mi->nr_blks; i++) {
+               u64 s = mi->blk[i].start >> PAGE_SHIFT;
+               u64 e = mi->blk[i].end >> PAGE_SHIFT;
+
+               numaram += e - s;
+               numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
+               if ((s64)numaram < 0)
+                       numaram = 0;
+       }
+       max_pfn = max_low_pfn;
+       biosram = max_pfn - absent_pages_in_range(0, max_pfn);
+
+       BUG_ON((s64)(biosram - numaram) >= (1 << (20 - PAGE_SHIFT)));
+       return true;
+}
+
+static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type)
+{
+       static unsigned long num_physpages;
+
+       num_physpages += (size >> PAGE_SHIFT);
+       pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
+               node, type, start, size);
+       pr_info("       start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
+               start >> PAGE_SHIFT, (start + size) >> PAGE_SHIFT, num_physpages);
+       memblock_set_node(start, size, &memblock.memory, node);
+}
+
+/*
+ * add_numamem_region
+ *
+ * Add a usable memory region described by the BIOS. The
+ * routine computes the intersection between the BIOS region
+ * and each node's region, and adds the pieces to that node's
+ * memblock pool.
+ */
+static void __init add_numamem_region(u64 start, u64 end, u32 type)
+{
+       u32 i;
+       u64 ofs = start;
+
+       if (start >= end) {
+               pr_debug("Invalid region: %016llx-%016llx\n", start, end);
+               return;
+       }
+
+       for (i = 0; i < numa_meminfo.nr_blks; i++) {
+               struct numa_memblk *mb = &numa_meminfo.blk[i];
+
+               if (ofs > mb->end)
+                       continue;
+
+               if (end > mb->end) {
+                       add_node_intersection(mb->nid, ofs, mb->end - ofs, type);
+                       ofs = mb->end;
+               } else {
+                       add_node_intersection(mb->nid, ofs, end - ofs, type);
+                       break;
+               }
+       }
+}
+
+static void __init init_node_memblock(void)
+{
+       u32 mem_type;
+       u64 mem_end, mem_start, mem_size;
+       efi_memory_desc_t *md;
+
+       /* Parse memory information and activate */
+       for_each_efi_memory_desc(md) {
+               mem_type = md->type;
+               mem_start = md->phys_addr;
+               mem_size = md->num_pages << EFI_PAGE_SHIFT;
+               mem_end = mem_start + mem_size;
+
+               switch (mem_type) {
+               case EFI_LOADER_CODE:
+               case EFI_LOADER_DATA:
+               case EFI_BOOT_SERVICES_CODE:
+               case EFI_BOOT_SERVICES_DATA:
+               case EFI_PERSISTENT_MEMORY:
+               case EFI_CONVENTIONAL_MEMORY:
+                       add_numamem_region(mem_start, mem_end, mem_type);
+                       break;
+               case EFI_PAL_CODE:
+               case EFI_UNUSABLE_MEMORY:
+               case EFI_ACPI_RECLAIM_MEMORY:
+                       add_numamem_region(mem_start, mem_end, mem_type);
+                       fallthrough;
+               case EFI_RESERVED_TYPE:
+               case EFI_RUNTIME_SERVICES_CODE:
+               case EFI_RUNTIME_SERVICES_DATA:
+               case EFI_MEMORY_MAPPED_IO:
+               case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+                       pr_info("Resvd: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
+                                       mem_type, mem_start, mem_size);
+                       break;
+               }
+       }
+}
+
+static void __init numa_default_distance(void)
+{
+       int row, col;
+
+       for (row = 0; row < MAX_NUMNODES; row++)
+               for (col = 0; col < MAX_NUMNODES; col++) {
+                       if (col == row)
+                               node_distances[row][col] = LOCAL_DISTANCE;
+                       else
+                               /* We assume one node per package here.
+                                *
+                                * A SLIT should be used for multiple nodes
+                                * per package to override this default.
+                                */
+                               node_distances[row][col] = REMOTE_DISTANCE;
+       }
+}
+
+int __init init_numa_memory(void)
+{
+       int i;
+       int ret;
+       int node;
+
+       for (i = 0; i < NR_CPUS; i++)
+               set_cpuid_to_node(i, NUMA_NO_NODE);
+
+       numa_default_distance();
+       nodes_clear(numa_nodes_parsed);
+       nodes_clear(node_possible_map);
+       nodes_clear(node_online_map);
+       memset(&numa_meminfo, 0, sizeof(numa_meminfo));
+
+       /* Parse SRAT and SLIT if provided by firmware. */
+       ret = acpi_numa_init();
+       if (ret < 0)
+               return ret;
+
+       node_possible_map = numa_nodes_parsed;
+       if (WARN_ON(nodes_empty(node_possible_map)))
+               return -EINVAL;
+
+       init_node_memblock();
+       if (numa_meminfo_cover_memory(&numa_meminfo) == false)
+               return -EINVAL;
+
+       for_each_node_mask(node, node_possible_map) {
+               node_mem_init(node);
+               node_set_online(node);
+       }
+       max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+
+       setup_nr_node_ids();
+       loongson_sysconf.nr_nodes = nr_node_ids;
+       loongson_sysconf.cores_per_node = cpumask_weight(&phys_cpus_on_node[0]);
+
+       return 0;
+}
+
+EXPORT_SYMBOL(init_numa_memory);
+#endif
+
+void __init paging_init(void)
+{
+       unsigned int node;
+       unsigned long zones_size[MAX_NR_ZONES] = {0, };
+
+       for_each_online_node(node) {
+               unsigned long start_pfn, end_pfn;
+
+               get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
+
+               if (end_pfn > max_low_pfn)
+                       max_low_pfn = end_pfn;
+       }
+#ifdef CONFIG_ZONE_DMA32
+       zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+       zones_size[ZONE_NORMAL] = max_low_pfn;
+       free_area_init(zones_size);
+}
+
+void __init mem_init(void)
+{
+       high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
+       memblock_free_all();
+       setup_zero_pages();     /* This comes from node 0 */
+}
+
+int pcibus_to_node(struct pci_bus *bus)
+{
+       return dev_to_node(&bus->dev);
+}
+EXPORT_SYMBOL(pcibus_to_node);
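
numa_default_distance() above seeds the node distance table with LOCAL_DISTANCE on the diagonal and REMOTE_DISTANCE everywhere else, to be overridden later by a firmware-provided SLIT. A standalone sketch of that initialization (MAX_NODES is an illustrative bound; 10 and 20 are the usual kernel values of the two constants):

#include <stdio.h>

#define MAX_NODES       4
#define LOCAL_DISTANCE  10
#define REMOTE_DISTANCE 20

static unsigned char dist[MAX_NODES][MAX_NODES];

int main(void)
{
        for (int row = 0; row < MAX_NODES; row++)
                for (int col = 0; col < MAX_NODES; col++)
                        dist[row][col] = (row == col) ? LOCAL_DISTANCE : REMOTE_DISTANCE;

        for (int row = 0; row < MAX_NODES; row++) {
                for (int col = 0; col < MAX_NODES; col++)
                        printf("%2u ", dist[row][col]);
                printf("\n");
        }
        return 0;
}
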
diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c
new file mode 100644 (file)
index 0000000..1effc73
--- /dev/null
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/idle.h>
+#include <asm/processor.h>
+#include <asm/time.h>
+
+/*
+ * No lock; only written during early bootup by CPU 0.
+ */
+static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
+
+int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
+{
+       return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
+}
+
+int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
+{
+       return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
+}
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+       unsigned long n = (unsigned long) v - 1;
+       unsigned int version = cpu_data[n].processor_id & 0xff;
+       unsigned int fp_version = cpu_data[n].fpu_vers;
+       struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
+
+#ifdef CONFIG_SMP
+       if (!cpu_online(n))
+               return 0;
+#endif
+
+       /*
+        * For the first processor also print the system type
+        */
+       if (n == 0)
+               seq_printf(m, "system type\t\t: %s\n\n", get_system_type());
+
+       seq_printf(m, "processor\t\t: %ld\n", n);
+       seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
+       seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
+       seq_printf(m, "CPU Family\t\t: %s\n", __cpu_family[n]);
+       seq_printf(m, "Model Name\t\t: %s\n", __cpu_full_name[n]);
+       seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version);
+       seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version);
+       seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n",
+                     cpu_clock_freq / 1000000, (cpu_clock_freq / 10000) % 100);
+       seq_printf(m, "BogoMIPS\t\t: %llu.%02llu\n",
+                     (lpj_fine * cpu_clock_freq / const_clock_freq) / (500000/HZ),
+                     ((lpj_fine * cpu_clock_freq / const_clock_freq) / (5000/HZ)) % 100);
+       seq_printf(m, "TLB Entries\t\t: %d\n", cpu_data[n].tlbsize);
+       seq_printf(m, "Address Sizes\t\t: %d bits physical, %d bits virtual\n",
+                     cpu_pabits + 1, cpu_vabits + 1);
+
+       seq_printf(m, "ISA\t\t\t:");
+       if (cpu_has_loongarch32)
+               seq_printf(m, " loongarch32");
+       if (cpu_has_loongarch64)
+               seq_printf(m, " loongarch64");
+       seq_printf(m, "\n");
+
+       seq_printf(m, "Features\t\t:");
+       if (cpu_has_cpucfg)     seq_printf(m, " cpucfg");
+       if (cpu_has_lam)        seq_printf(m, " lam");
+       if (cpu_has_ual)        seq_printf(m, " ual");
+       if (cpu_has_fpu)        seq_printf(m, " fpu");
+       if (cpu_has_lsx)        seq_printf(m, " lsx");
+       if (cpu_has_lasx)       seq_printf(m, " lasx");
+       if (cpu_has_complex)    seq_printf(m, " complex");
+       if (cpu_has_crypto)     seq_printf(m, " crypto");
+       if (cpu_has_lvz)        seq_printf(m, " lvz");
+       if (cpu_has_lbt_x86)    seq_printf(m, " lbt_x86");
+       if (cpu_has_lbt_arm)    seq_printf(m, " lbt_arm");
+       if (cpu_has_lbt_mips)   seq_printf(m, " lbt_mips");
+       seq_printf(m, "\n");
+
+       seq_printf(m, "Hardware Watchpoint\t: %s",
+                     cpu_has_watch ? "yes, " : "no\n");
+       if (cpu_has_watch) {
+               seq_printf(m, "iwatch count: %d, dwatch count: %d\n",
+                     cpu_data[n].watch_ireg_count, cpu_data[n].watch_dreg_count);
+       }
+
+       proc_cpuinfo_notifier_args.m = m;
+       proc_cpuinfo_notifier_args.n = n;
+
+       raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
+                               &proc_cpuinfo_notifier_args);
+
+       seq_printf(m, "\n");
+
+       return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+       unsigned long i = *pos;
+
+       return i < NR_CPUS ? (void *)(i + 1) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       ++*pos;
+       return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+       .start  = c_start,
+       .next   = c_next,
+       .stop   = c_stop,
+       .show   = show_cpuinfo,
+};
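
c_start() above returns (void *)(i + 1) rather than the CPU index itself so that CPU 0 is not mistaken for the NULL end-of-sequence marker; show_cpuinfo() undoes the +1. A standalone sketch of that iterator contract (the plain for loop stands in for the seq_file core and is purely illustrative):

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* Return a non-NULL cookie for CPU *pos, or NULL at the end of the list. */
static void *c_start(long long *pos)
{
        uintptr_t i = (uintptr_t)*pos;

        return i < NR_CPUS ? (void *)(i + 1) : NULL;    /* +1 keeps CPU 0 non-NULL */
}

static void *c_next(long long *pos)
{
        ++*pos;
        return c_start(pos);
}

static void show(void *v)
{
        unsigned long n = (unsigned long)(uintptr_t)v - 1;      /* undo the +1 */

        printf("processor\t: %lu\n", n);
}

int main(void)
{
        long long pos = 0;

        for (void *v = c_start(&pos); v; v = c_next(&pos))
                show(v);
        return 0;
}
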
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
new file mode 100644 (file)
index 0000000..6d944d6
--- /dev/null
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
+ * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2013  Imagination Technologies Ltd.
+ */
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/export.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/personality.h>
+#include <linux/sys.h>
+#include <linux/completion.h>
+#include <linux/kallsyms.h>
+#include <linux/random.h>
+#include <linux/prctl.h>
+#include <linux/nmi.h>
+
+#include <asm/asm.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/elf.h>
+#include <asm/fpu.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/loongarch.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/reg.h>
+#include <asm/vdso.h>
+
+/*
+ * Idle related variables and functions
+ */
+
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
+EXPORT_SYMBOL(boot_option_idle_override);
+
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+       play_dead();
+}
+#endif
+
+asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
+
+void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
+{
+       unsigned long crmd;
+       unsigned long prmd;
+       unsigned long euen;
+
+       /* New thread loses kernel privileges. */
+       crmd = regs->csr_crmd & ~(PLV_MASK);
+       crmd |= PLV_USER;
+       regs->csr_crmd = crmd;
+
+       prmd = regs->csr_prmd & ~(PLV_MASK);
+       prmd |= PLV_USER;
+       regs->csr_prmd = prmd;
+
+       euen = regs->csr_euen & ~(CSR_EUEN_FPEN);
+       regs->csr_euen = euen;
+       lose_fpu(0);
+
+       clear_thread_flag(TIF_LSX_CTX_LIVE);
+       clear_thread_flag(TIF_LASX_CTX_LIVE);
+       clear_used_math();
+       regs->csr_era = pc;
+       regs->regs[3] = sp;
+}
+
+void exit_thread(struct task_struct *tsk)
+{
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+       /*
+        * Save any process state which is live in hardware registers to the
+        * parent context prior to duplication. This prevents the new child
+        * state becoming stale if the parent is preempted before copy_thread()
+        * gets a chance to save the parent's live hardware registers to the
+        * child context.
+        */
+       preempt_disable();
+
+       if (is_fpu_owner())
+               save_fp(current);
+
+       preempt_enable();
+
+       if (used_math())
+               memcpy(dst, src, sizeof(struct task_struct));
+       else
+               memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));
+
+       return 0;
+}
+
+/*
+ * Copy architecture-specific thread state
+ */
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+       unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
+{
+       unsigned long childksp;
+       struct pt_regs *childregs, *regs = current_pt_regs();
+
+       childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
+
+       /* set up new TSS. */
+       childregs = (struct pt_regs *) childksp - 1;
+       /*  Put the stack after the struct pt_regs.  */
+       childksp = (unsigned long) childregs;
+       p->thread.csr_euen = 0;
+       p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
+       p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
+       p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
+       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+               /* kernel thread */
+               p->thread.reg23 = usp; /* fn */
+               p->thread.reg24 = kthread_arg;
+               p->thread.reg03 = childksp;
+               p->thread.reg01 = (unsigned long) ret_from_kernel_thread;
+               memset(childregs, 0, sizeof(struct pt_regs));
+               childregs->csr_euen = p->thread.csr_euen;
+               childregs->csr_crmd = p->thread.csr_crmd;
+               childregs->csr_prmd = p->thread.csr_prmd;
+               childregs->csr_ecfg = p->thread.csr_ecfg;
+               return 0;
+       }
+
+       /* user thread */
+       *childregs = *regs;
+       childregs->regs[4] = 0; /* Child gets zero as return value */
+       if (usp)
+               childregs->regs[3] = usp;
+
+       p->thread.reg03 = (unsigned long) childregs;
+       p->thread.reg01 = (unsigned long) ret_from_fork;
+
+       /*
+        * New tasks lose permission to use the fpu. This accelerates context
+        * switching for most programs since they don't use the fpu.
+        */
+       childregs->csr_euen = 0;
+
+       clear_tsk_thread_flag(p, TIF_USEDFPU);
+       clear_tsk_thread_flag(p, TIF_USEDSIMD);
+       clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
+       clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);
+
+       if (clone_flags & CLONE_SETTLS)
+               childregs->regs[2] = tls;
+
+       return 0;
+}
+
+unsigned long __get_wchan(struct task_struct *task)
+{
+       return 0;
+}
+
+unsigned long stack_top(void)
+{
+       unsigned long top = TASK_SIZE & PAGE_MASK;
+
+       /* Space for the VDSO & data page */
+       top -= PAGE_ALIGN(current->thread.vdso->size);
+       top -= PAGE_SIZE;
+
+       /* Space to randomize the VDSO base */
+       if (current->flags & PF_RANDOMIZE)
+               top -= VDSO_RANDOMIZE_SIZE;
+
+       return top;
+}
+
+/*
+ * Don't forget that the stack pointer must be aligned on an 8-byte
+ * boundary for the 32-bit ABI and a 16-byte boundary for the 64-bit ABI.
+ */
+unsigned long arch_align_stack(unsigned long sp)
+{
+       if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+               sp -= get_random_int() & ~PAGE_MASK;
+
+       return sp & STACK_ALIGN;
+}
+
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
+{
+       nmi_cpu_backtrace(get_irq_regs());
+       cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
+
+static void raise_backtrace(cpumask_t *mask)
+{
+       call_single_data_t *csd;
+       int cpu;
+
+       for_each_cpu(cpu, mask) {
+               /*
+                * If we previously sent an IPI to the target CPU & it hasn't
+                * cleared its bit in the busy cpumask then it didn't handle
+                * our previous IPI & it's not safe for us to reuse the
+                * call_single_data_t.
+                */
+               if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+                       pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+                               cpu);
+                       continue;
+               }
+
+               csd = &per_cpu(backtrace_csd, cpu);
+               csd->func = handle_backtrace;
+               smp_call_function_single_async(cpu, csd);
+       }
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+{
+       nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
+}
+
+#ifdef CONFIG_64BIT
+void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
+{
+       unsigned int i;
+
+       for (i = LOONGARCH_EF_R1; i <= LOONGARCH_EF_R31; i++) {
+               uregs[i] = regs->regs[i - LOONGARCH_EF_R0];
+       }
+
+       uregs[LOONGARCH_EF_ORIG_A0] = regs->orig_a0;
+       uregs[LOONGARCH_EF_CSR_ERA] = regs->csr_era;
+       uregs[LOONGARCH_EF_CSR_BADV] = regs->csr_badvaddr;
+       uregs[LOONGARCH_EF_CSR_CRMD] = regs->csr_crmd;
+       uregs[LOONGARCH_EF_CSR_PRMD] = regs->csr_prmd;
+       uregs[LOONGARCH_EF_CSR_EUEN] = regs->csr_euen;
+       uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg;
+       uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat;
+}
+#endif /* CONFIG_64BIT */
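
arch_align_stack() above lowers the initial user stack by a random sub-page amount and then masks the result back down to the ABI alignment. A standalone sketch with assumed constants (16 KiB pages and a 16-byte alignment mask; the real PAGE_MASK and STACK_ALIGN come from arch headers not shown in this diff):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE   0x4000UL            /* assumed 16 KiB pages */
#define PAGE_MASK   (~(PAGE_SIZE - 1))
#define ALIGN_MASK  (~0xfUL)            /* assumed 16-byte stack alignment */

static unsigned long align_stack(unsigned long sp)
{
        sp -= (unsigned long)rand() & ~PAGE_MASK;       /* random offset within one page */
        return sp & ALIGN_MASK;                         /* restore 16-byte alignment */
}

int main(void)
{
        printf("sp = 0x%lx\n", align_stack(0x7fff0000UL));
        return 0;
}
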
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
new file mode 100644 (file)
index 0000000..e6ab879
--- /dev/null
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1992 Ross Biro
+ * Copyright (C) Linus Torvalds
+ * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999 MIPS Technologies, Inc.
+ * Copyright (C) 2000 Ulf Carlsson
+ */
+#include <linux/kernel.h>
+#include <linux/audit.h>
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/ptrace.h>
+#include <linux/regset.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/security.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/seccomp.h>
+#include <linux/uaccess.h>
+
+#include <asm/byteorder.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/fpu.h>
+#include <asm/loongarch.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/reg.h>
+#include <asm/syscall.h>
+
+static void init_fp_ctx(struct task_struct *target)
+{
+       /* The target already has context */
+       if (tsk_used_math(target))
+               return;
+
+       /* Begin with data registers set to all 1s... */
+       memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
+       set_stopped_child_used_math(target);
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching.
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+       /* Don't load the watchpoint registers for the ex-child. */
+       clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
+       clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+/* regset get/set implementations */
+
+static int gpr_get(struct task_struct *target,
+                  const struct user_regset *regset,
+                  struct membuf to)
+{
+       int r;
+       struct pt_regs *regs = task_pt_regs(target);
+
+       r = membuf_write(&to, &regs->regs, sizeof(u64) * GPR_NUM);
+       r = membuf_write(&to, &regs->orig_a0, sizeof(u64));
+       r = membuf_write(&to, &regs->csr_era, sizeof(u64));
+       r = membuf_write(&to, &regs->csr_badvaddr, sizeof(u64));
+
+       return r;
+}
+
+static int gpr_set(struct task_struct *target,
+                  const struct user_regset *regset,
+                  unsigned int pos, unsigned int count,
+                  const void *kbuf, const void __user *ubuf)
+{
+       int err;
+       int a0_start = sizeof(u64) * GPR_NUM;
+       int era_start = a0_start + sizeof(u64);
+       int badvaddr_start = era_start + sizeof(u64);
+       struct pt_regs *regs = task_pt_regs(target);
+
+       err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &regs->regs,
+                                0, a0_start);
+       err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &regs->orig_a0,
+                                a0_start, a0_start + sizeof(u64));
+       err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &regs->csr_era,
+                                era_start, era_start + sizeof(u64));
+       err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &regs->csr_badvaddr,
+                                badvaddr_start, badvaddr_start + sizeof(u64));
+
+       return err;
+}
+
+/*
+ * Get the general floating-point registers.
+ */
+static int gfpr_get(struct task_struct *target, struct membuf *to)
+{
+       return membuf_write(to, &target->thread.fpu.fpr,
+                           sizeof(elf_fpreg_t) * NUM_FPU_REGS);
+}
+
+static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
+{
+       int i, r;
+       u64 fpr_val;
+
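+       /* Copy only the low 64 bits of each (possibly wider) FP register. */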
+       BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+       for (i = 0; i < NUM_FPU_REGS; i++) {
+               fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
+               r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
+       }
+
+       return r;
+}
+
+/*
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCC and FCSR registers separately.
+ */
+static int fpr_get(struct task_struct *target,
+                  const struct user_regset *regset,
+                  struct membuf to)
+{
+       int r;
+
+       if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+               r = gfpr_get(target, &to);
+       else
+               r = gfpr_get_simd(target, &to);
+
+       r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
+       r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));
+
+       return r;
+}
+
+static int gfpr_set(struct task_struct *target,
+                   unsigned int *pos, unsigned int *count,
+                   const void **kbuf, const void __user **ubuf)
+{
+       return user_regset_copyin(pos, count, kbuf, ubuf,
+                                 &target->thread.fpu.fpr,
+                                 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+static int gfpr_set_simd(struct task_struct *target,
+                      unsigned int *pos, unsigned int *count,
+                      const void **kbuf, const void __user **ubuf)
+{
+       int i, err;
+       u64 fpr_val;
+
+       BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+       for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
+               err = user_regset_copyin(pos, count, kbuf, ubuf,
+                                        &fpr_val, i * sizeof(elf_fpreg_t),
+                                        (i + 1) * sizeof(elf_fpreg_t));
+               if (err)
+                       return err;
+               set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
+       }
+
+       return 0;
+}
+
+/*
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCC register separately.
+ */
+static int fpr_set(struct task_struct *target,
+                  const struct user_regset *regset,
+                  unsigned int pos, unsigned int count,
+                  const void *kbuf, const void __user *ubuf)
+{
+       const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+       const int fcc_end = fcc_start + sizeof(u64);
+       int err;
+
+       BUG_ON(count % sizeof(elf_fpreg_t));
+       if (pos + count > sizeof(elf_fpregset_t))
+               return -EIO;
+
+       init_fp_ctx(target);
+
+       if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+               err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
+       else
+               err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
+       if (err)
+               return err;
+
+       if (count > 0)
+               err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                         &target->thread.fpu.fcc,
+                                         fcc_start, fcc_end);
+
+       return err;
+}
+
+static int cfg_get(struct task_struct *target,
+                  const struct user_regset *regset,
+                  struct membuf to)
+{
+       int i, r;
+       u32 cfg_val;
+
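+       /* Dump successive CPUCFG words until the regset buffer is full. */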
+       i = 0;
+       while (to.left > 0) {
+               cfg_val = read_cpucfg(i++);
+               r = membuf_write(&to, &cfg_val, sizeof(u32));
+       }
+
+       return r;
+}
+
+/*
+ * CFG registers are read-only.
+ */
+static int cfg_set(struct task_struct *target,
+                  const struct user_regset *regset,
+                  unsigned int pos, unsigned int count,
+                  const void *kbuf, const void __user *ubuf)
+{
+       return 0;
+}
+
+struct pt_regs_offset {
+       const char *name;
+       int offset;
+};
+
+#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+       REG_OFFSET_NAME(r0, regs[0]),
+       REG_OFFSET_NAME(r1, regs[1]),
+       REG_OFFSET_NAME(r2, regs[2]),
+       REG_OFFSET_NAME(r3, regs[3]),
+       REG_OFFSET_NAME(r4, regs[4]),
+       REG_OFFSET_NAME(r5, regs[5]),
+       REG_OFFSET_NAME(r6, regs[6]),
+       REG_OFFSET_NAME(r7, regs[7]),
+       REG_OFFSET_NAME(r8, regs[8]),
+       REG_OFFSET_NAME(r9, regs[9]),
+       REG_OFFSET_NAME(r10, regs[10]),
+       REG_OFFSET_NAME(r11, regs[11]),
+       REG_OFFSET_NAME(r12, regs[12]),
+       REG_OFFSET_NAME(r13, regs[13]),
+       REG_OFFSET_NAME(r14, regs[14]),
+       REG_OFFSET_NAME(r15, regs[15]),
+       REG_OFFSET_NAME(r16, regs[16]),
+       REG_OFFSET_NAME(r17, regs[17]),
+       REG_OFFSET_NAME(r18, regs[18]),
+       REG_OFFSET_NAME(r19, regs[19]),
+       REG_OFFSET_NAME(r20, regs[20]),
+       REG_OFFSET_NAME(r21, regs[21]),
+       REG_OFFSET_NAME(r22, regs[22]),
+       REG_OFFSET_NAME(r23, regs[23]),
+       REG_OFFSET_NAME(r24, regs[24]),
+       REG_OFFSET_NAME(r25, regs[25]),
+       REG_OFFSET_NAME(r26, regs[26]),
+       REG_OFFSET_NAME(r27, regs[27]),
+       REG_OFFSET_NAME(r28, regs[28]),
+       REG_OFFSET_NAME(r29, regs[29]),
+       REG_OFFSET_NAME(r30, regs[30]),
+       REG_OFFSET_NAME(r31, regs[31]),
+       REG_OFFSET_NAME(orig_a0, orig_a0),
+       REG_OFFSET_NAME(csr_era, csr_era),
+       REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
+       REG_OFFSET_NAME(csr_crmd, csr_crmd),
+       REG_OFFSET_NAME(csr_prmd, csr_prmd),
+       REG_OFFSET_NAME(csr_euen, csr_euen),
+       REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
+       REG_OFFSET_NAME(csr_estat, csr_estat),
+       REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name:       the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+       const struct pt_regs_offset *roff;
+
+       for (roff = regoffset_table; roff->name != NULL; roff++)
+               if (!strcmp(roff->name, name))
+                       return roff->offset;
+       return -EINVAL;
+}
+
+enum loongarch_regset {
+       REGSET_GPR,
+       REGSET_FPR,
+       REGSET_CPUCFG,
+};
+
+static const struct user_regset loongarch64_regsets[] = {
+       [REGSET_GPR] = {
+               .core_note_type = NT_PRSTATUS,
+               .n              = ELF_NGREG,
+               .size           = sizeof(elf_greg_t),
+               .align          = sizeof(elf_greg_t),
+               .regset_get     = gpr_get,
+               .set            = gpr_set,
+       },
+       [REGSET_FPR] = {
+               .core_note_type = NT_PRFPREG,
+               .n              = ELF_NFPREG,
+               .size           = sizeof(elf_fpreg_t),
+               .align          = sizeof(elf_fpreg_t),
+               .regset_get     = fpr_get,
+               .set            = fpr_set,
+       },
+       [REGSET_CPUCFG] = {
+               .core_note_type = NT_LOONGARCH_CPUCFG,
+               .n              = 64,
+               .size           = sizeof(u32),
+               .align          = sizeof(u32),
+               .regset_get     = cfg_get,
+               .set            = cfg_set,
+       },
+};
+
+static const struct user_regset_view user_loongarch64_view = {
+       .name           = "loongarch64",
+       .e_machine      = ELF_ARCH,
+       .regsets        = loongarch64_regsets,
+       .n              = ARRAY_SIZE(loongarch64_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+       return &user_loongarch64_view;
+}
+
+static inline int read_user(struct task_struct *target, unsigned long addr,
+                           unsigned long __user *data)
+{
+       unsigned long tmp = 0;
+
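+       /*
+        * For PTRACE_PEEKUSR, addr selects either a GPR (0-31) or one of
+        * the pseudo offsets ARG0, PC and BADVADDR.
+        */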
+       switch (addr) {
+       case 0 ... 31:
+               tmp = task_pt_regs(target)->regs[addr];
+               break;
+       case ARG0:
+               tmp = task_pt_regs(target)->orig_a0;
+               break;
+       case PC:
+               tmp = task_pt_regs(target)->csr_era;
+               break;
+       case BADVADDR:
+               tmp = task_pt_regs(target)->csr_badvaddr;
+               break;
+       default:
+               return -EIO;
+       }
+
+       return put_user(tmp, data);
+}
+
+static inline int write_user(struct task_struct *target, unsigned long addr,
+                           unsigned long data)
+{
+       switch (addr) {
+       case 0 ... 31:
+               task_pt_regs(target)->regs[addr] = data;
+               break;
+       case ARG0:
+               task_pt_regs(target)->orig_a0 = data;
+               break;
+       case PC:
+               task_pt_regs(target)->csr_era = data;
+               break;
+       case BADVADDR:
+               task_pt_regs(target)->csr_badvaddr = data;
+               break;
+       default:
+               return -EIO;
+       }
+
+       return 0;
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
+{
+       int ret;
+       unsigned long __user *datap = (void __user *) data;
+
+       switch (request) {
+       case PTRACE_PEEKUSR:
+               ret = read_user(child, addr, datap);
+               break;
+
+       case PTRACE_POKEUSR:
+               ret = write_user(child, addr, data);
+               break;
+
+       default:
+               ret = ptrace_request(child, request, addr, data);
+               break;
+       }
+
+       return ret;
+}
diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c
new file mode 100644 (file)
index 0000000..2b86469
--- /dev/null
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+
+#include <acpi/reboot.h>
+#include <asm/compiler.h>
+#include <asm/idle.h>
+#include <asm/loongarch.h>
+#include <asm/reboot.h>
+
+static void default_halt(void)
+{
+       local_irq_disable();
+       clear_csr_ecfg(ECFG0_IM);
+
+       pr_notice("\n\n** You can safely turn off the power now **\n\n");
+       console_flush_on_panic(CONSOLE_FLUSH_PENDING);
+
+       while (true) {
+               __arch_cpu_idle();
+       }
+}
+
+static void default_poweroff(void)
+{
+#ifdef CONFIG_EFI
+       efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
+#endif
+       while (true) {
+               __arch_cpu_idle();
+       }
+}
+
+static void default_restart(void)
+{
+#ifdef CONFIG_EFI
+       if (efi_capsule_pending(NULL))
+               efi_reboot(REBOOT_WARM, NULL);
+       else
+               efi_reboot(REBOOT_COLD, NULL);
+#endif
+       if (!acpi_disabled)
+               acpi_reboot();
+
+       while (true) {
+               __arch_cpu_idle();
+       }
+}
+
+void (*pm_restart)(void);
+EXPORT_SYMBOL(pm_restart);
+
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+void machine_halt(void)
+{
+#ifdef CONFIG_SMP
+       preempt_disable();
+       smp_send_stop();
+#endif
+       default_halt();
+}
+
+void machine_power_off(void)
+{
+#ifdef CONFIG_SMP
+       preempt_disable();
+       smp_send_stop();
+#endif
+       pm_power_off();
+}
+
+void machine_restart(char *command)
+{
+#ifdef CONFIG_SMP
+       preempt_disable();
+       smp_send_stop();
+#endif
+       do_kernel_restart(command);
+       pm_restart();
+}
+
+static int __init loongarch_reboot_setup(void)
+{
+       pm_restart = default_restart;
+       pm_power_off = default_poweroff;
+
+       return 0;
+}
+
+arch_initcall(loongarch_reboot_setup);
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
new file mode 100644 (file)
index 0000000..185e403
--- /dev/null
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 1995 Waldorf Electronics
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
+ * Copyright (C) 1996 Stoned Elipot
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000, 2001, 2002, 2007         Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/screen_info.h>
+#include <linux/memblock.h>
+#include <linux/initrd.h>
+#include <linux/ioport.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+#include <linux/pfn.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/device.h>
+#include <linux/dma-map-ops.h>
+#include <linux/swiotlb.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/cache.h>
+#include <asm/cpu.h>
+#include <asm/dma.h>
+#include <asm/efi.h>
+#include <asm/loongson.h>
+#include <asm/numa.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/smp.h>
+#include <asm/time.h>
+
+#define SMBIOS_BIOSSIZE_OFFSET         0x09
+#define SMBIOS_BIOSEXTERN_OFFSET       0x13
+#define SMBIOS_FREQLOW_OFFSET          0x16
+#define SMBIOS_FREQHIGH_OFFSET         0x17
+#define SMBIOS_FREQLOW_MASK            0xFF
+#define SMBIOS_CORE_PACKAGE_OFFSET     0x23
+#define LOONGSON_EFI_ENABLE            (1 << 3)
+
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
+
+unsigned long fw_arg0, fw_arg1;
+DEFINE_PER_CPU(unsigned long, kernelsp);
+struct cpuinfo_loongarch cpu_data[NR_CPUS] __read_mostly;
+
+EXPORT_SYMBOL(cpu_data);
+
+struct loongson_board_info b_info;
+static const char dmi_empty_string[] = "        ";
+
+/*
+ * Setup information
+ *
+ * These are initialized so they are in the .data section.
+ */
+
+static int num_standard_resources;
+static struct resource *standard_resources;
+
+static struct resource code_resource = { .name = "Kernel code", };
+static struct resource data_resource = { .name = "Kernel data", };
+static struct resource bss_resource  = { .name = "Kernel bss", };
+
+const char *get_system_type(void)
+{
+       return "generic-loongson-machine";
+}
+
+static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
+{
+       const u8 *bp = ((u8 *) dm) + dm->length;
+
+       if (s) {
+               s--;
+               while (s > 0 && *bp) {
+                       bp += strlen(bp) + 1;
+                       s--;
+               }
+
+               if (*bp != 0) {
+                       size_t len = strlen(bp)+1;
+                       size_t cmp_len = len > 8 ? 8 : len;
+
+                       if (!memcmp(bp, dmi_empty_string, cmp_len))
+                               return dmi_empty_string;
+
+                       return bp;
+               }
+       }
+
+       return "";
+}
+
+static void __init parse_cpu_table(const struct dmi_header *dm)
+{
+       long freq_temp = 0;
+       char *dmi_data = (char *)dm;
+
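+       /*
+        * The SMBIOS frequency field is a 16-bit value in MHz, assembled
+        * here from its high and low bytes and converted to Hz.
+        */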
+       freq_temp = ((*(dmi_data + SMBIOS_FREQHIGH_OFFSET) << 8) +
+                       ((*(dmi_data + SMBIOS_FREQLOW_OFFSET)) & SMBIOS_FREQLOW_MASK));
+       cpu_clock_freq = freq_temp * 1000000;
+
+       loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
+       loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_CORE_PACKAGE_OFFSET);
+
+       pr_info("CpuClock = %llu\n", cpu_clock_freq);
+}
+
+static void __init parse_bios_table(const struct dmi_header *dm)
+{
+       int bios_extern;
+       char *dmi_data = (char *)dm;
+
+       bios_extern = *(dmi_data + SMBIOS_BIOSEXTERN_OFFSET);
+       b_info.bios_size = *(dmi_data + SMBIOS_BIOSSIZE_OFFSET);
+
+       if (bios_extern & LOONGSON_EFI_ENABLE)
+               set_bit(EFI_BOOT, &efi.flags);
+       else
+               clear_bit(EFI_BOOT, &efi.flags);
+}
+
+static void __init find_tokens(const struct dmi_header *dm, void *dummy)
+{
+       switch (dm->type) {
+       case 0x0: /* BIOS Information */
+               parse_bios_table(dm);
+               break;
+       case 0x4: /* Processor Information */
+               parse_cpu_table(dm);
+               break;
+       }
+}
+
+static void __init smbios_parse(void)
+{
+       b_info.bios_vendor = (void *)dmi_get_system_info(DMI_BIOS_VENDOR);
+       b_info.bios_version = (void *)dmi_get_system_info(DMI_BIOS_VERSION);
+       b_info.bios_release_date = (void *)dmi_get_system_info(DMI_BIOS_DATE);
+       b_info.board_vendor = (void *)dmi_get_system_info(DMI_BOARD_VENDOR);
+       b_info.board_name = (void *)dmi_get_system_info(DMI_BOARD_NAME);
+       dmi_walk(find_tokens, NULL);
+}
+
+static int usermem __initdata;
+
+static int __init early_parse_mem(char *p)
+{
+       phys_addr_t start, size;
+
+       if (!p) {
+               pr_err("mem parameter is empty, do nothing\n");
+               return -EINVAL;
+       }
+
+       /*
+        * If a user specifies memory size, we
+        * blow away any automatically generated
+        * size.
+        */
+       if (usermem == 0) {
+               usermem = 1;
+               memblock_remove(memblock_start_of_DRAM(),
+                       memblock_end_of_DRAM() - memblock_start_of_DRAM());
+       }
+       start = 0;
+       size = memparse(p, &p);
+       if (*p == '@')
+               start = memparse(p + 1, &p);
+       else {
+               pr_err("Invalid format!\n");
+               return -EINVAL;
+       }
+
+       if (!IS_ENABLED(CONFIG_NUMA))
+               memblock_add(start, size);
+       else
+               memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
+
+       return 0;
+}
+early_param("mem", early_parse_mem);
+
+void __init platform_init(void)
+{
+       efi_init();
+#ifdef CONFIG_ACPI_TABLE_UPGRADE
+       acpi_table_upgrade();
+#endif
+#ifdef CONFIG_ACPI
+       acpi_gbl_use_default_register_widths = false;
+       acpi_boot_table_init();
+       acpi_boot_init();
+#endif
+
+#ifdef CONFIG_NUMA
+       init_numa_memory();
+#endif
+       dmi_setup();
+       smbios_parse();
+       pr_info("The BIOS Version: %s\n", b_info.bios_version);
+
+       efi_runtime_init();
+}
+
+static void __init check_kernel_sections_mem(void)
+{
+       phys_addr_t start = __pa_symbol(&_text);
+       phys_addr_t size = __pa_symbol(&_end) - start;
+
+       if (!memblock_is_region_memory(start, size)) {
+               pr_info("Kernel sections are not in the memory maps\n");
+               memblock_add(start, size);
+       }
+}
+
+/*
+ * arch_mem_init - initialize memory management subsystem
+ */
+static void __init arch_mem_init(char **cmdline_p)
+{
+       if (usermem)
+               pr_info("User-defined physical RAM map overwrite\n");
+
+       check_kernel_sections_mem();
+
+       /*
+        * To reduce the possibility of a kernel panic when we fail to get
+        * IO TLB memory under CONFIG_SWIOTLB, allocate as little low memory
+        * as possible before plat_swiotlb_setup(), so make sparse_init()
+        * use top-down allocation.
+        */
+       memblock_set_bottom_up(false);
+       sparse_init();
+       memblock_set_bottom_up(true);
+
+       plat_swiotlb_setup();
+
+       dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
+
+       memblock_dump_all();
+
+       early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
+}
+
+static void __init resource_init(void)
+{
+       long i = 0;
+       size_t res_size;
+       struct resource *res;
+       struct memblock_region *region;
+
+       code_resource.start = __pa_symbol(&_text);
+       code_resource.end = __pa_symbol(&_etext) - 1;
+       data_resource.start = __pa_symbol(&_etext);
+       data_resource.end = __pa_symbol(&_edata) - 1;
+       bss_resource.start = __pa_symbol(&__bss_start);
+       bss_resource.end = __pa_symbol(&__bss_stop) - 1;
+
+       num_standard_resources = memblock.memory.cnt;
+       res_size = num_standard_resources * sizeof(*standard_resources);
+       standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
+
+       for_each_mem_region(region) {
+               res = &standard_resources[i++];
+               if (!memblock_is_nomap(region)) {
+                       res->name  = "System RAM";
+                       res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+                       res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
+                       res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+               } else {
+                       res->name  = "Reserved";
+                       res->flags = IORESOURCE_MEM;
+                       res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
+                       res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
+               }
+
+               request_resource(&iomem_resource, res);
+
+               /*
+                * We don't know which RAM region contains the kernel data,
+                * so we try each region and let the resource manager reject
+                * the ones that don't contain it.
+                */
+               request_resource(res, &code_resource);
+               request_resource(res, &data_resource);
+               request_resource(res, &bss_resource);
+       }
+}
+
+static int __init reserve_memblock_reserved_regions(void)
+{
+       u64 i, j;
+
+       for (i = 0; i < num_standard_resources; ++i) {
+               struct resource *mem = &standard_resources[i];
+               phys_addr_t r_start, r_end, mem_size = resource_size(mem);
+
+               if (!memblock_is_region_reserved(mem->start, mem_size))
+                       continue;
+
+               for_each_reserved_mem_range(j, &r_start, &r_end) {
+                       resource_size_t start, end;
+
+                       start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
+                       end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
+
+                       if (start > mem->end || end < mem->start)
+                               continue;
+
+                       reserve_region_with_split(mem, start, end, "Reserved");
+               }
+       }
+
+       return 0;
+}
+arch_initcall(reserve_memblock_reserved_regions);
+
+#ifdef CONFIG_SMP
+static void __init prefill_possible_map(void)
+{
+       int i, possible;
+
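+       /* Cap the possible CPU count at nr_cpu_ids and mark the rest absent. */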
+       possible = num_processors + disabled_cpus;
+       if (possible > nr_cpu_ids)
+               possible = nr_cpu_ids;
+
+       pr_info("SMP: Allowing %d CPUs, %d hotplug CPUs\n",
+                       possible, max((possible - num_processors), 0));
+
+       for (i = 0; i < possible; i++)
+               set_cpu_possible(i, true);
+       for (; i < NR_CPUS; i++)
+               set_cpu_possible(i, false);
+
+       nr_cpu_ids = possible;
+}
+#else
+static inline void prefill_possible_map(void) {}
+#endif
+
+void __init setup_arch(char **cmdline_p)
+{
+       cpu_probe();
+       *cmdline_p = boot_command_line;
+
+       init_environ();
+       memblock_init();
+       parse_early_param();
+
+       platform_init();
+       pagetable_init();
+       arch_mem_init(cmdline_p);
+
+       resource_init();
+       plat_smp_setup();
+       prefill_possible_map();
+
+       paging_init();
+}
diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
new file mode 100644 (file)
index 0000000..7f4889d
--- /dev/null
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1991, 1992  Linus Torvalds
+ * Copyright (C) 1994 - 2000  Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ */
+#include <linux/audit.h>
+#include <linux/cache.h>
+#include <linux/context_tracking.h>
+#include <linux/irqflags.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/personality.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/compiler.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+#include <asm/asm.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-features.h>
+#include <asm/fpu.h>
+#include <asm/ucontext.h>
+#include <asm/vdso.h>
+
+#ifdef DEBUG_SIG
+#  define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
+#else
+#  define DEBUGP(fmt, args...)
+#endif
+
+/* Make sure we will not lose FPU ownership */
+#define lock_fpu_owner()       ({ preempt_disable(); pagefault_disable(); })
+#define unlock_fpu_owner()     ({ pagefault_enable(); preempt_enable(); })
+
+/* Assembly functions to move context to/from the FPU */
+extern asmlinkage int
+_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+extern asmlinkage int
+_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+
+struct rt_sigframe {
+       struct siginfo rs_info;
+       struct ucontext rs_uctx;
+};
+
+struct _ctx_layout {
+       struct sctx_info *addr;
+       unsigned int size;
+};
+
+struct extctx_layout {
+       unsigned long size;
+       unsigned int flags;
+       struct _ctx_layout fpu;
+       struct _ctx_layout end;
+};
+
+static void __user *get_ctx_through_ctxinfo(struct sctx_info *info)
+{
+       return (void __user *)((char *)info + sizeof(struct sctx_info));
+}
+
+/*
+ * Copy the thread's saved context to/from a signal context that is presumed
+ * to be on the user stack, and is therefore accessed with the appropriate
+ * macros from uaccess.h.
+ */
+static int copy_fpu_to_sigcontext(struct fpu_context __user *ctx)
+{
+       int i;
+       int err = 0;
+       uint64_t __user *regs   = (uint64_t *)&ctx->regs;
+       uint64_t __user *fcc    = &ctx->fcc;
+       uint32_t __user *fcsr   = &ctx->fcsr;
+
+       for (i = 0; i < NUM_FPU_REGS; i++) {
+               err |=
+                   __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
+                              &regs[i]);
+       }
+       err |= __put_user(current->thread.fpu.fcc, fcc);
+       err |= __put_user(current->thread.fpu.fcsr, fcsr);
+
+       return err;
+}
+
+static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx)
+{
+       int i;
+       int err = 0;
+       u64 fpr_val;
+       uint64_t __user *regs   = (uint64_t *)&ctx->regs;
+       uint64_t __user *fcc    = &ctx->fcc;
+       uint32_t __user *fcsr   = &ctx->fcsr;
+
+       for (i = 0; i < NUM_FPU_REGS; i++) {
+               err |= __get_user(fpr_val, &regs[i]);
+               set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
+       }
+       err |= __get_user(current->thread.fpu.fcc, fcc);
+       err |= __get_user(current->thread.fpu.fcsr, fcsr);
+
+       return err;
+}
+
+/*
+ * Wrappers for the assembly _{save,restore}_fp_context functions.
+ */
+static int save_hw_fpu_context(struct fpu_context __user *ctx)
+{
+       uint64_t __user *regs   = (uint64_t *)&ctx->regs;
+       uint64_t __user *fcc    = &ctx->fcc;
+       uint32_t __user *fcsr   = &ctx->fcsr;
+
+       return _save_fp_context(regs, fcc, fcsr);
+}
+
+static int restore_hw_fpu_context(struct fpu_context __user *ctx)
+{
+       uint64_t __user *regs   = (uint64_t *)&ctx->regs;
+       uint64_t __user *fcc    = &ctx->fcc;
+       uint32_t __user *fcsr   = &ctx->fcsr;
+
+       return _restore_fp_context(regs, fcc, fcsr);
+}
+
+static int fcsr_pending(unsigned int __user *fcsr)
+{
+       int err, sig = 0;
+       unsigned int csr, enabled;
+
+       err = __get_user(csr, fcsr);
+       enabled = ((csr & FPU_CSR_ALL_E) << 24);
+       /*
+        * If the signal handler set some FPU exceptions, clear them and
+        * send SIGFPE.
+        */
+       if (csr & enabled) {
+               csr &= ~enabled;
+               err |= __put_user(csr, fcsr);
+               sig = SIGFPE;
+       }
+       return err ?: sig;
+}
+
+/*
+ * Helper routines
+ */
+static int protected_save_fpu_context(struct extctx_layout *extctx)
+{
+       int err = 0;
+       struct sctx_info __user *info = extctx->fpu.addr;
+       struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
+       uint64_t __user *regs   = (uint64_t *)&fpu_ctx->regs;
+       uint64_t __user *fcc    = &fpu_ctx->fcc;
+       uint32_t __user *fcsr   = &fpu_ctx->fcsr;
+
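+       /*
+        * If copying the context out faults, touch the user buffer to fault
+        * the pages in and retry; bail out only if even the touch fails.
+        */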
+       while (1) {
+               lock_fpu_owner();
+               if (is_fpu_owner())
+                       err = save_hw_fpu_context(fpu_ctx);
+               else
+                       err = copy_fpu_to_sigcontext(fpu_ctx);
+               unlock_fpu_owner();
+
+               err |= __put_user(FPU_CTX_MAGIC, &info->magic);
+               err |= __put_user(extctx->fpu.size, &info->size);
+
+               if (likely(!err))
+                       break;
+               /* Touch the FPU context and try again */
+               err = __put_user(0, &regs[0]) |
+                       __put_user(0, &regs[31]) |
+                       __put_user(0, fcc) |
+                       __put_user(0, fcsr);
+               if (err)
+                       return err;     /* really bad sigcontext */
+       }
+
+       return err;
+}
+
+static int protected_restore_fpu_context(struct extctx_layout *extctx)
+{
+       int err = 0, sig = 0, tmp __maybe_unused;
+       struct sctx_info __user *info = extctx->fpu.addr;
+       struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
+       uint64_t __user *regs   = (uint64_t *)&fpu_ctx->regs;
+       uint64_t __user *fcc    = &fpu_ctx->fcc;
+       uint32_t __user *fcsr   = &fpu_ctx->fcsr;
+
+       err = sig = fcsr_pending(fcsr);
+       if (err < 0)
+               return err;
+
+       while (1) {
+               lock_fpu_owner();
+               if (is_fpu_owner())
+                       err = restore_hw_fpu_context(fpu_ctx);
+               else
+                       err = copy_fpu_from_sigcontext(fpu_ctx);
+               unlock_fpu_owner();
+
+               if (likely(!err))
+                       break;
+               /* Touch the FPU context and try again */
+               err = __get_user(tmp, &regs[0]) |
+                       __get_user(tmp, &regs[31]) |
+                       __get_user(tmp, fcc) |
+                       __get_user(tmp, fcsr);
+               if (err)
+                       break;  /* really bad sigcontext */
+       }
+
+       return err ?: sig;
+}
+
+static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+                           struct extctx_layout *extctx)
+{
+       int i, err = 0;
+       struct sctx_info __user *info;
+
+       err |= __put_user(regs->csr_era, &sc->sc_pc);
+       err |= __put_user(extctx->flags, &sc->sc_flags);
+
+       err |= __put_user(0, &sc->sc_regs[0]);
+       for (i = 1; i < 32; i++)
+               err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
+       if (extctx->fpu.addr)
+               err |= protected_save_fpu_context(extctx);
+
+       /* Set the "end" magic */
+       info = (struct sctx_info *)extctx->end.addr;
+       err |= __put_user(0, &info->magic);
+       err |= __put_user(0, &info->size);
+
+       return err;
+}
+
+static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *extctx)
+{
+       int err = 0;
+       unsigned int magic, size;
+       struct sctx_info __user *info = (struct sctx_info __user *)&sc->sc_extcontext;
+
+       while (1) {
+               err |= __get_user(magic, &info->magic);
+               err |= __get_user(size, &info->size);
+               if (err)
+                       return err;
+
+               switch (magic) {
+               case 0: /* END */
+                       goto done;
+
+               case FPU_CTX_MAGIC:
+                       if (size < (sizeof(struct sctx_info) +
+                                   sizeof(struct fpu_context)))
+                               goto invalid;
+                       extctx->fpu.addr = info;
+                       break;
+
+               default:
+                       goto invalid;
+               }
+
+               info = (struct sctx_info *)((char *)info + size);
+       }
+
+done:
+       return 0;
+
+invalid:
+       return -EINVAL;
+}
+
+static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+       int i, err = 0;
+       struct extctx_layout extctx;
+
+       memset(&extctx, 0, sizeof(struct extctx_layout));
+
+       err = __get_user(extctx.flags, &sc->sc_flags);
+       if (err)
+               goto bad;
+
+       err = parse_extcontext(sc, &extctx);
+       if (err)
+               goto bad;
+
+       conditional_used_math(extctx.flags & SC_USED_FP);
+
+       /*
+        * The signal handler may have used FPU; give it up if the program
+        * doesn't want it following sigreturn.
+        */
+       if (!(extctx.flags & SC_USED_FP))
+               lose_fpu(0);
+
+       /* Always make any pending restarted system calls return -EINTR */
+       current->restart_block.fn = do_no_restart_syscall;
+
+       err |= __get_user(regs->csr_era, &sc->sc_pc);
+       for (i = 1; i < 32; i++)
+               err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
+
+       if (extctx.fpu.addr)
+               err |= protected_restore_fpu_context(&extctx);
+
+bad:
+       return err;
+}
+
+static unsigned int handle_flags(void)
+{
+       unsigned int flags = 0;
+
+       flags = used_math() ? SC_USED_FP : 0;
+
+       switch (current->thread.error_code) {
+       case 1:
+               flags |= SC_ADDRERR_RD;
+               break;
+       case 2:
+               flags |= SC_ADDRERR_WR;
+               break;
+       }
+
+       return flags;
+}
+
+static unsigned long extframe_alloc(struct extctx_layout *extctx,
+                                   struct _ctx_layout *layout,
+                                   size_t size, unsigned int align, unsigned long base)
+{
+       unsigned long new_base = base - size;
+
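+       /*
+        * Align the payload to at least 16 bytes and reserve room for the
+        * struct sctx_info header in front of it.
+        */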
+       new_base = round_down(new_base, (align < 16 ? 16 : align));
+       new_base -= sizeof(struct sctx_info);
+
+       layout->addr = (void *)new_base;
+       layout->size = (unsigned int)(base - new_base);
+       extctx->size += layout->size;
+
+       return new_base;
+}
+
+static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned long sp)
+{
+       unsigned long new_sp = sp;
+
+       memset(extctx, 0, sizeof(struct extctx_layout));
+
+       extctx->flags = handle_flags();
+
+       /* Grow down, alloc "end" context info first. */
+       new_sp -= sizeof(struct sctx_info);
+       extctx->end.addr = (void *)new_sp;
+       extctx->end.size = (unsigned int)sizeof(struct sctx_info);
+       extctx->size += extctx->end.size;
+
+       if (extctx->flags & SC_USED_FP) {
+               if (cpu_has_fpu)
+                       new_sp = extframe_alloc(extctx, &extctx->fpu,
+                         sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
+       }
+
+       return new_sp;
+}
+
+void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+                         struct extctx_layout *extctx)
+{
+       unsigned long sp;
+
+       /* Default to using normal stack */
+       sp = regs->regs[3];
+
+       /*
+        * If we are on the alternate signal stack and would overflow it, don't.
+        * Return an always-bogus address instead so we will die with SIGSEGV.
+        */
+       if (on_sig_stack(sp) &&
+           !likely(on_sig_stack(sp - sizeof(struct rt_sigframe))))
+               return (void __user __force *)(-1UL);
+
+       sp = sigsp(sp, ksig);
+       sp = round_down(sp, 16);
+       sp = setup_extcontext(extctx, sp);
+       sp -= sizeof(struct rt_sigframe);
+
+       if (!IS_ALIGNED(sp, 16))
+               BUG();
+
+       return (void __user *)sp;
+}
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+
+asmlinkage long sys_rt_sigreturn(void)
+{
+       int sig;
+       sigset_t set;
+       struct pt_regs *regs;
+       struct rt_sigframe __user *frame;
+
+       regs = current_pt_regs();
+       frame = (struct rt_sigframe __user *)regs->regs[3];
+       if (!access_ok(frame, sizeof(*frame)))
+               goto badframe;
+       if (__copy_from_user(&set, &frame->rs_uctx.uc_sigmask, sizeof(set)))
+               goto badframe;
+
+       set_current_blocked(&set);
+
+       sig = restore_sigcontext(regs, &frame->rs_uctx.uc_mcontext);
+       if (sig < 0)
+               goto badframe;
+       else if (sig)
+               force_sig(sig);
+
+       regs->regs[0] = 0; /* No syscall restarting */
+       if (restore_altstack(&frame->rs_uctx.uc_stack))
+               goto badframe;
+
+       return regs->regs[4];
+
+badframe:
+       force_sig(SIGSEGV);
+       return 0;
+}
+
+static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
+                         struct pt_regs *regs, sigset_t *set)
+{
+       int err = 0;
+       struct extctx_layout extctx;
+       struct rt_sigframe __user *frame;
+
+       frame = get_sigframe(ksig, regs, &extctx);
+       if (!access_ok(frame, sizeof(*frame) + extctx.size))
+               return -EFAULT;
+
+       /* Create siginfo.  */
+       err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);
+
+       /* Create the ucontext.  */
+       err |= __put_user(0, &frame->rs_uctx.uc_flags);
+       err |= __put_user(NULL, &frame->rs_uctx.uc_link);
+       err |= __save_altstack(&frame->rs_uctx.uc_stack, regs->regs[3]);
+       err |= setup_sigcontext(regs, &frame->rs_uctx.uc_mcontext, &extctx);
+       err |= __copy_to_user(&frame->rs_uctx.uc_sigmask, set, sizeof(*set));
+
+       if (err)
+               return -EFAULT;
+
+       /*
+        * Arguments to signal handler:
+        *
+        *   a0 = signal number
+        *   a1 = pointer to siginfo
+        *   a2 = pointer to ucontext
+        *
+        * csr_era points to the signal handler, $r3 (sp) points to
+        * the struct rt_sigframe.
+        */
+       regs->regs[4] = ksig->sig;
+       regs->regs[5] = (unsigned long) &frame->rs_info;
+       regs->regs[6] = (unsigned long) &frame->rs_uctx;
+       regs->regs[3] = (unsigned long) frame;
+       regs->regs[1] = (unsigned long) sig_return;
+       regs->csr_era = (unsigned long) ksig->ka.sa.sa_handler;
+
+       DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+              current->comm, current->pid,
+              frame, regs->csr_era, regs->regs[1]);
+
+       return 0;
+}
+
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+       int ret;
+       sigset_t *oldset = sigmask_to_save();
+       void *vdso = current->mm->context.vdso;
+
+       /* Are we from a system call? */
+       if (regs->regs[0]) {
+               switch (regs->regs[4]) {
+               case -ERESTART_RESTARTBLOCK:
+               case -ERESTARTNOHAND:
+                       regs->regs[4] = -EINTR;
+                       break;
+               case -ERESTARTSYS:
+                       if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+                               regs->regs[4] = -EINTR;
+                               break;
+                       }
+                       fallthrough;
+               case -ERESTARTNOINTR:
+                       regs->regs[4] = regs->orig_a0;
+                       regs->csr_era -= 4;
+               }
+
+               regs->regs[0] = 0;      /* Don't deal with this again.  */
+       }
+
+       rseq_signal_deliver(ksig, regs);
+
+       ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset);
+
+       signal_setup_done(ret, ksig, 0);
+}
+
+void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
+{
+       struct ksignal ksig;
+
+       if (has_signal && get_signal(&ksig)) {
+               /* Whee!  Actually deliver the signal.  */
+               handle_signal(&ksig, regs);
+               return;
+       }
+
+       /* Are we from a system call? */
+       if (regs->regs[0]) {
+               switch (regs->regs[4]) {
+               case -ERESTARTNOHAND:
+               case -ERESTARTSYS:
+               case -ERESTARTNOINTR:
+                       regs->regs[4] = regs->orig_a0;
+                       regs->csr_era -= 4;
+                       break;
+
+               case -ERESTART_RESTARTBLOCK:
+                       regs->regs[4] = regs->orig_a0;
+                       regs->regs[11] = __NR_restart_syscall;
+                       regs->csr_era -= 4;
+                       break;
+               }
+               regs->regs[0] = 0;      /* Don't deal with this again.  */
+       }
+
+       /*
+        * If there's no signal to deliver, we just put the saved sigmask
+        * back.
+        */
+       restore_saved_sigmask();
+}
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
new file mode 100644 (file)
index 0000000..b8c53b7
--- /dev/null
@@ -0,0 +1,751 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 2000, 2001 Kanoj Sarcar
+ * Copyright (C) 2000, 2001 Ralf Baechle
+ * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
+ * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
+ */
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/export.h>
+#include <linux/time.h>
+#include <linux/tracepoint.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/cpu.h>
+#include <asm/idle.h>
+#include <asm/loongson.h>
+#include <asm/mmu_context.h>
+#include <asm/numa.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/time.h>
+
+int __cpu_number_map[NR_CPUS];   /* Map physical to logical */
+EXPORT_SYMBOL(__cpu_number_map);
+
+int __cpu_logical_map[NR_CPUS];                /* Map logical to physical */
+EXPORT_SYMBOL(__cpu_logical_map);
+
+/* Number of threads (siblings) per CPU core */
+int smp_num_siblings = 1;
+EXPORT_SYMBOL(smp_num_siblings);
+
+/* Representing the threads (siblings) of each logical CPU */
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_sibling_map);
+
+/* Representing the core map of multi-core chips of each logical CPU */
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_core_map);
+
+static DECLARE_COMPLETION(cpu_starting);
+static DECLARE_COMPLETION(cpu_running);
+
+/*
+ * A logical CPU mask containing only one VPE per core to
+ * reduce the number of IPIs on large MT systems.
+ */
+cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_foreign_map);
+
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
+/* representing cpus for which core maps can be computed */
+static cpumask_t cpu_core_setup_map;
+
+struct secondary_data cpuboot_data;
+static DEFINE_PER_CPU(int, cpu_state);
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
+
+enum ipi_msg_type {
+       IPI_RESCHEDULE,
+       IPI_CALL_FUNCTION,
+};
+
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
+       [IPI_RESCHEDULE] = "Rescheduling interrupts",
+       [IPI_CALL_FUNCTION] = "Function call interrupts",
+};
+
+void show_ipi_list(struct seq_file *p, int prec)
+{
+       unsigned int cpu, i;
+
+       for (i = 0; i < NR_IPI; i++) {
+               seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
+               for_each_online_cpu(cpu)
+                       seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
+               seq_printf(p, " LoongArch  %d  %s\n", i + 1, ipi_types[i]);
+       }
+}
+
+/* Send mailbox buffer via Mail_Send */
+static void csr_mail_send(uint64_t data, int cpu, int mailbox)
+{
+       uint64_t val;
+
+       /* Send high 32 bits */
+       val = IOCSR_MBUF_SEND_BLOCKING;
+       val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
+       val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
+       val |= (data & IOCSR_MBUF_SEND_H32_MASK);
+       iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
+
+       /* Send low 32 bits */
+       val = IOCSR_MBUF_SEND_BLOCKING;
+       val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
+       val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
+       val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
+       iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
+};
+
+static u32 ipi_read_clear(int cpu)
+{
+       u32 action;
+
+       /* Load the ipi register to figure out what we're supposed to do */
+       action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
+       /* Clear the ipi register to clear the interrupt */
+       iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);
+       smp_mb();
+
+       return action;
+}
+
+static void ipi_write_action(int cpu, u32 action)
+{
+       unsigned int irq = 0;
+
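+       /* Send one IPI per bit set in action, clearing each bit as we go. */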
+       while ((irq = ffs(action))) {
+               uint32_t val = IOCSR_IPI_SEND_BLOCKING;
+
+               val |= (irq - 1);
+               val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
+               iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
+               action &= ~BIT(irq - 1);
+       }
+}
+
+void loongson3_send_ipi_single(int cpu, unsigned int action)
+{
+       ipi_write_action(cpu_logical_map(cpu), (u32)action);
+}
+
+void loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+       unsigned int i;
+
+       for_each_cpu(i, mask)
+               ipi_write_action(cpu_logical_map(i), (u32)action);
+}
+
+irqreturn_t loongson3_ipi_interrupt(int irq, void *dev)
+{
+       unsigned int action;
+       unsigned int cpu = smp_processor_id();
+
+       action = ipi_read_clear(cpu_logical_map(cpu));
+
+       if (action & SMP_RESCHEDULE) {
+               scheduler_ipi();
+               per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
+       }
+
+       if (action & SMP_CALL_FUNCTION) {
+               generic_smp_call_function_interrupt();
+               per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
+       }
+
+       return IRQ_HANDLED;
+}
+
+void __init loongson3_smp_setup(void)
+{
+       cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
+       cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
+
+       iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
+       pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
+}
+
+void __init loongson3_prepare_cpus(unsigned int max_cpus)
+{
+       int i = 0;
+
+       for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
+               set_cpu_present(i, true);
+               csr_mail_send(0, __cpu_logical_map[i], 0);
+       }
+
+       per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+}
+
+/*
+ * Set up the PC, SP, and TP of a secondary processor and start it running!
+ */
+void loongson3_boot_secondary(int cpu, struct task_struct *idle)
+{
+       unsigned long entry;
+
+       pr_info("Booting CPU#%d...\n", cpu);
+
+       entry = __pa_symbol((unsigned long)&smpboot_entry);
+       cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
+       cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);
+
+       csr_mail_send(entry, cpu_logical_map(cpu), 0);
+
+       loongson3_send_ipi_single(cpu, SMP_BOOT_CPU);
+}
+
+/*
+ * SMP init and finish on secondary CPUs
+ */
+void loongson3_init_secondary(void)
+{
+       unsigned int cpu = smp_processor_id();
+       unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
+                            ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER;
+
+       change_csr_ecfg(ECFG0_IM, imask);
+
+       iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
+
+#ifdef CONFIG_NUMA
+       numa_add_cpu(cpu);
+#endif
+       per_cpu(cpu_state, cpu) = CPU_ONLINE;
+       cpu_data[cpu].core =
+                    cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
+       cpu_data[cpu].package =
+                    cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
+}
+
+void loongson3_smp_finish(void)
+{
+       local_irq_enable();
+       iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
+       pr_info("CPU#%d finished\n", smp_processor_id());
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static bool io_master(int cpu)
+{
+       if (cpu == 0)
+               return true;
+
+       return false;
+}
+
+int loongson3_cpu_disable(void)
+{
+       unsigned long flags;
+       unsigned int cpu = smp_processor_id();
+
+       if (io_master(cpu))
+               return -EBUSY;
+
+#ifdef CONFIG_NUMA
+       numa_remove_cpu(cpu);
+#endif
+       set_cpu_online(cpu, false);
+       calculate_cpu_foreign_map();
+       local_irq_save(flags);
+       irq_migrate_all_off_this_cpu();
+       clear_csr_ecfg(ECFG0_IM);
+       local_irq_restore(flags);
+       local_flush_tlb_all();
+
+       return 0;
+}
+
+void loongson3_cpu_die(unsigned int cpu)
+{
+       while (per_cpu(cpu_state, cpu) != CPU_DEAD)
+               cpu_relax();
+
+       mb();
+}
+
+/*
+ * The target CPU should go to XKPRANGE (uncached area) and flush
+ * ICache/DCache/VCache before the control CPU can safely disable its clock.
+ */
+static void loongson3_play_dead(int *state_addr)
+{
+       register int val;
+       register void *addr;
+       register void (*init_fn)(void);
+
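+       /*
+        * Flush the I/D/V caches set by set, mark this CPU as CPU_DEAD, then
+        * wait in idle for an IPI and jump to the entry address the boot CPU
+        * leaves in the mailbox.
+        */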
+       __asm__ __volatile__(
+               "   li.d %[addr], 0x8000000000000000\n"
+               "1: cacop 0x8, %[addr], 0           \n" /* flush ICache */
+               "   cacop 0x8, %[addr], 1           \n"
+               "   cacop 0x8, %[addr], 2           \n"
+               "   cacop 0x8, %[addr], 3           \n"
+               "   cacop 0x9, %[addr], 0           \n" /* flush DCache */
+               "   cacop 0x9, %[addr], 1           \n"
+               "   cacop 0x9, %[addr], 2           \n"
+               "   cacop 0x9, %[addr], 3           \n"
+               "   addi.w %[sets], %[sets], -1     \n"
+               "   addi.d %[addr], %[addr], 0x40   \n"
+               "   bnez %[sets], 1b                \n"
+               "   li.d %[addr], 0x8000000000000000\n"
+               "2: cacop 0xa, %[addr], 0           \n" /* flush VCache */
+               "   cacop 0xa, %[addr], 1           \n"
+               "   cacop 0xa, %[addr], 2           \n"
+               "   cacop 0xa, %[addr], 3           \n"
+               "   cacop 0xa, %[addr], 4           \n"
+               "   cacop 0xa, %[addr], 5           \n"
+               "   cacop 0xa, %[addr], 6           \n"
+               "   cacop 0xa, %[addr], 7           \n"
+               "   cacop 0xa, %[addr], 8           \n"
+               "   cacop 0xa, %[addr], 9           \n"
+               "   cacop 0xa, %[addr], 10          \n"
+               "   cacop 0xa, %[addr], 11          \n"
+               "   cacop 0xa, %[addr], 12          \n"
+               "   cacop 0xa, %[addr], 13          \n"
+               "   cacop 0xa, %[addr], 14          \n"
+               "   cacop 0xa, %[addr], 15          \n"
+               "   addi.w %[vsets], %[vsets], -1   \n"
+               "   addi.d %[addr], %[addr], 0x40   \n"
+               "   bnez   %[vsets], 2b             \n"
+               "   li.w   %[val], 0x7              \n" /* *state_addr = CPU_DEAD; */
+               "   st.w   %[val], %[state_addr], 0 \n"
+               "   dbar 0                          \n"
+               "   cacop 0x11, %[state_addr], 0    \n" /* flush entry of *state_addr */
+               : [addr] "=&r" (addr), [val] "=&r" (val)
+               : [state_addr] "r" (state_addr),
+                 [sets] "r" (cpu_data[smp_processor_id()].dcache.sets),
+                 [vsets] "r" (cpu_data[smp_processor_id()].vcache.sets));
+
+       local_irq_enable();
+       change_csr_ecfg(ECFG0_IM, ECFGF_IPI);
+
+       __asm__ __volatile__(
+               "   idle      0                     \n"
+               "   li.w      $t0, 0x1020           \n"
+               "   iocsrrd.d %[init_fn], $t0       \n" /* Get init PC */
+               : [init_fn] "=&r" (addr)
+               : /* No Input */
+               : "a0");
+       init_fn = __va(addr);
+
+       init_fn();
+       unreachable();
+}
+
+void play_dead(void)
+{
+       int *state_addr;
+       unsigned int cpu = smp_processor_id();
+       void (*play_dead_uncached)(int *s);
+
+       idle_task_exit();
+       play_dead_uncached = (void *)TO_UNCACHE(__pa((unsigned long)loongson3_play_dead));
+       state_addr = &per_cpu(cpu_state, cpu);
+       mb();
+       play_dead_uncached(state_addr);
+}
+
+static int loongson3_enable_clock(unsigned int cpu)
+{
+       uint64_t core_id = cpu_data[cpu].core;
+       uint64_t package_id = cpu_data[cpu].package;
+
+       LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3);
+
+       return 0;
+}
+
+static int loongson3_disable_clock(unsigned int cpu)
+{
+       uint64_t core_id = cpu_data[cpu].core;
+       uint64_t package_id = cpu_data[cpu].package;
+
+       LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3));
+
+       return 0;
+}
+
+static int register_loongson3_notifier(void)
+{
+       return cpuhp_setup_state_nocalls(CPUHP_LOONGARCH_SOC_PREPARE,
+                                        "loongarch/loongson:prepare",
+                                        loongson3_enable_clock,
+                                        loongson3_disable_clock);
+}
+early_initcall(register_loongson3_notifier);
+
+#endif
+
+/*
+ * Power management
+ */
+#ifdef CONFIG_PM
+
+static int loongson3_ipi_suspend(void)
+{
+       return 0;
+}
+
+static void loongson3_ipi_resume(void)
+{
+       iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
+}
+
+static struct syscore_ops loongson3_ipi_syscore_ops = {
+       .resume         = loongson3_ipi_resume,
+       .suspend        = loongson3_ipi_suspend,
+};
+
+/*
+ * Enable the boot CPU's IPI before enabling non-boot CPUs
+ * during syscore_resume.
+ */
+static int __init ipi_pm_init(void)
+{
+       register_syscore_ops(&loongson3_ipi_syscore_ops);
+       return 0;
+}
+
+core_initcall(ipi_pm_init);
+#endif
+
+static inline void set_cpu_sibling_map(int cpu)
+{
+       int i;
+
+       cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
+
+       if (smp_num_siblings <= 1)
+               cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
+       else {
+               for_each_cpu(i, &cpu_sibling_setup_map) {
+                       if (cpus_are_siblings(cpu, i)) {
+                               cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+                               cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
+                       }
+               }
+       }
+}
+
+static inline void set_cpu_core_map(int cpu)
+{
+       int i;
+
+       cpumask_set_cpu(cpu, &cpu_core_setup_map);
+
+       for_each_cpu(i, &cpu_core_setup_map) {
+               if (cpu_data[cpu].package == cpu_data[i].package) {
+                       cpumask_set_cpu(i, &cpu_core_map[cpu]);
+                       cpumask_set_cpu(cpu, &cpu_core_map[i]);
+               }
+       }
+}
+
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+void calculate_cpu_foreign_map(void)
+{
+       int i, k, core_present;
+       cpumask_t temp_foreign_map;
+
+       /* Re-calculate the mask */
+       cpumask_clear(&temp_foreign_map);
+       for_each_online_cpu(i) {
+               core_present = 0;
+               for_each_cpu(k, &temp_foreign_map)
+                       if (cpus_are_siblings(i, k))
+                               core_present = 1;
+               if (!core_present)
+                       cpumask_set_cpu(i, &temp_foreign_map);
+       }
+
+       for_each_online_cpu(i)
+               cpumask_andnot(&cpu_foreign_map[i],
+                              &temp_foreign_map, &cpu_sibling_map[i]);
+}
+
+/* Preload SMP state for boot cpu */
+void smp_prepare_boot_cpu(void)
+{
+       unsigned int cpu, node, rr_node;
+
+       set_cpu_possible(0, true);
+       set_cpu_online(0, true);
+       set_my_cpu_offset(per_cpu_offset(0));
+
+       rr_node = first_node(node_online_map);
+       for_each_possible_cpu(cpu) {
+               node = early_cpu_to_node(cpu);
+
+               /*
+                * The mapping between present cpus and nodes has been
+                * built during MADT and SRAT parsing.
+                *
+                * If possible cpus = present cpus here, early_cpu_to_node
+                * will return a valid node.
+                *
+                * If possible cpus > present cpus here (e.g. some possible
+                * cpus will be added by cpu-hotplug later), early_cpu_to_node
+                * returns NUMA_NO_NODE for the possible but not yet present
+                * cpus, and we just map them to online nodes in a round-robin
+                * way. Once hotplugged, the correct mapping is built for them.
+                */
+               if (node != NUMA_NO_NODE)
+                       set_cpu_numa_node(cpu, node);
+               else {
+                       set_cpu_numa_node(cpu, rr_node);
+                       rr_node = next_node_in(rr_node, node_online_map);
+               }
+       }
+}
+
+/* called from main before smp_init() */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+       init_new_context(current, &init_mm);
+       current_thread_info()->cpu = 0;
+       loongson3_prepare_cpus(max_cpus);
+       set_cpu_sibling_map(0);
+       set_cpu_core_map(0);
+       calculate_cpu_foreign_map();
+#ifndef CONFIG_HOTPLUG_CPU
+       init_cpu_present(cpu_possible_mask);
+#endif
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+       loongson3_boot_secondary(cpu, tidle);
+
+       /* Wait for CPU to start and be ready to sync counters */
+       if (!wait_for_completion_timeout(&cpu_starting,
+                                        msecs_to_jiffies(5000))) {
+               pr_crit("CPU%u: failed to start\n", cpu);
+               return -EIO;
+       }
+
+       /* Wait for CPU to finish startup & mark itself online before return */
+       wait_for_completion(&cpu_running);
+
+       return 0;
+}
+
+/*
+ * First C code run on the secondary CPUs after being started up by
+ * the master.
+ */
+asmlinkage void start_secondary(void)
+{
+       unsigned int cpu;
+
+       sync_counter();
+       cpu = smp_processor_id();
+       set_my_cpu_offset(per_cpu_offset(cpu));
+
+       cpu_probe();
+       constant_clockevent_init();
+       loongson3_init_secondary();
+
+       set_cpu_sibling_map(cpu);
+       set_cpu_core_map(cpu);
+
+       notify_cpu_starting(cpu);
+
+       /* Notify boot CPU that we're starting */
+       complete(&cpu_starting);
+
+       /* The CPU is running, now mark it online */
+       set_cpu_online(cpu, true);
+
+       calculate_cpu_foreign_map();
+
+       /*
+        * Notify boot CPU that we're up & online and it can safely return
+        * from __cpu_up()
+        */
+       complete(&cpu_running);
+
+       /*
+        * IRQs will be enabled in loongson3_smp_finish(); enabling them too
+        * early is dangerous.
+        */
+       WARN_ON_ONCE(!irqs_disabled());
+       loongson3_smp_finish();
+
+       cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+static void stop_this_cpu(void *dummy)
+{
+       set_cpu_online(smp_processor_id(), false);
+       calculate_cpu_foreign_map();
+       local_irq_disable();
+       while (true);
+}
+
+void smp_send_stop(void)
+{
+       smp_call_function(stop_this_cpu, NULL, 0);
+}
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+       return 0;
+}
+
+static void flush_tlb_all_ipi(void *info)
+{
+       local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+       on_each_cpu(flush_tlb_all_ipi, NULL, 1);
+}
+
+static void flush_tlb_mm_ipi(void *mm)
+{
+       local_flush_tlb_mm((struct mm_struct *)mm);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+       if (atomic_read(&mm->mm_users) == 0)
+               return;         /* happens as a result of exit_mmap() */
+
+       preempt_disable();
+
+       if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+               on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1);
+       } else {
+               unsigned int cpu;
+
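+               /*
+                * The current task is the only user of this mm: rather than
+                * sending IPIs, invalidate the mm's context on all other CPUs
+                * so that a fresh ASID (hiding any stale TLB entries) gets
+                * allocated the next time the mm runs there, and just flush
+                * the local TLB.
+                */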
+               for_each_online_cpu(cpu) {
+                       if (cpu != smp_processor_id() && cpu_context(cpu, mm))
+                               cpu_context(cpu, mm) = 0;
+               }
+               local_flush_tlb_mm(mm);
+       }
+
+       preempt_enable();
+}
+
+struct flush_tlb_data {
+       struct vm_area_struct *vma;
+       unsigned long addr1;
+       unsigned long addr2;
+};
+
+static void flush_tlb_range_ipi(void *info)
+{
+       struct flush_tlb_data *fd = info;
+
+       local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+       struct mm_struct *mm = vma->vm_mm;
+
+       preempt_disable();
+       if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+               struct flush_tlb_data fd = {
+                       .vma = vma,
+                       .addr1 = start,
+                       .addr2 = end,
+               };
+
+               on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
+       } else {
+               unsigned int cpu;
+
+               for_each_online_cpu(cpu) {
+                       if (cpu != smp_processor_id() && cpu_context(cpu, mm))
+                               cpu_context(cpu, mm) = 0;
+               }
+               local_flush_tlb_range(vma, start, end);
+       }
+       preempt_enable();
+}
+
+static void flush_tlb_kernel_range_ipi(void *info)
+{
+       struct flush_tlb_data *fd = info;
+
+       local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       struct flush_tlb_data fd = {
+               .addr1 = start,
+               .addr2 = end,
+       };
+
+       on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
+}
+
+static void flush_tlb_page_ipi(void *info)
+{
+       struct flush_tlb_data *fd = info;
+
+       local_flush_tlb_page(fd->vma, fd->addr1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+       preempt_disable();
+       if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
+               struct flush_tlb_data fd = {
+                       .vma = vma,
+                       .addr1 = page,
+               };
+
+               on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1);
+       } else {
+               unsigned int cpu;
+
+               for_each_online_cpu(cpu) {
+                       if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
+                               cpu_context(cpu, vma->vm_mm) = 0;
+               }
+               local_flush_tlb_page(vma, page);
+       }
+       preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_page);
+
+static void flush_tlb_one_ipi(void *info)
+{
+       unsigned long vaddr = (unsigned long) info;
+
+       local_flush_tlb_one(vaddr);
+}
+
+void flush_tlb_one(unsigned long vaddr)
+{
+       on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
+}
+EXPORT_SYMBOL(flush_tlb_one);
diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S
new file mode 100644 (file)
index 0000000..53e2fa8
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/asm-offsets.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+/*
+ * task_struct *__switch_to(task_struct *prev, task_struct *next,
+ *                         struct thread_info *next_ti)
+ */
+       .align  5
+SYM_FUNC_START(__switch_to)
+       csrrd   t1, LOONGARCH_CSR_PRMD
+       stptr.d t1, a0, THREAD_CSRPRMD
+
+       cpu_save_nonscratch a0
+       stptr.d ra, a0, THREAD_REG01
+       move    tp, a2
+       cpu_restore_nonscratch a1
+
+       li.w    t0, _THREAD_SIZE - 32
+       PTR_ADD t0, t0, tp
+       set_saved_sp    t0, t1, t2
+
+       ldptr.d t1, a1, THREAD_CSRPRMD
+       csrwr   t1, LOONGARCH_CSR_PRMD
+
+       jr      ra
+SYM_FUNC_END(__switch_to)
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
new file mode 100644 (file)
index 0000000..3fc4211
--- /dev/null
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ *         Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/capability.h>
+#include <linux/entry-common.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/syscalls.h>
+#include <linux/unistd.h>
+
+#include <asm/asm.h>
+#include <asm/signal.h>
+#include <asm/switch_to.h>
+#include <asm-generic/syscalls.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call)    [nr] = (call),
+
+SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
+               prot, unsigned long, flags, unsigned long, fd, off_t, offset)
+{
+       if (offset & ~PAGE_MASK)
+               return -EINVAL;
+
+       return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+}
+
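+/*
+ * Every entry defaults to sys_ni_syscall; including <asm/unistd.h> here,
+ * with __SYSCALL() redefined above as a designated initializer, fills in
+ * the implemented system calls.
+ */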
+void *sys_call_table[__NR_syscalls] = {
+       [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
+
+typedef long (*sys_call_fn)(unsigned long, unsigned long,
+       unsigned long, unsigned long, unsigned long, unsigned long);
+
+void noinstr do_syscall(struct pt_regs *regs)
+{
+       unsigned long nr;
+       sys_call_fn syscall_fn;
+
+       nr = regs->regs[11];
+       /* Set for syscall restarting */
+       if (nr < NR_syscalls)
+               regs->regs[0] = nr + 1;
+
+       regs->csr_era += 4;
+       regs->orig_a0 = regs->regs[4];
+       regs->regs[4] = -ENOSYS;
+
+       nr = syscall_enter_from_user_mode(regs, nr);
+
+       if (nr < NR_syscalls) {
+               syscall_fn = sys_call_table[nr];
+               regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
+                                          regs->regs[7], regs->regs[8], regs->regs[9]);
+       }
+
+       syscall_exit_to_user_mode(regs);
+}
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
new file mode 100644 (file)
index 0000000..fe68238
--- /dev/null
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common time service routines for LoongArch machines.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/sched_clock.h>
+#include <linux/spinlock.h>
+
+#include <asm/cpu-features.h>
+#include <asm/loongarch.h>
+#include <asm/time.h>
+
+u64 cpu_clock_freq;
+EXPORT_SYMBOL(cpu_clock_freq);
+u64 const_clock_freq;
+EXPORT_SYMBOL(const_clock_freq);
+
+static DEFINE_RAW_SPINLOCK(state_lock);
+static DEFINE_PER_CPU(struct clock_event_device, constant_clockevent_device);
+
+static void constant_event_handler(struct clock_event_device *dev)
+{
+}
+
+irqreturn_t constant_timer_interrupt(int irq, void *data)
+{
+       int cpu = smp_processor_id();
+       struct clock_event_device *cd;
+
+       /* Clear Timer Interrupt */
+       write_csr_tintclear(CSR_TINTCLR_TI);
+       cd = &per_cpu(constant_clockevent_device, cpu);
+       cd->event_handler(cd);
+
+       return IRQ_HANDLED;
+}
+
+static int constant_set_state_oneshot(struct clock_event_device *evt)
+{
+       unsigned long timer_config;
+
+       raw_spin_lock(&state_lock);
+
+       timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+       timer_config |= CSR_TCFG_EN;
+       timer_config &= ~CSR_TCFG_PERIOD;
+       csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+       raw_spin_unlock(&state_lock);
+
+       return 0;
+}
+
+static int constant_set_state_oneshot_stopped(struct clock_event_device *evt)
+{
+       unsigned long timer_config;
+
+       raw_spin_lock(&state_lock);
+
+       timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+       timer_config &= ~CSR_TCFG_EN;
+       csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+       raw_spin_unlock(&state_lock);
+
+       return 0;
+}
+
+static int constant_set_state_periodic(struct clock_event_device *evt)
+{
+       unsigned long period;
+       unsigned long timer_config;
+
+       raw_spin_lock(&state_lock);
+
+       period = const_clock_freq / HZ;
+       timer_config = period & CSR_TCFG_VAL;
+       timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN);
+       csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+       raw_spin_unlock(&state_lock);
+
+       return 0;
+}
+
+static int constant_set_state_shutdown(struct clock_event_device *evt)
+{
+       return 0;
+}
+
+static int constant_timer_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+       unsigned long timer_config;
+
+       delta &= CSR_TCFG_VAL;
+       timer_config = delta | CSR_TCFG_EN;
+       csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+       return 0;
+}
+
+static unsigned long __init get_loops_per_jiffy(void)
+{
+       unsigned long lpj = (unsigned long)const_clock_freq;
+
+       do_div(lpj, HZ);
+
+       return lpj;
+}
+
+static long init_timeval;
+
+void sync_counter(void)
+{
+       /* Ensure the counter begins at 0 */
+       csr_write64(-init_timeval, LOONGARCH_CSR_CNTC);
+}
+
+int constant_clockevent_init(void)
+{
+       unsigned int irq;
+       unsigned int cpu = smp_processor_id();
+       unsigned long min_delta = 0x600;
+       unsigned long max_delta = (1UL << 48) - 1;
+       struct clock_event_device *cd;
+       static int timer_irq_installed = 0;
+
+       irq = EXCCODE_TIMER - EXCCODE_INT_START;
+
+       cd = &per_cpu(constant_clockevent_device, cpu);
+
+       cd->name = "Constant";
+       cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_PERCPU;
+
+       cd->irq = irq;
+       cd->rating = 320;
+       cd->cpumask = cpumask_of(cpu);
+       cd->set_state_oneshot = constant_set_state_oneshot;
+       cd->set_state_oneshot_stopped = constant_set_state_oneshot_stopped;
+       cd->set_state_periodic = constant_set_state_periodic;
+       cd->set_state_shutdown = constant_set_state_shutdown;
+       cd->set_next_event = constant_timer_next_event;
+       cd->event_handler = constant_event_handler;
+
+       clockevents_config_and_register(cd, const_clock_freq, min_delta, max_delta);
+
+       if (timer_irq_installed)
+               return 0;
+
+       timer_irq_installed = 1;
+
+       sync_counter();
+
+       if (request_irq(irq, constant_timer_interrupt, IRQF_PERCPU | IRQF_TIMER, "timer", NULL))
+               pr_err("Failed to request irq %d (timer)\n", irq);
+
+       lpj_fine = get_loops_per_jiffy();
+       pr_info("Constant clock event device registered\n");
+
+       return 0;
+}
+
+static u64 read_const_counter(struct clocksource *clk)
+{
+       return drdtime();
+}
+
+static u64 native_sched_clock(void)
+{
+       return read_const_counter(NULL);
+}
+
+static struct clocksource clocksource_const = {
+       .name = "Constant",
+       .rating = 400,
+       .read = read_const_counter,
+       .mask = CLOCKSOURCE_MASK(64),
+       .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+       .vdso_clock_mode = VDSO_CLOCKMODE_CPU,
+};
+
+int __init constant_clocksource_init(void)
+{
+       int res;
+       unsigned long freq = const_clock_freq;
+
+       res = clocksource_register_hz(&clocksource_const, freq);
+
+       sched_clock_register(native_sched_clock, 64, freq);
+
+       pr_info("Constant clock source device registered\n");
+
+       return res;
+}
+
+void __init time_init(void)
+{
+       if (!cpu_has_cpucfg)
+               const_clock_freq = cpu_clock_freq;
+       else
+               const_clock_freq = calc_const_freq();
+
+       init_timeval = drdtime() - csr_read64(LOONGARCH_CSR_CNTC);
+
+       constant_clockevent_init();
+       constant_clocksource_init();
+}
diff --git a/arch/loongarch/kernel/topology.c b/arch/loongarch/kernel/topology.c
new file mode 100644 (file)
index 0000000..ab1a75c
--- /dev/null
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/percpu.h>
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+#ifdef CONFIG_HOTPLUG_CPU
+int arch_register_cpu(int cpu)
+{
+       int ret;
+       struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+       c->hotpluggable = 1;
+       ret = register_cpu(c, cpu);
+       if (ret < 0)
+               pr_warn("register_cpu %d failed (%d)\n", cpu, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL(arch_register_cpu);
+
+void arch_unregister_cpu(int cpu)
+{
+       struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+       c->hotpluggable = 0;
+       unregister_cpu(c);
+}
+EXPORT_SYMBOL(arch_unregister_cpu);
+#endif
+
+static int __init topology_init(void)
+{
+       int i, ret;
+
+       for_each_present_cpu(i) {
+               struct cpu *c = &per_cpu(cpu_devices, i);
+
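+               /* CPU 0 is the boot CPU and is not hotpluggable */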
+               c->hotpluggable = !!i;
+               ret = register_cpu(c, i);
+               if (ret < 0)
+                       pr_warn("topology_init: register_cpu %d failed (%d)\n", i, ret);
+       }
+
+       return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
new file mode 100644 (file)
index 0000000..e4060f8
--- /dev/null
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/entry-common.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/extable.h>
+#include <linux/mm.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/memblock.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/kprobes.h>
+#include <linux/notifier.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/cpu.h>
+#include <asm/fpu.h>
+#include <asm/loongarch.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/ptrace.h>
+#include <asm/sections.h>
+#include <asm/siginfo.h>
+#include <asm/stacktrace.h>
+#include <asm/tlb.h>
+#include <asm/types.h>
+
+#include "access-helper.h"
+
+extern asmlinkage void handle_ade(void);
+extern asmlinkage void handle_ale(void);
+extern asmlinkage void handle_sys(void);
+extern asmlinkage void handle_bp(void);
+extern asmlinkage void handle_ri(void);
+extern asmlinkage void handle_fpu(void);
+extern asmlinkage void handle_fpe(void);
+extern asmlinkage void handle_lbt(void);
+extern asmlinkage void handle_lsx(void);
+extern asmlinkage void handle_lasx(void);
+extern asmlinkage void handle_reserved(void);
+extern asmlinkage void handle_watch(void);
+extern asmlinkage void handle_vint(void);
+
+static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
+                          const char *loglvl, bool user)
+{
+       unsigned long addr;
+       unsigned long *sp = (unsigned long *)(regs->regs[3] & ~3);
+
+       printk("%sCall Trace:", loglvl);
+#ifdef CONFIG_KALLSYMS
+       printk("%s\n", loglvl);
+#endif
+       while (!kstack_end(sp)) {
+               if (__get_addr(&addr, sp++, user)) {
+                       printk("%s (Bad stack address)", loglvl);
+                       break;
+               }
+               if (__kernel_text_address(addr))
+                       print_ip_sym(loglvl, addr);
+       }
+       printk("%s\n", loglvl);
+}
+
+static void show_stacktrace(struct task_struct *task,
+       const struct pt_regs *regs, const char *loglvl, bool user)
+{
+       int i;
+       const int field = 2 * sizeof(unsigned long);
+       unsigned long stackdata;
+       unsigned long *sp = (unsigned long *)regs->regs[3];
+
+       printk("%sStack :", loglvl);
+       i = 0;
+       while ((unsigned long) sp & (PAGE_SIZE - 1)) {
+               if (i && ((i % (64 / field)) == 0)) {
+                       pr_cont("\n");
+                       printk("%s       ", loglvl);
+               }
+               if (i > 39) {
+                       pr_cont(" ...");
+                       break;
+               }
+
+               if (__get_addr(&stackdata, sp++, user)) {
+                       pr_cont(" (Bad stack address)");
+                       break;
+               }
+
+               pr_cont(" %0*lx", field, stackdata);
+               i++;
+       }
+       pr_cont("\n");
+       show_backtrace(task, regs, loglvl, user);
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+{
+       struct pt_regs regs;
+
+       regs.csr_crmd = 0;
+       if (sp) {
+               regs.csr_era = 0;
+               regs.regs[1] = 0;
+               regs.regs[3] = (unsigned long)sp;
+       } else {
+               if (!task || task == current)
+                       prepare_frametrace(&regs);
+               else {
+                       regs.csr_era = task->thread.reg01;
+                       regs.regs[1] = 0;
+                       regs.regs[3] = task->thread.reg03;
+                       regs.regs[22] = task->thread.reg22;
+               }
+       }
+
+       show_stacktrace(task, &regs, loglvl, false);
+}
+
+static void show_code(unsigned int *pc, bool user)
+{
+       long i;
+       unsigned int insn;
+
+       printk("Code:");
+
+       for (i = -3; i < 6; i++) {
+               if (__get_inst(&insn, pc + i, user)) {
+                       pr_cont(" (Bad address in era)\n");
+                       break;
+               }
+               pr_cont("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
+       }
+       pr_cont("\n");
+}
+
+static void __show_regs(const struct pt_regs *regs)
+{
+       const int field = 2 * sizeof(unsigned long);
+       unsigned int excsubcode;
+       unsigned int exccode;
+       int i;
+
+       show_regs_print_info(KERN_DEFAULT);
+
+       /*
+        * Saved main processor registers
+        */
+       for (i = 0; i < 32; ) {
+               if ((i % 4) == 0)
+                       printk("$%2d   :", i);
+               pr_cont(" %0*lx", field, regs->regs[i]);
+
+               i++;
+               if ((i % 4) == 0)
+                       pr_cont("\n");
+       }
+
+       /*
+        * Saved csr registers
+        */
+       printk("era   : %0*lx %pS\n", field, regs->csr_era,
+              (void *) regs->csr_era);
+       printk("ra    : %0*lx %pS\n", field, regs->regs[1],
+              (void *) regs->regs[1]);
+
+       printk("CSR crmd: %08lx ", regs->csr_crmd);
+       printk("CSR prmd: %08lx ", regs->csr_prmd);
+       printk("CSR euen: %08lx ", regs->csr_euen);
+       printk("CSR ecfg: %08lx ", regs->csr_ecfg);
+       printk("CSR estat: %08lx        ", regs->csr_estat);
+
+       pr_cont("\n");
+
+       exccode = ((regs->csr_estat) & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
+       excsubcode = ((regs->csr_estat) & CSR_ESTAT_ESUBCODE) >> CSR_ESTAT_ESUBCODE_SHIFT;
+       printk("ExcCode : %x (SubCode %x)\n", exccode, excsubcode);
+
+       if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
+               printk("BadVA : %0*lx\n", field, regs->csr_badvaddr);
+
+       printk("PrId  : %08x (%s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
+              cpu_family_string());
+}
+
+void show_regs(struct pt_regs *regs)
+{
+       __show_regs(regs);
+       dump_stack();
+}
+
+void show_registers(struct pt_regs *regs)
+{
+       __show_regs(regs);
+       print_modules();
+       printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
+              current->comm, current->pid, current_thread_info(), current);
+
+       show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
+       show_code((void *)regs->csr_era, user_mode(regs));
+       printk("\n");
+}
+
+static DEFINE_RAW_SPINLOCK(die_lock);
+
+void __noreturn die(const char *str, struct pt_regs *regs)
+{
+       static int die_counter;
+       int sig = SIGSEGV;
+
+       oops_enter();
+
+       if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
+                      SIGSEGV) == NOTIFY_STOP)
+               sig = 0;
+
+       console_verbose();
+       raw_spin_lock_irq(&die_lock);
+       bust_spinlocks(1);
+
+       printk("%s[#%d]:\n", str, ++die_counter);
+       show_registers(regs);
+       add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+       raw_spin_unlock_irq(&die_lock);
+
+       oops_exit();
+
+       if (in_interrupt())
+               panic("Fatal exception in interrupt");
+
+       if (panic_on_oops)
+               panic("Fatal exception");
+
+       make_task_dead(sig);
+}
+
+static inline void setup_vint_size(unsigned int size)
+{
+       unsigned int vs;
+
+       vs = ilog2(size/4);
+
+       if (vs == 0 || vs > 7)
+               panic("vint_size %d is not supported yet", vs);
+
+       csr_xchg32(vs<<CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
+}
+
+/*
+ * Send SIGFPE according to FCSR Cause bits, which must have already
+ * been masked against Enable bits.  This is important as Inexact can
+ * happen together with Overflow or Underflow, and `ptrace' can set
+ * any bits.
+ */
+void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
+                    struct task_struct *tsk)
+{
+       int si_code = FPE_FLTUNK;
+
+       if (fcsr & FPU_CSR_INV_X)
+               si_code = FPE_FLTINV;
+       else if (fcsr & FPU_CSR_DIV_X)
+               si_code = FPE_FLTDIV;
+       else if (fcsr & FPU_CSR_OVF_X)
+               si_code = FPE_FLTOVF;
+       else if (fcsr & FPU_CSR_UDF_X)
+               si_code = FPE_FLTUND;
+       else if (fcsr & FPU_CSR_INE_X)
+               si_code = FPE_FLTRES;
+
+       force_sig_fault(SIGFPE, si_code, fault_addr);
+}
+
+int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
+{
+       int si_code;
+
+       switch (sig) {
+       case 0:
+               return 0;
+
+       case SIGFPE:
+               force_fcsr_sig(fcsr, fault_addr, current);
+               return 1;
+
+       case SIGBUS:
+               force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
+               return 1;
+
+       case SIGSEGV:
+               mmap_read_lock(current->mm);
+               if (vma_lookup(current->mm, (unsigned long)fault_addr))
+                       si_code = SEGV_ACCERR;
+               else
+                       si_code = SEGV_MAPERR;
+               mmap_read_unlock(current->mm);
+               force_sig_fault(SIGSEGV, si_code, fault_addr);
+               return 1;
+
+       default:
+               force_sig(sig);
+               return 1;
+       }
+}
+
+/*
+ * Delayed fp exceptions when doing a lazy ctx switch
+ */
+asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
+{
+       int sig;
+       void __user *fault_addr;
+       irqentry_state_t state = irqentry_enter(regs);
+
+       if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
+                      SIGFPE) == NOTIFY_STOP)
+               goto out;
+
+       /* Clear FCSR.Cause before enabling interrupts */
+       write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
+       local_irq_enable();
+
+       die_if_kernel("FP exception in kernel code", regs);
+
+       sig = SIGFPE;
+       fault_addr = (void __user *) regs->csr_era;
+
+       /* Send a signal if required.  */
+       process_fpemu_return(sig, fault_addr, fcsr);
+
+out:
+       local_irq_disable();
+       irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_ade(struct pt_regs *regs)
+{
+       irqentry_state_t state = irqentry_enter(regs);
+
+       die_if_kernel("Kernel ade access", regs);
+       force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);
+
+       irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_ale(struct pt_regs *regs)
+{
+       irqentry_state_t state = irqentry_enter(regs);
+
+       die_if_kernel("Kernel ale access", regs);
+       force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+
+       irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_bp(struct pt_regs *regs)
+{
+       bool user = user_mode(regs);
+       unsigned int opcode, bcode;
+       unsigned long era = exception_era(regs);
+       irqentry_state_t state = irqentry_enter(regs);
+
+       local_irq_enable();
+       current->thread.trap_nr = read_csr_excode();
+       if (__get_inst(&opcode, (u32 *)era, user))
+               goto out_sigsegv;
+
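+       /* The break code is encoded in the low 15 bits of the instruction */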
+       bcode = (opcode & 0x7fff);
+
+       /*
+        * Notify the kprobe handlers if the instruction is likely to
+        * pertain to them.
+        */
+       switch (bcode) {
+       case BRK_KPROBE_BP:
+               if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
+                              current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+                       goto out;
+               else
+                       break;
+       case BRK_KPROBE_SSTEPBP:
+               if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep", regs, bcode,
+                              current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+                       goto out;
+               else
+                       break;
+       case BRK_UPROBE_BP:
+               if (notify_die(DIE_UPROBE, "Uprobe", regs, bcode,
+                              current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+                       goto out;
+               else
+                       break;
+       case BRK_UPROBE_XOLBP:
+               if (notify_die(DIE_UPROBE_XOL, "Uprobe_XOL", regs, bcode,
+                              current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+                       goto out;
+               else
+                       break;
+       default:
+               if (notify_die(DIE_TRAP, "Break", regs, bcode,
+                              current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+                       goto out;
+               else
+                       break;
+       }
+
+       switch (bcode) {
+       case BRK_BUG:
+               die_if_kernel("Kernel bug detected", regs);
+               force_sig(SIGTRAP);
+               break;
+       case BRK_DIVZERO:
+               die_if_kernel("Break instruction in kernel code", regs);
+               force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
+               break;
+       case BRK_OVERFLOW:
+               die_if_kernel("Break instruction in kernel code", regs);
+               force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
+               break;
+       default:
+               die_if_kernel("Break instruction in kernel code", regs);
+               force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
+               break;
+       }
+
+out:
+       local_irq_disable();
+       irqentry_exit(regs, state);
+       return;
+
+out_sigsegv:
+       force_sig(SIGSEGV);
+       goto out;
+}
+
+asmlinkage void noinstr do_watch(struct pt_regs *regs)
+{
+       pr_warn("Hardware watch point handler not implemented!\n");
+}
+
+asmlinkage void noinstr do_ri(struct pt_regs *regs)
+{
+       int status = -1;
+       unsigned int opcode = 0;
+       unsigned int __user *era = (unsigned int __user *)exception_era(regs);
+       unsigned long old_era = regs->csr_era;
+       unsigned long old_ra = regs->regs[1];
+       irqentry_state_t state = irqentry_enter(regs);
+
+       local_irq_enable();
+       current->thread.trap_nr = read_csr_excode();
+
+       if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
+                      SIGILL) == NOTIFY_STOP)
+               goto out;
+
+       die_if_kernel("Reserved instruction in kernel code", regs);
+
+       if (unlikely(compute_return_era(regs) < 0))
+               goto out;
+
+       if (unlikely(get_user(opcode, era) < 0)) {
+               status = SIGSEGV;
+               current->thread.error_code = 1;
+       }
+
+       if (status < 0)
+               status = SIGILL;
+
+       if (unlikely(status > 0)) {
+               regs->csr_era = old_era;                /* Undo skip-over.  */
+               regs->regs[1] = old_ra;
+               force_sig(status);
+       }
+
+out:
+       local_irq_disable();
+       irqentry_exit(regs, state);
+}
+
+static void init_restore_fp(void)
+{
+       if (!used_math()) {
+               /* First time FP context user. */
+               init_fpu();
+       } else {
+               /* This task has formerly used the FP context */
+               if (!is_fpu_owner())
+                       own_fpu_inatomic(1);
+       }
+
+       BUG_ON(!is_fp_enabled());
+}
+
+asmlinkage void noinstr do_fpu(struct pt_regs *regs)
+{
+       irqentry_state_t state = irqentry_enter(regs);
+
+       local_irq_enable();
+       die_if_kernel("do_fpu invoked from kernel context!", regs);
+
+       preempt_disable();
+       init_restore_fp();
+       preempt_enable();
+
+       local_irq_disable();
+       irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_lsx(struct pt_regs *regs)
+{
+       irqentry_state_t state = irqentry_enter(regs);
+
+       local_irq_enable();
+       force_sig(SIGILL);
+       local_irq_disable();
+
+       irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_lasx(struct pt_regs *regs)
+{
+       irqentry_state_t state = irqentry_enter(regs);
+
+       local_irq_enable();
+       force_sig(SIGILL);
+       local_irq_disable();
+
+       irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_lbt(struct pt_regs *regs)
+{
+       irqentry_state_t state = irqentry_enter(regs);
+
+       local_irq_enable();
+       force_sig(SIGILL);
+       local_irq_disable();
+
+       irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_reserved(struct pt_regs *regs)
+{
+       irqentry_state_t state = irqentry_enter(regs);
+
+       local_irq_enable();
+       /*
+        * Game over - no way to handle this if it ever occurs. Most probably
+        * caused by a fatal error after another hardware/software error.
+        */
+       pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
+               read_csr_excode(), current->pid, current->comm);
+       die_if_kernel("do_reserved exception", regs);
+       force_sig(SIGUNUSED);
+
+       local_irq_disable();
+
+       irqentry_exit(regs, state);
+}
+
+asmlinkage void cache_parity_error(void)
+{
+       /* For the moment, report the problem and hang. */
+       pr_err("Cache error exception:\n");
+       pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
+       pr_err("csr_merrera == %016llx\n", csr_read64(LOONGARCH_CSR_MERRERA));
+       panic("Can't handle the cache error!");
+}
+
+asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs;
+
+       irq_enter_rcu();
+       old_regs = set_irq_regs(regs);
+       handle_arch_irq(regs);
+       set_irq_regs(old_regs);
+       irq_exit_rcu();
+}
+
+asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
+{
+       register int cpu;
+       register unsigned long stack;
+       irqentry_state_t state = irqentry_enter(regs);
+
+       cpu = smp_processor_id();
+
+       if (on_irq_stack(cpu, sp))
+               handle_loongarch_irq(regs);
+       else {
+               stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;
+
+               /* Save task's sp on IRQ stack for unwinding */
+               *(unsigned long *)stack = sp;
+
+               __asm__ __volatile__(
+               "move   $s0, $sp                \n" /* Preserve sp */
+               "move   $sp, %[stk]             \n" /* Switch stack */
+               "move   $a0, %[regs]            \n"
+               "bl     handle_loongarch_irq    \n"
+               "move   $sp, $s0                \n" /* Restore sp */
+               : /* No outputs */
+               : [stk] "r" (stack), [regs] "r" (regs)
+               : "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
+                 "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
+                 "memory");
+       }
+
+       irqentry_exit(regs, state);
+}
+
+extern void tlb_init(int cpu);
+extern void cache_error_setup(void);
+
+unsigned long eentry;
+unsigned long tlbrentry;
+
+long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);
+
+static void configure_exception_vector(void)
+{
+       eentry    = (unsigned long)exception_handlers;
+       tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;
+
+       csr_write64(eentry, LOONGARCH_CSR_EENTRY);
+       csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
+       csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
+}
+
+void per_cpu_trap_init(int cpu)
+{
+       unsigned int i;
+
+       setup_vint_size(VECSIZE);
+
+       configure_exception_vector();
+
+       if (!cpu_data[cpu].asid_cache)
+               cpu_data[cpu].asid_cache = asid_first_version(cpu);
+
+       mmgrab(&init_mm);
+       current->active_mm = &init_mm;
+       BUG_ON(current->mm);
+       enter_lazy_tlb(&init_mm, current);
+
+       /* Initialise exception handlers */
+       if (cpu == 0)
+               for (i = 0; i < 64; i++)
+                       set_handler(i * VECSIZE, handle_reserved, VECSIZE);
+
+       tlb_init(cpu);
+       cpu_cache_init();
+}
+
+/* Install CPU exception handler */
+void set_handler(unsigned long offset, void *addr, unsigned long size)
+{
+       memcpy((void *)(eentry + offset), addr, size);
+       local_flush_icache_range(eentry + offset, eentry + offset + size);
+}
+
+static const char panic_null_cerr[] =
+       "Trying to set NULL cache error exception handler\n";
+
+/*
+ * Install uncached CPU exception handler.
+ * This is suitable only for the cache error exception, which is the only
+ * exception handler that is run uncached.
+ */
+void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
+{
+       unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));
+
+       if (!addr)
+               panic(panic_null_cerr);
+
+       memcpy((void *)(uncached_eentry + offset), addr, size);
+}
+
+void __init trap_init(void)
+{
+       long i;
+
+       /* Set interrupt vector handler */
+       for (i = EXCCODE_INT_START; i < EXCCODE_INT_END; i++)
+               set_handler(i * VECSIZE, handle_vint, VECSIZE);
+
+       set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
+       set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
+       set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
+       set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
+       set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
+       set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
+       set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
+       set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
+       set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
+       set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
+       set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
+       set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);
+
+       cache_error_setup();
+
+       local_flush_icache_range(eentry, eentry + 0x400);
+}
diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c
new file mode 100644 (file)
index 0000000..e20c8ca
--- /dev/null
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timekeeper_internal.h>
+
+#include <asm/page.h>
+#include <asm/vdso.h>
+#include <vdso/helpers.h>
+#include <vdso/vsyscall.h>
+#include <generated/vdso-offsets.h>
+
+extern char vdso_start[], vdso_end[];
+
+/* Kernel-provided data used by the VDSO. */
+static union loongarch_vdso_data {
+       u8 page[PAGE_SIZE];
+       struct vdso_data data[CS_BASES];
+} loongarch_vdso_data __page_aligned_data;
+struct vdso_data *vdso_data = loongarch_vdso_data.data;
+static struct page *vdso_pages[] = { NULL };
+
+static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
+{
+       current->mm->context.vdso = (void *)(new_vma->vm_start);
+
+       return 0;
+}
+
+struct loongarch_vdso_info vdso_info = {
+       .vdso = vdso_start,
+       .size = PAGE_SIZE,
+       .code_mapping = {
+               .name = "[vdso]",
+               .pages = vdso_pages,
+               .mremap = vdso_mremap,
+       },
+       .data_mapping = {
+               .name = "[vvar]",
+       },
+       .offset_sigreturn = vdso_offset_sigreturn,
+};
+
+static int __init init_vdso(void)
+{
+       unsigned long i, pfn;
+
+       BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
+       BUG_ON(!PAGE_ALIGNED(vdso_info.size));
+
+       pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
+       for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
+               vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);
+
+       return 0;
+}
+subsys_initcall(init_vdso);
+
+static unsigned long vdso_base(void)
+{
+       unsigned long base = STACK_TOP;
+
+       if (current->flags & PF_RANDOMIZE) {
+               base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
+               base = PAGE_ALIGN(base);
+       }
+
+       return base;
+}
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+       int ret;
+       unsigned long vvar_size, size, data_addr, vdso_addr;
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       struct loongarch_vdso_info *info = current->thread.vdso;
+
+       if (mmap_write_lock_killable(mm))
+               return -EINTR;
+
+       /*
+        * Determine the total area size. This includes the VDSO code
+        * itself and the data (vvar) page.
+        */
+       vvar_size = PAGE_SIZE;
+       size = vvar_size + info->size;
+
+       data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
+       if (IS_ERR_VALUE(data_addr)) {
+               ret = data_addr;
+               goto out;
+       }
+       vdso_addr = data_addr + PAGE_SIZE;
+
+       vma = _install_special_mapping(mm, data_addr, vvar_size,
+                                      VM_READ | VM_MAYREAD,
+                                      &info->data_mapping);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto out;
+       }
+
+       /* Map VDSO data page. */
+       ret = remap_pfn_range(vma, data_addr,
+                             virt_to_phys(vdso_data) >> PAGE_SHIFT,
+                             PAGE_SIZE, PAGE_READONLY);
+       if (ret)
+               goto out;
+
+       /* Map VDSO code page. */
+       vma = _install_special_mapping(mm, vdso_addr, info->size,
+                                      VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+                                      &info->code_mapping);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto out;
+       }
+
+       mm->context.vdso = (void *)vdso_addr;
+       ret = 0;
+
+out:
+       mmap_write_unlock(mm);
+       return ret;
+}
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
new file mode 100644 (file)
index 0000000..9d50815
--- /dev/null
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/sizes.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+#define PAGE_SIZE _PAGE_SIZE
+
+/*
+ * Put .bss..swapper_pg_dir as the first thing in .bss. This will
+ * ensure that it has .bss alignment (64K).
+ */
+#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir)
+
+#include <asm-generic/vmlinux.lds.h>
+
+/*
+ * The maximum available page size is 64K, so we set the SectionAlignment
+ * field of the EFI application to 64K.
+ */
+PECOFF_FILE_ALIGN = 0x200;
+PECOFF_SEGMENT_ALIGN = 0x10000;
+
+OUTPUT_ARCH(loongarch)
+ENTRY(kernel_entry)
+PHDRS {
+       text PT_LOAD FLAGS(7);  /* RWX */
+       note PT_NOTE FLAGS(4);  /* R__ */
+}
+
+jiffies         = jiffies_64;
+
+SECTIONS
+{
+       . = VMLINUX_LOAD_ADDRESS;
+
+       _text = .;
+       HEAD_TEXT_SECTION
+
+       . = ALIGN(PECOFF_SEGMENT_ALIGN);
+       .text : {
+               TEXT_TEXT
+               SCHED_TEXT
+               CPUIDLE_TEXT
+               LOCK_TEXT
+               KPROBES_TEXT
+               IRQENTRY_TEXT
+               SOFTIRQENTRY_TEXT
+               *(.fixup)
+               *(.gnu.warning)
+       } :text = 0
+       . = ALIGN(PECOFF_SEGMENT_ALIGN);
+       _etext = .;
+
+       EXCEPTION_TABLE(16)
+
+       . = ALIGN(PECOFF_SEGMENT_ALIGN);
+       __init_begin = .;
+       __inittext_begin = .;
+
+       INIT_TEXT_SECTION(PAGE_SIZE)
+       .exit.text : {
+               EXIT_TEXT
+       }
+
+       . = ALIGN(PECOFF_SEGMENT_ALIGN);
+       __inittext_end = .;
+
+       __initdata_begin = .;
+
+       INIT_DATA_SECTION(16)
+       .exit.data : {
+               EXIT_DATA
+       }
+
+#ifdef CONFIG_SMP
+       PERCPU_SECTION(1 << CONFIG_L1_CACHE_SHIFT)
+#endif
+
+       .init.bss : {
+               *(.init.bss)
+       }
+       . = ALIGN(PECOFF_SEGMENT_ALIGN);
+       __initdata_end = .;
+
+       __init_end = .;
+
+       _sdata = .;
+       RO_DATA(4096)
+       RW_DATA(1 << CONFIG_L1_CACHE_SHIFT, PAGE_SIZE, THREAD_SIZE)
+
+       .sdata : {
+               *(.sdata)
+       }
+       .edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGN); }
+       _edata =  .;
+
+       BSS_SECTION(0, SZ_64K, 8)
+       . = ALIGN(PECOFF_SEGMENT_ALIGN);
+
+       _end = .;
+
+       STABS_DEBUG
+       DWARF_DEBUG
+
+       .gptab.sdata : {
+               *(.gptab.data)
+               *(.gptab.sdata)
+       }
+       .gptab.sbss : {
+               *(.gptab.bss)
+               *(.gptab.sbss)
+       }
+
+       DISCARDS
+       /DISCARD/ : {
+               *(.gnu.attributes)
+               *(.options)
+               *(.eh_frame)
+       }
+}
diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile
new file mode 100644 (file)
index 0000000..e36635f
--- /dev/null
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for LoongArch-specific library files.
+#
+
+lib-y  += delay.o clear_user.o copy_user.o dump_tlb.o
diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_user.S
new file mode 100644 (file)
index 0000000..25d9be5
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
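+/*
+ * fixup_ex emits an exception table entry so that a fault at local label
+ * \from branches to \to; when \fix is set it also emits the fixup code,
+ * which returns the number of bytes left to clear (a1 + \offset) in a0.
+ */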
+.macro fixup_ex from, to, offset, fix
+.if \fix
+       .section .fixup, "ax"
+\to:   addi.d  a0, a1, \offset
+       jr      ra
+       .previous
+.endif
+       .section __ex_table, "a"
+       PTR     \from\()b, \to\()b
+       .previous
+.endm
+
+/*
+ * unsigned long __clear_user(void *addr, size_t size)
+ *
+ * a0: addr
+ * a1: size
+ */
+SYM_FUNC_START(__clear_user)
+       beqz    a1, 2f
+
+1:     st.b    zero, a0, 0
+       addi.d  a0, a0, 1
+       addi.d  a1, a1, -1
+       bgt     a1, zero, 1b
+
+2:     move    a0, a1
+       jr      ra
+
+       fixup_ex 1, 3, 0, 1
+SYM_FUNC_END(__clear_user)
+
+EXPORT_SYMBOL(__clear_user)
diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S
new file mode 100644 (file)
index 0000000..9ae507f
--- /dev/null
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
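+/*
+ * As in clear_user.S, fixup_ex records an exception table entry for a
+ * fault at \from; the shared fixup at \to returns the number of bytes
+ * not copied (a2 + \offset) in a0.
+ */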
+.macro fixup_ex from, to, offset, fix
+.if \fix
+       .section .fixup, "ax"
+\to:   addi.d  a0, a2, \offset
+       jr      ra
+       .previous
+.endif
+       .section __ex_table, "a"
+       PTR     \from\()b, \to\()b
+       .previous
+.endm
+
+/*
+ * unsigned long __copy_user(void *to, const void *from, size_t n)
+ *
+ * a0: to
+ * a1: from
+ * a2: n
+ */
+SYM_FUNC_START(__copy_user)
+       beqz    a2, 3f
+
+1:     ld.b    t0, a1, 0
+2:     st.b    t0, a0, 0
+       addi.d  a0, a0, 1
+       addi.d  a1, a1, 1
+       addi.d  a2, a2, -1
+       bgt     a2, zero, 1b
+
+3:     move    a0, a2
+       jr      ra
+
+       fixup_ex 1, 4, 0, 1
+       fixup_ex 2, 4, 0, 0
+SYM_FUNC_END(__copy_user)
+
+EXPORT_SYMBOL(__copy_user)
diff --git a/arch/loongarch/lib/delay.c b/arch/loongarch/lib/delay.c
new file mode 100644 (file)
index 0000000..5d85669
--- /dev/null
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/smp.h>
+#include <linux/timex.h>
+
+#include <asm/compiler.h>
+#include <asm/processor.h>
+
+void __delay(unsigned long cycles)
+{
+       u64 t0 = get_cycles();
+
+       while ((unsigned long)(get_cycles() - t0) < cycles)
+               cpu_relax();
+}
+EXPORT_SYMBOL(__delay);
+
+/*
+ * Division by multiplication: you don't have to worry about
+ * loss of precision.
+ *
+ * Use only for very small delays (< 1 msec). Should probably use a
+ * lookup table, really, as the multiplications take much too long with
+ * short delays.  This is a "reasonable" implementation, though (and the
+ * first constant multiplication gets optimized away if the delay is
+ * a constant).
+ */
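+
+/*
+ * 0x000010c7 is 2^32 / 10^6 (rounded up) and 0x5 is 2^32 / 10^9 (rounded
+ * up), so the expressions below approximate us (or ns) * HZ * lpj_fine /
+ * 10^6 (or 10^9), i.e. the requested delay in timer cycles, without a
+ * division.
+ */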
+
+void __udelay(unsigned long us)
+{
+       __delay((us * 0x000010c7ull * HZ * lpj_fine) >> 32);
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long ns)
+{
+       __delay((ns * 0x00000005ull * HZ * lpj_fine) >> 32);
+}
+EXPORT_SYMBOL(__ndelay);
diff --git a/arch/loongarch/lib/dump_tlb.c b/arch/loongarch/lib/dump_tlb.c
new file mode 100644 (file)
index 0000000..cda2c6b
--- /dev/null
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
+ * Copyright (C) 1999 by Silicon Graphics, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+#include <asm/loongarch.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+
+void dump_tlb_regs(void)
+{
+       const int field = 2 * sizeof(unsigned long);
+
+       pr_info("Index    : %0x\n", read_csr_tlbidx());
+       pr_info("PageSize : %0x\n", read_csr_pagesize());
+       pr_info("EntryHi  : %0*llx\n", field, read_csr_entryhi());
+       pr_info("EntryLo0 : %0*llx\n", field, read_csr_entrylo0());
+       pr_info("EntryLo1 : %0*llx\n", field, read_csr_entrylo1());
+}
+
+static void dump_tlb(int first, int last)
+{
+       unsigned long s_entryhi, entryhi, asid;
+       unsigned long long entrylo0, entrylo1, pa;
+       unsigned int index;
+       unsigned int s_index, s_asid;
+       unsigned int pagesize, c0, c1, i;
+       unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
+       int pwidth = 11;
+       int vwidth = 11;
+       int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
+
+       s_entryhi = read_csr_entryhi();
+       s_index = read_csr_tlbidx();
+       s_asid = read_csr_asid();
+
+       for (i = first; i <= last; i++) {
+               write_csr_index(i);
+               tlb_read();
+               pagesize = read_csr_pagesize();
+               entryhi  = read_csr_entryhi();
+               entrylo0 = read_csr_entrylo0();
+               entrylo1 = read_csr_entrylo1();
+               index = read_csr_tlbidx();
+               asid = read_csr_asid();
+
+               /* EHINV bit marks entire entry as invalid */
+               if (index & CSR_TLBIDX_EHINV)
+                       continue;
+               /*
+                * The ASID only takes effect in the absence of the G (global) bit.
+                */
+               if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
+                   asid != s_asid)
+                       continue;
+
+               /*
+                * Only print entries in use
+                */
+               pr_info("Index: %2d pgsize=%x ", i, (1 << pagesize));
+
+               c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
+               c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
+
+               pr_cont("va=%0*lx asid=%0*lx",
+                       vwidth, (entryhi & ~0x1fffUL), asidwidth, asid & asidmask);
+
+               /* NR/NX are in awkward places, so mask them off separately */
+               pa = entrylo0 & ~(ENTRYLO_NR | ENTRYLO_NX);
+               pa = pa & PAGE_MASK;
+               pr_cont("\n\t[");
+               pr_cont("ri=%d xi=%d ",
+                       (entrylo0 & ENTRYLO_NR) ? 1 : 0,
+                       (entrylo0 & ENTRYLO_NX) ? 1 : 0);
+               pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d plv=%lld] [",
+                       pwidth, pa, c0,
+                       (entrylo0 & ENTRYLO_D) ? 1 : 0,
+                       (entrylo0 & ENTRYLO_V) ? 1 : 0,
+                       (entrylo0 & ENTRYLO_G) ? 1 : 0,
+                       (entrylo0 & ENTRYLO_PLV) >> ENTRYLO_PLV_SHIFT);
+               /* NR/NX are in awkward places, so mask them off separately */
+               pa = entrylo1 & ~(ENTRYLO_NR | ENTRYLO_NX);
+               pa = pa & PAGE_MASK;
+               pr_cont("ri=%d xi=%d ",
+                       (entrylo1 & ENTRYLO_NR) ? 1 : 0,
+                       (entrylo1 & ENTRYLO_NX) ? 1 : 0);
+               pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d plv=%lld]\n",
+                       pwidth, pa, c1,
+                       (entrylo1 & ENTRYLO_D) ? 1 : 0,
+                       (entrylo1 & ENTRYLO_V) ? 1 : 0,
+                       (entrylo1 & ENTRYLO_G) ? 1 : 0,
+                       (entrylo1 & ENTRYLO_PLV) >> ENTRYLO_PLV_SHIFT);
+       }
+       pr_info("\n");
+
+       write_csr_entryhi(s_entryhi);
+       write_csr_tlbidx(s_index);
+       write_csr_asid(s_asid);
+}
+
+void dump_tlb_all(void)
+{
+       dump_tlb(0, current_cpu_data.tlbsize - 1);
+}
diff --git a/arch/loongarch/mm/Makefile b/arch/loongarch/mm/Makefile
new file mode 100644 (file)
index 0000000..8ffc638
--- /dev/null
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux/LoongArch-specific parts of the memory manager.
+#
+
+obj-y                          += init.o cache.o tlb.o tlbex.o extable.o \
+                                  fault.o ioremap.o maccess.o mmap.o pgtable.o page.o
+
+obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c
new file mode 100644 (file)
index 0000000..9e5ce5a
--- /dev/null
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ */
+#include <linux/export.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/dma.h>
+#include <asm/loongarch.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+
+/*
+ * LoongArch maintains ICache/DCache coherency by hardware, so we
+ * just need an "ibar" here to avoid an instruction hazard.
+ */
+void local_flush_icache_range(unsigned long start, unsigned long end)
+{
+       asm volatile ("\tibar 0\n"::);
+}
+EXPORT_SYMBOL(local_flush_icache_range);
+
+void cache_error_setup(void)
+{
+       extern char __weak except_vec_cex;
+       set_merr_handler(0x0, &except_vec_cex, 0x80);
+}
+
+static unsigned long icache_size __read_mostly;
+static unsigned long dcache_size __read_mostly;
+static unsigned long vcache_size __read_mostly;
+static unsigned long scache_size __read_mostly;
+
+static char *way_string[] = { NULL, "direct mapped", "2-way",
+       "3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
+       "9-way", "10-way", "11-way", "12-way",
+       "13-way", "14-way", "15-way", "16-way",
+};
+
+static void probe_pcache(void)
+{
+       struct cpuinfo_loongarch *c = &current_cpu_data;
+       unsigned int lsize, sets, ways;
+       unsigned int config;
+
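+       /*
+        * CPUCFG17 describes the L1 I-cache geometry: line size and sets
+        * are log2-encoded, the ways field stores (ways - 1).
+        */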
+       config = read_cpucfg(LOONGARCH_CPUCFG17);
+       lsize = 1 << ((config & CPUCFG17_L1I_SIZE_M) >> CPUCFG17_L1I_SIZE);
+       sets  = 1 << ((config & CPUCFG17_L1I_SETS_M) >> CPUCFG17_L1I_SETS);
+       ways  = ((config & CPUCFG17_L1I_WAYS_M) >> CPUCFG17_L1I_WAYS) + 1;
+
+       c->icache.linesz = lsize;
+       c->icache.sets = sets;
+       c->icache.ways = ways;
+       icache_size = sets * ways * lsize;
+       c->icache.waysize = icache_size / c->icache.ways;
+
+       config = read_cpucfg(LOONGARCH_CPUCFG18);
+       lsize = 1 << ((config & CPUCFG18_L1D_SIZE_M) >> CPUCFG18_L1D_SIZE);
+       sets  = 1 << ((config & CPUCFG18_L1D_SETS_M) >> CPUCFG18_L1D_SETS);
+       ways  = ((config & CPUCFG18_L1D_WAYS_M) >> CPUCFG18_L1D_WAYS) + 1;
+
+       c->dcache.linesz = lsize;
+       c->dcache.sets = sets;
+       c->dcache.ways = ways;
+       dcache_size = sets * ways * lsize;
+       c->dcache.waysize = dcache_size / c->dcache.ways;
+
+       c->options |= LOONGARCH_CPU_PREFETCH;
+
+       pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
+               icache_size >> 10, way_string[c->icache.ways], "VIPT", c->icache.linesz);
+
+       pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
+               dcache_size >> 10, way_string[c->dcache.ways], "VIPT", "no aliases", c->dcache.linesz);
+}
+
+static void probe_vcache(void)
+{
+       struct cpuinfo_loongarch *c = &current_cpu_data;
+       unsigned int lsize, sets, ways;
+       unsigned int config;
+
+       config = read_cpucfg(LOONGARCH_CPUCFG19);
+       lsize = 1 << ((config & CPUCFG19_L2_SIZE_M) >> CPUCFG19_L2_SIZE);
+       sets  = 1 << ((config & CPUCFG19_L2_SETS_M) >> CPUCFG19_L2_SETS);
+       ways  = ((config & CPUCFG19_L2_WAYS_M) >> CPUCFG19_L2_WAYS) + 1;
+
+       c->vcache.linesz = lsize;
+       c->vcache.sets = sets;
+       c->vcache.ways = ways;
+       vcache_size = lsize * sets * ways;
+       c->vcache.waysize = vcache_size / c->vcache.ways;
+
+       pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
+               vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
+}
+
+static void probe_scache(void)
+{
+       struct cpuinfo_loongarch *c = &current_cpu_data;
+       unsigned int lsize, sets, ways;
+       unsigned int config;
+
+       config = read_cpucfg(LOONGARCH_CPUCFG20);
+       lsize = 1 << ((config & CPUCFG20_L3_SIZE_M) >> CPUCFG20_L3_SIZE);
+       sets  = 1 << ((config & CPUCFG20_L3_SETS_M) >> CPUCFG20_L3_SETS);
+       ways  = ((config & CPUCFG20_L3_WAYS_M) >> CPUCFG20_L3_WAYS) + 1;
+
+       c->scache.linesz = lsize;
+       c->scache.sets = sets;
+       c->scache.ways = ways;
+       /* The secondary (L3) cache is shared by all cores */
+       scache_size = lsize * sets * ways;
+       c->scache.waysize = scache_size / c->scache.ways;
+
+       pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
+               scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
+}
+
+void cpu_cache_init(void)
+{
+       probe_pcache();
+       probe_vcache();
+       probe_scache();
+
+       shm_align_mask = PAGE_SIZE - 1;
+}
diff --git a/arch/loongarch/mm/extable.c b/arch/loongarch/mm/extable.c
new file mode 100644 (file)
index 0000000..bc20988
--- /dev/null
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/extable.h>
+#include <linux/spinlock.h>
+#include <asm/branch.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+       const struct exception_table_entry *fixup;
+
+       fixup = search_exception_tables(exception_era(regs));
+       if (fixup) {
+               regs->csr_era = fixup->fixup;
+
+               return 1;
+       }
+
+       return 0;
+}
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
new file mode 100644 (file)
index 0000000..605579b
--- /dev/null
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1995 - 2000 by Ralf Baechle
+ */
+#include <linux/context_tracking.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/entry-common.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/ratelimit.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kdebug.h>
+#include <linux/kprobes.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/branch.h>
+#include <asm/mmu_context.h>
+#include <asm/ptrace.h>
+
+int show_unhandled_signals = 1;
+
+static void __kprobes no_context(struct pt_regs *regs, unsigned long address)
+{
+       const int field = sizeof(unsigned long) * 2;
+
+       /* Are we prepared to handle this kernel fault?  */
+       if (fixup_exception(regs))
+               return;
+
+       /*
+        * Oops. The kernel tried to access some bad page. We'll have to
+        * terminate things with extreme prejudice.
+        */
+       bust_spinlocks(1);
+
+       pr_alert("CPU %d Unable to handle kernel paging request at "
+              "virtual address %0*lx, era == %0*lx, ra == %0*lx\n",
+              raw_smp_processor_id(), field, address, field, regs->csr_era,
+              field,  regs->regs[1]);
+       die("Oops", regs);
+}
+
+static void __kprobes do_out_of_memory(struct pt_regs *regs, unsigned long address)
+{
+       /*
+        * We ran out of memory, call the OOM killer, and return to userspace
+        * (which will retry the fault, or kill us if we got oom-killed).
+        */
+       if (!user_mode(regs)) {
+               no_context(regs, address);
+               return;
+       }
+       pagefault_out_of_memory();
+}
+
+static void __kprobes do_sigbus(struct pt_regs *regs,
+               unsigned long write, unsigned long address, int si_code)
+{
+       /* Kernel mode? Handle exceptions or die */
+       if (!user_mode(regs)) {
+               no_context(regs, address);
+               return;
+       }
+
+       /*
+        * Send a sigbus, regardless of whether we were in kernel
+        * or user mode.
+        */
+       current->thread.csr_badvaddr = address;
+       current->thread.trap_nr = read_csr_excode();
+       force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
+}
+
+static void __kprobes do_sigsegv(struct pt_regs *regs,
+               unsigned long write, unsigned long address, int si_code)
+{
+       const int field = sizeof(unsigned long) * 2;
+       static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
+
+       /* Kernel mode? Handle exceptions or die */
+       if (!user_mode(regs)) {
+               no_context(regs, address);
+               return;
+       }
+
+       /* User mode accesses just cause a SIGSEGV */
+       current->thread.csr_badvaddr = address;
+       if (!write)
+               current->thread.error_code = 1;
+       else
+               current->thread.error_code = 2;
+       current->thread.trap_nr = read_csr_excode();
+
+       if (show_unhandled_signals &&
+           unhandled_signal(current, SIGSEGV) && __ratelimit(&ratelimit_state)) {
+               pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n",
+                       current->comm,
+                       write ? "write access to" : "read access from",
+                       field, address);
+               pr_info("era = %0*lx in", field,
+                       (unsigned long) regs->csr_era);
+               print_vma_addr(KERN_CONT " ", regs->csr_era);
+               pr_cont("\n");
+               pr_info("ra  = %0*lx in", field,
+                       (unsigned long) regs->regs[1]);
+               print_vma_addr(KERN_CONT " ", regs->regs[1]);
+               pr_cont("\n");
+       }
+       force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+}
+
+/*
+ * This routine handles page faults.  It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ */
+static void __kprobes __do_page_fault(struct pt_regs *regs,
+                       unsigned long write, unsigned long address)
+{
+       int si_code = SEGV_MAPERR;
+       unsigned int flags = FAULT_FLAG_DEFAULT;
+       struct task_struct *tsk = current;
+       struct mm_struct *mm = tsk->mm;
+       struct vm_area_struct *vma = NULL;
+       vm_fault_t fault;
+
+       /*
+        * We fault-in kernel-space virtual memory on-demand. The
+        * 'reference' page table is init_mm.pgd.
+        *
+        * NOTE! We MUST NOT take any locks for this case. We may
+        * be in an interrupt or a critical region, and should
+        * only copy the information from the master page table,
+        * nothing more.
+        */
+       if (address & __UA_LIMIT) {
+               if (!user_mode(regs))
+                       no_context(regs, address);
+               else
+                       do_sigsegv(regs, write, address, si_code);
+               return;
+       }
+
+       /*
+        * If we're in an interrupt or have no user
+        * context, we must not take the fault.
+        */
+       if (faulthandler_disabled() || !mm) {
+               do_sigsegv(regs, write, address, si_code);
+               return;
+       }
+
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+retry:
+       mmap_read_lock(mm);
+       vma = find_vma(mm, address);
+       if (!vma)
+               goto bad_area;
+       if (vma->vm_start <= address)
+               goto good_area;
+       if (!(vma->vm_flags & VM_GROWSDOWN))
+               goto bad_area;
+       if (!expand_stack(vma, address))
+               goto good_area;
+/*
+ * Something tried to access memory that isn't in our memory map.
+ * Fix it, but check if it's kernel or user first.
+ */
+bad_area:
+       mmap_read_unlock(mm);
+       do_sigsegv(regs, write, address, si_code);
+       return;
+
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it.
+ */
+good_area:
+       si_code = SEGV_ACCERR;
+
+       if (write) {
+               flags |= FAULT_FLAG_WRITE;
+               if (!(vma->vm_flags & VM_WRITE))
+                       goto bad_area;
+       } else {
+               if (!(vma->vm_flags & VM_READ) && address != exception_era(regs))
+                       goto bad_area;
+               if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs))
+                       goto bad_area;
+       }
+
+       /*
+        * If for any reason at all we couldn't handle the fault,
+        * make sure we exit gracefully rather than endlessly redo
+        * the fault.
+        */
+       fault = handle_mm_fault(vma, address, flags, regs);
+
+       if (fault_signal_pending(fault, regs)) {
+               if (!user_mode(regs))
+                       no_context(regs, address);
+               return;
+       }
+
+       if (unlikely(fault & VM_FAULT_RETRY)) {
+               flags |= FAULT_FLAG_TRIED;
+
+               /*
+                * No need to mmap_read_unlock(mm) as we would
+                * have already released it in __lock_page_or_retry
+                * in mm/filemap.c.
+                */
+               goto retry;
+       }
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               mmap_read_unlock(mm);
+               if (fault & VM_FAULT_OOM) {
+                       do_out_of_memory(regs, address);
+                       return;
+               } else if (fault & VM_FAULT_SIGSEGV) {
+                       do_sigsegv(regs, write, address, si_code);
+                       return;
+               } else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
+                       do_sigbus(regs, write, address, si_code);
+                       return;
+               }
+               BUG();
+       }
+
+       mmap_read_unlock(mm);
+}
+
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+                       unsigned long write, unsigned long address)
+{
+       irqentry_state_t state = irqentry_enter(regs);
+
+       /* Enable interrupts if they were enabled in the parent context */
+       if (likely(regs->csr_prmd & CSR_PRMD_PIE))
+               local_irq_enable();
+
+       __do_page_fault(regs, write, address);
+
+       local_irq_disable();
+
+       irqentry_exit(regs, state);
+}
diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c
new file mode 100644 (file)
index 0000000..ba13811
--- /dev/null
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/err.h>
+#include <linux/sysctl.h>
+#include <asm/mman.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+                     unsigned long addr, unsigned long sz)
+{
+       pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
+       pte_t *pte = NULL;
+
+       pgd = pgd_offset(mm, addr);
+       p4d = p4d_alloc(mm, pgd, addr);
+       pud = pud_alloc(mm, p4d, addr);
+       if (pud)
+               pte = (pte_t *)pmd_alloc(mm, pud, addr);
+
+       return pte;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+                      unsigned long sz)
+{
+       pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
+       pmd_t *pmd = NULL;
+
+       pgd = pgd_offset(mm, addr);
+       if (pgd_present(*pgd)) {
+               p4d = p4d_offset(pgd, addr);
+               if (p4d_present(*p4d)) {
+                       pud = pud_offset(p4d, addr);
+                       if (pud_present(*pud))
+                               pmd = pmd_offset(pud, addr);
+               }
+       }
+       return (pte_t *) pmd;
+}
+
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
+
+int pmd_huge(pmd_t pmd)
+{
+       return (pmd_val(pmd) & _PAGE_HUGE) != 0;
+}
+
+int pud_huge(pud_t pud)
+{
+       return (pud_val(pud) & _PAGE_HUGE) != 0;
+}
+
+uint64_t pmd_to_entrylo(unsigned long pmd_val)
+{
+       uint64_t val;
+       /* PMD as PTE. Must be huge page */
+       if (!pmd_huge(__pmd(pmd_val)))
+               panic("%s", __func__);
+
+       val = pmd_val ^ _PAGE_HUGE;
+       val |= ((val & _PAGE_HGLOBAL) >>
+               (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
+
+       return val;
+}
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
new file mode 100644 (file)
index 0000000..7094a68
--- /dev/null
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/pagemap.h>
+#include <linux/memblock.h>
+#include <linux/memremap.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
+#include <linux/proc_fs.h>
+#include <linux/pfn.h>
+#include <linux/hardirq.h>
+#include <linux/gfp.h>
+#include <linux/initrd.h>
+#include <linux/mmzone.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/dma.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+
+/*
+ * We have up to 8 empty zeroed pages so we can map one of the right colour
+ * when needed. Since the page is never written to after initialization, we
+ * don't have to care about aliases on other CPUs.
+ */
+unsigned long empty_zero_page, zero_page_mask;
+EXPORT_SYMBOL_GPL(empty_zero_page);
+EXPORT_SYMBOL(zero_page_mask);
+
+void setup_zero_pages(void)
+{
+       unsigned int order, i;
+       struct page *page;
+
+       order = 0;
+
+       empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+       if (!empty_zero_page)
+               panic("Oh boy, that early out of memory?");
+
+       page = virt_to_page((void *)empty_zero_page);
+       split_page(page, order);
+       for (i = 0; i < (1 << order); i++, page++)
+               mark_page_reserved(page);
+
+       zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+       unsigned long vaddr, struct vm_area_struct *vma)
+{
+       void *vfrom, *vto;
+
+       vto = kmap_atomic(to);
+       vfrom = kmap_atomic(from);
+       copy_page(vto, vfrom);
+       kunmap_atomic(vfrom);
+       kunmap_atomic(vto);
+       /* Make sure this page is cleared on other CPUs too before using it */
+       smp_wmb();
+}
+
+int __ref page_is_ram(unsigned long pfn)
+{
+       unsigned long addr = PFN_PHYS(pfn);
+
+       return memblock_is_memory(addr) && !memblock_is_reserved(addr);
+}
+
+#ifndef CONFIG_NUMA
+void __init paging_init(void)
+{
+       unsigned long max_zone_pfns[MAX_NR_ZONES];
+
+#ifdef CONFIG_ZONE_DMA
+       max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+#endif
+#ifdef CONFIG_ZONE_DMA32
+       max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+       max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+
+       free_area_init(max_zone_pfns);
+}
+
+void __init mem_init(void)
+{
+       max_mapnr = max_low_pfn;
+       high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+
+       memblock_free_all();
+       setup_zero_pages();     /* Setup zeroed pages.  */
+}
+#endif /* !CONFIG_NUMA */
+
+void __ref free_initmem(void)
+{
+       free_initmem_default(POISON_FREE_INITMEM);
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
+{
+       unsigned long start_pfn = start >> PAGE_SHIFT;
+       unsigned long nr_pages = size >> PAGE_SHIFT;
+       int ret;
+
+       ret = __add_pages(nid, start_pfn, nr_pages, params);
+
+       if (ret)
+               pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n",
+                               __func__,  ret);
+
+       return ret;
+}
+
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
+{
+       int nid;
+
+       nid = pa_to_nid(start);
+       return nid;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+{
+       unsigned long start_pfn = start >> PAGE_SHIFT;
+       unsigned long nr_pages = size >> PAGE_SHIFT;
+       struct page *page = pfn_to_page(start_pfn);
+
+       /* With altmap the first mapped page is offset from @start */
+       if (altmap)
+               page += vmem_altmap_offset(altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
+}
+#endif
+#endif
+
+/*
+ * Align swapper_pg_dir to 64K, which allows its address to be loaded
+ * with a single LUI instruction in the TLB handlers.  If we used
+ * __aligned(64K), its size would get rounded up to the alignment
+ * size, and waste space.  So we place it in its own section and align
+ * it in the linker script.
+ */
+pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
+
+pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
+#ifndef __PAGETABLE_PUD_FOLDED
+pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
+#endif
+#ifndef __PAGETABLE_PMD_FOLDED
+pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+EXPORT_SYMBOL_GPL(invalid_pmd_table);
+#endif
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
+EXPORT_SYMBOL(invalid_pte_table);
diff --git a/arch/loongarch/mm/ioremap.c b/arch/loongarch/mm/ioremap.c
new file mode 100644 (file)
index 0000000..73b0980
--- /dev/null
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/io.h>
+
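+/*
+ * early_ioremap() simply returns the cached direct-map alias of the
+ * physical address, so early_iounmap() has nothing to undo.
+ */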
+void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size)
+{
+       return ((void __iomem *)TO_CACHE(phys_addr));
+}
+
+void __init early_iounmap(void __iomem *addr, unsigned long size)
+{
+
+}
+
+void *early_memremap_ro(resource_size_t phys_addr, unsigned long size)
+{
+       return early_memremap(phys_addr, size);
+}
+
+void *early_memremap_prot(resource_size_t phys_addr, unsigned long size,
+                   unsigned long prot_val)
+{
+       return early_memremap(phys_addr, size);
+}
diff --git a/arch/loongarch/mm/maccess.c b/arch/loongarch/mm/maccess.c
new file mode 100644 (file)
index 0000000..5817384
--- /dev/null
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+{
+       /* highest bit set means kernel space */
+       return (unsigned long)unsafe_src >> (BITS_PER_LONG - 1);
+}
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
new file mode 100644 (file)
index 0000000..52e40f0
--- /dev/null
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/compiler.h>
+#include <linux/elf-randomize.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/export.h>
+#include <linux/personality.h>
+#include <linux/random.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
+
+unsigned long shm_align_mask = PAGE_SIZE - 1;  /* Sane caches */
+EXPORT_SYMBOL(shm_align_mask);
+
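+/*
+ * Round addr up to a shm_align_mask boundary, then add the cache-colour
+ * offset implied by pgoff, so that shared mappings line up in the caches.
+ */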
+#define COLOUR_ALIGN(addr, pgoff)                              \
+       ((((addr) + shm_align_mask) & ~shm_align_mask) +        \
+        (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+
+enum mmap_allocation_direction {UP, DOWN};
+
+static unsigned long arch_get_unmapped_area_common(struct file *filp,
+       unsigned long addr0, unsigned long len, unsigned long pgoff,
+       unsigned long flags, enum mmap_allocation_direction dir)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long addr = addr0;
+       int do_color_align;
+       struct vm_unmapped_area_info info;
+
+       if (unlikely(len > TASK_SIZE))
+               return -ENOMEM;
+
+       if (flags & MAP_FIXED) {
+               /* Even MAP_FIXED mappings must reside within TASK_SIZE */
+               if (TASK_SIZE - len < addr)
+                       return -EINVAL;
+
+               /*
+                * We do not accept a shared mapping if it would violate
+                * cache aliasing constraints.
+                */
+               if ((flags & MAP_SHARED) &&
+                   ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+                       return -EINVAL;
+               return addr;
+       }
+
+       do_color_align = 0;
+       if (filp || (flags & MAP_SHARED))
+               do_color_align = 1;
+
+       /* requesting a specific address */
+       if (addr) {
+               if (do_color_align)
+                       addr = COLOUR_ALIGN(addr, pgoff);
+               else
+                       addr = PAGE_ALIGN(addr);
+
+               vma = find_vma(mm, addr);
+               if (TASK_SIZE - len >= addr &&
+                   (!vma || addr + len <= vm_start_gap(vma)))
+                       return addr;
+       }
+
+       info.length = len;
+       info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
+
+       if (dir == DOWN) {
+               info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+               info.low_limit = PAGE_SIZE;
+               info.high_limit = mm->mmap_base;
+               addr = vm_unmapped_area(&info);
+
+               if (!(addr & ~PAGE_MASK))
+                       return addr;
+
+               /*
+                * A failed mmap() very likely causes application failure,
+                * so fall back to the bottom-up function here. This scenario
+                * can happen with large stack limits and large mmap()
+                * allocations.
+                */
+       }
+
+       info.flags = 0;
+       info.low_limit = mm->mmap_base;
+       info.high_limit = TASK_SIZE;
+       return vm_unmapped_area(&info);
+}
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
+       unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+       return arch_get_unmapped_area_common(filp,
+                       addr0, len, pgoff, flags, UP);
+}
+
+/*
+ * There is no need to export this but sched.h declares the function as
+ * extern so making it static here results in an error.
+ */
+unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+       unsigned long addr0, unsigned long len, unsigned long pgoff,
+       unsigned long flags)
+{
+       return arch_get_unmapped_area_common(filp,
+                       addr0, len, pgoff, flags, DOWN);
+}
+
+int __virt_addr_valid(volatile void *kaddr)
+{
+       unsigned long vaddr = (unsigned long)kaddr;
+
+       if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
+               return 0;
+
+       return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
+}
+EXPORT_SYMBOL_GPL(__virt_addr_valid);
diff --git a/arch/loongarch/mm/page.S b/arch/loongarch/mm/page.S
new file mode 100644 (file)
index 0000000..ddc78ab
--- /dev/null
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/export.h>
+#include <asm/page.h>
+#include <asm/regdef.h>
+
+       .align 5
+SYM_FUNC_START(clear_page)
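+       /* t0 = a0 + PAGE_SIZE; each iteration below zeroes 128 bytes */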
+       lu12i.w  t0, 1 << (PAGE_SHIFT - 12)
+       add.d    t0, t0, a0
+1:
+       st.d     zero, a0, 0
+       st.d     zero, a0, 8
+       st.d     zero, a0, 16
+       st.d     zero, a0, 24
+       st.d     zero, a0, 32
+       st.d     zero, a0, 40
+       st.d     zero, a0, 48
+       st.d     zero, a0, 56
+       addi.d   a0,   a0, 128
+       st.d     zero, a0, -64
+       st.d     zero, a0, -56
+       st.d     zero, a0, -48
+       st.d     zero, a0, -40
+       st.d     zero, a0, -32
+       st.d     zero, a0, -24
+       st.d     zero, a0, -16
+       st.d     zero, a0, -8
+       bne      t0,   a0, 1b
+
+       jirl     $r0, ra, 0
+SYM_FUNC_END(clear_page)
+EXPORT_SYMBOL(clear_page)
+
+       .align 5
+SYM_FUNC_START(copy_page)
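+       /*
+        * t8 = a0 + PAGE_SIZE. Each iteration copies 128 bytes, with loads
+        * of the second half interleaved between stores of the first half.
+        */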
+       lu12i.w  t8, 1 << (PAGE_SHIFT - 12)
+       add.d    t8, t8, a0
+1:
+       ld.d     t0, a1,  0
+       ld.d     t1, a1,  8
+       ld.d     t2, a1,  16
+       ld.d     t3, a1,  24
+       ld.d     t4, a1,  32
+       ld.d     t5, a1,  40
+       ld.d     t6, a1,  48
+       ld.d     t7, a1,  56
+
+       st.d     t0, a0,  0
+       st.d     t1, a0,  8
+       ld.d     t0, a1,  64
+       ld.d     t1, a1,  72
+       st.d     t2, a0,  16
+       st.d     t3, a0,  24
+       ld.d     t2, a1,  80
+       ld.d     t3, a1,  88
+       st.d     t4, a0,  32
+       st.d     t5, a0,  40
+       ld.d     t4, a1,  96
+       ld.d     t5, a1,  104
+       st.d     t6, a0,  48
+       st.d     t7, a0,  56
+       ld.d     t6, a1,  112
+       ld.d     t7, a1,  120
+       addi.d   a0, a0,  128
+       addi.d   a1, a1,  128
+
+       st.d     t0, a0,  -64
+       st.d     t1, a0,  -56
+       st.d     t2, a0,  -48
+       st.d     t3, a0,  -40
+       st.d     t4, a0,  -32
+       st.d     t5, a0,  -24
+       st.d     t6, a0,  -16
+       st.d     t7, a0,  -8
+
+       bne      t8, a0, 1b
+       jirl     $r0, ra, 0
+SYM_FUNC_END(copy_page)
+EXPORT_SYMBOL(copy_page)
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
new file mode 100644 (file)
index 0000000..0569647
--- /dev/null
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+       pgd_t *ret, *init;
+
+       ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+       if (ret) {
+               init = pgd_offset(&init_mm, 0UL);
+               pgd_init((unsigned long)ret);
+               memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+                      (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(pgd_alloc);
+
+void pgd_init(unsigned long page)
+{
+       unsigned long *p, *end;
+       unsigned long entry;
+
+#if !defined(__PAGETABLE_PUD_FOLDED)
+       entry = (unsigned long)invalid_pud_table;
+#elif !defined(__PAGETABLE_PMD_FOLDED)
+       entry = (unsigned long)invalid_pmd_table;
+#else
+       entry = (unsigned long)invalid_pte_table;
+#endif
+
+       p = (unsigned long *) page;
+       end = p + PTRS_PER_PGD;
+
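+       /* Unrolled: each pass fills eight consecutive entries */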
+       do {
+               p[0] = entry;
+               p[1] = entry;
+               p[2] = entry;
+               p[3] = entry;
+               p[4] = entry;
+               p += 8;
+               p[-3] = entry;
+               p[-2] = entry;
+               p[-1] = entry;
+       } while (p != end);
+}
+EXPORT_SYMBOL_GPL(pgd_init);
+
+#ifndef __PAGETABLE_PMD_FOLDED
+void pmd_init(unsigned long addr, unsigned long pagetable)
+{
+       unsigned long *p, *end;
+
+       p = (unsigned long *) addr;
+       end = p + PTRS_PER_PMD;
+
+       do {
+               p[0] = pagetable;
+               p[1] = pagetable;
+               p[2] = pagetable;
+               p[3] = pagetable;
+               p[4] = pagetable;
+               p += 8;
+               p[-3] = pagetable;
+               p[-2] = pagetable;
+               p[-1] = pagetable;
+       } while (p != end);
+}
+EXPORT_SYMBOL_GPL(pmd_init);
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+void pud_init(unsigned long addr, unsigned long pagetable)
+{
+       unsigned long *p, *end;
+
+       p = (unsigned long *)addr;
+       end = p + PTRS_PER_PUD;
+
+       do {
+               p[0] = pagetable;
+               p[1] = pagetable;
+               p[2] = pagetable;
+               p[3] = pagetable;
+               p[4] = pagetable;
+               p += 8;
+               p[-3] = pagetable;
+               p[-2] = pagetable;
+               p[-1] = pagetable;
+       } while (p != end);
+}
+#endif
+
+pmd_t mk_pmd(struct page *page, pgprot_t prot)
+{
+       pmd_t pmd;
+
+       pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);
+
+       return pmd;
+}
+
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+               pmd_t *pmdp, pmd_t pmd)
+{
+       *pmdp = pmd;
+       flush_tlb_all();
+}
+
+void __init pagetable_init(void)
+{
+       /* Initialize the entire pgd.  */
+       pgd_init((unsigned long)swapper_pg_dir);
+       pgd_init((unsigned long)invalid_pg_dir);
+#ifndef __PAGETABLE_PUD_FOLDED
+       pud_init((unsigned long)invalid_pud_table, (unsigned long)invalid_pmd_table);
+#endif
+#ifndef __PAGETABLE_PMD_FOLDED
+       pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
+#endif
+}
diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
new file mode 100644 (file)
index 0000000..e272f8a
--- /dev/null
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/export.h>
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+
+void local_flush_tlb_all(void)
+{
+       invtlb_all(INVTLB_CURRENT_ALL, 0, 0);
+}
+EXPORT_SYMBOL(local_flush_tlb_all);
+
+void local_flush_tlb_user(void)
+{
+       invtlb_all(INVTLB_CURRENT_GFALSE, 0, 0);
+}
+EXPORT_SYMBOL(local_flush_tlb_user);
+
+void local_flush_tlb_kernel(void)
+{
+       invtlb_all(INVTLB_CURRENT_GTRUE, 0, 0);
+}
+EXPORT_SYMBOL(local_flush_tlb_kernel);
+
+/*
+ * All entries common to a mm share an asid. To effectively flush
+ * these entries, we just bump the asid.
+ */
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+       int cpu;
+
+       preempt_disable();
+
+       cpu = smp_processor_id();
+
+       if (asid_valid(mm, cpu))
+               drop_mmu_context(mm, cpu);
+       else
+               cpumask_clear_cpu(cpu, mm_cpumask(mm));
+
+       preempt_enable();
+}
+
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+       unsigned long end)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       int cpu = smp_processor_id();
+
+       if (asid_valid(mm, cpu)) {
+               unsigned long size, flags;
+
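+               /*
+                * Flush the (even, odd) page pairs one by one when the range is
+                * small relative to the TLB size; otherwise just drop the ASID.
+                */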
+               local_irq_save(flags);
+               start = round_down(start, PAGE_SIZE << 1);
+               end = round_up(end, PAGE_SIZE << 1);
+               size = (end - start) >> (PAGE_SHIFT + 1);
+               if (size <= (current_cpu_data.tlbsizestlbsets ?
+                            current_cpu_data.tlbsize / 8 :
+                            current_cpu_data.tlbsize / 2)) {
+                       int asid = cpu_asid(cpu, mm);
+
+                       while (start < end) {
+                               invtlb(INVTLB_ADDR_GFALSE_AND_ASID, asid, start);
+                               start += (PAGE_SIZE << 1);
+                       }
+               } else {
+                       drop_mmu_context(mm, cpu);
+               }
+               local_irq_restore(flags);
+       } else {
+               cpumask_clear_cpu(cpu, mm_cpumask(mm));
+       }
+}
+
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       unsigned long size, flags;
+
+       local_irq_save(flags);
+       size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+       size = (size + 1) >> 1;
+       if (size <= (current_cpu_data.tlbsizestlbsets ?
+                    current_cpu_data.tlbsize / 8 :
+                    current_cpu_data.tlbsize / 2)) {
+
+               start &= (PAGE_MASK << 1);
+               end += ((PAGE_SIZE << 1) - 1);
+               end &= (PAGE_MASK << 1);
+
+               while (start < end) {
+                       invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, start);
+                       start += (PAGE_SIZE << 1);
+               }
+       } else {
+               local_flush_tlb_kernel();
+       }
+       local_irq_restore(flags);
+}
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+       int cpu = smp_processor_id();
+
+       if (asid_valid(vma->vm_mm, cpu)) {
+               int newpid;
+
+               newpid = cpu_asid(cpu, vma->vm_mm);
+               page &= (PAGE_MASK << 1);
+               invtlb(INVTLB_ADDR_GFALSE_AND_ASID, newpid, page);
+       } else {
+               cpumask_clear_cpu(cpu, mm_cpumask(vma->vm_mm));
+       }
+}
+
+/*
+ * This one is only used for pages with the global bit set so we don't care
+ * much about the ASID.
+ */
+void local_flush_tlb_one(unsigned long page)
+{
+       page &= (PAGE_MASK << 1);
+       invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, page);
+}
+
+static void __update_hugetlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+       int idx;
+       unsigned long lo;
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       address &= (PAGE_MASK << 1);
+       write_csr_entryhi(address);
+       tlb_probe();
+       idx = read_csr_tlbidx();
+       write_csr_pagesize(PS_HUGE_SIZE);
+       lo = pmd_to_entrylo(pte_val(*ptep));
+       write_csr_entrylo0(lo);
+       write_csr_entrylo1(lo + (HPAGE_SIZE >> 1));
+
+       if (idx < 0)
+               tlb_write_random();
+       else
+               tlb_write_indexed();
+       write_csr_pagesize(PS_DEFAULT_SIZE);
+
+       local_irq_restore(flags);
+#endif
+}
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+       int idx;
+       unsigned long flags;
+
+       /*
+        * Handle the debugger faulting in for the debuggee.
+        */
+       if (current->active_mm != vma->vm_mm)
+               return;
+
+       if (pte_val(*ptep) & _PAGE_HUGE) {
+               __update_hugetlb(vma, address, ptep);
+               return;
+       }
+
+       local_irq_save(flags);
+
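+       /* EntryLo0/1 are written as a pair: align ptep down to the even PTE */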
+       if ((unsigned long)ptep & sizeof(pte_t))
+               ptep--;
+
+       address &= (PAGE_MASK << 1);
+       write_csr_entryhi(address);
+       tlb_probe();
+       idx = read_csr_tlbidx();
+       write_csr_pagesize(PS_DEFAULT_SIZE);
+       write_csr_entrylo0(pte_val(*ptep++));
+       write_csr_entrylo1(pte_val(*ptep));
+       if (idx < 0)
+               tlb_write_random();
+       else
+               tlb_write_indexed();
+
+       local_irq_restore(flags);
+}
+
+static void setup_ptwalker(void)
+{
+       unsigned long pwctl0, pwctl1;
+       unsigned long pgd_i = 0, pgd_w = 0;
+       unsigned long pud_i = 0, pud_w = 0;
+       unsigned long pmd_i = 0, pmd_w = 0;
+       unsigned long pte_i = 0, pte_w = 0;
+
+       pgd_i = PGDIR_SHIFT;
+       pgd_w = PAGE_SHIFT - 3;
+#if CONFIG_PGTABLE_LEVELS > 3
+       pud_i = PUD_SHIFT;
+       pud_w = PAGE_SHIFT - 3;
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+       pmd_i = PMD_SHIFT;
+       pmd_w = PAGE_SHIFT - 3;
+#endif
+       pte_i = PAGE_SHIFT;
+       pte_w = PAGE_SHIFT - 3;
+
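+       /*
+        * Pack the per-level (index shift, table width) fields:
+        * PWCTL0 covers the PTE/PMD/PUD levels, PWCTL1 the PGD level.
+        */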
+       pwctl0 = pte_i | pte_w << 5 | pmd_i << 10 | pmd_w << 15 | pud_i << 20 | pud_w << 25;
+       pwctl1 = pgd_i | pgd_w << 6;
+
+       csr_write64(pwctl0, LOONGARCH_CSR_PWCTL0);
+       csr_write64(pwctl1, LOONGARCH_CSR_PWCTL1);
+       csr_write64((long)swapper_pg_dir, LOONGARCH_CSR_PGDH);
+       csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
+       csr_write64((long)smp_processor_id(), LOONGARCH_CSR_TMID);
+}
+
+static void output_pgtable_bits_defines(void)
+{
+#define pr_define(fmt, ...)                                    \
+       pr_debug("#define " fmt, ##__VA_ARGS__)
+
+       pr_debug("#include <asm/asm.h>\n");
+       pr_debug("#include <asm/regdef.h>\n");
+       pr_debug("\n");
+
+       pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
+       pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
+       pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
+       pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
+       pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
+       pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
+       pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
+       pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
+       pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
+       pr_debug("\n");
+}
+
+#ifdef CONFIG_NUMA
+static unsigned long pcpu_handlers[NR_CPUS];
+#endif
+extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
+
+void setup_tlb_handler(int cpu)
+{
+       setup_ptwalker();
+       output_pgtable_bits_defines();
+
+       /* The TLB handlers are set up only once, by the boot CPU */
+       if (cpu == 0) {
+               memcpy((void *)tlbrentry, handle_tlb_refill, 0x80);
+               local_flush_icache_range(tlbrentry, tlbrentry + 0x80);
+               set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load, VECSIZE);
+               set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load, VECSIZE);
+               set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store, VECSIZE);
+               set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify, VECSIZE);
+               set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
+               set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
+               set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
+       }
+#ifdef CONFIG_NUMA
+       else {
+               void *addr;
+               struct page *page;
+               const int vec_sz = sizeof(exception_handlers);
+
+               if (pcpu_handlers[cpu])
+                       return;
+
+               page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, get_order(vec_sz));
+               if (!page)
+                       return;
+
+               addr = page_address(page);
+               pcpu_handlers[cpu] = virt_to_phys(addr);
+               memcpy((void *)addr, (void *)eentry, vec_sz);
+               local_flush_icache_range((unsigned long)addr, (unsigned long)addr + vec_sz);
+               csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_TLBRENTRY);
+               csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
+       }
+#endif
+}
+
+void tlb_init(int cpu)
+{
+       write_csr_pagesize(PS_DEFAULT_SIZE);
+       write_csr_stlbpgsize(PS_DEFAULT_SIZE);
+       write_csr_tlbrefill_pagesize(PS_DEFAULT_SIZE);
+       setup_tlb_handler(cpu);
+       local_flush_tlb_all();
+}
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
new file mode 100644 (file)
index 0000000..7eee402
--- /dev/null
@@ -0,0 +1,546 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <asm/asm.h>
+#include <asm/export.h>
+#include <asm/loongarch.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+       .macro tlb_do_page_fault, write
+       SYM_FUNC_START(tlb_do_page_fault_\write)
+       SAVE_ALL
+       csrrd   a2, LOONGARCH_CSR_BADV
+       move    a0, sp
+       REG_S   a2, sp, PT_BVADDR
+       li.w    a1, \write
+       la.abs  t0, do_page_fault
+       jirl    ra, t0, 0
+       RESTORE_ALL_AND_RET
+       SYM_FUNC_END(tlb_do_page_fault_\write)
+       .endm
+
+       tlb_do_page_fault 0
+       tlb_do_page_fault 1
+
+SYM_FUNC_START(handle_tlb_protect)
+       BACKUP_T0T1
+       SAVE_ALL
+       move    a0, sp
+       move    a1, zero
+       csrrd   a2, LOONGARCH_CSR_BADV
+       REG_S   a2, sp, PT_BVADDR
+       la.abs  t0, do_page_fault
+       jirl    ra, t0, 0
+       RESTORE_ALL_AND_RET
+SYM_FUNC_END(handle_tlb_protect)
+
+SYM_FUNC_START(handle_tlb_load)
+       csrwr   t0, EXCEPTION_KS0
+       csrwr   t1, EXCEPTION_KS1
+       csrwr   ra, EXCEPTION_KS2
+
+       /*
+        * The vmalloc handling is not in the hotpath.
+        */
+       csrrd   t0, LOONGARCH_CSR_BADV
+       blt     t0, $r0, vmalloc_load
+       csrrd   t1, LOONGARCH_CSR_PGDL
+
+vmalloc_done_load:
+       /* Get PGD offset in bytes */
+       srli.d  t0, t0, PGDIR_SHIFT
+       andi    t0, t0, (PTRS_PER_PGD - 1)
+       slli.d  t0, t0, 3
+       add.d   t1, t1, t0
+#if CONFIG_PGTABLE_LEVELS > 3
+       csrrd   t0, LOONGARCH_CSR_BADV
+       ld.d    t1, t1, 0
+       srli.d  t0, t0, PUD_SHIFT
+       andi    t0, t0, (PTRS_PER_PUD - 1)
+       slli.d  t0, t0, 3
+       add.d   t1, t1, t0
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+       csrrd   t0, LOONGARCH_CSR_BADV
+       ld.d    t1, t1, 0
+       srli.d  t0, t0, PMD_SHIFT
+       andi    t0, t0, (PTRS_PER_PMD - 1)
+       slli.d  t0, t0, 3
+       add.d   t1, t1, t0
+#endif
+       ld.d    ra, t1, 0
+
+       /*
+        * For huge tlb entries, pmde doesn't contain an address but
+        * instead contains the tlb pte. Check the PAGE_HUGE bit and
+        * see if we need to jump to huge tlb processing.
+        */
+       andi    t0, ra, _PAGE_HUGE
+       bne     t0, $r0, tlb_huge_update_load
+
+       csrrd   t0, LOONGARCH_CSR_BADV
+       srli.d  t0, t0, (PAGE_SHIFT + PTE_ORDER)
+       andi    t0, t0, (PTRS_PER_PTE - 1)
+       slli.d  t0, t0, _PTE_T_LOG2
+       add.d   t1, ra, t0
+
+#ifdef CONFIG_SMP
+smp_pgtable_change_load:
+#endif
+#ifdef CONFIG_SMP
+       ll.d    t0, t1, 0
+#else
+       ld.d    t0, t1, 0
+#endif
+       tlbsrch
+
+       srli.d  ra, t0, _PAGE_PRESENT_SHIFT
+       andi    ra, ra, 1
+       beq     ra, $r0, nopage_tlb_load
+
+       ori     t0, t0, _PAGE_VALID
+#ifdef CONFIG_SMP
+       sc.d    t0, t1, 0
+       beq     t0, $r0, smp_pgtable_change_load
+#else
+       st.d    t0, t1, 0
+#endif
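+       /* Clear bit 3 of the PTE address to load the even/odd PTE pair below */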
+       ori     t1, t1, 8
+       xori    t1, t1, 8
+       ld.d    t0, t1, 0
+       ld.d    t1, t1, 8
+       csrwr   t0, LOONGARCH_CSR_TLBELO0
+       csrwr   t1, LOONGARCH_CSR_TLBELO1
+       tlbwr
+leave_load:
+       csrrd   t0, EXCEPTION_KS0
+       csrrd   t1, EXCEPTION_KS1
+       csrrd   ra, EXCEPTION_KS2
+       ertn
+#ifdef CONFIG_64BIT
+vmalloc_load:
+       la.abs  t1, swapper_pg_dir
+       b       vmalloc_done_load
+#endif
+
+       /*
+        * This is the entry point of the huge-page path, taken when
+        * the page-table walk above finds _PAGE_HUGE set.
+        */
+tlb_huge_update_load:
+#ifdef CONFIG_SMP
+       ll.d    t0, t1, 0
+#else
+       ld.d    t0, t1, 0
+#endif
+       srli.d  ra, t0, _PAGE_PRESENT_SHIFT
+       andi    ra, ra, 1
+       beq     ra, $r0, nopage_tlb_load
+       tlbsrch
+
+       ori     t0, t0, _PAGE_VALID
+#ifdef CONFIG_SMP
+       sc.d    t0, t1, 0
+       beq     t0, $r0, tlb_huge_update_load
+       ld.d    t0, t1, 0
+#else
+       st.d    t0, t1, 0
+#endif
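+       /*
+        * Invalidate the stale entry found by tlbsrch: write it back
+        * with EHINV set, then clear EHINV again.
+        */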
+       addu16i.d       t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
+       addi.d  ra, t1, 0
+       csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
+       tlbwr
+
+       csrxchg $r0, t1, LOONGARCH_CSR_TLBIDX
+
+       /*
+        * A huge PTE describes an area the size of the
+        * configured huge page size. This is twice the size
+        * of the large TLB entry we intend to use.
+        * A TLB entry half the size of the configured
+        * huge page size is configured into entrylo0
+        * and entrylo1 to cover the contiguous huge PTE
+        * address space.
+        */
+       /* Huge page: Move Global bit */
+       xori    t0, t0, _PAGE_HUGE
+       lu12i.w t1, _PAGE_HGLOBAL >> 12
+       and     t1, t0, t1
+       srli.d  t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+       or      t0, t0, t1
+
+       addi.d  ra, t0, 0
+       csrwr   t0, LOONGARCH_CSR_TLBELO0
+       addi.d  t0, ra, 0
+
+       /* Convert to entrylo1 */
+       addi.d  t1, $r0, 1
+       slli.d  t1, t1, (HPAGE_SHIFT - 1)
+       add.d   t0, t0, t1
+       csrwr   t0, LOONGARCH_CSR_TLBELO1
+
+       /* Set huge page tlb entry size */
+       addu16i.d       t0, $r0, (CSR_TLBIDX_PS >> 16)
+       addu16i.d       t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+       csrxchg         t1, t0, LOONGARCH_CSR_TLBIDX
+
+       tlbfill
+
+       addu16i.d       t0, $r0, (CSR_TLBIDX_PS >> 16)
+       addu16i.d       t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+       csrxchg         t1, t0, LOONGARCH_CSR_TLBIDX
+
+nopage_tlb_load:
+       dbar    0
+       csrrd   ra, EXCEPTION_KS2
+       la.abs  t0, tlb_do_page_fault_0
+       jirl    $r0, t0, 0
+SYM_FUNC_END(handle_tlb_load)
+
+SYM_FUNC_START(handle_tlb_store)
+       csrwr   t0, EXCEPTION_KS0
+       csrwr   t1, EXCEPTION_KS1
+       csrwr   ra, EXCEPTION_KS2
+
+       /*
+        * The vmalloc handling is not in the hotpath.
+        */
+       csrrd   t0, LOONGARCH_CSR_BADV
+       blt     t0, $r0, vmalloc_store
+       csrrd   t1, LOONGARCH_CSR_PGDL
+
+vmalloc_done_store:
+       /* Get PGD offset in bytes */
+       srli.d  t0, t0, PGDIR_SHIFT
+       andi    t0, t0, (PTRS_PER_PGD - 1)
+       slli.d  t0, t0, 3
+       add.d   t1, t1, t0
+
+#if CONFIG_PGTABLE_LEVELS > 3
+       csrrd   t0, LOONGARCH_CSR_BADV
+       ld.d    t1, t1, 0
+       srli.d  t0, t0, PUD_SHIFT
+       andi    t0, t0, (PTRS_PER_PUD - 1)
+       slli.d  t0, t0, 3
+       add.d   t1, t1, t0
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+       csrrd   t0, LOONGARCH_CSR_BADV
+       ld.d    t1, t1, 0
+       srli.d  t0, t0, PMD_SHIFT
+       andi    t0, t0, (PTRS_PER_PMD - 1)
+       slli.d  t0, t0, 3
+       add.d   t1, t1, t0
+#endif
+       ld.d    ra, t1, 0
+
+       /*
+        * For huge tlb entries, pmde doesn't contain an address but
+        * instead contains the tlb pte. Check the PAGE_HUGE bit and
+        * see if we need to jump to huge tlb processing.
+        */
+       andi    t0, ra, _PAGE_HUGE
+       bne     t0, $r0, tlb_huge_update_store
+
+       csrrd   t0, LOONGARCH_CSR_BADV
+       srli.d  t0, t0, (PAGE_SHIFT + PTE_ORDER)
+       andi    t0, t0, (PTRS_PER_PTE - 1)
+       slli.d  t0, t0, _PTE_T_LOG2
+       add.d   t1, ra, t0
+
+#ifdef CONFIG_SMP
+smp_pgtable_change_store:
+#endif
+#ifdef CONFIG_SMP
+       ll.d    t0, t1, 0
+#else
+       ld.d    t0, t1, 0
+#endif
+       tlbsrch
+
+       srli.d  ra, t0, _PAGE_PRESENT_SHIFT
+       andi    ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
+       xori    ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
+       bne     ra, $r0, nopage_tlb_store
+
+       ori     t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+#ifdef CONFIG_SMP
+       sc.d    t0, t1, 0
+       beq     t0, $r0, smp_pgtable_change_store
+#else
+       st.d    t0, t1, 0
+#endif
+
+       ori     t1, t1, 8
+       xori    t1, t1, 8
+       ld.d    t0, t1, 0
+       ld.d    t1, t1, 8
+       csrwr   t0, LOONGARCH_CSR_TLBELO0
+       csrwr   t1, LOONGARCH_CSR_TLBELO1
+       tlbwr
+leave_store:
+       csrrd   t0, EXCEPTION_KS0
+       csrrd   t1, EXCEPTION_KS1
+       csrrd   ra, EXCEPTION_KS2
+       ertn
+#ifdef CONFIG_64BIT
+vmalloc_store:
+       la.abs  t1, swapper_pg_dir
+       b       vmalloc_done_store
+#endif
+
+       /*
+        * This is the entry point of the huge-page path, taken when
+        * the page-table walk above finds _PAGE_HUGE set.
+        */
+tlb_huge_update_store:
+#ifdef CONFIG_SMP
+       ll.d    t0, t1, 0
+#else
+       ld.d    t0, t1, 0
+#endif
+       srli.d  ra, t0, _PAGE_PRESENT_SHIFT
+       andi    ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
+       xori    ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
+       bne     ra, $r0, nopage_tlb_store
+
+       tlbsrch
+       ori     t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+
+#ifdef CONFIG_SMP
+       sc.d    t0, t1, 0
+       beq     t0, $r0, tlb_huge_update_store
+       ld.d    t0, t1, 0
+#else
+       st.d    t0, t1, 0
+#endif
+       addu16i.d       t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
+       addi.d  ra, t1, 0
+       csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
+       tlbwr
+
+       csrxchg $r0, t1, LOONGARCH_CSR_TLBIDX
+       /*
+        * A huge PTE describes an area the size of the
+        * configured huge page size. This is twice the size
+        * of the large TLB entry we intend to use.
+        * A TLB entry half the size of the configured
+        * huge page size is configured into entrylo0
+        * and entrylo1 to cover the contiguous huge PTE
+        * address space.
+        */
+       /* Huge page: Move Global bit */
+       xori    t0, t0, _PAGE_HUGE
+       lu12i.w t1, _PAGE_HGLOBAL >> 12
+       and     t1, t0, t1
+       srli.d  t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+       or      t0, t0, t1
+
+       addi.d  ra, t0, 0
+       csrwr   t0, LOONGARCH_CSR_TLBELO0
+       addi.d  t0, ra, 0
+
+       /* Convert to entrylo1 */
+       addi.d  t1, $r0, 1
+       slli.d  t1, t1, (HPAGE_SHIFT - 1)
+       add.d   t0, t0, t1
+       csrwr   t0, LOONGARCH_CSR_TLBELO1
+
+       /* Set huge page tlb entry size */
+       addu16i.d       t0, $r0, (CSR_TLBIDX_PS >> 16)
+       addu16i.d       t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+       csrxchg         t1, t0, LOONGARCH_CSR_TLBIDX
+
+       tlbfill
+
+       /* Reset default page size */
+       addu16i.d       t0, $r0, (CSR_TLBIDX_PS >> 16)
+       addu16i.d       t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+       csrxchg         t1, t0, LOONGARCH_CSR_TLBIDX
+
+nopage_tlb_store:
+       dbar    0
+       csrrd   ra, EXCEPTION_KS2
+       la.abs  t0, tlb_do_page_fault_1
+       jirl    $r0, t0, 0
+SYM_FUNC_END(handle_tlb_store)
+
+SYM_FUNC_START(handle_tlb_modify)
+       csrwr   t0, EXCEPTION_KS0
+       csrwr   t1, EXCEPTION_KS1
+       csrwr   ra, EXCEPTION_KS2
+
+       /*
+        * The vmalloc handling is not in the hotpath.
+        */
+       csrrd   t0, LOONGARCH_CSR_BADV
+       blt     t0, $r0, vmalloc_modify
+       csrrd   t1, LOONGARCH_CSR_PGDL
+
+vmalloc_done_modify:
+       /* Get PGD offset in bytes */
+       srli.d  t0, t0, PGDIR_SHIFT
+       andi    t0, t0, (PTRS_PER_PGD - 1)
+       slli.d  t0, t0, 3
+       add.d   t1, t1, t0
+#if CONFIG_PGTABLE_LEVELS > 3
+       csrrd   t0, LOONGARCH_CSR_BADV
+       ld.d    t1, t1, 0
+       srli.d  t0, t0, PUD_SHIFT
+       andi    t0, t0, (PTRS_PER_PUD - 1)
+       slli.d  t0, t0, 3
+       add.d   t1, t1, t0
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+       csrrd   t0, LOONGARCH_CSR_BADV
+       ld.d    t1, t1, 0
+       srli.d  t0, t0, PMD_SHIFT
+       andi    t0, t0, (PTRS_PER_PMD - 1)
+       slli.d  t0, t0, 3
+       add.d   t1, t1, t0
+#endif
+       ld.d    ra, t1, 0
+
+       /*
+        * For huge tlb entries, pmde doesn't contain an address but
+        * instead contains the tlb pte. Check the PAGE_HUGE bit and
+        * see if we need to jump to huge tlb processing.
+        */
+       andi    t0, ra, _PAGE_HUGE
+       bne     t0, $r0, tlb_huge_update_modify
+
+       csrrd   t0, LOONGARCH_CSR_BADV
+       srli.d  t0, t0, (PAGE_SHIFT + PTE_ORDER)
+       andi    t0, t0, (PTRS_PER_PTE - 1)
+       slli.d  t0, t0, _PTE_T_LOG2
+       add.d   t1, ra, t0
+
+#ifdef CONFIG_SMP
+smp_pgtable_change_modify:
+#endif
+#ifdef CONFIG_SMP
+       ll.d    t0, t1, 0
+#else
+       ld.d    t0, t1, 0
+#endif
+       tlbsrch
+
+       srli.d  ra, t0, _PAGE_WRITE_SHIFT
+       andi    ra, ra, 1
+       beq     ra, $r0, nopage_tlb_modify
+
+       ori     t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+#ifdef CONFIG_SMP
+       sc.d    t0, t1, 0
+       beq     t0, $r0, smp_pgtable_change_modify
+#else
+       st.d    t0, t1, 0
+#endif
+       ori     t1, t1, 8
+       xori    t1, t1, 8
+       ld.d    t0, t1, 0
+       ld.d    t1, t1, 8
+       csrwr   t0, LOONGARCH_CSR_TLBELO0
+       csrwr   t1, LOONGARCH_CSR_TLBELO1
+       tlbwr
+leave_modify:
+       csrrd   t0, EXCEPTION_KS0
+       csrrd   t1, EXCEPTION_KS1
+       csrrd   ra, EXCEPTION_KS2
+       ertn
+#ifdef CONFIG_64BIT
+vmalloc_modify:
+       la.abs  t1, swapper_pg_dir
+       b       vmalloc_done_modify
+#endif
+
+       /*
+        * This is the entry point of the huge-page path, taken when
+        * the page-table walk above finds _PAGE_HUGE set.
+        */
+tlb_huge_update_modify:
+#ifdef CONFIG_SMP
+       ll.d    t0, t1, 0
+#else
+       ld.d    t0, t1, 0
+#endif
+
+       srli.d  ra, t0, _PAGE_WRITE_SHIFT
+       andi    ra, ra, 1
+       beq     ra, $r0, nopage_tlb_modify
+
+       tlbsrch
+       ori     t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+
+#ifdef CONFIG_SMP
+       sc.d    t0, t1, 0
+       beq     t0, $r0, tlb_huge_update_modify
+       ld.d    t0, t1, 0
+#else
+       st.d    t0, t1, 0
+#endif
+       /*
+        * A huge PTE describes an area the size of the
+        * configured huge page size. This is twice the size
+        * of the large TLB entry we intend to use.
+        * A TLB entry half the size of the configured
+        * huge page size is configured into entrylo0
+        * and entrylo1 to cover the contiguous huge PTE
+        * address space.
+        */
+       /* Huge page: Move Global bit */
+       xori    t0, t0, _PAGE_HUGE
+       lu12i.w t1, _PAGE_HGLOBAL >> 12
+       and     t1, t0, t1
+       srli.d  t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+       or      t0, t0, t1
+
+       addi.d  ra, t0, 0
+       csrwr   t0, LOONGARCH_CSR_TLBELO0
+       addi.d  t0, ra, 0
+
+       /* Convert to entrylo1 */
+       addi.d  t1, $r0, 1
+       slli.d  t1, t1, (HPAGE_SHIFT - 1)
+       add.d   t0, t0, t1
+       csrwr   t0, LOONGARCH_CSR_TLBELO1
+
+       /* Set huge page tlb entry size */
+       addu16i.d       t0, $r0, (CSR_TLBIDX_PS >> 16)
+       addu16i.d       t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+       csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+
+       tlbwr
+
+       /* Reset default page size */
+       addu16i.d       t0, $r0, (CSR_TLBIDX_PS >> 16)
+       addu16i.d       t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+       csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+
+nopage_tlb_modify:
+       dbar    0
+       csrrd   ra, EXCEPTION_KS2
+       la.abs  t0, tlb_do_page_fault_1
+       jirl    $r0, t0, 0
+SYM_FUNC_END(handle_tlb_modify)
+
+SYM_FUNC_START(handle_tlb_refill)
+       csrwr   t0, LOONGARCH_CSR_TLBRSAVE
+       csrrd   t0, LOONGARCH_CSR_PGD
+       lddir   t0, t0, 3
+#if CONFIG_PGTABLE_LEVELS > 3
+       lddir   t0, t0, 2
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+       lddir   t0, t0, 1
+#endif
+       ldpte   t0, 0
+       ldpte   t0, 1
+       tlbfill
+       csrrd   t0, LOONGARCH_CSR_TLBRSAVE
+       ertn
+SYM_FUNC_END(handle_tlb_refill)
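
The refill handler above is the hand-written fast path: it walks the page tables for the faulting address with lddir, fetches the even/odd PTE pair with ldpte, and installs both halves with tlbfill. As a rough orientation aid, here is a minimal C sketch of that walk; the geometry constants, names, and the use of plain pointers instead of physical addresses are assumptions for illustration, not the kernel's code.

#include <stdint.h>

#define PAGE_SHIFT_ASSUMED	12	/* assumed 4 KiB base pages */
#define BITS_PER_LEVEL		9	/* assumed 512 entries per table */
#define ENTRIES_PER_LEVEL	(1UL << BITS_PER_LEVEL)

/*
 * Sketch of the refill walk: each loop iteration corresponds to one
 * 'lddir' step, the final indexing to the 'ldpte 0'/'ldpte 1' pair.
 * Real tables hold physical addresses; plain pointers are used here
 * only to keep the illustration short.
 */
static void refill_walk(const uint64_t *pgd, uint64_t badv, int levels,
			uint64_t *even_pte, uint64_t *odd_pte)
{
	const uint64_t *table = pgd;
	int shift = PAGE_SHIFT_ASSUMED + BITS_PER_LEVEL * (levels - 1);

	/* Directory levels (PGD, then PUD/PMD as configured). */
	for (int l = 0; l < levels - 1; l++, shift -= BITS_PER_LEVEL)
		table = (const uint64_t *)(uintptr_t)
			table[(badv >> shift) & (ENTRIES_PER_LEVEL - 1)];

	/* Leaf level: round the index down to an even/odd PTE pair. */
	uint64_t idx = ((badv >> PAGE_SHIFT_ASSUMED) &
			(ENTRIES_PER_LEVEL - 1)) & ~1ULL;
	*even_pte = table[idx];		/* -> LOONGARCH_CSR_TLBELO0 */
	*odd_pte  = table[idx + 1];	/* -> LOONGARCH_CSR_TLBELO1 */
}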
diff --git a/arch/loongarch/pci/Makefile b/arch/loongarch/pci/Makefile
new file mode 100644 (file)
index 0000000..8101ef3
--- /dev/null
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the PCI specific kernel interface routines under Linux.
+#
+
+obj-y                          += pci.o
+obj-$(CONFIG_ACPI)             += acpi.o
diff --git a/arch/loongarch/vdso/.gitignore b/arch/loongarch/vdso/.gitignore
new file mode 100644 (file)
index 0000000..652e31d
--- /dev/null
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vdso.lds
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
new file mode 100644 (file)
index 0000000..6b6e167
--- /dev/null
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: GPL-2.0
+# Objects to go into the VDSO.
+
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the generic vDSO Makefile is included.
+ARCH_REL_TYPE_ABS := R_LARCH_32|R_LARCH_64|R_LARCH_MARK_LA|R_LARCH_JUMP_SLOT
+include $(srctree)/lib/vdso/Makefile
+
+obj-vdso-y := elf.o vgettimeofday.o sigreturn.o
+
+# Common compiler flags between ABIs.
+ccflags-vdso := \
+       $(filter -I%,$(KBUILD_CFLAGS)) \
+       $(filter -E%,$(KBUILD_CFLAGS)) \
+       $(filter -march=%,$(KBUILD_CFLAGS)) \
+       $(filter -m%-float,$(KBUILD_CFLAGS)) \
+       -D__VDSO__
+
+ifeq ($(cc-name),clang)
+ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
+endif
+
+cflags-vdso := $(ccflags-vdso) \
+       $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
+       -O2 -g -fno-strict-aliasing -fno-common -fno-builtin -G0 \
+       -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
+       $(call cc-option, -fno-asynchronous-unwind-tables) \
+       $(call cc-option, -fno-stack-protector)
+aflags-vdso := $(ccflags-vdso) \
+       -D__ASSEMBLY__ -Wa,-gdwarf-2
+
+ifneq ($(c-gettimeofday-y),)
+  CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+endif
+
+# VDSO linker flags.
+ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
+       $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \
+       --hash-style=sysv --build-id -T
+
+GCOV_PROFILE := n
+
+#
+# Shared build commands.
+#
+
+quiet_cmd_vdsold_and_vdso_check = LD      $@
+      cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)
+
+quiet_cmd_vdsoas_o_S = AS       $@
+      cmd_vdsoas_o_S = $(CC) $(a_flags) -c -o $@ $<
+
+# Generate VDSO offsets using helper script
+gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+quiet_cmd_vdsosym = VDSOSYM $@
+      cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
+
+include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
+       $(call if_changed,vdsosym)
+
+#
+# Build native VDSO.
+#
+
+native-abi := $(filter -mabi=%,$(KBUILD_CFLAGS))
+
+targets += $(obj-vdso-y)
+targets += vdso.lds vdso.so.dbg vdso.so
+
+obj-vdso := $(obj-vdso-y:%.o=$(obj)/%.o)
+
+$(obj-vdso): KBUILD_CFLAGS := $(cflags-vdso) $(native-abi)
+$(obj-vdso): KBUILD_AFLAGS := $(aflags-vdso) $(native-abi)
+
+$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) $(native-abi)
+
+$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+       $(call if_changed,vdsold_and_vdso_check)
+
+$(obj)/vdso.so: OBJCOPYFLAGS := -S
+$(obj)/vdso.so: $(obj)/vdso.so.dbg FORCE
+       $(call if_changed,objcopy)
+
+obj-y += vdso.o
+
+$(obj)/vdso.o : $(obj)/vdso.so
+
+# install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso.so: $(obj)/vdso.so.dbg
+       @mkdir -p $(MODLIB)/vdso
+       $(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/arch/loongarch/vdso/elf.S b/arch/loongarch/vdso/elf.S
new file mode 100644 (file)
index 0000000..9bb21b9
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/vdso/vdso.h>
+
+#include <linux/elfnote.h>
+#include <linux/version.h>
+
+ELFNOTE_START(Linux, 0, "a")
+       .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/loongarch/vdso/gen_vdso_offsets.sh b/arch/loongarch/vdso/gen_vdso_offsets.sh
new file mode 100755 (executable)
index 0000000..1bb4e12
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# Derived from RISC-V and ARM64:
+# Author: Will Deacon <will.deacon@arm.com>
+#
+# Match symbols in the DSO that look like VDSO_*; produce a header file
+# of constant offsets into the shared object.
+#
+
+LC_ALL=C sed -n -e 's/^00*/0/' -e \
+'s/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2\t0x\1/p'
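
For illustration (the address is invented), an nm line such as "0000000000000800 T VDSO_sigreturn" comes out of this pipeline as "#define vdso_offset_sigreturn 0x0800", giving the kernel a compile-time offset it can add to the vDSO base, for example when it builds signal frames.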
diff --git a/arch/loongarch/vdso/sigreturn.S b/arch/loongarch/vdso/sigreturn.S
new file mode 100644 (file)
index 0000000..9cb3c58
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/vdso/vdso.h>
+
+#include <linux/linkage.h>
+#include <uapi/asm/unistd.h>
+
+#include <asm/regdef.h>
+#include <asm/asm.h>
+
+       .section        .text
+       .cfi_sections   .debug_frame
+
+SYM_FUNC_START(__vdso_rt_sigreturn)
+
+       li.w    a7, __NR_rt_sigreturn
+       syscall 0
+
+SYM_FUNC_END(__vdso_rt_sigreturn)
diff --git a/arch/loongarch/vdso/vdso.S b/arch/loongarch/vdso/vdso.S
new file mode 100644 (file)
index 0000000..46789ba
--- /dev/null
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from RISC-V:
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+       __PAGE_ALIGNED_DATA
+
+       .globl vdso_start, vdso_end
+       .balign PAGE_SIZE
+vdso_start:
+       .incbin "arch/loongarch/vdso/vdso.so"
+       .balign PAGE_SIZE
+vdso_end:
+
+       .previous
diff --git a/arch/loongarch/vdso/vdso.lds.S b/arch/loongarch/vdso/vdso.lds.S
new file mode 100644 (file)
index 0000000..955f02d
--- /dev/null
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+OUTPUT_FORMAT("elf64-loongarch", "elf64-loongarch", "elf64-loongarch")
+
+OUTPUT_ARCH(loongarch)
+
+SECTIONS
+{
+       PROVIDE(_start = .);
+       . = SIZEOF_HEADERS;
+
+       .hash           : { *(.hash) }                  :text
+       .gnu.hash       : { *(.gnu.hash) }
+       .dynsym         : { *(.dynsym) }
+       .dynstr         : { *(.dynstr) }
+       .gnu.version    : { *(.gnu.version) }
+       .gnu.version_d  : { *(.gnu.version_d) }
+       .gnu.version_r  : { *(.gnu.version_r) }
+
+       .note           : { *(.note.*) }                :text :note
+
+       .text           : { *(.text*) }                 :text
+       PROVIDE (__etext = .);
+       PROVIDE (_etext = .);
+       PROVIDE (etext = .);
+
+       .eh_frame_hdr   : { *(.eh_frame_hdr) }          :text :eh_frame_hdr
+       .eh_frame       : { KEEP (*(.eh_frame)) }       :text
+
+       .dynamic        : { *(.dynamic) }               :text :dynamic
+
+       .rodata         : { *(.rodata*) }               :text
+
+       _end = .;
+       PROVIDE(end = .);
+
+       /DISCARD/       : {
+               *(.gnu.attributes)
+               *(.note.GNU-stack)
+               *(.data .data.* .gnu.linkonce.d.* .sdata*)
+               *(.bss .sbss .dynbss .dynsbss)
+       }
+}
+
+PHDRS
+{
+       text            PT_LOAD         FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+       dynamic         PT_DYNAMIC      FLAGS(4);               /* PF_R */
+       note            PT_NOTE         FLAGS(4);               /* PF_R */
+       eh_frame_hdr    PT_GNU_EH_FRAME;
+}
+
+VERSION
+{
+       LINUX_5.10 {
+       global:
+               __vdso_clock_getres;
+               __vdso_clock_gettime;
+               __vdso_gettimeofday;
+               __vdso_rt_sigreturn;
+       local: *;
+       };
+}
+
+/*
+ * Make the sigreturn code visible to the kernel.
+ */
+VDSO_sigreturn         = __vdso_rt_sigreturn;
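
The VERSION node exports exactly the four __vdso_* entry points under the LINUX_5.10 tag, and the blob assembled in vdso.S is what the kernel maps into each process. As a small usage sketch (plain userspace API, nothing LoongArch-specific assumed), the base of that mapping can be read from the auxiliary vector:

#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* The kernel advertises the vDSO load address via AT_SYSINFO_EHDR;
	 * the C library uses it to resolve __vdso_* symbols at startup. */
	unsigned long vdso_base = getauxval(AT_SYSINFO_EHDR);

	if (vdso_base)
		printf("vDSO ELF header mapped at %#lx\n", vdso_base);
	else
		printf("no vDSO reported in the auxiliary vector\n");
	return 0;
}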
diff --git a/arch/loongarch/vdso/vgettimeofday.c b/arch/loongarch/vdso/vgettimeofday.c
new file mode 100644 (file)
index 0000000..b1f4548
--- /dev/null
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * LoongArch userspace implementations of gettimeofday() and similar.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/types.h>
+
+int __vdso_clock_gettime(clockid_t clock,
+                        struct __kernel_timespec *ts)
+{
+       return __cvdso_clock_gettime(clock, ts);
+}
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+                       struct timezone *tz)
+{
+       return __cvdso_gettimeofday(tv, tz);
+}
+
+int __vdso_clock_getres(clockid_t clock_id,
+                       struct __kernel_timespec *res)
+{
+       return __cvdso_clock_getres(clock_id, res);
+}
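
These wrappers delegate to the generic __cvdso_* helpers pulled in from lib/vdso by the Makefile above; userspace normally reaches them indirectly, because the C library dispatches ordinary clock_gettime()/gettimeofday() calls to the __vdso_* symbols when a vDSO is present. A minimal caller looks like the sketch below (standard POSIX API only):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* With a working vDSO this typically completes without entering
	 * the kernel; otherwise libc falls back to the real syscall. */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("monotonic: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}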
index d1e93a3..d5c0b29 100644 (file)
@@ -56,16 +56,6 @@ config ATARI_ROM_ISA
          The only driver currently using this adapter is the EtherNEC
          driver for RTL8019AS based NE2000 compatible network cards.
 
-config GENERIC_ISA_DMA
-       def_bool ISA
-
 source "drivers/zorro/Kconfig"
 
 endif
-
-if COLDFIRE
-
-config ISA_DMA_API
-       def_bool !M5272
-
-endif
index 3d5da25..f3aa441 100644 (file)
@@ -37,7 +37,7 @@ endchoice
 if M68KCLASSIC
 
 config M68000
-       bool
+       def_bool y
        depends on !MMU
        select CPU_HAS_NO_BITFIELDS
        select CPU_HAS_NO_CAS
index 188a8f8..a104256 100644 (file)
@@ -352,6 +352,7 @@ comment "Machine Options"
 
 config UBOOT
        bool "Support for U-Boot command line parameters"
+       depends on COLDFIRE
        help
          If you say Y here kernel will try to collect command
          line parameters from the initial u-boot stack.
index a3e18d7..9419a6c 100644 (file)
@@ -15,7 +15,7 @@
 
 asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
 
-obj-$(CONFIG_COLDFIRE) += cache.o clk.o device.o dma.o entry.o vectors.o
+obj-$(CONFIG_COLDFIRE) += cache.o clk.o device.o entry.o vectors.o
 obj-$(CONFIG_M5206)    += m5206.o intc.o reset.o
 obj-$(CONFIG_M5206e)   += m5206.o intc.o reset.o
 obj-$(CONFIG_M520x)    += m520x.o intc-simr.o reset.o
diff --git a/arch/m68k/coldfire/dma.c b/arch/m68k/coldfire/dma.c
deleted file mode 100644 (file)
index c3279f7..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/***************************************************************************/
-
-/*
- *     dma.c -- Freescale ColdFire DMA support
- *
- *     Copyright (C) 2007, Greg Ungerer (gerg@snapgear.com)
- */
-
-/***************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/dma.h>
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfdma.h>
-
-/***************************************************************************/
-
-/*
- *      DMA channel base address table.
- */
-unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS] = {
-#ifdef MCFDMA_BASE0
-       MCFDMA_BASE0,
-#endif
-#ifdef MCFDMA_BASE1
-       MCFDMA_BASE1,
-#endif
-#ifdef MCFDMA_BASE2
-       MCFDMA_BASE2,
-#endif
-#ifdef MCFDMA_BASE3
-       MCFDMA_BASE3,
-#endif
-};
-EXPORT_SYMBOL(dma_base_addr);
-
-unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
-EXPORT_SYMBOL(dma_device_address);
-
-/***************************************************************************/
index cce2574..20c084e 100644 (file)
@@ -28,7 +28,7 @@
 unsigned char mcf_irq2imr[NR_IRQS];
 
 /*
- * Define the miniumun and maximum external interrupt numbers.
+ * Define the minimum and maximum external interrupt numbers.
  * This is also used as the "level" interrupt numbers.
  */
 #define        EIRQ1   25
index bd033e1..17af5f6 100644 (file)
@@ -532,7 +532,7 @@ int clock_pll(int fsys, int flags)
                writel(readl(MCF_SDRAMC_SDCR) | MCF_SDRAMC_SDCR_CKE,
                        MCF_SDRAMC_SDCR);
 
-       /* Errata - workaround for SDRAM opeartion after exiting LIMP mode */
+       /* Errata - workaround for SDRAM operation after exiting LIMP mode */
        writel(MCF_SDRAMC_REFRESH, MCF_SDRAMC_LIMP_FIX);
 
        /* wait for DQS logic to relock */
index 84eab0f..ceb5775 100644 (file)
@@ -31,7 +31,7 @@ static struct pci_bus *rootbus;
 static unsigned long iospace;
 
 /*
- * We need to be carefull probing on bus 0 (directly connected to host
+ * We need to be careful probing on bus 0 (directly connected to host
  * bridge). We should only access the well defined possible devices in
  * use, ignore aliases and the like.
  */
index 71b78ec..b19dc00 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/reboot.h>
 #include <linux/io.h>
 #include <asm/machdep.h>
 #include <asm/natfeat.h>
@@ -90,5 +91,5 @@ void __init nf_init(void)
        pr_info("NatFeats found (%s, %lu.%lu)\n", buf, version >> 16,
                version & 0xffff);
 
-       mach_power_off = nf_poweroff;
+       register_platform_power_off(nf_poweroff);
 }
index 2c92843..e4bd691 100644 (file)
@@ -240,12 +240,6 @@ static int hp300_hwclk(int op, struct rtc_time *t)
        return 0;
 }
 
-static unsigned int hp300_get_ss(void)
-{
-       return hp300_rtc_read(RTC_REG_SEC1) * 10 +
-               hp300_rtc_read(RTC_REG_SEC2);
-}
-
 static void __init hp300_init_IRQ(void)
 {
 }
@@ -256,7 +250,6 @@ void __init config_hp300(void)
        mach_init_IRQ        = hp300_init_IRQ;
        mach_get_model       = hp300_get_model;
        mach_hwclk           = hp300_hwclk;
-       mach_get_ss          = hp300_get_ss;
        mach_reset           = hp300_reset;
 #ifdef CONFIG_HEARTBEAT
        mach_heartbeat       = hp300_pulse;
index ae20219..f6c5e0d 100644 (file)
 #ifndef _M68K_DMA_H
 #define _M68K_DMA_H 1
 
-#ifdef CONFIG_COLDFIRE
-/*
- * ColdFire DMA Model:
- *   ColdFire DMA supports two forms of DMA: Single and Dual address. Single
- * address mode emits a source address, and expects that the device will either
- * pick up the data (DMA READ) or source data (DMA WRITE). This implies that
- * the device will place data on the correct byte(s) of the data bus, as the
- * memory transactions are always 32 bits. This implies that only 32 bit
- * devices will find single mode transfers useful. Dual address DMA mode
- * performs two cycles: source read and destination write. ColdFire will
- * align the data so that the device will always get the correct bytes, thus
- * is useful for 8 and 16 bit devices. This is the mode that is supported
- * below.
- *
- * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
- *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
- *
- * AUG/25/2000 : added support for 8, 16 and 32-bit Single-Address-Mode (K)2000
- *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
- *
- * APR/18/2002 : added proper support for MCF5272 DMA controller.
- *               Arthur Shipkowski (art@videon-central.com)
- */
-
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfdma.h>
-
-/*
- * Set number of channels of DMA on ColdFire for different implementations.
- */
-#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
-       defined(CONFIG_M523x) || defined(CONFIG_M527x) || \
-       defined(CONFIG_M528x) || defined(CONFIG_M525x)
-
-#define MAX_M68K_DMA_CHANNELS 4
-#elif defined(CONFIG_M5272)
-#define MAX_M68K_DMA_CHANNELS 1
-#elif defined(CONFIG_M53xx)
-#define MAX_M68K_DMA_CHANNELS 0
-#else
-#define MAX_M68K_DMA_CHANNELS 2
-#endif
-
-extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
-extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
-
-#if !defined(CONFIG_M5272)
-#define DMA_MODE_WRITE_BIT  0x01  /* Memory/IO to IO/Memory select */
-#define DMA_MODE_WORD_BIT   0x02  /* 8 or 16 bit transfers */
-#define DMA_MODE_LONG_BIT   0x04  /* or 32 bit transfers */
-#define DMA_MODE_SINGLE_BIT 0x08  /* single-address-mode */
-
-/* I/O to memory, 8 bits, mode */
-#define DMA_MODE_READ              0
-/* memory to I/O, 8 bits, mode */
-#define DMA_MODE_WRITE             1
-/* I/O to memory, 16 bits, mode */
-#define DMA_MODE_READ_WORD          2
-/* memory to I/O, 16 bits, mode */
-#define DMA_MODE_WRITE_WORD         3
-/* I/O to memory, 32 bits, mode */
-#define DMA_MODE_READ_LONG          4
-/* memory to I/O, 32 bits, mode */
-#define DMA_MODE_WRITE_LONG         5
-/* I/O to memory, 8 bits, single-address-mode */
-#define DMA_MODE_READ_SINGLE        8
-/* memory to I/O, 8 bits, single-address-mode */
-#define DMA_MODE_WRITE_SINGLE       9
-/* I/O to memory, 16 bits, single-address-mode */
-#define DMA_MODE_READ_WORD_SINGLE  10
-/* memory to I/O, 16 bits, single-address-mode */
-#define DMA_MODE_WRITE_WORD_SINGLE 11
-/* I/O to memory, 32 bits, single-address-mode */
-#define DMA_MODE_READ_LONG_SINGLE  12
-/* memory to I/O, 32 bits, single-address-mode */
-#define DMA_MODE_WRITE_LONG_SINGLE 13
-
-#else /* CONFIG_M5272 is defined */
-
-/* Source static-address mode */
-#define DMA_MODE_SRC_SA_BIT 0x01
-/* Two bits to select between all four modes */
-#define DMA_MODE_SSIZE_MASK 0x06
-/* Offset to shift bits in */
-#define DMA_MODE_SSIZE_OFF  0x01
-/* Destination static-address mode */
-#define DMA_MODE_DES_SA_BIT 0x10
-/* Two bits to select between all four modes */
-#define DMA_MODE_DSIZE_MASK 0x60
-/* Offset to shift bits in */
-#define DMA_MODE_DSIZE_OFF  0x05
-/* Size modifiers */
-#define DMA_MODE_SIZE_LONG  0x00
-#define DMA_MODE_SIZE_BYTE  0x01
-#define DMA_MODE_SIZE_WORD  0x02
-#define DMA_MODE_SIZE_LINE  0x03
-
-/*
- * Aliases to help speed quick ports; these may be suboptimal, however. They
- * do not include the SINGLE mode modifiers since the MCF5272 does not have a
- * mode where the device is in control of its addressing.
- */
-
-/* I/O to memory, 8 bits, mode */
-#define DMA_MODE_READ                ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
-/* memory to I/O, 8 bits, mode */
-#define DMA_MODE_WRITE             ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
-/* I/O to memory, 16 bits, mode */
-#define DMA_MODE_READ_WORD             ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
-/* memory to I/O, 16 bits, mode */
-#define DMA_MODE_WRITE_WORD         ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
-/* I/O to memory, 32 bits, mode */
-#define DMA_MODE_READ_LONG             ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
-/* memory to I/O, 32 bits, mode */
-#define DMA_MODE_WRITE_LONG         ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
-
-#endif /* !defined(CONFIG_M5272) */
-
-#if !defined(CONFIG_M5272)
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
-  volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
-  printk("enable_dma(dmanr=%d)\n", dmanr);
-#endif
-
-  dmawp = (unsigned short *) dma_base_addr[dmanr];
-  dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
-  volatile unsigned short *dmawp;
-  volatile unsigned char  *dmapb;
-
-#ifdef DMA_DEBUG
-  printk("disable_dma(dmanr=%d)\n", dmanr);
-#endif
-
-  dmawp = (unsigned short *) dma_base_addr[dmanr];
-  dmapb = (unsigned char *) dma_base_addr[dmanr];
-
-  /* Turn off external requests, and stop any DMA in progress */
-  dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
-  dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
-}
-
-/*
- * Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while interrupts are disabled! ---
- *
- * This is a NOP for ColdFire. Provide a stub for compatibility.
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
-
-  volatile unsigned char  *dmabp;
-  volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
-  printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
-#endif
-
-  dmabp = (unsigned char *) dma_base_addr[dmanr];
-  dmawp = (unsigned short *) dma_base_addr[dmanr];
-
-  /* Clear config errors */
-  dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;
-
-  /* Set command register */
-  dmawp[MCFDMA_DCR] =
-    MCFDMA_DCR_INT |         /* Enable completion irq */
-    MCFDMA_DCR_CS |          /* Force one xfer per request */
-    MCFDMA_DCR_AA |          /* Enable auto alignment */
-    /* single-address-mode */
-    ((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
-    /* sets s_rw (-> r/w) high if Memory to I/0 */
-    ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
-    /* Memory to I/O or I/O to Memory */
-    ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
-    /* 32 bit, 16 bit or 8 bit transfers */
-    ((mode & DMA_MODE_WORD_BIT)  ? MCFDMA_DCR_SSIZE_WORD :
-     ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
-                                   MCFDMA_DCR_SSIZE_BYTE)) |
-    ((mode & DMA_MODE_WORD_BIT)  ? MCFDMA_DCR_DSIZE_WORD :
-     ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
-                                   MCFDMA_DCR_DSIZE_BYTE));
-
-#ifdef DEBUG_DMA
-  printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
-         dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
-        (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
-#endif
-}
-
-/* Set transfer address for specific DMA channel */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
-{
-  volatile unsigned short *dmawp;
-  volatile unsigned int   *dmalp;
-
-#ifdef DMA_DEBUG
-  printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
-  dmawp = (unsigned short *) dma_base_addr[dmanr];
-  dmalp = (unsigned int *) dma_base_addr[dmanr];
-
-  /* Determine which address registers are used for memory/device accesses */
-  if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
-    /* Source incrementing, must be memory */
-    dmalp[MCFDMA_SAR] = a;
-    /* Set dest address, must be device */
-    dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
-  } else {
-    /* Destination incrementing, must be memory */
-    dmalp[MCFDMA_DAR] = a;
-    /* Set source address, must be device */
-    dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
-  }
-
-#ifdef DEBUG_DMA
-  printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
-       __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
-       (int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
-       (int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
-#endif
-}
-
-/*
- * Specific for Coldfire - sets device address.
- * Should be called after the mode set call, and before set DMA address.
- */
-static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
-{
-#ifdef DMA_DEBUG
-  printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
-  dma_device_address[dmanr] = a;
-}
-
-/*
- * NOTE 2: "count" represents _bytes_.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
-  volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
-  printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
-#endif
-
-  dmawp = (unsigned short *) dma_base_addr[dmanr];
-  dmawp[MCFDMA_BCR] = (unsigned short)count;
-}
-
-/*
- * Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
-  volatile unsigned short *dmawp;
-  unsigned short count;
-
-#ifdef DMA_DEBUG
-  printk("get_dma_residue(dmanr=%d)\n", dmanr);
-#endif
-
-  dmawp = (unsigned short *) dma_base_addr[dmanr];
-  count = dmawp[MCFDMA_BCR];
-  return((int) count);
-}
-#else /* CONFIG_M5272 is defined */
-
-/*
- * The MCF5272 DMA controller is very different than the controller defined above
- * in terms of register mapping.  For instance, with the exception of the 16-bit
- * interrupt register (IRQ#85, for reference), all of the registers are 32-bit.
- *
- * The big difference, however, is the lack of device-requested DMA.  All modes
- * are dual address transfer, and there is no 'device' setup or direction bit.
- * You can DMA between a device and memory, between memory and memory, or even between
- * two devices directly, with any combination of incrementing and non-incrementing
- * addresses you choose.  This puts a crimp in distinguishing between the 'device
- * address' set up by set_dma_device_addr.
- *
- * Therefore, there are two options.  One is to use set_dma_addr and set_dma_device_addr,
- * which will act exactly as above in -- it will look to see if the source is set to
- * autoincrement, and if so it will make the source use the set_dma_addr value and the
- * destination the set_dma_device_addr value.  Otherwise the source will be set to the
- * set_dma_device_addr value and the destination will get the set_dma_addr value.
- *
- * The other is to use the provided set_dma_src_addr and set_dma_dest_addr functions
- * and make it explicit.  Depending on what you're doing, one of these two should work
- * for you, but don't mix them in the same transfer setup.
- */
-
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
-  volatile unsigned int  *dmalp;
-
-#ifdef DMA_DEBUG
-  printk("enable_dma(dmanr=%d)\n", dmanr);
-#endif
-
-  dmalp = (unsigned int *) dma_base_addr[dmanr];
-  dmalp[MCFDMA_DMR] |= MCFDMA_DMR_EN;
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
-  volatile unsigned int   *dmalp;
-
-#ifdef DMA_DEBUG
-  printk("disable_dma(dmanr=%d)\n", dmanr);
-#endif
-
-  dmalp = (unsigned int *) dma_base_addr[dmanr];
-
-  /* Turn off external requests, and stop any DMA in progress */
-  dmalp[MCFDMA_DMR] &= ~MCFDMA_DMR_EN;
-  dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
-}
-
-/*
- * Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while interrupts are disabled! ---
- *
- * This is a NOP for ColdFire. Provide a stub for compatibility.
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
-
-  volatile unsigned int   *dmalp;
-  volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
-  printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
-#endif
-  dmalp = (unsigned int *) dma_base_addr[dmanr];
-  dmawp = (unsigned short *) dma_base_addr[dmanr];
-
-  /* Clear config errors */
-  dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
-
-  /* Set command register */
-  dmalp[MCFDMA_DMR] =
-    MCFDMA_DMR_RQM_DUAL |         /* Mandatory Request Mode setting */
-    MCFDMA_DMR_DSTT_SD  |         /* Set up addressing types; set to supervisor-data. */
-    MCFDMA_DMR_SRCT_SD  |         /* Set up addressing types; set to supervisor-data. */
-    /* source static-address-mode */
-    ((mode & DMA_MODE_SRC_SA_BIT) ? MCFDMA_DMR_SRCM_SA : MCFDMA_DMR_SRCM_IA) |
-    /* dest static-address-mode */
-    ((mode & DMA_MODE_DES_SA_BIT) ? MCFDMA_DMR_DSTM_SA : MCFDMA_DMR_DSTM_IA) |
-    /* burst, 32 bit, 16 bit or 8 bit transfers are separately configurable on the MCF5272 */
-    (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_DSTS_OFF) |
-    (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_SRCS_OFF);
-
-  dmawp[MCFDMA_DIR] |= MCFDMA_DIR_ASCEN;   /* Enable completion interrupts */
-
-#ifdef DEBUG_DMA
-  printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__,
-        dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
-        (int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
-#endif
-}
-
-/* Set transfer address for specific DMA channel */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
-{
-  volatile unsigned int   *dmalp;
-
-#ifdef DMA_DEBUG
-  printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
-  dmalp = (unsigned int *) dma_base_addr[dmanr];
-
-  /* Determine which address registers are used for memory/device accesses */
-  if (dmalp[MCFDMA_DMR] & MCFDMA_DMR_SRCM) {
-    /* Source incrementing, must be memory */
-    dmalp[MCFDMA_DSAR] = a;
-    /* Set dest address, must be device */
-    dmalp[MCFDMA_DDAR] = dma_device_address[dmanr];
-  } else {
-    /* Destination incrementing, must be memory */
-    dmalp[MCFDMA_DDAR] = a;
-    /* Set source address, must be device */
-    dmalp[MCFDMA_DSAR] = dma_device_address[dmanr];
-  }
-
-#ifdef DEBUG_DMA
-  printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
-       __FILE__, __LINE__, dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
-       (int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
-       (int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
-#endif
-}
-
-/*
- * Specific for Coldfire - sets device address.
- * Should be called after the mode set call, and before set DMA address.
- */
-static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
-{
-#ifdef DMA_DEBUG
-  printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
-  dma_device_address[dmanr] = a;
-}
-
-/*
- * NOTE 2: "count" represents _bytes_.
- *
- * NOTE 3: While a 32-bit register, "count" is only a maximum 24-bit value.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
-  volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
-  printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
-#endif
-
-  dmalp = (unsigned int *) dma_base_addr[dmanr];
-  dmalp[MCFDMA_DBCR] = count;
-}
-
-/*
- * Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
-  volatile unsigned int *dmalp;
-  unsigned int count;
-
-#ifdef DMA_DEBUG
-  printk("get_dma_residue(dmanr=%d)\n", dmanr);
-#endif
-
-  dmalp = (unsigned int *) dma_base_addr[dmanr];
-  count = dmalp[MCFDMA_DBCR];
-  return(count);
-}
-
-#endif /* !defined(CONFIG_M5272) */
-#endif /* CONFIG_COLDFIRE */
-
 /* it's useless on the m68k, but unfortunately needed by the new
    bootmem allocator (but this should do it for this) */
 #define MAX_DMA_ADDRESS PAGE_OFFSET
 
-#define MAX_DMA_CHANNELS 8
-
-extern int request_dma(unsigned int dmanr, const char * device_id);    /* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr);      /* release it again */
-
 #ifdef CONFIG_PCI
 extern int isa_dma_bridge_buggy;
 #else
index 3d387ce..2def06a 100644 (file)
@@ -60,6 +60,13 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
    is actually used on ASV.  */
 #define ELF_PLAT_INIT(_r, load_addr)   _r->a1 = 0
 
+#define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, dynamic_addr) \
+        do { \
+                (_r)->d3 = _exec_map_addr; \
+                (_r)->d4 = _interp_map_addr; \
+                (_r)->d5 = dynamic_addr; \
+        } while(0)
+
 #if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
 #define ELF_EXEC_PAGESIZE      8192
 #else
@@ -114,4 +121,6 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
 
 #define ELF_PLATFORM  (NULL)
 
+#define ELF_FDPIC_CORE_EFLAGS  0
+
 #endif
index 8fd80ef..48d27f1 100644 (file)
@@ -19,12 +19,10 @@ extern void (*mach_get_model) (char *model);
 extern void (*mach_get_hardware_list) (struct seq_file *m);
 /* machine dependent timer functions */
 extern int (*mach_hwclk)(int, struct rtc_time*);
-extern unsigned int (*mach_get_ss)(void);
 extern int (*mach_get_rtc_pll)(struct rtc_pll_info *);
 extern int (*mach_set_rtc_pll)(struct rtc_pll_info *);
 extern void (*mach_reset)( void );
 extern void (*mach_halt)( void );
-extern void (*mach_power_off)( void );
 extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
 extern void (*mach_hd_setup)(char *, int *);
 extern void (*mach_heartbeat) (int);
index 5c15aac..e006724 100644 (file)
@@ -6,9 +6,7 @@
 /* Default "unsigned long" context */
 typedef unsigned long mm_context_t;
 #else
-typedef struct {
-       unsigned long           end_brk;
-} mm_context_t;
+#include <asm-generic/mmu.h>
 #endif
 
 #endif
index 87151d6..bce5ca5 100644 (file)
@@ -42,7 +42,8 @@ extern void paging_init(void);
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-#define ZERO_PAGE(vaddr)       (virt_to_page(0))
+extern void *empty_zero_page;
+#define ZERO_PAGE(vaddr)       (virt_to_page(empty_zero_page))
 
 /*
  * All 32bit addresses are effectively valid for vmalloc...
index 19a1b9d..5b50ea5 100644 (file)
@@ -74,7 +74,12 @@ struct switch_stack {
 
 #define PTRACE_GET_THREAD_AREA    25
 
+#define PTRACE_GETFDPIC        31
+
 #define PTRACE_SINGLEBLOCK     33      /* resume execution until next branch */
 
+#define PTRACE_GETFDPIC_EXEC   0
+#define PTRACE_GETFDPIC_INTERP 1
+
 #endif /* __ASSEMBLY__ */
 #endif /* _UAPI_M68K_PTRACE_H */
index a6030db..2cb4a61 100644 (file)
@@ -67,12 +67,11 @@ void machine_halt(void)
 
 void machine_power_off(void)
 {
-       if (mach_power_off)
-               mach_power_off();
+       do_kernel_power_off();
        for (;;);
 }
 
-void (*pm_power_off)(void) = machine_power_off;
+void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
 void show_regs(struct pt_regs * regs)
@@ -138,9 +137,11 @@ asmlinkage int m68k_clone3(struct pt_regs *regs)
        return sys_clone3((struct clone_args __user *)regs->d1, regs->d2);
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
-               struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long usp = args->stack;
+       unsigned long tls = args->tls;
        struct fork_frame {
                struct switch_stack sw;
                struct pt_regs regs;
@@ -157,12 +158,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
         */
        p->thread.fc = USER_DATA;
 
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                /* kernel thread */
                memset(frame, 0, sizeof(struct fork_frame));
                frame->regs.sr = PS_S;
-               frame->sw.a3 = usp; /* function */
-               frame->sw.d7 = arg;
+               frame->sw.a3 = (unsigned long)args->fn;
+               frame->sw.d7 = (unsigned long)args->fn_arg;
                frame->sw.retpc = (unsigned long)ret_from_kernel_thread;
                p->thread.usp = 0;
                return 0;
index daebccd..0a4184a 100644 (file)
@@ -19,6 +19,8 @@
 #include <linux/ptrace.h>
 #include <linux/user.h>
 #include <linux/signal.h>
+#include <linux/regset.h>
+#include <linux/elf.h>
 
 #include <linux/uaccess.h>
 #include <asm/page.h>
@@ -284,3 +286,59 @@ asmlinkage void syscall_trace_leave(void)
        if (test_thread_flag(TIF_SYSCALL_TRACE))
                ptrace_report_syscall_exit(task_pt_regs(current), 0);
 }
+
+#if defined(CONFIG_BINFMT_ELF_FDPIC) && defined(CONFIG_ELF_CORE)
+/*
+ * Currently the only thing that needs to use regsets for m68k is the
+ * coredump support of the elf_fdpic loader. Implement the minimum
+ * definitions required for that.
+ */
+static int m68k_regset_get(struct task_struct *target,
+                          const struct user_regset *regset,
+                          struct membuf to)
+{
+       struct pt_regs *ptregs = task_pt_regs(target);
+       u32 uregs[ELF_NGREG];
+
+       ELF_CORE_COPY_REGS(uregs, ptregs);
+       return membuf_write(&to, uregs, sizeof(uregs));
+}
+
+enum m68k_regset {
+       REGSET_GPR,
+#ifdef CONFIG_FPU
+       REGSET_FPU,
+#endif
+};
+
+static const struct user_regset m68k_user_regsets[] = {
+       [REGSET_GPR] = {
+               .core_note_type = NT_PRSTATUS,
+               .n = ELF_NGREG,
+               .size = sizeof(u32),
+               .align = sizeof(u16),
+               .regset_get = m68k_regset_get,
+       },
+#ifdef CONFIG_FPU
+       [REGSET_FPU] = {
+               .core_note_type = NT_PRFPREG,
+               .n = sizeof(struct user_m68kfp_struct) / sizeof(u32),
+               .size = sizeof(u32),
+               .align = sizeof(u32),
+       }
+#endif /* CONFIG_FPU */
+};
+
+static const struct user_regset_view user_m68k_view = {
+       .name = "m68k",
+       .e_machine = EM_68K,
+       .ei_osabi = ELF_OSABI,
+       .regsets = m68k_user_regsets,
+       .n = ARRAY_SIZE(m68k_user_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+       return &user_m68k_view;
+}
+#endif /* CONFIG_BINFMT_ELF_FDPIC && CONFIG_ELF_CORE */
index 78ab562..e62fa8f 100644 (file)
@@ -87,18 +87,8 @@ void (*mach_sched_init) (void) __initdata = NULL;
 void (*mach_init_IRQ) (void) __initdata = NULL;
 void (*mach_get_model) (char *model);
 void (*mach_get_hardware_list) (struct seq_file *m);
-/* machine dependent timer functions */
-int (*mach_hwclk) (int, struct rtc_time*);
-EXPORT_SYMBOL(mach_hwclk);
-unsigned int (*mach_get_ss)(void);
-int (*mach_get_rtc_pll)(struct rtc_pll_info *);
-int (*mach_set_rtc_pll)(struct rtc_pll_info *);
-EXPORT_SYMBOL(mach_get_ss);
-EXPORT_SYMBOL(mach_get_rtc_pll);
-EXPORT_SYMBOL(mach_set_rtc_pll);
 void (*mach_reset)( void );
 void (*mach_halt)( void );
-void (*mach_power_off)( void );
 #ifdef CONFIG_HEARTBEAT
 void (*mach_heartbeat) (int);
 EXPORT_SYMBOL(mach_heartbeat);
index 5e4104f..cb6def5 100644 (file)
@@ -50,12 +50,10 @@ char __initdata command_line[COMMAND_LINE_SIZE];
 
 /* machine dependent timer functions */
 void (*mach_sched_init)(void) __initdata = NULL;
-int (*mach_hwclk) (int, struct rtc_time*);
 
 /* machine dependent reboot functions */
 void (*mach_reset)(void);
 void (*mach_halt)(void);
-void (*mach_power_off)(void);
 
 #ifdef CONFIG_M68000
 #if defined(CONFIG_M68328)
index 340ffee..a97600b 100644 (file)
@@ -63,6 +63,15 @@ void timer_heartbeat(void)
 #endif /* CONFIG_HEARTBEAT */
 
 #ifdef CONFIG_M68KCLASSIC
+/* machine dependent timer functions */
+int (*mach_hwclk) (int, struct rtc_time*);
+EXPORT_SYMBOL(mach_hwclk);
+
+int (*mach_get_rtc_pll)(struct rtc_pll_info *);
+int (*mach_set_rtc_pll)(struct rtc_pll_info *);
+EXPORT_SYMBOL(mach_get_rtc_pll);
+EXPORT_SYMBOL(mach_set_rtc_pll);
+
 #if !IS_BUILTIN(CONFIG_RTC_DRV_GENERIC)
 void read_persistent_clock64(struct timespec64 *ts)
 {
index 65d124e..382f656 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/errno.h>
 #include <linux/module.h>
+#include <linux/reboot.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/tty.h>
@@ -140,7 +141,6 @@ void __init config_mac(void)
        mach_hwclk = mac_hwclk;
        mach_reset = mac_reset;
        mach_halt = mac_poweroff;
-       mach_power_off = mac_poweroff;
 #if IS_ENABLED(CONFIG_INPUT_M68K_BEEP)
        mach_beep = mac_mksound;
 #endif
@@ -160,6 +160,8 @@ void __init config_mac(void)
 
        if (macintosh_config->ident == MAC_MODEL_IICI)
                mach_l2_flush = via_l2_flush;
+
+       register_platform_power_off(mac_poweroff);
 }
 
 
index ecbe948..df7f797 100644 (file)
@@ -27,7 +27,6 @@
 #include <asm/pgalloc.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
-#include <asm/dma.h>
 #ifdef CONFIG_ATARI
 #include <asm/atari_stram.h>
 #endif
index 9237243..c78ee70 100644 (file)
@@ -41,7 +41,6 @@ static void q40_get_model(char *model);
 extern void q40_sched_init(void);
 
 static int q40_hwclk(int, struct rtc_time *);
-static unsigned int q40_get_ss(void);
 static int q40_get_rtc_pll(struct rtc_pll_info *pll);
 static int q40_set_rtc_pll(struct rtc_pll_info *pll);
 
@@ -169,7 +168,6 @@ void __init config_q40(void)
 
        mach_init_IRQ = q40_init_IRQ;
        mach_hwclk = q40_hwclk;
-       mach_get_ss = q40_get_ss;
        mach_get_rtc_pll = q40_get_rtc_pll;
        mach_set_rtc_pll = q40_set_rtc_pll;
 
@@ -246,11 +244,6 @@ static int q40_hwclk(int op, struct rtc_time *t)
        return 0;
 }
 
-static unsigned int q40_get_ss(void)
-{
-       return bcd2bin(Q40_RTC_SECS);
-}
-
 /* get and set PLL calibration of RTC clock */
 #define Q40_RTC_PLL_MASK ((1<<5)-1)
 #define Q40_RTC_PLL_SIGN (1<<5)
index 68d29c8..632ba20 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#include <linux/reboot.h>
 #include <linux/serial_core.h>
 #include <clocksource/timer-goldfish.h>
 
@@ -126,5 +127,6 @@ void __init config_virt(void)
        mach_get_model = virt_get_model;
        mach_reset = virt_reset;
        mach_halt = virt_halt;
-       mach_power_off = virt_halt;
+
+       register_platform_power_off(virt_halt);
 }
index 34071a8..8798ad2 100644 (file)
@@ -8,6 +8,7 @@
 
 #ifdef __KERNEL__
 
+#ifdef CONFIG_OPT_LIB_FUNCTION
 #define __HAVE_ARCH_MEMSET
 #define __HAVE_ARCH_MEMCPY
 #define __HAVE_ARCH_MEMMOVE
@@ -15,6 +16,7 @@
 extern void *memset(void *, int, __kernel_size_t);
 extern void *memcpy(void *, const void *, __kernel_size_t);
 extern void *memmove(void *, const void *, __kernel_size_t);
+#endif
 
 #endif /* __KERNEL__ */
 
index 130cd0f..df4b9d0 100644 (file)
@@ -31,7 +31,7 @@
 #define GDB_RTLBLO     55
 #define GDB_RTLBHI     56
 
-/* keep pvr separately because it is unchangeble */
+/* keep pvr separately because it is unchangeable */
 static struct pvr_s pvr;
 
 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
index 1b944d3..3c6241b 100644 (file)
@@ -52,20 +52,22 @@ void flush_thread(void)
 {
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
-               struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long usp = args->stack;
+       unsigned long tls = args->tls;
        struct pt_regs *childregs = task_pt_regs(p);
        struct thread_info *ti = task_thread_info(p);
 
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                /* if we're creating a new kernel thread then just zeroing all
                 * the registers. That's OK for a brand new thread.*/
                memset(childregs, 0, sizeof(struct pt_regs));
                memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
                ti->cpu_context.r1  = (unsigned long)childregs;
-               ti->cpu_context.r20 = (unsigned long)usp; /* fn */
-               ti->cpu_context.r19 = (unsigned long)arg;
+               ti->cpu_context.r20 = (unsigned long)args->fn;
+               ti->cpu_context.r19 = (unsigned long)args->fn_arg;
                childregs->pt_mode = 1;
                local_save_flags(childregs->msr);
                ti->cpu_context.msr = childregs->msr & ~MSR_IE;
index f8832cf..26c3855 100644 (file)
@@ -251,6 +251,10 @@ static int __init xilinx_timer_init(struct device_node *timer)
        u32 timer_num = 1;
        int ret;
 
+       /* If this property is present, the device is a PWM and not a timer */
+       if (of_property_read_bool(timer, "#pwm-cells"))
+               return 0;
+
        if (initialized)
                return -EINVAL;
 
index 63041fd..9966dce 100644 (file)
 
 #include <linux/string.h>
 
-#ifdef __HAVE_ARCH_MEMCPY
-#ifndef CONFIG_OPT_LIB_FUNCTION
-void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
-{
-       const char *src = v_src;
-       char *dst = v_dst;
-
-       /* Simple, byte oriented memcpy. */
-       while (c--)
-               *dst++ = *src++;
-
-       return v_dst;
-}
-#else /* CONFIG_OPT_LIB_FUNCTION */
+#ifdef CONFIG_OPT_LIB_FUNCTION
 void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
 {
        const char *src = v_src;
@@ -188,6 +175,5 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
 
        return v_dst;
 }
-#endif /* CONFIG_OPT_LIB_FUNCTION */
 EXPORT_SYMBOL(memcpy);
-#endif /* __HAVE_ARCH_MEMCPY */
+#endif /* CONFIG_OPT_LIB_FUNCTION */
index 9862f6b..c1f08c4 100644 (file)
 #include <linux/compiler.h>
 #include <linux/string.h>
 
-#ifdef __HAVE_ARCH_MEMMOVE
-#ifndef CONFIG_OPT_LIB_FUNCTION
-void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
-{
-       const char *src = v_src;
-       char *dst = v_dst;
-
-       if (!c)
-               return v_dst;
-
-       /* Use memcpy when source is higher than dest */
-       if (v_dst <= v_src)
-               return memcpy(v_dst, v_src, c);
-
-       /* copy backwards, from end to beginning */
-       src += c;
-       dst += c;
-
-       /* Simple, byte oriented memmove. */
-       while (c--)
-               *--dst = *--src;
-
-       return v_dst;
-}
-#else /* CONFIG_OPT_LIB_FUNCTION */
+#ifdef CONFIG_OPT_LIB_FUNCTION
 void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
 {
        const char *src = v_src;
@@ -102,7 +78,7 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
 
                i_dst = (void *)dst;
                /* Choose a copy scheme based on the source */
-               /* alignment relative to dstination. */
+               /* alignment relative to destination. */
                switch ((unsigned long)src & 3) {
                case 0x0:       /* Both byte offsets are aligned */
 
@@ -215,6 +191,5 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
        }
        return v_dst;
 }
-#endif /* CONFIG_OPT_LIB_FUNCTION */
 EXPORT_SYMBOL(memmove);
-#endif /* __HAVE_ARCH_MEMMOVE */
+#endif /* CONFIG_OPT_LIB_FUNCTION */
index eb6c898..7c2352d 100644 (file)
 #include <linux/compiler.h>
 #include <linux/string.h>
 
-#ifdef __HAVE_ARCH_MEMSET
-#ifndef CONFIG_OPT_LIB_FUNCTION
-void *memset(void *v_src, int c, __kernel_size_t n)
-{
-       char *src = v_src;
-
-       /* Truncate c to 8 bits */
-       c = (c & 0xFF);
-
-       /* Simple, byte oriented memset or the rest of count. */
-       while (n--)
-               *src++ = c;
-
-       return v_src;
-}
-#else /* CONFIG_OPT_LIB_FUNCTION */
+#ifdef CONFIG_OPT_LIB_FUNCTION
 void *memset(void *v_src, int c, __kernel_size_t n)
 {
        char *src = v_src;
@@ -89,11 +74,21 @@ void *memset(void *v_src, int c, __kernel_size_t n)
        }
 
        /* Simple, byte oriented memset or the rest of count. */
-       while (n--)
+       switch (n) {
+       case 3:
+               *src++ = c;
+               fallthrough;
+       case 2:
                *src++ = c;
+               fallthrough;
+       case 1:
+               *src++ = c;
+               break;
+       default:
+               break;
+       }
 
        return v_src;
 }
-#endif /* CONFIG_OPT_LIB_FUNCTION */
 EXPORT_SYMBOL(memset);
-#endif /* __HAVE_ARCH_MEMSET */
+#endif /* CONFIG_OPT_LIB_FUNCTION */
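
Since the optimized memset fills the bulk of the buffer a 32-bit word at a time, at most three bytes can remain once the word loop finishes, so the tail loop is replaced with an unrolled fallthrough switch. A standalone sketch of the same tail-handling idea (hypothetical helper, not the kernel's code):

#include <stddef.h>

/* Fill the 0-3 bytes left over after a word-sized fill loop. */
void fill_tail(unsigned char *dst, unsigned char c, size_t n)
{
	switch (n) {
	case 3:
		*dst++ = c;
		/* fall through */
	case 2:
		*dst++ = c;
		/* fall through */
	case 1:
		*dst = c;
		break;
	default:
		break;
	}
}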
index 952f35b..f4e5034 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h> /* mem_init */
 #include <linux/initrd.h>
+#include <linux/of_fdt.h>
 #include <linux/pagemap.h>
 #include <linux/pfn.h>
 #include <linux/slab.h>
@@ -261,8 +262,12 @@ asmlinkage void __init mmu_init(void)
 
        parse_early_param();
 
+       early_init_fdt_scan_reserved_mem();
+
        /* CMA initialization */
        dma_contiguous_reserve(memory_start + lowmem_size - 1);
+
+       memblock_dump_all();
 }
 
 void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
index de3b32a..db09d45 100644 (file)
@@ -1321,11 +1321,11 @@ config CPU_LOONGSON64
        select SWIOTLB
        select HAVE_KVM
        help
-               The Loongson GSx64(GS264/GS464/GS464E/GS464V) series of processor
-               cores implements the MIPS64R2 instruction set with many extensions,
-               including most 64-bit Loongson-2 (2H, 2K) and Loongson-3 (3A1000,
-               3B1000, 3B1500, 3A2000, 3A3000 and 3A4000) processors. However, old
-               Loongson-2E/2F is not covered here and will be removed in future.
+         The Loongson GSx64(GS264/GS464/GS464E/GS464V) series of processor
+         cores implements the MIPS64R2 instruction set with many extensions,
+         including most 64-bit Loongson-2 (2H, 2K) and Loongson-3 (3A1000,
+         3B1000, 3B1500, 3A2000, 3A3000 and 3A4000) processors. However, old
+         Loongson-2E/2F is not covered here and will be removed in future.
 
 config LOONGSON3_ENHANCEMENT
        bool "New Loongson-3 CPU Enhancements"
@@ -3198,16 +3198,12 @@ config MIPS32_COMPAT
 config COMPAT
        bool
 
-config SYSVIPC_COMPAT
-       bool
-
 config MIPS32_O32
        bool "Kernel support for o32 binaries"
        depends on 64BIT
        select ARCH_WANT_OLD_COMPAT_IPC
        select COMPAT
        select MIPS32_COMPAT
-       select SYSVIPC_COMPAT if SYSVIPC
        help
          Select this option if you want to run o32 binaries.  These are pure
          32-bit binaries as used by the 32-bit Linux/MIPS port.  Most of
@@ -3221,7 +3217,6 @@ config MIPS32_N32
        select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
        select COMPAT
        select MIPS32_COMPAT
-       select SYSVIPC_COMPAT if SYSVIPC
        help
          Select this option if you want to run n32 binaries.  These are
          64-bit binaries using 32-bit quantities for addressing and certain
@@ -3255,7 +3250,7 @@ menu "CPU Power Management"
 
 if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
 source "drivers/cpufreq/Kconfig"
-endif
+endif # CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
 
 source "drivers/cpuidle/Kconfig"
 
index 4ca2c28..5ab0430 100644 (file)
@@ -574,7 +574,7 @@ u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
                dp++;
        }
 
-       /* Make last descrptor point to the first. */
+       /* Make last descriptor point to the first. */
        dp--;
        dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
        ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
index cd72eaa..e70e529 100644 (file)
@@ -732,16 +732,7 @@ static struct platform_device db1300_lcd_dev = {
 /**********************************************************************/
 
 #if IS_ENABLED(CONFIG_TOUCHSCREEN_WM97XX)
-static void db1300_wm97xx_irqen(struct wm97xx *wm, int enable)
-{
-       if (enable)
-               enable_irq(DB1300_AC97_PEN_INT);
-       else
-               disable_irq_nosync(DB1300_AC97_PEN_INT);
-}
-
 static struct wm97xx_mach_ops db1300_wm97xx_ops = {
-       .irq_enable     = db1300_wm97xx_irqen,
        .irq_gpio       = WM97XX_GPIO_3,
 };
 
index c535f9c..3378866 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0+
 
 #include <linux/types.h>
+#include <linux/dma-map-ops.h>
 #include <asm/bmips.h>
 #include <asm/io.h>
 
index 522f2c4..c17fc14 100644 (file)
@@ -78,7 +78,7 @@
 &qspi {
        status = "okay";
 
-       m25p80@0 {
+       flash@0 {
                compatible = "m25p80";
                reg = <0>;
                spi-max-frequency = <40000000>;
index 01f215b..c9b76f4 100644 (file)
@@ -81,7 +81,7 @@
 &qspi {
        status = "okay";
 
-       m25p80@0 {
+       flash@0 {
                compatible = "m25p80";
                reg = <0>;
                spi-max-frequency = <40000000>;
index f389349..289a57b 100644 (file)
 &qspi {
        status = "okay";
 
-       m25p80@0 {
+       flash@0 {
                compatible = "m25p80";
                reg = <0>;
                spi-max-frequency = <40000000>;
index f98cf02..c89abf9 100644 (file)
                };
        };
 
-       ssi: spi-gpio {
-               compatible = "spi-gpio";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               num-chipselects = <1>;
-
-               mosi-gpios = <&gpd 2 GPIO_ACTIVE_HIGH>;
-               miso-gpios = <&gpd 3 GPIO_ACTIVE_HIGH>;
-               sck-gpios = <&gpd 0 GPIO_ACTIVE_HIGH>;
-               cs-gpios = <&gpd 1 GPIO_ACTIVE_HIGH>;
-
-               status = "okay";
-
-               spi-max-frequency = <50000000>;
-
-               sc16is752: expander@0 {
-                       compatible = "nxp,sc16is752";
-                       reg = <0>; /* CE0 */
-                       spi-max-frequency = <4000000>;
-
-                       clocks = <&exclk_sc16is752>;
-
-                       interrupt-parent = <&gpc>;
-                       interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
-
-                       gpio-controller;
-                       #gpio-cells = <2>;
-
-                       exclk_sc16is752: sc16is752 {
-                               compatible = "fixed-clock";
-                               #clock-cells = <0>;
-                               clock-frequency = <48000000>;
-                       };
-               };
-       };
-
        wlan_pwrseq: msc1-pwrseq {
                compatible = "mmc-pwrseq-simple";
 
@@ -90,7 +54,7 @@
 
 &ost {
        /* 1500 kHz for the system timer and clocksource */
-       assigned-clocks = <&ost OST_CLK_PERCPU_TIMER>, <&ost OST_CLK_GLOBAL_TIMER>;
+       assigned-clocks = <&ost OST_CLK_EVENT_TIMER>, <&ost OST_CLK_GLOBAL_TIMER>;
        assigned-clock-rates = <1500000>, <1500000>;
 };
 
        pinctrl-0 = <&pins_uart2>;
 };
 
+&ssi {
+       status = "okay";
+
+       num-cs = <2>;
+       cs-gpios = <0>, <&gpc 20 GPIO_ACTIVE_LOW>;
+
+       pinctrl-names = "default";
+       pinctrl-0 = <&pins_ssi>;
+
+       sc16is752: expander@0 {
+               compatible = "nxp,sc16is752";
+               reg = <0>; /* CE0 */
+
+               spi-rx-bus-width = <1>;
+               spi-tx-bus-width = <1>;
+               spi-max-frequency = <4000000>;
+
+               clocks = <&exclk_sc16is752>;
+
+               interrupt-parent = <&gpc>;
+               interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
+
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               exclk_sc16is752: sc16is752 {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <48000000>;
+               };
+       };
+};
+
 &i2c0 {
        status = "okay";
 
                bias-pull-up;
        };
 
+       pins_ssi: ssi {
+               function = "ssi";
+               groups = "ssi-dt-d", "ssi-dr-d", "ssi-clk-d", "ssi-ce0-d";
+               bias-disable;
+       };
+
        pins_i2c0: i2c0 {
                function = "i2c0";
                groups = "i2c0-data";
index cfcb40e..3c77849 100644 (file)
                };
        };
 
-       ssi0: spi-gpio {
-               compatible = "spi-gpio";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               num-chipselects = <1>;
-
-               mosi-gpios = <&gpc 12 GPIO_ACTIVE_HIGH>;
-               miso-gpios = <&gpc 11 GPIO_ACTIVE_HIGH>;
-               sck-gpios = <&gpc 15 GPIO_ACTIVE_HIGH>;
-               cs-gpios = <&gpc 16 GPIO_ACTIVE_HIGH>;
-
-               status = "okay";
-
-               spi-max-frequency = <50000000>;
-
-               sc16is752: expander@0 {
-                       compatible = "nxp,sc16is752";
-                       reg = <0>; /* CE0 */
-                       spi-max-frequency = <4000000>;
-
-                       clocks = <&exclk_sc16is752>;
-
-                       interrupt-parent = <&gpb>;
-                       interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
-
-                       gpio-controller;
-                       #gpio-cells = <2>;
-
-                       exclk_sc16is752: sc16is752 {
-                               compatible = "fixed-clock";
-                               #clock-cells = <0>;
-                               clock-frequency = <48000000>;
-                       };
-               };
-       };
-
        wlan_pwrseq: msc1-pwrseq {
                compatible = "mmc-pwrseq-simple";
 
@@ -90,7 +54,7 @@
 
 &ost {
        /* 1500 kHz for the system timer and clocksource */
-       assigned-clocks = <&ost OST_CLK_PERCPU_TIMER>, <&ost OST_CLK_GLOBAL_TIMER>;
+       assigned-clocks = <&ost OST_CLK_EVENT_TIMER>, <&ost OST_CLK_GLOBAL_TIMER>;
        assigned-clock-rates = <1500000>, <1500000>;
 };
 
        pinctrl-0 = <&pins_uart1>;
 };
 
+&ssi0 {
+       status = "okay";
+
+       num-cs = <2>;
+
+       pinctrl-names = "default";
+       pinctrl-0 = <&pins_ssi0>;
+
+       sc16is752: expander@0 {
+               compatible = "nxp,sc16is752";
+               reg = <0>; /* CE0 */
+
+               spi-rx-bus-width = <1>;
+               spi-tx-bus-width = <1>;
+               spi-max-frequency = <4000000>;
+
+               clocks = <&exclk_sc16is752>;
+
+               interrupt-parent = <&gpb>;
+               interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               exclk_sc16is752: sc16is752 {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <48000000>;
+               };
+       };
+};
+
 &i2c0 {
        status = "okay";
 
                bias-pull-up;
        };
 
+       pins_ssi0: ssi0 {
+               function = "ssi0";
+               groups = "ssi0-dt", "ssi0-dr", "ssi0-clk", "ssi0-ce0", "ssi0-ce1";
+               bias-disable;
+       };
+
        pins_i2c0: i2c0 {
                function = "i2c0";
                groups = "i2c0-data";
index b998301..c182a65 100644 (file)
        };
 
        otg: usb@13500000 {
-               compatible = "ingenic,jz4780-otg", "snps,dwc2";
+               compatible = "ingenic,jz4780-otg";
                reg = <0x13500000 0x40000>;
 
                interrupt-parent = <&intc>;
index 8bd27ed..b0a034b 100644 (file)
                        clocks = <&tcu TCU_CLK_WDT>;
                        clock-names = "wdt";
                };
+
+               pwm: pwm@40 {
+                       compatible = "ingenic,x1000-pwm";
+                       reg = <0x40 0x50>;
+
+                       #pwm-cells = <3>;
+
+                       clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>,
+                                <&tcu TCU_CLK_TIMER2>, <&tcu TCU_CLK_TIMER3>,
+                                <&tcu TCU_CLK_TIMER4>;
+                       clock-names = "timer0", "timer1", "timer2", "timer3", "timer4";
+               };
        };
 
        rtc: rtc@10003000 {
                status = "disabled";
        };
 
+       ssi: spi@10043000 {
+               compatible = "ingenic,x1000-spi";
+               reg = <0x10043000 0x20>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               interrupt-parent = <&intc>;
+               interrupts = <8>;
+
+               clocks = <&cgu X1000_CLK_SSI>;
+               clock-names = "spi";
+
+               dmas = <&pdma X1000_DMA_SSI0_RX 0xffffffff>,
+                          <&pdma X1000_DMA_SSI0_TX 0xffffffff>;
+               dma-names = "rx", "tx";
+
+               status = "disabled";
+       };
+
        i2c0: i2c-controller@10050000 {
                compatible = "ingenic,x1000-i2c";
                reg = <0x10050000 0x1000>;
        pdma: dma-controller@13420000 {
                compatible = "ingenic,x1000-dma";
                reg = <0x13420000 0x400>, <0x13421000 0x40>;
+
                #dma-cells = <2>;
 
                interrupt-parent = <&intc>;
        };
 
        otg: usb@13500000 {
-               compatible = "ingenic,x1000-otg", "snps,dwc2";
+               compatible = "ingenic,x1000-otg";
                reg = <0x13500000 0x40000>;
 
                interrupt-parent = <&intc>;
index 2595df8..dbf21af 100644 (file)
                        clocks = <&tcu TCU_CLK_WDT>;
                        clock-names = "wdt";
                };
+
+               pwm: pwm@40 {
+                       compatible = "ingenic,x1830-pwm", "ingenic,jz4740-pwm";
+                       reg = <0x40 0x80>;
+
+                       #pwm-cells = <3>;
+
+                       clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>,
+                                <&tcu TCU_CLK_TIMER2>, <&tcu TCU_CLK_TIMER3>,
+                                <&tcu TCU_CLK_TIMER4>, <&tcu TCU_CLK_TIMER5>,
+                                <&tcu TCU_CLK_TIMER6>, <&tcu TCU_CLK_TIMER7>;
+                       clock-names = "timer0", "timer1", "timer2", "timer3",
+                                     "timer4", "timer5", "timer6", "timer7";
+               };
        };
 
        rtc: rtc@10003000 {
                status = "disabled";
        };
 
+       ssi0: spi@10043000 {
+               compatible = "ingenic,x1830-spi", "ingenic,x1000-spi";
+               reg = <0x10043000 0x20>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               interrupt-parent = <&intc>;
+               interrupts = <9>;
+
+               clocks = <&cgu X1830_CLK_SSI0>;
+               clock-names = "spi";
+
+               dmas = <&pdma X1830_DMA_SSI0_RX 0xffffffff>,
+                          <&pdma X1830_DMA_SSI0_TX 0xffffffff>;
+               dma-names = "rx", "tx";
+
+               status = "disabled";
+       };
+
+       ssi1: spi@10044000 {
+               compatible = "ingenic,x1830-spi", "ingenic,x1000-spi";
+               reg = <0x10044000 0x20>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               interrupt-parent = <&intc>;
+               interrupts = <8>;
+
+               clocks = <&cgu X1830_CLK_SSI1>;
+               clock-names = "spi";
+
+               dmas = <&pdma X1830_DMA_SSI1_RX 0xffffffff>,
+                          <&pdma X1830_DMA_SSI1_TX 0xffffffff>;
+               dma-names = "rx", "tx";
+
+               status = "disabled";
+       };
+
        i2c0: i2c-controller@10050000 {
                compatible = "ingenic,x1830-i2c", "ingenic,x1000-i2c";
                reg = <0x10050000 0x1000>;
        pdma: dma-controller@13420000 {
                compatible = "ingenic,x1830-dma";
                reg = <0x13420000 0x400>, <0x13421000 0x40>;
+
                #dma-cells = <2>;
 
                interrupt-parent = <&intc>;
        };
 
        otg: usb@13500000 {
-               compatible = "ingenic,x1830-otg", "snps,dwc2";
+               compatible = "ingenic,x1830-otg";
                reg = <0x13500000 0x40000>;
 
                interrupt-parent = <&intc>;
index d80cd68..0ea7bc5 100644 (file)
                pins = "GPIO_49";
                function = "si";
        };
-       i2cmux_pins_i: i2cmux-pins-i {
+       i2cmux_pins_i: i2cmux-pins {
                pins = "GPIO_17", "GPIO_18", "GPIO_20", "GPIO_21";
                function = "twi_scl_m";
                output-low;
        };
-       i2cmux_0: i2cmux-0 {
+       i2cmux_0: i2cmux-0-pins {
                pins = "GPIO_17";
                function = "twi_scl_m";
                output-high;
        };
-       i2cmux_1: i2cmux-1 {
+       i2cmux_1: i2cmux-1-pins {
                pins = "GPIO_18";
                function = "twi_scl_m";
                output-high;
        };
-       i2cmux_2: i2cmux-2 {
+       i2cmux_2: i2cmux-2-pins {
                pins = "GPIO_20";
                function = "twi_scl_m";
                output-high;
        };
-       i2cmux_3: i2cmux-3 {
+       i2cmux_3: i2cmux-3-pins {
                pins = "GPIO_21";
                function = "twi_scl_m";
                output-high;
index 813c5e1..05d8c6a 100644 (file)
 };
 
 &gpio {
-       i2cmux_pins_i: i2cmux-pins-i {
+       i2cmux_pins_i: i2cmux-pins {
                pins = "GPIO_17", "GPIO_18";
                function = "twi_scl_m";
                output-low;
        };
-       i2cmux_0: i2cmux-0 {
+       i2cmux_0: i2cmux-0-pins {
                pins = "GPIO_17";
                function = "twi_scl_m";
                output-high;
        };
-       i2cmux_1: i2cmux-1 {
+       i2cmux_1: i2cmux-1-pins {
                pins = "GPIO_18";
                function = "twi_scl_m";
                output-high;
        };
-       i2cmux_2: i2cmux-2 {
+       i2cmux_2: i2cmux-2-pins {
                pins = "GPIO_20";
                function = "twi_scl_m";
                output-high;
        };
-       i2cmux_3: i2cmux-3 {
+       i2cmux_3: i2cmux-3-pins {
                pins = "GPIO_21";
                function = "twi_scl_m";
                output-high;
index 27c644f..cf2cf59 100644 (file)
 };
 
 &gpio {
-       i2cmux_pins_i: i2cmux-pins-i {
+       i2cmux_pins_i: i2cmux-pins {
                pins = "GPIO_17", "GPIO_16";
                function = "twi_scl_m";
                output-low;
        };
-       i2cmux_0: i2cmux-0 {
+       i2cmux_0: i2cmux-0-pins {
                pins = "GPIO_17";
                function = "twi_scl_m";
                output-high;
        };
-       i2cmux_1: i2cmux-1 {
+       i2cmux_1: i2cmux-1-pins {
                pins = "GPIO_16";
                function = "twi_scl_m";
                output-high;
index e51db65..cfc219a 100644 (file)
                                function = "uart2";
                        };
 
-                       miim1: miim1 {
+                       miim1_pins: miim1-pins {
                                pins = "GPIO_14", "GPIO_15";
                                function = "miim";
                        };
                        reg = <0x10700c0 0x24>;
                        interrupts = <15>;
                        pinctrl-names = "default";
-                       pinctrl-0 = <&miim1>;
+                       pinctrl-0 = <&miim1_pins>;
                        status = "disabled";
                };
 
index bd24069..d348742 100644 (file)
 };
 
 &gpio {
-       phy_int_pins: phy_int_pins {
+       phy_int_pins: phy-int-pins {
                pins = "GPIO_4";
                function = "gpio";
        };
 
-       phy_load_save_pins: phy_load_save_pins {
+       phy_load_save_pins: phy-load-save-pins {
                pins = "GPIO_10";
                function = "ptp2";
        };
@@ -40,7 +40,7 @@
 &mdio1 {
        status = "okay";
        pinctrl-names = "default";
-       pinctrl-0 = <&miim1>, <&phy_int_pins>, <&phy_load_save_pins>;
+       pinctrl-0 = <&miim1_pins>, <&phy_int_pins>, <&phy_load_save_pins>;
 
        phy7: ethernet-phy@0 {
                reg = <0>;
index 5b40483..0893de4 100644 (file)
                pins = "GPIO_7"; /* No "default" scl for i2c0 */
                function = "twi";
        };
-       i2cmux_pins_i: i2cmux-pins-i {
+       i2cmux_pins_i: i2cmux-pins {
                pins = "GPIO_11", "GPIO_12", "GPIO_18", "GPIO_19",
                        "GPIO_20", "GPIO_21";
                function = "twi_scl_m";
                output-low;
        };
-       i2cmux_0: i2cmux-0 {
+       i2cmux_0: i2cmux-0-pins {
                pins = "GPIO_11";
                function = "twi_scl_m";
                output-high;
        };
-       i2cmux_1: i2cmux-1 {
+       i2cmux_1: i2cmux-1-pins {
                pins = "GPIO_12";
                function = "twi_scl_m";
                output-high;
        };
-       i2cmux_2: i2cmux-2 {
+       i2cmux_2: i2cmux-2-pins {
                pins = "GPIO_18";
                function = "twi_scl_m";
                output-high;
        };
-       i2cmux_3: i2cmux-3 {
+       i2cmux_3: i2cmux-3-pins {
                pins = "GPIO_19";
                function = "twi_scl_m";
                output-high;
        };
-       i2cmux_4: i2cmux-4 {
+       i2cmux_4: i2cmux-4-pins {
                pins = "GPIO_20";
                function = "twi_scl_m";
                output-high;
        };
-       i2cmux_5: i2cmux-5 {
+       i2cmux_5: i2cmux-5-pins {
                pins = "GPIO_21";
                function = "twi_scl_m";
                output-high;
index 6069b33..826e91b 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinmux_spi_spi>, <&pinmux_spi_cs1_cs>;
 
-       m25p80@0 {
+       flash@0 {
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <40000000>;
index 5892bcf..37037e4 100644 (file)
@@ -60,7 +60,7 @@
 &spi0 {
        status = "okay";
 
-       m25p80@0 {
+       flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "jedec,spi-nor";
        status = "okay";
 };
 
-&pinctrl {
-       pinctrl-names = "default";
-       pinctrl-0 = <&state_default>;
-
-       state_default: state-default {
-               gpio-pinmux {
-                       groups = "rgmii2", "uart3", "wdt";
-                       function = "gpio";
-               };
-       };
+&gmac1 {
+       status = "okay";
+       phy-handle = <&ethphy4>;
 };
 
-&ethernet {
-       pinctrl-0 = <&mdio_pins>, <&rgmii1_pins>;
+&mdio {
+       ethphy4: ethernet-phy@4 {
+               reg = <4>;
+       };
 };
 
 &switch0 {
                        status = "okay";
                        label = "ethblack";
                };
-
-               port@4 {
-                       status = "okay";
-                       label = "ethblue";
-               };
        };
 };
index a7fce8d..a6201a1 100644 (file)
@@ -44,7 +44,7 @@
 &spi0 {
        status = "okay";
 
-       m25p80@0 {
+       flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "jedec,spi-nor";
        status = "okay";
 };
 
-&pinctrl {
-       pinctrl-names = "default";
-       pinctrl-0 = <&state_default>;
-
-       state_default: state-default {
-               gpio-pinmux {
-                       groups = "wdt";
-                       function = "gpio";
-               };
-       };
+&gmac1 {
+       status = "okay";
+       phy-handle = <&ethphy7>;
 };
 
-&ethernet {
-       gmac1: mac@1 {
-               status = "okay";
-               phy-handle = <&ethphy7>;
-       };
-
-       mdio-bus {
-               ethphy7: ethernet-phy@7 {
-                       reg = <7>;
-                       phy-mode = "rgmii-rxid";
-               };
+&mdio {
+       ethphy7: ethernet-phy@7 {
+               reg = <7>;
+               phy-mode = "rgmii-rxid";
        };
 };
 
index ee2ec78..ee46ace 100644 (file)
                        phy-mode = "rgmii-rxid";
                };
 
-               mdio-bus {
+               mdio: mdio-bus {
                        #address-cells = <1>;
                        #size-cells = <0>;
 
index 1bf53f3..02fc85f 100644 (file)
@@ -351,7 +351,7 @@ static void read_symtabs(FILE *fp)
 
 static void read_relocs(FILE *fp)
 {
-       static unsigned long base = 0;
+       static unsigned long base;
        int i, j;
 
        if (!base) {
index b63ad5d..306cee0 100644 (file)
@@ -318,7 +318,7 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
                }
 
                /*
-                * Determine if this is an entry that can satisify the
+                * Determine if this is an entry that can satisfy the
                 * request Check to make sure entry is large enough to
                 * satisfy request.
                 */
index fea71a8..a926322 100644 (file)
@@ -156,8 +156,9 @@ int __cvmx_helper_xaui_enable(int interface)
        xauiCtl.u64 = cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
        xauiCtl.s.lo_pwr = 0;
 
-       /* Issuing a reset here seems to hang some CN68XX chips. */
-       if (!OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X) &&
+       /* Issuing a reset here seems to hang some CN66XX/CN68XX chips. */
+       if (!OCTEON_IS_MODEL(OCTEON_CN66XX) &&
+           !OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X) &&
            !OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X))
                xauiCtl.s.reset = 1;
 
index b22f664..6f49fd9 100644 (file)
@@ -61,6 +61,12 @@ int cvmx_helper_get_number_of_interfaces(void)
 {
        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
                return 9;
+       if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
+               if (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0))
+                       return 7;
+               else
+                       return 8;
+       }
        if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
                return 4;
        if (OCTEON_IS_MODEL(OCTEON_CN7XXX))
index ae8806e..15faca4 100644 (file)
@@ -377,7 +377,7 @@ cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
                        /*
                         * Check to make sure all static priority
                         * queues are contiguous.  Also catches some
-                        * cases of static priorites not starting at
+                        * cases of static priorities not starting at
                         * queue 0.
                         */
                        if (static_priority_end != -1
index 07d7ff5..6cdcbf4 100644 (file)
@@ -1405,7 +1405,7 @@ static void octeon_irq_init_ciu2_percpu(void)
         * completed.
         *
         * There are 9 registers and 3 IPX levels with strides 0x1000
-        * and 0x200 respectivly.  Use loops to clear them.
+        * and 0x200 respectively.  Use loops to clear them.
         */
        for (regx = 0; regx <= 0x8000; regx += 0x1000) {
                for (ipx = 0; ipx <= 0x400; ipx += 0x200)
index 4df919d..5cffe1e 100644 (file)
@@ -419,7 +419,7 @@ static int dwc3_octeon_clocks_start(struct device *dev, u64 base)
        /* Step 5c: Enable SuperSpeed. */
        uctl_ctl.s.ref_ssp_en = 1;
 
-       /* Step 5d: Cofngiure PHYs. SKIP */
+       /* Step 5d: Configure PHYs. SKIP */
 
        /* Step 6a & 6b: Power up PHYs. */
        uctl_ctl.s.hs_power_en = 1;
index 9d75f5b..5bd55eb 100644 (file)
@@ -61,7 +61,7 @@ CONFIG_SERIAL_SC16IS7XX_SPI=y
 CONFIG_I2C=y
 CONFIG_I2C_JZ4780=y
 CONFIG_SPI=y
-CONFIG_SPI_GPIO=y
+CONFIG_SPI_INGENIC=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_SENSORS_ADS7828=m
 CONFIG_WATCHDOG=y
index 29decd0..cc69688 100644 (file)
@@ -64,7 +64,7 @@ CONFIG_SERIAL_SC16IS7XX_SPI=y
 CONFIG_I2C=y
 CONFIG_I2C_JZ4780=y
 CONFIG_SPI=y
-CONFIG_SPI_GPIO=y
+CONFIG_SPI_INGENIC=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_SENSORS_ADS7828=m
 CONFIG_WATCHDOG=y
index 130eb67..971f7b4 100644 (file)
@@ -68,13 +68,13 @@ static struct irq_chip ioasic_dma_irq_type = {
  * I/O ASIC implements two kinds of DMA interrupts, informational and
  * error interrupts.
  *
- * The formers do not stop DMA and should be cleared as soon as possible
+ * The former do not stop DMA and should be cleared as soon as possible
  * so that if they retrigger before the handler has completed, usually as
  * a side effect of actions taken by the handler, then they are reissued.
  * These use the `handle_edge_irq' handler that clears the request right
  * away.
  *
- * The latters stop DMA and do not resume it until the interrupt has been
+ * The latter stop DMA and do not resume it until the interrupt has been
  * cleared.  This cannot be done until after a corrective action has been
  * taken and this also means they will not retrigger.  Therefore they use
  * the `handle_fasteoi_irq' handler that only clears the request on the
index 82b00e4..6c3704f 100644 (file)
@@ -71,7 +71,7 @@ volatile u32 *ioasic_base;
 EXPORT_SYMBOL(ioasic_base);
 
 /*
- * IRQ routing and priority tables.  Priorites are set as follows:
+ * IRQ routing and priority tables.  Priorities are set as follows:
  *
  *             KN01    KN230   KN02    KN02-BA KN02-CA KN03
  *
index ef5fc1c..6618873 100644 (file)
@@ -32,7 +32,7 @@ static phys_addr_t prom_mem_size[MAX_PROM_MEM] __initdata;
 static unsigned int nr_prom_mem __initdata;
 
 /*
- * For ARC firmware memory functions the unit of meassuring memory is always
+ * For ARC firmware memory functions the unit of measuring memory is always
  * a 4k page of memory
  */
 #define ARC_PAGE_SHIFT 12
index 1e6c135..4044eaf 100644 (file)
@@ -128,48 +128,45 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 
 static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                                        __u32 len, __u8 proto,
-                                       __wsum sum)
+                                       __wsum isum)
 {
-       unsigned long tmp = (__force unsigned long)sum;
-
-       __asm__(
-       "       .set    push            # csum_tcpudp_nofold\n"
-       "       .set    noat            \n"
-#ifdef CONFIG_32BIT
-       "       addu    %0, %2          \n"
-       "       sltu    $1, %0, %2      \n"
-       "       addu    %0, $1          \n"
-
-       "       addu    %0, %3          \n"
-       "       sltu    $1, %0, %3      \n"
-       "       addu    %0, $1          \n"
-
-       "       addu    %0, %4          \n"
-       "       sltu    $1, %0, %4      \n"
-       "       addu    %0, $1          \n"
-#endif
-#ifdef CONFIG_64BIT
-       "       daddu   %0, %2          \n"
-       "       daddu   %0, %3          \n"
-       "       daddu   %0, %4          \n"
-       "       dsll32  $1, %0, 0       \n"
-       "       daddu   %0, $1          \n"
-       "       sltu    $1, %0, $1      \n"
-       "       dsra32  %0, %0, 0       \n"
-       "       addu    %0, $1          \n"
-#endif
-       "       .set    pop"
-       : "=r" (tmp)
-       : "0" ((__force unsigned long)daddr),
-         "r" ((__force unsigned long)saddr),
-#ifdef __MIPSEL__
-         "r" ((proto + len) << 8),
-#else
-         "r" (proto + len),
-#endif
-         "r" ((__force unsigned long)sum));
-
-       return (__force __wsum)tmp;
+       const unsigned int sh32 = IS_ENABLED(CONFIG_64BIT) ? 32 : 0;
+       unsigned long sum = (__force unsigned long)daddr;
+       unsigned long tmp;
+       __u32 osum;
+
+       tmp = (__force unsigned long)saddr;
+       sum += tmp;
+
+       if (IS_ENABLED(CONFIG_32BIT))
+               sum += sum < tmp;
+
+       /*
+        * We know PROTO + LEN has the sign bit clear, so cast to a signed
+        * type to avoid an extraneous zero-extension where TMP is 64-bit.
+        */
+       tmp = (__s32)(proto + len);
+       tmp <<= IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? 8 : 0;
+       sum += tmp;
+       if (IS_ENABLED(CONFIG_32BIT))
+               sum += sum < tmp;
+
+       tmp = (__force unsigned long)isum;
+       sum += tmp;
+
+       if (IS_ENABLED(CONFIG_32BIT)) {
+               sum += sum < tmp;
+               osum = sum;
+       } else if (IS_ENABLED(CONFIG_64BIT)) {
+               tmp = sum << sh32;
+               sum += tmp;
+               osum = sum < tmp;
+               osum += sum >> sh32;
+       } else {
+               BUILD_BUG();
+       }
+
+       return (__force __wsum)osum;
 }
 #define csum_tcpudp_nofold csum_tcpudp_nofold
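The 64-bit branch of the new csum_tcpudp_nofold() above folds the accumulated carries with a single shift/add/compare sequence. A minimal user-space sketch of that folding step follows; the function and variable names are illustrative, not taken from the kernel, and the proto/len word is assumed to have been byte-order-adjusted by the caller as in the code above.

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch of the 64-bit fold above: accumulate the pseudo-header
 * words in a 64-bit register, then add the low half (shifted up) onto the
 * sum so that the end-around carry plus the folded upper half form the
 * 32-bit partial checksum. */
static uint32_t fold_pseudo_hdr(uint32_t saddr, uint32_t daddr,
                                uint32_t proto_len, uint32_t isum)
{
        uint64_t sum = (uint64_t)saddr + daddr + proto_len + isum;
        uint64_t tmp = sum << 32;

        sum += tmp;
        return (uint32_t)(sum < tmp) + (uint32_t)(sum >> 32);
}

int main(void)
{
        /* Hypothetical addresses and a TCP pseudo-header length word. */
        printf("0x%08x\n",
               (unsigned int)fold_pseudo_hdr(0xc0a80001, 0xc0a80002, 6 + 20, 0));
        return 0;
}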
 
index bbb3bc5..ec01dc0 100644 (file)
@@ -9,28 +9,28 @@
 #include <asm/page.h>
 #include <asm/ptrace.h>
 
+#define __compat_uid_t __compat_uid_t
 typedef s32            __compat_uid_t;
 typedef s32            __compat_gid_t;
+
 typedef __compat_uid_t __compat_uid32_t;
 typedef __compat_gid_t __compat_gid32_t;
 #define __compat_uid32_t __compat_uid32_t
-#define __compat_gid32_t __compat_gid32_t
+
+#define compat_statfs          compat_statfs
+#define compat_ipc64_perm      compat_ipc64_perm
 
 #define _COMPAT_NSIG           128             /* Don't ask !$@#% ...  */
 #define _COMPAT_NSIG_BPW       32
 typedef u32            compat_sigset_word;
 
+#define COMPAT_RLIM_INFINITY   0x7fffffffUL
+
 #include <asm-generic/compat.h>
 
-#define COMPAT_USER_HZ         100
 #define COMPAT_UTS_MACHINE     "mips\0\0\0"
 
-typedef u32            compat_dev_t;
 typedef u32            compat_nlink_t;
-typedef s32            compat_ipc_pid_t;
-typedef struct {
-       s32     val[2];
-} compat_fsid_t;
 
 struct compat_stat {
        compat_dev_t    st_dev;
@@ -55,27 +55,8 @@ struct compat_stat {
        s32             st_pad4[14];
 };
 
-struct compat_flock {
-       short           l_type;
-       short           l_whence;
-       compat_off_t    l_start;
-       compat_off_t    l_len;
-       s32             l_sysid;
-       compat_pid_t    l_pid;
-       s32             pad[4];
-};
-
-#define F_GETLK64      33
-#define F_SETLK64      34
-#define F_SETLKW64     35
-
-struct compat_flock64 {
-       short           l_type;
-       short           l_whence;
-       compat_loff_t   l_start;
-       compat_loff_t   l_len;
-       compat_pid_t    l_pid;
-};
+#define __ARCH_COMPAT_FLOCK_EXTRA_SYSID                s32 l_sysid;
+#define __ARCH_COMPAT_FLOCK_PAD                        s32 pad[4];
 
 struct compat_statfs {
        int             f_type;
@@ -92,10 +73,6 @@ struct compat_statfs {
        int             f_spare[5];
 };
 
-#define COMPAT_RLIM_INFINITY   0x7fffffffUL
-
-#define COMPAT_OFF_T_MAX       0x7fffffff
-
 struct compat_ipc64_perm {
        compat_key_t key;
        __compat_uid32_t uid;
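The self-referential defines added above (e.g. #define __compat_uid_t __compat_uid_t, #define compat_statfs compat_statfs) are override markers for <asm-generic/compat.h>. A rough sketch of how such a guard is presumed to look on the generic side is given below; it is illustrative only and not copied from the generic header, and the u32 defaults are an assumption.

/* Generic-header side (sketch, assumption): supply a default only when the
 * architecture has not already provided a definition of the same name. */
#ifndef __compat_uid_t
typedef u32 __compat_uid_t;
typedef u32 __compat_gid_t;
#endif

#ifndef compat_statfs
struct compat_statfs {
        /* ... generic layout used when the arch does not override it ... */
};
#endif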
index de8cb2c..c098313 100644 (file)
 #  define raw_cpu_has_fpu      0
 # endif
 #else
+# if cpu_has_fpu
+#  error "Forcing `cpu_has_fpu' to non-zero is not supported"
+# endif
 # define raw_cpu_has_fpu       cpu_has_fpu
 #endif
 #ifndef cpu_has_32fpr
index c8385c4..568fe09 100644 (file)
@@ -25,7 +25,6 @@
 #define cpu_has_4kex                   1
 #define cpu_has_3k_cache               0
 #define cpu_has_4k_cache               1
-#define cpu_has_fpu                    1
 #define cpu_has_nofpuex                        0
 #define cpu_has_32fpr                  1
 #define cpu_has_counter                        1
index 8ad0c42..ce4e4c6 100644 (file)
@@ -28,7 +28,6 @@
 #define cpu_has_4kex                   1
 #define cpu_has_3k_cache               0
 #define cpu_has_4k_cache               1
-#define cpu_has_fpu                    1
 #define cpu_has_nofpuex                        0
 #define cpu_has_32fpr                  1
 #define cpu_has_counter                        1
index f7af11e..a9f0570 100644 (file)
@@ -6,7 +6,9 @@
 #define PCI_IOSIZE     SZ_64K
 #define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
 
+#ifdef CONFIG_PCI_DRIVERS_GENERIC
 #define pci_remap_iospace pci_remap_iospace
+#endif
 
 #include <asm/mach-generic/spaces.h>
 #endif
index 6c61e0a..c1c0b32 100644 (file)
@@ -253,6 +253,7 @@ enum cvmx_board_types_enum {
        CVMX_BOARD_TYPE_REDWING = 43,
        CVMX_BOARD_TYPE_NIC68_4 = 44,
        CVMX_BOARD_TYPE_NIC10E_66 = 45,
+       CVMX_BOARD_TYPE_SNIC10E = 50,
        CVMX_BOARD_TYPE_MAX,
 
        /*
@@ -369,6 +370,7 @@ static inline const char *cvmx_board_type_to_string(enum
                ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_REDWING)
                ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC68_4)
                ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC10E_66)
+               ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_SNIC10E)
                ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MAX)
 
                        /* Customer boards listed here */
index c2196b1..25a5253 100644 (file)
@@ -50,6 +50,8 @@
 # ifdef CONFIG_32BIT
 #  define __ARCH_WANT_STAT64
 #  define __ARCH_WANT_SYS_TIME32
+# else
+#  define __ARCH_WANT_COMPAT_STAT
 # endif
 # ifdef CONFIG_MIPS32_O32
 #  define __ARCH_WANT_SYS_TIME32
index 42e13de..0369a38 100644 (file)
 #define F_SETOWN       24      /*  for sockets. */
 #define F_GETOWN       23      /*  for sockets. */
 
-#ifndef __mips64
+#if __BITS_PER_LONG == 32 || defined(__KERNEL__)
 #define F_GETLK64      33      /*  using 'struct flock64' */
 #define F_SETLK64      34
 #define F_SETLKW64     35
-#endif
-
-/*
- * The flavours of struct flock.  "struct flock" is the ABI compliant
- * variant.  Finally struct flock64 is the LFS variant of struct flock.         As
- * a historic accident and inconsistence with the ABI definition it doesn't
- * contain all the same fields as struct flock.
- */
+#endif /* __BITS_PER_LONG == 32 || defined(__KERNEL__) */
 
 #if _MIPS_SIM != _MIPS_SIM_ABI64
-
-#include <linux/types.h>
-
-struct flock {
-       short   l_type;
-       short   l_whence;
-       __kernel_off_t  l_start;
-       __kernel_off_t  l_len;
-       long    l_sysid;
-       __kernel_pid_t l_pid;
-       long    pad[4];
-};
-
-#define HAVE_ARCH_STRUCT_FLOCK
-
-#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
+#define __ARCH_FLOCK_EXTRA_SYSID       long l_sysid;
+#define __ARCH_FLOCK_PAD               long pad[4];
+#endif
 
 #include <asm-generic/fcntl.h>
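With the arch-specific struct flock removed above, the layout is reassembled by <asm-generic/fcntl.h> from the two macros that remain. A sketch of the resulting structure, reconstructed from the deleted MIPS definition rather than copied from the generic header, is:

/* Sketch (assumption about the generic header's expansion): */
struct flock {
        short           l_type;
        short           l_whence;
        __kernel_off_t  l_start;
        __kernel_off_t  l_len;
        __ARCH_FLOCK_EXTRA_SYSID        /* -> "long l_sysid;" on MIPS o32/n32 */
        __kernel_pid_t  l_pid;
        __ARCH_FLOCK_PAD                /* -> "long pad[4];"  on MIPS o32/n32 */
};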
 
index 8a8bb78..aaccdc6 100644 (file)
@@ -22,8 +22,8 @@ struct stat {
        __kernel_ino_t  st_ino;
        __kernel_mode_t st_mode;
        __u32           st_nlink;
-       __kernel_uid_t  st_uid;
-       __kernel_gid_t  st_gid;
+       __kernel_uid32_t st_uid;
+       __kernel_gid32_t st_gid;
        unsigned        st_rdev;
        long            st_pad2[2];
        long            st_size;
@@ -58,8 +58,8 @@ struct stat64 {
        __kernel_mode_t st_mode;
        __u32           st_nlink;
 
-       __kernel_uid_t  st_uid;
-       __kernel_gid_t  st_gid;
+       __kernel_uid32_t st_uid;
+       __kernel_gid32_t st_gid;
 
        unsigned long   st_rdev;
        unsigned long   st_pad1[3];     /* Reserved for st_rdev expansion  */
@@ -99,8 +99,8 @@ struct stat {
        __kernel_mode_t         st_mode;
        __u32                   st_nlink;
 
-       __kernel_uid_t          st_uid;
-       __kernel_gid_t          st_gid;
+       __kernel_uid32_t        st_uid;
+       __kernel_gid32_t        st_gid;
 
        unsigned int            st_rdev;
        unsigned int            st_pad1[3]; /* Reserved for st_rdev expansion */
index dfeffba..1eb6090 100644 (file)
 #ifndef _ASM_TERMBITS_H
 #define _ASM_TERMBITS_H
 
-#include <linux/posix_types.h>
+#include <asm-generic/termbits-common.h>
 
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
-typedef unsigned int tcflag_t;
+typedef unsigned int   tcflag_t;
 
 /*
  * The ABI says nothing about NCC but seems to use NCCS as
@@ -54,175 +52,126 @@ struct ktermios {
 };
 
 /* c_cc characters */
-#define VINTR           0              /* Interrupt character [ISIG] */
-#define VQUIT           1              /* Quit character [ISIG] */
-#define VERASE          2              /* Erase character [ICANON] */
-#define VKILL           3              /* Kill-line character [ICANON] */
-#define VMIN            4              /* Minimum number of bytes read at once [!ICANON] */
-#define VTIME           5              /* Time-out value (tenths of a second) [!ICANON] */
-#define VEOL2           6              /* Second EOL character [ICANON] */
+#define VINTR           0              /* Interrupt character [ISIG] */
+#define VQUIT           1              /* Quit character [ISIG] */
+#define VERASE          2              /* Erase character [ICANON] */
+#define VKILL           3              /* Kill-line character [ICANON] */
+#define VMIN            4              /* Minimum number of bytes read at once [!ICANON] */
+#define VTIME           5              /* Time-out value (tenths of a second) [!ICANON] */
+#define VEOL2           6              /* Second EOL character [ICANON] */
 #define VSWTC           7              /* ??? */
 #define VSWTCH         VSWTC
-#define VSTART          8              /* Start (X-ON) character [IXON, IXOFF] */
-#define VSTOP           9              /* Stop (X-OFF) character [IXON, IXOFF] */
-#define VSUSP          10              /* Suspend character [ISIG] */
+#define VSTART          8              /* Start (X-ON) character [IXON, IXOFF] */
+#define VSTOP           9              /* Stop (X-OFF) character [IXON, IXOFF] */
+#define VSUSP          10              /* Suspend character [ISIG] */
 #if 0
 /*
  * VDSUSP is not supported
  */
-#define VDSUSP         11              /* Delayed suspend character [ISIG] */
+#define VDSUSP         11              /* Delayed suspend character [ISIG] */
 #endif
-#define VREPRINT       12              /* Reprint-line character [ICANON] */
-#define VDISCARD       13              /* Discard character [IEXTEN] */
-#define VWERASE                14              /* Word-erase character [ICANON] */
-#define VLNEXT         15              /* Literal-next character [IEXTEN] */
-#define VEOF           16              /* End-of-file character [ICANON] */
-#define VEOL           17              /* End-of-line character [ICANON] */
+#define VREPRINT       12              /* Reprint-line character [ICANON] */
+#define VDISCARD       13              /* Discard character [IEXTEN] */
+#define VWERASE                14              /* Word-erase character [ICANON] */
+#define VLNEXT         15              /* Literal-next character [IEXTEN] */
+#define VEOF           16              /* End-of-file character [ICANON] */
+#define VEOL           17              /* End-of-line character [ICANON] */
 
 /* c_iflag bits */
-#define IGNBRK 0000001         /* Ignore break condition.  */
-#define BRKINT 0000002         /* Signal interrupt on break.  */
-#define IGNPAR 0000004         /* Ignore characters with parity errors.  */
-#define PARMRK 0000010         /* Mark parity and framing errors.  */
-#define INPCK  0000020         /* Enable input parity check.  */
-#define ISTRIP 0000040         /* Strip 8th bit off characters.  */
-#define INLCR  0000100         /* Map NL to CR on input.  */
-#define IGNCR  0000200         /* Ignore CR.  */
-#define ICRNL  0000400         /* Map CR to NL on input.  */
-#define IUCLC  0001000         /* Map upper case to lower case on input.  */
-#define IXON   0002000         /* Enable start/stop output control.  */
-#define IXANY  0004000         /* Any character will restart after stop.  */
-#define IXOFF  0010000         /* Enable start/stop input control.  */
-#define IMAXBEL 0020000                /* Ring bell when input queue is full.  */
-#define IUTF8  0040000         /* Input is UTF-8 */
+#define IUCLC  0x0200          /* Map upper case to lower case on input */
+#define IXON   0x0400          /* Enable start/stop output control */
+#define IXOFF  0x1000          /* Enable start/stop input control */
+#define IMAXBEL        0x2000          /* Ring bell when input queue is full */
+#define IUTF8  0x4000          /* Input is UTF-8 */
 
 /* c_oflag bits */
-#define OPOST  0000001         /* Perform output processing.  */
-#define OLCUC  0000002         /* Map lower case to upper case on output.  */
-#define ONLCR  0000004         /* Map NL to CR-NL on output.  */
-#define OCRNL  0000010
-#define ONOCR  0000020
-#define ONLRET 0000040
-#define OFILL  0000100
-#define OFDEL  0000200
-#define NLDLY  0000400
-#define          NL0   0000000
-#define          NL1   0000400
-#define CRDLY  0003000
-#define          CR0   0000000
-#define          CR1   0001000
-#define          CR2   0002000
-#define          CR3   0003000
-#define TABDLY 0014000
-#define          TAB0  0000000
-#define          TAB1  0004000
-#define          TAB2  0010000
-#define          TAB3  0014000
-#define          XTABS 0014000
-#define BSDLY  0020000
-#define          BS0   0000000
-#define          BS1   0020000
-#define VTDLY  0040000
-#define          VT0   0000000
-#define          VT1   0040000
-#define FFDLY  0100000
-#define          FF0   0000000
-#define          FF1   0100000
+#define OLCUC  0x00002         /* Map lower case to upper case on output */
+#define ONLCR  0x00004         /* Map NL to CR-NL on output */
+#define NLDLY  0x00100
+#define   NL0  0x00000
+#define   NL1  0x00100
+#define CRDLY  0x00600
+#define   CR0  0x00000
+#define   CR1  0x00200
+#define   CR2  0x00400
+#define   CR3  0x00600
+#define TABDLY 0x01800
+#define   TAB0 0x00000
+#define   TAB1 0x00800
+#define   TAB2 0x01000
+#define   TAB3 0x01800
+#define   XTABS        0x01800
+#define BSDLY  0x02000
+#define   BS0  0x00000
+#define   BS1  0x02000
+#define VTDLY  0x04000
+#define   VT0  0x00000
+#define   VT1  0x04000
+#define FFDLY  0x08000
+#define   FF0  0x00000
+#define   FF1  0x08000
 /*
 #define PAGEOUT ???
 #define WRAP   ???
  */
 
 /* c_cflag bit meaning */
-#define CBAUD  0010017
-#define         B0     0000000         /* hang up */
-#define         B50    0000001
-#define         B75    0000002
-#define         B110   0000003
-#define         B134   0000004
-#define         B150   0000005
-#define         B200   0000006
-#define         B300   0000007
-#define         B600   0000010
-#define         B1200  0000011
-#define         B1800  0000012
-#define         B2400  0000013
-#define         B4800  0000014
-#define         B9600  0000015
-#define         B19200 0000016
-#define         B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE  0000060         /* Number of bits per byte (mask).  */
-#define          CS5   0000000         /* 5 bits per byte.  */
-#define          CS6   0000020         /* 6 bits per byte.  */
-#define          CS7   0000040         /* 7 bits per byte.  */
-#define          CS8   0000060         /* 8 bits per byte.  */
-#define CSTOPB 0000100         /* Two stop bits instead of one.  */
-#define CREAD  0000200         /* Enable receiver.  */
-#define PARENB 0000400         /* Parity enable.  */
-#define PARODD 0001000         /* Odd parity instead of even.  */
-#define HUPCL  0002000         /* Hang up on last close.  */
-#define CLOCAL 0004000         /* Ignore modem status lines.  */
-#define CBAUDEX 0010000
-#define           BOTHER 0010000
-#define           B57600 0010001
-#define          B115200 0010002
-#define          B230400 0010003
-#define          B460800 0010004
-#define          B500000 0010005
-#define          B576000 0010006
-#define          B921600 0010007
-#define         B1000000 0010010
-#define         B1152000 0010011
-#define         B1500000 0010012
-#define         B2000000 0010013
-#define         B2500000 0010014
-#define         B3000000 0010015
-#define         B3500000 0010016
-#define         B4000000 0010017
-#define CIBAUD   002003600000  /* input baud rate */
-#define CMSPAR   010000000000  /* mark or space (stick) parity */
-#define CRTSCTS          020000000000  /* flow control */
-
-#define IBSHIFT 16             /* Shift from CBAUD to CIBAUD */
+#define CBAUD          0x0000100f
+#define CSIZE          0x00000030      /* Number of bits per byte (mask) */
+#define   CS5          0x00000000      /* 5 bits per byte */
+#define   CS6          0x00000010      /* 6 bits per byte */
+#define   CS7          0x00000020      /* 7 bits per byte */
+#define   CS8          0x00000030      /* 8 bits per byte */
+#define CSTOPB         0x00000040      /* Two stop bits instead of one */
+#define CREAD          0x00000080      /* Enable receiver */
+#define PARENB         0x00000100      /* Parity enable */
+#define PARODD         0x00000200      /* Odd parity instead of even */
+#define HUPCL          0x00000400      /* Hang up on last close */
+#define CLOCAL         0x00000800      /* Ignore modem status lines */
+#define CBAUDEX                0x00001000
+#define BOTHER         0x00001000
+#define     B57600     0x00001001
+#define    B115200     0x00001002
+#define    B230400     0x00001003
+#define    B460800     0x00001004
+#define    B500000     0x00001005
+#define    B576000     0x00001006
+#define    B921600     0x00001007
+#define   B1000000     0x00001008
+#define   B1152000     0x00001009
+#define   B1500000     0x0000100a
+#define   B2000000     0x0000100b
+#define   B2500000     0x0000100c
+#define   B3000000     0x0000100d
+#define   B3500000     0x0000100e
+#define   B4000000     0x0000100f
+#define CIBAUD         0x100f0000      /* input baud rate */
 
 /* c_lflag bits */
-#define ISIG   0000001         /* Enable signals.  */
-#define ICANON 0000002         /* Do erase and kill processing.  */
-#define XCASE  0000004
-#define ECHO   0000010         /* Enable echo.  */
-#define ECHOE  0000020         /* Visual erase for ERASE.  */
-#define ECHOK  0000040         /* Echo NL after KILL.  */
-#define ECHONL 0000100         /* Echo NL even if ECHO is off.  */
-#define NOFLSH 0000200         /* Disable flush after interrupt.  */
-#define IEXTEN 0000400         /* Enable DISCARD and LNEXT.  */
-#define ECHOCTL 0001000                /* Echo control characters as ^X.  */
-#define ECHOPRT 0002000                /* Hardcopy visual erase.  */
-#define ECHOKE 0004000         /* Visual erase for KILL.  */
-#define FLUSHO 0020000
-#define PENDIN 0040000         /* Retype pending input (state).  */
-#define TOSTOP 0100000         /* Send SIGTTOU for background output.  */
-#define ITOSTOP TOSTOP
-#define EXTPROC 0200000                /* External processing on pty */
+#define ISIG   0x00001         /* Enable signals */
+#define ICANON 0x00002         /* Do erase and kill processing */
+#define XCASE  0x00004
+#define ECHO   0x00008         /* Enable echo */
+#define ECHOE  0x00010         /* Visual erase for ERASE */
+#define ECHOK  0x00020         /* Echo NL after KILL */
+#define ECHONL 0x00040         /* Echo NL even if ECHO is off */
+#define NOFLSH 0x00080         /* Disable flush after interrupt */
+#define IEXTEN 0x00100         /* Enable DISCARD and LNEXT */
+#define ECHOCTL        0x00200         /* Echo control characters as ^X */
+#define ECHOPRT        0x00400         /* Hardcopy visual erase */
+#define ECHOKE 0x00800         /* Visual erase for KILL */
+#define FLUSHO 0x02000
+#define PENDIN 0x04000         /* Retype pending input (state) */
+#define TOSTOP 0x08000         /* Send SIGTTOU for background output */
+#define ITOSTOP        TOSTOP
+#define EXTPROC        0x10000         /* External processing on pty */
 
 /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
 #define TIOCSER_TEMT   0x01    /* Transmitter physically empty */
 
-/* tcflow() and TCXONC use these */
-#define TCOOFF         0       /* Suspend output.  */
-#define TCOON          1       /* Restart suspended output.  */
-#define TCIOFF         2       /* Send a STOP character.  */
-#define TCION          3       /* Send a START character.  */
-
-/* tcflush() and TCFLSH use these */
-#define TCIFLUSH       0       /* Discard data received but not yet read.  */
-#define TCOFLUSH       1       /* Discard data written but not yet sent.  */
-#define TCIOFLUSH      2       /* Discard all pending data.  */
-
 /* tcsetattr uses these */
-#define TCSANOW                TCSETS  /* Change immediately */
-#define TCSADRAIN      TCSETSW /* Change when pending output is written */
-#define TCSAFLUSH      TCSETSF /* Flush pending input before changing */
+#define TCSANOW                TCSETS  /* Change immediately */
+#define TCSADRAIN      TCSETSW /* Change when pending output is written */
+#define TCSAFLUSH      TCSETSF /* Flush pending input before changing */
 
 #endif /* _ASM_TERMBITS_H */
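The c_cflag values above keep their numeric meaning (only the radix of the defines changes), so the usual BOTHER custom-baud sequence still applies. A minimal user-space sketch follows, assuming the standard termios2/TCGETS2/TCSETS2 interface is available; the device path and helper name are hypothetical.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <asm/termbits.h>       /* struct termios2, CBAUD, BOTHER */

/* Program an arbitrary line rate via the BOTHER encoding defined above. */
static int set_custom_baud(int fd, unsigned int rate)
{
        struct termios2 tio;

        if (ioctl(fd, TCGETS2, &tio) < 0)
                return -1;
        tio.c_cflag &= ~CBAUD;
        tio.c_cflag |= BOTHER;
        tio.c_ispeed = rate;
        tio.c_ospeed = rate;
        return ioctl(fd, TCSETS2, &tio);
}

int main(void)
{
        int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

        if (fd < 0 || set_custom_baud(fd, 1500000) < 0)
                perror("set_custom_baud");
        return 0;
}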
index 495ba7c..264d453 100644 (file)
@@ -141,7 +141,7 @@ void __init plat_time_init(void)
        /*
         * Set clock to 100Hz.
         *
-        * The R4030 timer receives an input clock of 1kHz which is divieded by
+        * The R4030 timer receives an input clock of 1kHz which is divided by
         * a programmable 4-bit divider.  This makes it fairly inflexible.
         */
        r4030_write_reg32(JAZZ_TIMER_INTERVAL, 9);
index ac9c8cf..e974a49 100644 (file)
@@ -22,7 +22,7 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
 
        /*
         * Calculate a shift & mask that correspond to the value we wish to
-        * exchange within the naturally aligned 4 byte integerthat includes
+        * exchange within the naturally aligned 4 byte integer that includes
         * it.
         */
        shift = (unsigned long)ptr & 0x3;
index f0ea929..d510f62 100644 (file)
@@ -156,7 +156,7 @@ static inline void check_errata(void)
                /*
                 * Erratum "RPS May Cause Incorrect Instruction Execution"
                 * This code only handles VPE0, any SMP/RTOS code
-                * making use of VPE1 will be responsable for that VPE.
+                * making use of VPE1 will be responsible for that VPE.
                 */
                if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
                        write_c0_config7(read_c0_config7() | MIPS_CONF7_RPS);
index 146d9fa..53adcc1 100644 (file)
@@ -228,7 +228,7 @@ void __init check_wait(void)
                        break;
 
                /*
-                * Another rev is incremeting c0_count at a reduced clock
+                * Another rev is incrementing c0_count at a reduced clock
                 * rate while in WAIT mode.  So we basically have the choice
                 * between using the cp0 timer as clocksource or avoiding
                 * the WAIT instruction.  Until more details are known,
index 6c7f3b1..316b27d 100644 (file)
@@ -44,10 +44,11 @@ static const union mips_instruction breakpoint2_insn = {
 DEFINE_PER_CPU(struct kprobe *, current_kprobe);
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
-static int __kprobes insn_has_delayslot(union mips_instruction insn)
+static int insn_has_delayslot(union mips_instruction insn)
 {
        return __insn_has_delay_slot(insn);
 }
+NOKPROBE_SYMBOL(insn_has_delayslot);
 
 /*
  * insn_has_ll_or_sc function checks whether instruction is ll or sc
@@ -56,7 +57,7 @@ static int __kprobes insn_has_delayslot(union mips_instruction insn)
  * instructions; cannot do much about breakpoint in the middle of
  * ll/sc pair; it is upto user to avoid those places
  */
-static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
+static int insn_has_ll_or_sc(union mips_instruction insn)
 {
        int ret = 0;
 
@@ -72,8 +73,9 @@ static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
        }
        return ret;
 }
+NOKPROBE_SYMBOL(insn_has_ll_or_sc);
 
-int __kprobes arch_prepare_kprobe(struct kprobe *p)
+int arch_prepare_kprobe(struct kprobe *p)
 {
        union mips_instruction insn;
        union mips_instruction prev_insn;
@@ -132,26 +134,30 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 out:
        return ret;
 }
+NOKPROBE_SYMBOL(arch_prepare_kprobe);
 
-void __kprobes arch_arm_kprobe(struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
 {
        *p->addr = breakpoint_insn;
        flush_insn_slot(p);
 }
+NOKPROBE_SYMBOL(arch_arm_kprobe);
 
-void __kprobes arch_disarm_kprobe(struct kprobe *p)
+void arch_disarm_kprobe(struct kprobe *p)
 {
        *p->addr = p->opcode;
        flush_insn_slot(p);
 }
+NOKPROBE_SYMBOL(arch_disarm_kprobe);
 
-void __kprobes arch_remove_kprobe(struct kprobe *p)
+void arch_remove_kprobe(struct kprobe *p)
 {
        if (p->ainsn.insn) {
                free_insn_slot(p->ainsn.insn, 0);
                p->ainsn.insn = NULL;
        }
 }
+NOKPROBE_SYMBOL(arch_remove_kprobe);
 
 static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
@@ -257,7 +263,7 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
  * breakpoint trap. In case of branch instructions, the target
  * epc to be restored.
  */
-static void __kprobes resume_execution(struct kprobe *p,
+static void resume_execution(struct kprobe *p,
                                       struct pt_regs *regs,
                                       struct kprobe_ctlblk *kcb)
 {
@@ -268,8 +274,9 @@ static void __kprobes resume_execution(struct kprobe *p,
                regs->cp0_epc = orig_epc + 4;
        }
 }
+NOKPROBE_SYMBOL(resume_execution);
 
-static int __kprobes kprobe_handler(struct pt_regs *regs)
+static int kprobe_handler(struct pt_regs *regs)
 {
        struct kprobe *p;
        int ret = 0;
@@ -367,6 +374,7 @@ no_kprobe:
        return ret;
 
 }
+NOKPROBE_SYMBOL(kprobe_handler);
 
 static inline int post_kprobe_handler(struct pt_regs *regs)
 {
@@ -415,7 +423,7 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 /*
  * Wrapper routine for handling exceptions.
  */
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+int kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
 {
 
@@ -446,6 +454,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
        }
        return ret;
 }
+NOKPROBE_SYMBOL(kprobe_exceptions_notify);
 
 /*
  * Function return probe trampoline:
@@ -469,7 +478,7 @@ static void __used kretprobe_trampoline_holder(void)
 
 void __kretprobe_trampoline(void);
 
-void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+void arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
 {
        ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
@@ -478,11 +487,12 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
        /* Replace the return addr with trampoline addr */
        regs->regs[31] = (unsigned long)__kretprobe_trampoline;
 }
+NOKPROBE_SYMBOL(arch_prepare_kretprobe);
 
 /*
  * Called when the probe at kretprobe trampoline is hit
  */
-static int __kprobes trampoline_probe_handler(struct kprobe *p,
+static int trampoline_probe_handler(struct kprobe *p,
                                                struct pt_regs *regs)
 {
        instruction_pointer(regs) = __kretprobe_trampoline_handler(regs, NULL);
@@ -493,14 +503,16 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
         */
        return 1;
 }
+NOKPROBE_SYMBOL(trampoline_probe_handler);
 
-int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+int arch_trampoline_kprobe(struct kprobe *p)
 {
        if (p->addr == (kprobe_opcode_t *)__kretprobe_trampoline)
                return 1;
 
        return 0;
 }
+NOKPROBE_SYMBOL(arch_trampoline_kprobe);
 
 static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *)__kretprobe_trampoline,
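The conversion above replaces the __kprobes function attribute with NOKPROBE_SYMBOL() annotations. A minimal sketch of the resulting pattern is shown below (the helper itself is hypothetical); the symbol is blacklisted for kprobes just the same, without forcing the function into the special .kprobes.text section.

#include <linux/kprobes.h>
#include <linux/ptrace.h>

/* Sketch: a helper that must never be probed itself.  Define it normally
 * and blacklist it afterwards instead of tagging the body with __kprobes. */
static int example_resume_helper(struct pt_regs *regs)
{
        return instruction_pointer(regs) != 0;  /* placeholder body */
}
NOKPROBE_SYMBOL(example_resume_helper);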
index 17aff13..3e386f7 100644 (file)
@@ -28,6 +28,7 @@ phys_addr_t __weak mips_cpc_default_phys_base(void)
        cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
        if (cpc_node) {
                err = of_address_to_resource(cpc_node, 0, &res);
+               of_node_put(cpc_node);
                if (!err)
                        return res.start;
        }
index 1641d27..c4d6b09 100644 (file)
@@ -329,7 +329,7 @@ static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
        for (i = mipspmu.num_counters - 1; i >= 0; i--) {
                /*
                 * Note that some MIPS perf events can be counted by both
-                * even and odd counters, wheresas many other are only by
+                * even and odd counters, whereas many other are only by
                 * even _or_ odd counters. This introduces an issue that
                 * when the former kind of event takes the counter the
                 * latter kind of event wants to use, then the "counter
index c2d5f4b..35b912b 100644 (file)
@@ -105,10 +105,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 /*
  * Copy architecture-specific thread state
  */
-int copy_thread(unsigned long clone_flags, unsigned long usp,
-               unsigned long kthread_arg, struct task_struct *p,
-               unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long usp = args->stack;
+       unsigned long tls = args->tls;
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs, *regs = current_pt_regs();
        unsigned long childksp;
@@ -120,12 +121,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        /*  Put the stack after the struct pt_regs.  */
        childksp = (unsigned long) childregs;
        p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                /* kernel thread */
                unsigned long status = p->thread.cp0_status;
                memset(childregs, 0, sizeof(struct pt_regs));
-               p->thread.reg16 = usp; /* fn */
-               p->thread.reg17 = kthread_arg;
+               p->thread.reg16 = (unsigned long)args->fn;
+               p->thread.reg17 = (unsigned long)args->fn_arg;
                p->thread.reg29 = childksp;
                p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
 #if defined(CONFIG_CPU_R3000)
index 6288780..e7ce07b 100644 (file)
@@ -114,8 +114,7 @@ void machine_halt(void)
 
 void machine_power_off(void)
 {
-       if (pm_power_off)
-               pm_power_off();
+       do_kernel_power_off();
 
 #ifdef CONFIG_SMP
        preempt_disable();
index ef73ba1..2ca156a 100644 (file)
@@ -37,6 +37,7 @@
 #include <asm/cdmm.h>
 #include <asm/cpu.h>
 #include <asm/debug.h>
+#include <asm/mmzone.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp-ops.h>
@@ -344,6 +345,11 @@ static int __init early_parse_mem(char *p)
 {
        phys_addr_t start, size;
 
+       if (!p) {
+               pr_err("mem parameter is empty, do nothing\n");
+               return -EINVAL;
+       }
+
        /*
         * If a user specifies memory size, we
         * blow away any automatically generated
@@ -359,7 +365,10 @@ static int __init early_parse_mem(char *p)
        if (*p == '@')
                start = memparse(p + 1, &p);
 
-       memblock_add(start, size);
+       if (IS_ENABLED(CONFIG_NUMA))
+               memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
+       else
+               memblock_add(start, size);
 
        return 0;
 }
@@ -554,7 +563,7 @@ static void __init bootcmdline_init(void)
         * unmodified.
         */
        if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
-               strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+               strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
                return;
        }
 
@@ -566,7 +575,7 @@ static void __init bootcmdline_init(void)
         * boot_command_line to undo anything early_init_dt_scan_chosen() did.
         */
        if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
-               strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+               strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
        else
                boot_command_line[0] = 0;
 
@@ -628,7 +637,7 @@ static void __init arch_mem_init(char **cmdline_p)
        memblock_set_bottom_up(true);
 
        bootcmdline_init();
-       strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+       strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;
 
        parse_early_param();
index 1986d13..1d93b85 100644 (file)
@@ -518,6 +518,12 @@ static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
+       if (!mm)
+               return;
+
+       if (atomic_read(&mm->mm_users) == 0)
+               return;         /* happens as a result of exit_mmap() */
+
        preempt_disable();
 
        if (cpu_has_mmid) {
index a3b50d5..4e91971 100644 (file)
@@ -153,7 +153,7 @@ EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);
  * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
  * @vcpu:      KVM VCPU pointer.
  * @gpa:       Guest virtual address in a TLB mapped guest segment.
- * @gpa:       Ponter to output guest physical address it maps to.
+ * @gpa:       Pointer to output guest physical address it maps to.
  *
  * Converts a guest virtual address in a guest TLB mapped segment to a guest
  * physical address, by probing the guest TLB.
index e27879b..2ef9da0 100644 (file)
@@ -46,7 +46,7 @@ menuconfig CEVT_CSRC_LS1X
          If unsure, say N.
 
 choice
-       prompt  "Select clockevent/clocksource"
+       prompt "Select clockevent/clocksource"
        depends on CEVT_CSRC_LS1X
        default TIMER_USE_PWM0
 
index 44f9810..b08bc55 100644 (file)
@@ -35,7 +35,7 @@ int show_unhandled_signals = 1;
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  */
-static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
+static void __do_page_fault(struct pt_regs *regs, unsigned long write,
        unsigned long address)
 {
        struct vm_area_struct * vma = NULL;
@@ -322,8 +322,9 @@ vmalloc_fault:
        }
 #endif
 }
+NOKPROBE_SYMBOL(__do_page_fault);
 
-asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+asmlinkage void do_page_fault(struct pt_regs *regs,
        unsigned long write, unsigned long address)
 {
        enum ctx_state prev_state;
@@ -332,3 +333,4 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
        __do_page_fault(regs, write, address);
        exception_exit(prev_state);
 }
+NOKPROBE_SYMBOL(do_page_fault);
index 044b11b..83c975d 100644 (file)
@@ -722,7 +722,7 @@ static void emit_atomic_r32(struct jit_context *ctx,
                  0, JIT_RESERVED_STACK);
        /*
         * Argument 1: dst+off if xchg, otherwise src, passed in register a0
-        * Argument 2: src if xchg, othersize dst+off, passed in register a1
+        * Argument 2: src if xchg, otherwise dst+off, passed in register a1
         */
        emit(ctx, move, MIPS_R_T9, dst);
        if (code == BPF_XCHG) {
index d919a0d..c9edd3f 100644 (file)
@@ -895,7 +895,7 @@ retry:
        mem_access_subid.s.nsw = 0;     /* Enable Snoop for Writes. */
        mem_access_subid.s.ror = 0;     /* Disable Relaxed Ordering for Reads. */
        mem_access_subid.s.row = 0;     /* Disable Relaxed Ordering for Writes. */
-       mem_access_subid.s.ba = 0;      /* PCIe Adddress Bits <63:34>. */
+       mem_access_subid.s.ba = 0;      /* PCIe Address Bits <63:34>. */
 
        /*
         * Setup mem access 12-15 for port 0, 16-19 for port 1,
@@ -1345,7 +1345,7 @@ static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
        mem_access_subid.s.esw = 1;     /* Endian-swap for Writes. */
        mem_access_subid.s.wtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
        mem_access_subid.s.rtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
-       /* PCIe Adddress Bits <63:34>. */
+       /* PCIe Address Bits <63:34>. */
        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
                mem_access_subid.cn68xx.ba = 0;
        else
index 36afe1b..f695320 100644 (file)
@@ -111,7 +111,7 @@ void __init pic32_config_init(void)
        pic32_reset_status = readl(pic32_conf_base + PIC32_RCON);
        writel(-1, PIC32_CLR(pic32_conf_base + PIC32_RCON));
 
-       /* Device Inforation */
+       /* Device Information */
        pr_info("Device Id: 0x%08x, Device Ver: 0x%04x\n",
                pic32_get_device_id(),
                pic32_get_device_version());
index 9028dbb..8f0861c 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/sched/signal.h>
-#include <linux/notifier.h>
 #include <linux/panic_notifier.h>
 #include <linux/pm.h>
 #include <linux/timer.h>
@@ -41,7 +40,7 @@
 static struct timer_list power_timer, blink_timer, debounce_timer;
 static unsigned long blink_timer_timeout;
 
-#define MACHINE_PANICED                1
+#define MACHINE_PANICKED               1
 #define MACHINE_SHUTTING_DOWN  2
 
 static int machine_state;
@@ -112,7 +111,7 @@ static void debounce(struct timer_list *unused)
                return;
        }
 
-       if (machine_state & MACHINE_PANICED)
+       if (machine_state & MACHINE_PANICKED)
                sgimc->cpuctrl0 |= SGIMC_CCTRL0_SYSINIT;
 
        enable_irq(SGI_PANEL_IRQ);
@@ -120,7 +119,7 @@ static void debounce(struct timer_list *unused)
 
 static inline void power_button(void)
 {
-       if (machine_state & MACHINE_PANICED)
+       if (machine_state & MACHINE_PANICKED)
                return;
 
        if ((machine_state & MACHINE_SHUTTING_DOWN) ||
@@ -167,9 +166,9 @@ static irqreturn_t panel_int(int irq, void *dev_id)
 static int panic_event(struct notifier_block *this, unsigned long event,
                      void *ptr)
 {
-       if (machine_state & MACHINE_PANICED)
+       if (machine_state & MACHINE_PANICKED)
                return NOTIFY_DONE;
-       machine_state |= MACHINE_PANICED;
+       machine_state |= MACHINE_PANICKED;
 
        blink_timer_timeout = PANIC_FREQ;
        blink_timeout(&blink_timer);
index 000ede1..e762886 100644 (file)
@@ -53,6 +53,8 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
        }
        platform_device_add_resources(pdev, &w1_res, 1);
        platform_device_add_data(pdev, wd, sizeof(*wd));
+       /* platform_device_add_data() duplicates the data */
+       kfree(wd);
        platform_device_add(pdev);
 
        bd = kzalloc(sizeof(*bd), GFP_KERNEL);
@@ -83,6 +85,8 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
        bd->io_offset   = offset;
 
        platform_device_add_data(pdev, bd, sizeof(*bd));
+       /* platform_device_add_data() duplicates the data */
+       kfree(bd);
        platform_device_add(pdev);
        pr_info("xtalk:n%d/%x bridge widget\n", nasid, widget);
        return;
index 8a28946..8129524 100644 (file)
@@ -63,6 +63,8 @@ static void bridge_platform_create(int widget, int masterwid)
        }
        platform_device_add_resources(pdev, &w1_res, 1);
        platform_device_add_data(pdev, wd, sizeof(*wd));
+       /* platform_device_add_data() duplicates the data */
+       kfree(wd);
        platform_device_add(pdev);
 
        bd = kzalloc(sizeof(*bd), GFP_KERNEL);
@@ -92,6 +94,8 @@ static void bridge_platform_create(int widget, int masterwid)
        bd->io_offset   = IP30_SWIN_BASE(widget);
 
        platform_device_add_data(pdev, bd, sizeof(*bd));
+       /* platform_device_add_data() duplicates the data */
+       kfree(bd);
        platform_device_add(pdev);
        pr_info("xtalk:%x bridge widget\n", widget);
        return;
index 6f34b87..e3e8070 100644 (file)
@@ -34,8 +34,6 @@ static char *pass_str;
 
 static int __init setup_bcm1x80_bcm1x55(void)
 {
-       int ret = 0;
-
        switch (soc_pass) {
        case K_SYS_REVISION_BCM1480_S0:
                periph_rev = 1;
@@ -64,7 +62,7 @@ static int __init setup_bcm1x80_bcm1x55(void)
                break;
        }
 
-       return ret;
+       return 0;
 }
 
 /* Setup code likely to be common to all SiByte platforms */
index bdbc7b4..5f68a4f 100644 (file)
@@ -217,7 +217,7 @@ static int check_code(uint64_t pc, uint32_t *code, size_t sz)
 )
 
        /*
-        * Skip the first instructionm allowing check_ll to look backwards
+        * Skip the first instruction, allowing check_ll to look backwards
         * unconditionally.
         */
        advance();
index fb99872..e988455 100644 (file)
@@ -225,7 +225,7 @@ txx9_alloc_pci_controller(struct pci_controller *pcic,
 static int __init
 txx9_arch_pci_init(void)
 {
-       PCIBIOS_MIN_IO = 0x8000;        /* reseve legacy I/O space */
+       PCIBIOS_MIN_IO = 0x8000;        /* reserve legacy I/O space */
        return 0;
 }
 arch_initcall(txx9_arch_pci_init);
index b59ee54..e4cbe11 100644 (file)
@@ -236,8 +236,6 @@ static int __init vr41xx_cmu_init(void)
        if (current_cpu_type() == CPU_VR4133)
                cmuclkmsk2 = cmu_read(CMUCLKMSK2);
 
-       spin_lock_init(&cmu_lock);
-
        return 0;
 }
 
index f8ea522..29593b9 100644 (file)
@@ -100,21 +100,23 @@ void flush_thread(void)
 {
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
-               struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long usp = args->stack;
+       unsigned long tls = args->tls;
        struct pt_regs *childregs = task_pt_regs(p);
        struct pt_regs *regs;
        struct switch_stack *stack;
        struct switch_stack *childstack =
                ((struct switch_stack *)childregs) - 1;
 
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                memset(childstack, 0,
                        sizeof(struct switch_stack) + sizeof(struct pt_regs));
 
-               childstack->r16 = usp;          /* fn */
-               childstack->r17 = arg;
+               childstack->r16 = (unsigned long) args->fn;
+               childstack->r17 = (unsigned long) args->fn_arg;
                childstack->ra = (unsigned long) ret_from_kernel_thread;
                childregs->estatus = STATUS_PIE;
                childregs->sp = (unsigned long) childstack;
index 1d4c092..52dc983 100644 (file)
@@ -167,9 +167,11 @@ extern asmlinkage void ret_from_fork(void);
  */
 
 int
-copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
-           struct task_struct *p, unsigned long tls)
+copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long usp = args->stack;
+       unsigned long tls = args->tls;
        struct pt_regs *userregs;
        struct pt_regs *kregs;
        unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
@@ -187,10 +189,10 @@ copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
        sp -= sizeof(struct pt_regs);
        kregs = (struct pt_regs *)sp;
 
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                memset(kregs, 0, sizeof(struct pt_regs));
-               kregs->gpr[20] = usp; /* fn, kernel thread */
-               kregs->gpr[22] = arg;
+               kregs->gpr[20] = (unsigned long)args->fn;
+               kregs->gpr[22] = (unsigned long)args->fn_arg;
        } else {
                *userregs = *current_pt_regs();
 
index bd22578..5f2448d 100644 (file)
@@ -332,10 +332,6 @@ config COMPAT
        def_bool y
        depends on 64BIT
 
-config SYSVIPC_COMPAT
-       def_bool y
-       depends on COMPAT && SYSVIPC
-
 config AUDIT_ARCH
        def_bool y
 
index aca1710..e38d993 100644 (file)
@@ -18,7 +18,6 @@
 boot := arch/parisc/boot
 KBUILD_IMAGE := $(boot)/bzImage
 
-NM             = sh $(srctree)/arch/parisc/nm
 CHECKFLAGS     += -D__hppa__=1
 
 ifdef CONFIG_64BIT
index ea0cb31..0f0d4a4 100644 (file)
        depd,z  \r, 63-(\sa), 64-(\sa), \t
        .endm
 
-       /* Shift Right - note the r and t can NOT be the same! */
+       /* Shift Right for 32-bit. Clobbers the upper 32 bits on PA2.0. */
        .macro shr r, sa, t
        extru \r, 31-(\sa), 32-(\sa), \t
        .endm
 #endif
        .endm
 
+       /* The depw instruction leaves the most significant 32 bits of the
+        * target register in an undefined state on PA 2.0 systems. */
+       .macro dep_safe i, p, len, t
+#ifdef CONFIG_64BIT
+       depd    \i, 32+(\p), \len, \t
+#else
+       depw    \i, \p, \len, \t
+#endif
+       .endm
+
        /* load 32-bit 'value' into 'reg' compensating for the ldil
         * sign-extension when running in wide mode.
         * WARNING!! neither 'value' nor 'reg' can be expressions
index 5032e75..e23d06b 100644 (file)
@@ -54,6 +54,7 @@ void parisc_setup_cache_timing(void);
 #define asm_io_sync()  asm volatile("sync" \
                        ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
                        ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :::"memory")
+#define asm_syncdma()  asm volatile("syncdma" :::"memory")
 
 #endif /* ! __ASSEMBLY__ */
 
index c04f5a6..339d1b8 100644 (file)
 #define compat_mode_t compat_mode_t
 typedef u16    compat_mode_t;
 
+#define compat_ipc_pid_t compat_ipc_pid_t
+typedef u16    compat_ipc_pid_t;
+
+#define compat_ipc64_perm compat_ipc64_perm
+
 #include <asm-generic/compat.h>
 
-#define COMPAT_USER_HZ                 100
 #define COMPAT_UTS_MACHINE     "parisc\0\0"
 
-typedef u32    __compat_uid_t;
-typedef u32    __compat_gid_t;
-typedef u32    compat_dev_t;
 typedef u16    compat_nlink_t;
-typedef u16    compat_ipc_pid_t;
 
 struct compat_stat {
        compat_dev_t            st_dev; /* dev_t is 32 bits on parisc */
@@ -53,37 +53,6 @@ struct compat_stat {
        u32                     st_spare4[3];
 };
 
-struct compat_flock {
-       short                   l_type;
-       short                   l_whence;
-       compat_off_t            l_start;
-       compat_off_t            l_len;
-       compat_pid_t            l_pid;
-};
-
-struct compat_flock64 {
-       short                   l_type;
-       short                   l_whence;
-       compat_loff_t           l_start;
-       compat_loff_t           l_len;
-       compat_pid_t            l_pid;
-};
-
-struct compat_statfs {
-       s32             f_type;
-       s32             f_bsize;
-       s32             f_blocks;
-       s32             f_bfree;
-       s32             f_bavail;
-       s32             f_files;
-       s32             f_ffree;
-       __kernel_fsid_t f_fsid;
-       s32             f_namelen;
-       s32             f_frsize;
-       s32             f_flags;
-       s32             f_spare[4];
-};
-
 struct compat_sigcontext {
        compat_int_t sc_flags;
        compat_int_t sc_gr[32]; /* PSW in sc_gr[0] */
@@ -93,10 +62,6 @@ struct compat_sigcontext {
        compat_int_t sc_sar; /* cr11 */
 };
 
-#define COMPAT_RLIM_INFINITY 0xffffffff
-
-#define COMPAT_OFF_T_MAX       0x7fffffff
-
 struct compat_ipc64_perm {
        compat_key_t key;
        __compat_uid_t uid;
index c4cd636..d63a2ac 100644 (file)
@@ -12,9 +12,13 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
 }
 
+#if defined(CONFIG_STI_CONSOLE) || defined(CONFIG_FB_STI)
+int fb_is_primary_device(struct fb_info *info);
+#else
 static inline int fb_is_primary_device(struct fb_info *info)
 {
        return 0;
 }
+#endif
 
 #endif /* _ASM_FB_H_ */
index e480b2c..5cd80ce 100644 (file)
@@ -9,12 +9,27 @@
  *
  * All of the values in this file must be <4GB (because of assembly
  * loading restrictions).  If you place this region anywhere above
- * __PAGE_OFFSET, you must adjust the memory map accordingly */
+ * __PAGE_OFFSET, you must adjust the memory map accordingly
+ */
 
-/* The alias region is used in kernel space to do copy/clear to or
- * from areas congruently mapped with user space.  It is 8MB large
- * and must be 16MB aligned */
-#define TMPALIAS_MAP_START     ((__PAGE_OFFSET) - 16*1024*1024)
+/*
+ * The tmpalias region is used in kernel space to copy/clear/flush data
+ * from pages congruently mapped with user space. It is comprised of
+ * a pair regions. The size of these regions is determined by the largest
+ * cache aliasing boundary for machines that support equivalent aliasing.
+ *
+ * The c3750 with PA8700 processor returns an alias value of 11. This
+ * indicates that it has an alias boundary of 4 MB. It also supports
+ * non-equivalent aliasing without a performance penalty.
+ *
+ * Machines with PA8800/PA8900 processors return an alias value of 0.
+ * This indicates the alias boundary is unknown and may be larger than
+ * 16 MB. Non-equivalent aliasing is not supported.
+ *
+ * Here we assume the maximum alias boundary is 4 MB.
+ */
+#define TMPALIAS_SIZE_BITS     22      /* 4 MB */
+#define TMPALIAS_MAP_START     ((__PAGE_OFFSET) - (2 << TMPALIAS_SIZE_BITS))
 
 #define FIXMAP_SIZE            (FIX_BITMAP_COUNT << PAGE_SHIFT)
 #define FIXMAP_START           (TMPALIAS_MAP_START - FIXMAP_SIZE)
index 7708a58..e38f9a9 100644 (file)
@@ -142,7 +142,6 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)       \
 }
 
 #define __ARCH_WANT_NEW_STAT
-#define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_ALARM
 #define __ARCH_WANT_SYS_GETHOSTNAME
@@ -156,7 +155,6 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)       \
 #define __ARCH_WANT_SYS_FADVISE64
 #define __ARCH_WANT_SYS_GETPGRP
 #define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_FORK
@@ -164,6 +162,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)       \
 #define __ARCH_WANT_SYS_CLONE
 #define __ARCH_WANT_SYS_CLONE3
 #define __ARCH_WANT_COMPAT_SYS_SENDFILE
+#define __ARCH_WANT_COMPAT_STAT
 
 #ifdef CONFIG_64BIT
 #define __ARCH_WANT_SYS_TIME
index 40e920f..3a8938d 100644 (file)
@@ -2,10 +2,8 @@
 #ifndef __ARCH_PARISC_TERMBITS_H__
 #define __ARCH_PARISC_TERMBITS_H__
 
-#include <linux/posix_types.h>
+#include <asm-generic/termbits-common.h>
 
-typedef unsigned char  cc_t;
-typedef unsigned int   speed_t;
 typedef unsigned int   tcflag_t;
 
 #define NCCS 19
@@ -41,158 +39,107 @@ struct ktermios {
 };
 
 /* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
-
+#define VINTR           0
+#define VQUIT           1
+#define VERASE          2
+#define VKILL           3
+#define VEOF            4
+#define VTIME           5
+#define VMIN            6
+#define VSWTC           7
+#define VSTART          8
+#define VSTOP           9
+#define VSUSP          10
+#define VEOL           11
+#define VREPRINT       12
+#define VDISCARD       13
+#define VWERASE                14
+#define VLNEXT         15
+#define VEOL2          16
 
 /* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK  0000020
-#define ISTRIP 0000040
-#define INLCR  0000100
-#define IGNCR  0000200
-#define ICRNL  0000400
-#define IUCLC  0001000
-#define IXON   0002000
-#define IXANY  0004000
-#define IXOFF  0010000
-#define IMAXBEL        0040000
-#define IUTF8  0100000
+#define IUCLC  0x0200
+#define IXON   0x0400
+#define IXOFF  0x1000
+#define IMAXBEL        0x4000
+#define IUTF8  0x8000
 
 /* c_oflag bits */
-#define OPOST  0000001
-#define OLCUC  0000002
-#define ONLCR  0000004
-#define OCRNL  0000010
-#define ONOCR  0000020
-#define ONLRET 0000040
-#define OFILL  0000100
-#define OFDEL  0000200
-#define NLDLY  0000400
-#define   NL0  0000000
-#define   NL1  0000400
-#define CRDLY  0003000
-#define   CR0  0000000
-#define   CR1  0001000
-#define   CR2  0002000
-#define   CR3  0003000
-#define TABDLY 0014000
-#define   TAB0 0000000
-#define   TAB1 0004000
-#define   TAB2 0010000
-#define   TAB3 0014000
-#define   XTABS        0014000
-#define BSDLY  0020000
-#define   BS0  0000000
-#define   BS1  0020000
-#define VTDLY  0040000
-#define   VT0  0000000
-#define   VT1  0040000
-#define FFDLY  0100000
-#define   FF0  0000000
-#define   FF1  0100000
+#define OLCUC  0x00002
+#define ONLCR  0x00004
+#define NLDLY  0x00100
+#define   NL0  0x00000
+#define   NL1  0x00100
+#define CRDLY  0x00600
+#define   CR0  0x00000
+#define   CR1  0x00200
+#define   CR2  0x00400
+#define   CR3  0x00600
+#define TABDLY 0x01800
+#define   TAB0 0x00000
+#define   TAB1 0x00800
+#define   TAB2 0x01000
+#define   TAB3 0x01800
+#define   XTABS        0x01800
+#define BSDLY  0x02000
+#define   BS0  0x00000
+#define   BS1  0x02000
+#define VTDLY  0x04000
+#define   VT0  0x00000
+#define   VT1  0x04000
+#define FFDLY  0x08000
+#define   FF0  0x00000
+#define   FF1  0x08000
 
 /* c_cflag bit meaning */
-#define CBAUD   0010017
-#define  B0     0000000         /* hang up */
-#define  B50    0000001
-#define  B75    0000002
-#define  B110   0000003
-#define  B134   0000004
-#define  B150   0000005
-#define  B200   0000006
-#define  B300   0000007
-#define  B600   0000010
-#define  B1200  0000011
-#define  B1800  0000012
-#define  B2400  0000013
-#define  B4800  0000014
-#define  B9600  0000015
-#define  B19200 0000016
-#define  B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE   0000060
-#define   CS5   0000000
-#define   CS6   0000020
-#define   CS7   0000040
-#define   CS8   0000060
-#define CSTOPB  0000100
-#define CREAD   0000200
-#define PARENB  0000400
-#define PARODD  0001000
-#define HUPCL   0002000
-#define CLOCAL  0004000
-#define CBAUDEX 0010000
-#define    BOTHER 0010000
-#define    B57600 0010001
-#define   B115200 0010002
-#define   B230400 0010003
-#define   B460800 0010004
-#define   B500000 0010005
-#define   B576000 0010006
-#define   B921600 0010007
-#define  B1000000 0010010
-#define  B1152000 0010011
-#define  B1500000 0010012
-#define  B2000000 0010013
-#define  B2500000 0010014
-#define  B3000000 0010015
-#define  B3500000 0010016
-#define  B4000000 0010017
-#define CIBAUD    002003600000         /* input baud rate */
-#define CMSPAR    010000000000          /* mark or space (stick) parity */
-#define CRTSCTS   020000000000          /* flow control */
-
-#define IBSHIFT        16              /* Shift from CBAUD to CIBAUD */
-
+#define CBAUD          0x0000100f
+#define CSIZE          0x00000030
+#define   CS5          0x00000000
+#define   CS6          0x00000010
+#define   CS7          0x00000020
+#define   CS8          0x00000030
+#define CSTOPB         0x00000040
+#define CREAD          0x00000080
+#define PARENB         0x00000100
+#define PARODD         0x00000200
+#define HUPCL          0x00000400
+#define CLOCAL         0x00000800
+#define CBAUDEX                0x00001000
+#define BOTHER         0x00001000
+#define     B57600     0x00001001
+#define    B115200     0x00001002
+#define    B230400     0x00001003
+#define    B460800     0x00001004
+#define    B500000     0x00001005
+#define    B576000     0x00001006
+#define    B921600     0x00001007
+#define   B1000000     0x00001008
+#define   B1152000     0x00001009
+#define   B1500000     0x0000100a
+#define   B2000000     0x0000100b
+#define   B2500000     0x0000100c
+#define   B3000000     0x0000100d
+#define   B3500000     0x0000100e
+#define   B4000000     0x0000100f
+#define CIBAUD         0x100f0000              /* input baud rate */
 
 /* c_lflag bits */
-#define ISIG    0000001
-#define ICANON  0000002
-#define XCASE   0000004
-#define ECHO    0000010
-#define ECHOE   0000020
-#define ECHOK   0000040
-#define ECHONL  0000100
-#define NOFLSH  0000200
-#define TOSTOP  0000400
-#define ECHOCTL 0001000
-#define ECHOPRT 0002000
-#define ECHOKE  0004000
-#define FLUSHO  0010000
-#define PENDIN  0040000
-#define IEXTEN  0100000
-#define EXTPROC        0200000
-
-/* tcflow() and TCXONC use these */
-#define        TCOOFF          0
-#define        TCOON           1
-#define        TCIOFF          2
-#define        TCION           3
-
-/* tcflush() and TCFLSH use these */
-#define        TCIFLUSH        0
-#define        TCOFLUSH        1
-#define        TCIOFLUSH       2
+#define ISIG   0x00001
+#define ICANON 0x00002
+#define XCASE  0x00004
+#define ECHO   0x00008
+#define ECHOE  0x00010
+#define ECHOK  0x00020
+#define ECHONL 0x00040
+#define NOFLSH 0x00080
+#define TOSTOP 0x00100
+#define ECHOCTL        0x00200
+#define ECHOPRT        0x00400
+#define ECHOKE 0x00800
+#define FLUSHO 0x01000
+#define PENDIN 0x04000
+#define IEXTEN 0x08000
+#define EXTPROC        0x10000
 
 /* tcsetattr uses these */
 #define        TCSANOW         0
index 0fd0407..c8a11fc 100644 (file)
@@ -754,6 +754,9 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;
 
+       /* Ensure DMA is complete */
+       asm_syncdma();
+
        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            (unsigned long)size >= parisc_cache_flush_threshold) {
                flush_tlb_kernel_range(start, end);
index ecf5015..df8102f 100644 (file)
        extrd,s         \pte,63,25,\pte
        .endm
 
-       /* The alias region is an 8MB aligned 16MB to do clear and
-        * copy user pages at addresses congruent with the user
+       /* The alias region is comprised of a pair of 4 MB regions
+        * aligned to 8 MB. It is used to clear/copy/flush user pages
+        * using kernel virtual addresses congruent with the user
         * virtual address.
         *
         * To use the alias page, you set %r26 up with the to TLB
        .macro          do_alias        spc,tmp,tmp1,va,pte,prot,fault,patype
        cmpib,COND(<>),n 0,\spc,\fault
        ldil            L%(TMPALIAS_MAP_START),\tmp
-#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
-       /* on LP64, ldi will sign extend into the upper 32 bits,
-        * which is behaviour we don't want */
-       depdi           0,31,32,\tmp
-#endif
        copy            \va,\tmp1
-       depi            0,31,23,\tmp1
+       depi_safe       0,31,TMPALIAS_SIZE_BITS+1,\tmp1
        cmpb,COND(<>),n \tmp,\tmp1,\fault
        mfctl           %cr19,\tmp      /* iir */
        /* get the opcode (first six bits) into \tmp */
         * OK, it is in the temp alias region, check whether "from" or "to".
         * Check "subtle" note in pacache.S re: r23/r26.
         */
-#ifdef CONFIG_64BIT
-       extrd,u,*=      \va,41,1,%r0
-#else
-       extrw,u,=       \va,9,1,%r0
-#endif
+       extrw,u,=       \va,31-TMPALIAS_SIZE_BITS,1,%r0
        or,COND(tr)     %r23,%r0,\pte
        or              %r26,%r0,\pte
+
+       /* convert phys addr in \pte (from r23 or r26) to tlb insert format */
+       SHRREG          \pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte
+       depi_safe       _PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte
        .endm 
 
 
index b4c3f01..9a0018f 100644 (file)
@@ -300,7 +300,6 @@ fdoneloop2:
        fdce,m          %arg1(%sr1, %arg0)      /* Fdce for one loop */
 
 fdsync:
-       syncdma
        sync
        mtsm            %r22                    /* restore I-bit */
 89:    ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
@@ -488,6 +487,8 @@ ENDPROC_CFI(copy_page_asm)
  *       parisc chip designers that there will not ever be a parisc
  *       chip with a larger alias boundary (Never say never :-) ).
  *
+ *       Yah, what about the PA8800 and PA8900 processors?
+ *
  *       Subtle: the dtlb miss handlers support the temp alias region by
  *       "knowing" that if a dtlb miss happens within the temp alias
  *       region it must have occurred while in clear_user_page. Since
@@ -499,19 +500,10 @@ ENDPROC_CFI(copy_page_asm)
  *       miss on the translation, the dtlb miss handler inserts the
  *       translation into the tlb using these values:
  *
- *          %r26 physical page (shifted for tlb insert) of "to" translation
- *          %r23 physical page (shifted for tlb insert) of "from" translation
+ *          %r26 physical address of "to" translation
+ *          %r23 physical address of "from" translation
  */
 
-        /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
-        #define PAGE_ADD_SHIFT  (PAGE_SHIFT-12)
-        .macro          convert_phys_for_tlb_insert20  phys
-        extrd,u         \phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
-#if _PAGE_SIZE_ENCODING_DEFAULT
-        depdi           _PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
-#endif
-       .endm
-
        /*
         * copy_user_page_asm() performs a page copy using mappings
         * equivalent to the user page mappings.  It can be used to
@@ -540,24 +532,10 @@ ENTRY_CFI(copy_user_page_asm)
        sub             %r25, %r1, %r23
 
        ldil            L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-#if (TMPALIAS_MAP_START >= 0x80000000)
-       depdi           0, 31,32, %r28          /* clear any sign extension */
-#endif
-       convert_phys_for_tlb_insert20 %r26      /* convert phys addr to tlb insert format */
-       convert_phys_for_tlb_insert20 %r23      /* convert phys addr to tlb insert format */
-       depd            %r24,63,22, %r28        /* Form aliased virtual address 'to' */
-       depdi           0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
-       copy            %r28, %r29
-       depdi           1, 41,1, %r29           /* Form aliased virtual address 'from' */
-#else
-       extrw,u         %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
-       extrw,u         %r23, 24,25, %r23       /* convert phys addr to tlb insert format */
-       depw            %r24, 31,22, %r28       /* Form aliased virtual address 'to' */
-       depwi           0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
+       dep_safe        %r24, 31,TMPALIAS_SIZE_BITS, %r28       /* Form aliased virtual address 'to' */
+       depi_safe       0, 31,PAGE_SHIFT, %r28                  /* Clear any offset bits */
        copy            %r28, %r29
-       depwi           1, 9,1, %r29            /* Form aliased virtual address 'from' */
-#endif
+       depi_safe       1, 31-TMPALIAS_SIZE_BITS,1, %r29        /* Form aliased virtual address 'from' */
 
        /* Purge any old translations */
 
@@ -687,18 +665,8 @@ ENTRY_CFI(clear_user_page_asm)
        tophys_r1       %r26
 
        ldil            L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-#if (TMPALIAS_MAP_START >= 0x80000000)
-       depdi           0, 31,32, %r28          /* clear any sign extension */
-#endif
-       convert_phys_for_tlb_insert20 %r26      /* convert phys addr to tlb insert format */
-       depd            %r25, 63,22, %r28       /* Form aliased virtual address 'to' */
-       depdi           0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
-#else
-       extrw,u         %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
-       depw            %r25, 31,22, %r28       /* Form aliased virtual address 'to' */
-       depwi           0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
-#endif
+       dep_safe        %r25, 31,TMPALIAS_SIZE_BITS, %r28       /* Form aliased virtual address 'to' */
+       depi_safe       0, 31,PAGE_SHIFT, %r28                  /* Clear any offset bits */
 
        /* Purge any old translation */
 
@@ -763,18 +731,8 @@ ENDPROC_CFI(clear_user_page_asm)
 
 ENTRY_CFI(flush_dcache_page_asm)
        ldil            L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-#if (TMPALIAS_MAP_START >= 0x80000000)
-       depdi           0, 31,32, %r28          /* clear any sign extension */
-#endif
-       convert_phys_for_tlb_insert20 %r26      /* convert phys addr to tlb insert format */
-       depd            %r25, 63,22, %r28       /* Form aliased virtual address 'to' */
-       depdi           0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
-#else
-       extrw,u         %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
-       depw            %r25, 31,22, %r28       /* Form aliased virtual address 'to' */
-       depwi           0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
-#endif
+       dep_safe        %r25, 31,TMPALIAS_SIZE_BITS, %r28       /* Form aliased virtual address 'to' */
+       depi_safe       0, 31,PAGE_SHIFT, %r28                  /* Clear any offset bits */
 
        /* Purge any old translation */
 
@@ -822,18 +780,8 @@ ENDPROC_CFI(flush_dcache_page_asm)
 
 ENTRY_CFI(purge_dcache_page_asm)
        ldil            L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-#if (TMPALIAS_MAP_START >= 0x80000000)
-       depdi           0, 31,32, %r28          /* clear any sign extension */
-#endif
-       convert_phys_for_tlb_insert20 %r26      /* convert phys addr to tlb insert format */
-       depd            %r25, 63,22, %r28       /* Form aliased virtual address 'to' */
-       depdi           0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
-#else
-       extrw,u         %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
-       depw            %r25, 31,22, %r28       /* Form aliased virtual address 'to' */
-       depwi           0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
-#endif
+       dep_safe        %r25, 31,TMPALIAS_SIZE_BITS, %r28       /* Form aliased virtual address 'to' */
+       depi_safe       0, 31,PAGE_SHIFT, %r28                  /* Clear any offset bits */
 
        /* Purge any old translation */
 
@@ -881,18 +829,8 @@ ENDPROC_CFI(purge_dcache_page_asm)
 
 ENTRY_CFI(flush_icache_page_asm)
        ldil            L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-#if (TMPALIAS_MAP_START >= 0x80000000)
-       depdi           0, 31,32, %r28          /* clear any sign extension */
-#endif
-       convert_phys_for_tlb_insert20 %r26      /* convert phys addr to tlb insert format */
-       depd            %r25, 63,22, %r28       /* Form aliased virtual address 'to' */
-       depdi           0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
-#else
-       extrw,u         %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
-       depw            %r25, 31,22, %r28       /* Form aliased virtual address 'to' */
-       depwi           0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
-#endif
+       dep_safe        %r25, 31,TMPALIAS_SIZE_BITS, %r28       /* Form aliased virtual address 'to' */
+       depi_safe       0, 31,PAGE_SHIFT, %r28                  /* Clear any offset bits */
 
        /* Purge any old translation.  Note that the FIC instruction
         * may use either the instruction or data TLB.  Given that we
@@ -1098,7 +1036,6 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
 
        sync
 89:    ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
-       syncdma
        bv              %r0(%r2)
        nop
 ENDPROC_CFI(flush_kernel_dcache_range_asm)
@@ -1140,7 +1077,6 @@ ENTRY_CFI(purge_kernel_dcache_range_asm)
 
        sync
 89:    ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
-       syncdma
        bv              %r0(%r2)
        nop
 ENDPROC_CFI(purge_kernel_dcache_range_asm)
index 28b6a2a..7c37e09 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/personality.h>
 #include <linux/ptrace.h>
+#include <linux/reboot.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/task.h>
@@ -116,8 +117,7 @@ void machine_power_off(void)
        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
 
        /* ipmi_poweroff may have been installed. */
-       if (pm_power_off)
-               pm_power_off();
+       do_kernel_power_off();
                
        /* It seems we have no way to power the system off via
         * software. The user has to press the button himself. */
@@ -206,9 +206,11 @@ arch_initcall(parisc_idle_init);
  * Copy architecture-specific thread state
  */
 int
-copy_thread(unsigned long clone_flags, unsigned long usp,
-           unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
+copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long usp = args->stack;
+       unsigned long tls = args->tls;
        struct pt_regs *cregs = &(p->thread.regs);
        void *stack = task_stack_page(p);
        
@@ -218,10 +220,10 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
        extern void * const ret_from_kernel_thread;
        extern void * const child_return;
 
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                /* kernel thread */
                memset(cregs, 0, sizeof(struct pt_regs));
-               if (!usp) /* idle thread */
+               if (args->idle) /* idle thread */
                        return 0;
                /* Must exit via ret_from_kernel_thread in order
                 * to call schedule_tail()
@@ -233,12 +235,12 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
                 * ret_from_kernel_thread.
                 */
 #ifdef CONFIG_64BIT
-               cregs->gr[27] = ((unsigned long *)usp)[3];
-               cregs->gr[26] = ((unsigned long *)usp)[2];
+               cregs->gr[27] = ((unsigned long *)args->fn)[3];
+               cregs->gr[26] = ((unsigned long *)args->fn)[2];
 #else
-               cregs->gr[26] = usp;
+               cregs->gr[26] = (unsigned long) args->fn;
 #endif
-               cregs->gr[25] = kthread_arg;
+               cregs->gr[25] = (unsigned long) args->fn_arg;
        } else {
                /* user thread */
                /* usp must be word aligned.  This also prevents users from
index 26eb568..dddaaa6 100644 (file)
@@ -327,8 +327,6 @@ int init_per_cpu(int cpunum)
        set_firmware_width();
        ret = pdc_coproc_cfg(&coproc_cfg);
 
-       store_cpu_topology(cpunum);
-
        if(ret >= 0 && coproc_cfg.ccr_functional) {
                mtctl(coproc_cfg.ccr_functional, 10);  /* 10 == Coprocessor Control Reg */
 
index 9696e3c..b9d845e 100644 (file)
@@ -20,8 +20,6 @@
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
-static int dualcores_found;
-
 /*
  * store_cpu_topology is called at boot when only one cpu is running
  * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
@@ -60,7 +58,6 @@ void store_cpu_topology(unsigned int cpuid)
                        if (p->cpu_loc) {
                                cpuid_topo->core_id++;
                                cpuid_topo->package_id = cpu_topology[cpu].package_id;
-                               dualcores_found = 1;
                                continue;
                        }
                }
@@ -80,22 +77,11 @@ void store_cpu_topology(unsigned int cpuid)
                cpu_topology[cpuid].package_id);
 }
 
-static struct sched_domain_topology_level parisc_mc_topology[] = {
-#ifdef CONFIG_SCHED_MC
-       { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
-#endif
-
-       { cpu_cpu_mask, SD_INIT_NAME(DIE) },
-       { NULL, },
-};
-
 /*
  * init_cpu_topology is called at boot when only one cpu is running
  * which prevent simultaneous write access to cpu_topology array
  */
 void __init init_cpu_topology(void)
 {
-       /* Set scheduler topology descriptor */
-       if (dualcores_found)
-               set_sched_topology(parisc_mc_topology);
+       reset_cpu_topology();
 }
index 1dc2e88..0a81499 100644 (file)
@@ -555,6 +555,12 @@ void __init mem_init(void)
        BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD);
 #endif
 
+#ifdef CONFIG_64BIT
+       /* prevent ldil_%L() asm statements from sign-extending into the upper 32 bits */
+       BUILD_BUG_ON(__PAGE_OFFSET >= 0x80000000);
+       BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000);
+#endif
+
        high_memory = __va((max_pfn << PAGE_SHIFT));
        set_max_mapnr(max_low_pfn);
        memblock_free_all();
diff --git a/arch/parisc/nm b/arch/parisc/nm
deleted file mode 100644 (file)
index c788308..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-##
-# Hack to have an nm which removes the local symbols.  We also rely
-# on this nm being hidden out of the ordinarily executable path
-##
-${CROSS_COMPILE}nm $* | grep -v '.LC*[0-9]*$'
index 3eaddb8..be68c1f 100644 (file)
@@ -303,11 +303,6 @@ config COMPAT
        select ARCH_WANT_OLD_COMPAT_IPC
        select COMPAT_OLD_SIGACTION
 
-config SYSVIPC_COMPAT
-       bool
-       depends on COMPAT && SYSVIPC
-       default y
-
 config SCHED_OMIT_FRAME_POINTER
        bool
        default y
index 7afc96f..dda4091 100644 (file)
@@ -8,21 +8,20 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 
+#define compat_ipc_pid_t compat_ipc_pid_t
+typedef u16            compat_ipc_pid_t;
+
+#define compat_ipc64_perm compat_ipc64_perm
+
 #include <asm-generic/compat.h>
 
-#define COMPAT_USER_HZ         100
 #ifdef __BIG_ENDIAN__
 #define COMPAT_UTS_MACHINE     "ppc\0\0"
 #else
 #define COMPAT_UTS_MACHINE     "ppcle\0\0"
 #endif
 
-typedef u32            __compat_uid_t;
-typedef u32            __compat_gid_t;
-typedef u32            compat_dev_t;
 typedef s16            compat_nlink_t;
-typedef u16            compat_ipc_pid_t;
-typedef __kernel_fsid_t        compat_fsid_t;
 
 struct compat_stat {
        compat_dev_t    st_dev;
@@ -44,45 +43,6 @@ struct compat_stat {
        u32             __unused4[2];
 };
 
-struct compat_flock {
-       short           l_type;
-       short           l_whence;
-       compat_off_t    l_start;
-       compat_off_t    l_len;
-       compat_pid_t    l_pid;
-};
-
-#define F_GETLK64      12      /*  using 'struct flock64' */
-#define F_SETLK64      13
-#define F_SETLKW64     14
-
-struct compat_flock64 {
-       short           l_type;
-       short           l_whence;
-       compat_loff_t   l_start;
-       compat_loff_t   l_len;
-       compat_pid_t    l_pid;
-};
-
-struct compat_statfs {
-       int             f_type;
-       int             f_bsize;
-       int             f_blocks;
-       int             f_bfree;
-       int             f_bavail;
-       int             f_files;
-       int             f_ffree;
-       compat_fsid_t   f_fsid;
-       int             f_namelen;      /* SunOS ignores this field. */
-       int             f_frsize;
-       int             f_flags;
-       int             f_spare[4];
-};
-
-#define COMPAT_RLIM_INFINITY           0xffffffff
-
-#define COMPAT_OFF_T_MAX       0x7fffffff
-
 /*
  * ipc64_perm is actually 32/64bit clean but since the compat layer refers to
  * it we may as well define it.
index 1c60094..d044a1f 100644 (file)
@@ -7,17 +7,9 @@
 #ifndef _ASM_POWERPC_LIVEPATCH_H
 #define _ASM_POWERPC_LIVEPATCH_H
 
-#include <linux/module.h>
-#include <linux/ftrace.h>
+#include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 
-#ifdef CONFIG_LIVEPATCH
-static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
-{
-       ftrace_instruction_pointer_set(fregs, ip);
-}
-#endif /* CONFIG_LIVEPATCH */
-
 #ifdef CONFIG_LIVEPATCH_64
 static inline void klp_init_thread_info(struct task_struct *p)
 {
index 5eb462a..b1129b4 100644 (file)
@@ -44,6 +44,7 @@
 #define __ARCH_WANT_SYS_TIME
 #define __ARCH_WANT_SYS_UTIME
 #define __ARCH_WANT_SYS_NEWFSTATAT
+#define __ARCH_WANT_COMPAT_STAT
 #define __ARCH_WANT_COMPAT_SYS_SENDFILE
 #endif
 #define __ARCH_WANT_SYS_FORK
index a28c9a1..d509016 100644 (file)
@@ -37,8 +37,8 @@ struct stat {
        __kernel_mode_t st_mode;
        unsigned short  st_nlink;
 #endif
-       __kernel_uid_t  st_uid;
-       __kernel_gid_t  st_gid;
+       __kernel_uid32_t st_uid;
+       __kernel_gid32_t st_gid;
        unsigned long   st_rdev;
        long            st_size;
        unsigned long   st_blksize;
index ed18bc6..21dc86d 100644 (file)
@@ -9,8 +9,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-typedef unsigned char  cc_t;
-typedef unsigned int   speed_t;
+#include <asm-generic/termbits-common.h>
+
 typedef unsigned int   tcflag_t;
 
 /*
@@ -64,115 +64,72 @@ struct ktermios {
 #define VDISCARD       16
 
 /* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK  0000020
-#define ISTRIP 0000040
-#define INLCR  0000100
-#define IGNCR  0000200
-#define ICRNL  0000400
-#define IXON   0001000
-#define IXOFF  0002000
-#define IXANY  0004000
-#define IUCLC  0010000
-#define IMAXBEL        0020000
-#define        IUTF8   0040000
+#define IXON   0x0200
+#define IXOFF  0x0400
+#define IUCLC  0x1000
+#define IMAXBEL        0x2000
+#define IUTF8  0x4000
 
 /* c_oflag bits */
-#define OPOST  0000001
-#define ONLCR  0000002
-#define OLCUC  0000004
-
-#define OCRNL  0000010
-#define ONOCR  0000020
-#define ONLRET 0000040
-
-#define OFILL  00000100
-#define OFDEL  00000200
-#define NLDLY  00001400
-#define   NL0  00000000
-#define   NL1  00000400
-#define   NL2  00001000
-#define   NL3  00001400
-#define TABDLY 00006000
-#define   TAB0 00000000
-#define   TAB1 00002000
-#define   TAB2 00004000
-#define   TAB3 00006000
-#define   XTABS        00006000        /* required by POSIX to == TAB3 */
-#define CRDLY  00030000
-#define   CR0  00000000
-#define   CR1  00010000
-#define   CR2  00020000
-#define   CR3  00030000
-#define FFDLY  00040000
-#define   FF0  00000000
-#define   FF1  00040000
-#define BSDLY  00100000
-#define   BS0  00000000
-#define   BS1  00100000
-#define VTDLY  00200000
-#define   VT0  00000000
-#define   VT1  00200000
+#define ONLCR  0x00002
+#define OLCUC  0x00004
+#define NLDLY  0x00300
+#define   NL0  0x00000
+#define   NL1  0x00100
+#define   NL2  0x00200
+#define   NL3  0x00300
+#define TABDLY 0x00c00
+#define   TAB0 0x00000
+#define   TAB1 0x00400
+#define   TAB2 0x00800
+#define   TAB3 0x00c00
+#define   XTABS        0x00c00         /* required by POSIX to == TAB3 */
+#define CRDLY  0x03000
+#define   CR0  0x00000
+#define   CR1  0x01000
+#define   CR2  0x02000
+#define   CR3  0x03000
+#define FFDLY  0x04000
+#define   FF0  0x00000
+#define   FF1  0x04000
+#define BSDLY  0x08000
+#define   BS0  0x00000
+#define   BS1  0x08000
+#define VTDLY  0x10000
+#define   VT0  0x00000
+#define   VT1  0x10000
 
 /* c_cflag bit meaning */
-#define CBAUD  0000377
-#define  B0    0000000         /* hang up */
-#define  B50   0000001
-#define  B75   0000002
-#define  B110  0000003
-#define  B134  0000004
-#define  B150  0000005
-#define  B200  0000006
-#define  B300  0000007
-#define  B600  0000010
-#define  B1200 0000011
-#define  B1800 0000012
-#define  B2400 0000013
-#define  B4800 0000014
-#define  B9600 0000015
-#define  B19200        0000016
-#define  B38400        0000017
-#define  EXTA   B19200
-#define  EXTB   B38400
-#define  CBAUDEX 0000000
-#define  B57600   00020
-#define  B115200  00021
-#define  B230400  00022
-#define  B460800  00023
-#define  B500000  00024
-#define  B576000  00025
-#define  B921600  00026
-#define B1000000  00027
-#define B1152000  00030
-#define B1500000  00031
-#define B2000000  00032
-#define B2500000  00033
-#define B3000000  00034
-#define B3500000  00035
-#define B4000000  00036
-#define   BOTHER  00037
-
-#define CIBAUD 077600000
-#define IBSHIFT        16              /* Shift from CBAUD to CIBAUD */
-
-#define CSIZE  00001400
-#define   CS5  00000000
-#define   CS6  00000400
-#define   CS7  00001000
-#define   CS8  00001400
-
-#define CSTOPB 00002000
-#define CREAD  00004000
-#define PARENB 00010000
-#define PARODD 00020000
-#define HUPCL  00040000
-
-#define CLOCAL 00100000
-#define CMSPAR   010000000000          /* mark or space (stick) parity */
-#define CRTSCTS          020000000000          /* flow control */
+#define CBAUD          0x000000ff
+#define CBAUDEX                0x00000000
+#define BOTHER         0x0000001f
+#define    B57600      0x00000010
+#define   B115200      0x00000011
+#define   B230400      0x00000012
+#define   B460800      0x00000013
+#define   B500000      0x00000014
+#define   B576000      0x00000015
+#define   B921600      0x00000016
+#define  B1000000      0x00000017
+#define  B1152000      0x00000018
+#define  B1500000      0x00000019
+#define  B2000000      0x0000001a
+#define  B2500000      0x0000001b
+#define  B3000000      0x0000001c
+#define  B3500000      0x0000001d
+#define  B4000000      0x0000001e
+#define CSIZE          0x00000300
+#define   CS5          0x00000000
+#define   CS6          0x00000100
+#define   CS7          0x00000200
+#define   CS8          0x00000300
+#define CSTOPB         0x00000400
+#define CREAD          0x00000800
+#define PARENB         0x00001000
+#define PARODD         0x00002000
+#define HUPCL          0x00004000
+#define CLOCAL         0x00008000
+#define CIBAUD         0x00ff0000
 
 /* c_lflag bits */
 #define ISIG   0x00000080
@@ -192,17 +149,6 @@ struct ktermios {
 #define IEXTEN 0x00000400
 #define EXTPROC        0x10000000
 
-/* Values for the ACTION argument to `tcflow'.  */
-#define        TCOOFF          0
-#define        TCOON           1
-#define        TCIOFF          2
-#define        TCION           3
-
-/* Values for the QUEUE_SELECTOR argument to `tcflush'.  */
-#define        TCIFLUSH        0
-#define        TCOFLUSH        1
-#define        TCIOFLUSH       2
-
 /* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'.  */
 #define        TCSANOW         0
 #define        TCSADRAIN       1
index ea38c13..dd09919 100644 (file)
@@ -63,7 +63,6 @@
 #include <asm/machdep.h>
 #include <asm/udbg.h>
 #include <asm/smp.h>
-#include <asm/livepatch.h>
 #include <asm/hw_irq.h>
 #include <asm/softirq_stack.h>
 
index d00b20c..b62046b 100644 (file)
@@ -1713,10 +1713,11 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
 /*
  * Copy architecture-specific thread state
  */
-int copy_thread(unsigned long clone_flags, unsigned long usp,
-               unsigned long kthread_arg, struct task_struct *p,
-               unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long usp = args->stack;
+       unsigned long tls = args->tls;
        struct pt_regs *childregs, *kregs;
        extern void ret_from_fork(void);
        extern void ret_from_fork_scv(void);
@@ -1733,18 +1734,18 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        /* Copy registers */
        sp -= sizeof(struct pt_regs);
        childregs = (struct pt_regs *) sp;
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                /* kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->gpr[1] = sp + sizeof(struct pt_regs);
                /* function */
-               if (usp)
-                       childregs->gpr[14] = ppc_function_entry((void *)usp);
+               if (args->fn)
+                       childregs->gpr[14] = ppc_function_entry((void *)args->fn);
 #ifdef CONFIG_PPC64
                clear_tsk_thread_flag(p, TIF_32BIT);
                childregs->softe = IRQS_ENABLED;
 #endif
-               childregs->gpr[15] = kthread_arg;
+               childregs->gpr[15] = (unsigned long)args->fn_arg;
                p->thread.regs = NULL;  /* no user register state */
                ti->flags |= _TIF_RESTOREALL;
                f = ret_from_kernel_thread;
index 9d83d16..eb0077b 100644 (file)
@@ -161,9 +161,7 @@ void machine_restart(char *cmd)
 void machine_power_off(void)
 {
        machine_shutdown();
-       if (pm_power_off)
-               pm_power_off();
-
+       do_kernel_power_off();
        smp_send_stop();
        machine_hang();
 }
index 0e8fc1c..5761f08 100644 (file)
@@ -60,7 +60,7 @@
 #include <asm/udbg.h>
 #include <asm/kexec.h>
 #include <asm/code-patching.h>
-#include <asm/livepatch.h>
+#include <asm/ftrace.h>
 #include <asm/opal.h>
 #include <asm/cputhreads.h>
 #include <asm/hw_irq.h>
index fff81c2..3d9782e 100644 (file)
@@ -1242,8 +1242,7 @@ static void bootcmds(void)
        } else if (cmd == 'h') {
                ppc_md.halt();
        } else if (cmd == 'p') {
-               if (pm_power_off)
-                       pm_power_off();
+               do_kernel_power_off();
        }
 }
 
index fb33972..afa83e3 100644 (file)
@@ -2,6 +2,10 @@
 
 obj-y += kernel/ mm/ net/
 obj-$(CONFIG_BUILTIN_DTB) += boot/dts/
+obj-y += errata/
+obj-$(CONFIG_KVM) += kvm/
+
+obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += purgatory/
 
 # for cleaning
 subdir- += boot
index c0853f1..c22f581 100644 (file)
@@ -78,6 +78,7 @@ config RISCV
        select HAVE_ARCH_KGDB if !XIP_KERNEL
        select HAVE_ARCH_KGDB_QXFER_PKT
        select HAVE_ARCH_MMAP_RND_BITS if MMU
+       select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT && MMU
@@ -129,12 +130,18 @@ config ARCH_MMAP_RND_BITS_MIN
        default 18 if 64BIT
        default 8
 
+config ARCH_MMAP_RND_COMPAT_BITS_MIN
+       default 8
+
 # max bits determined by the following formula:
 #  VA_BITS - PAGE_SHIFT - 3
 config ARCH_MMAP_RND_BITS_MAX
        default 24 if 64BIT # SV39 based
        default 17
 
+config ARCH_MMAP_RND_COMPAT_BITS_MAX
+       default 17
+
 # set if we run in machine mode, cleared if we run in supervisor mode
 config RISCV_M_MODE
        bool
@@ -326,6 +333,21 @@ config NODES_SHIFT
          Specify the maximum number of NUMA Nodes available on the target
          system.  Increases memory reserved to accommodate various tables.
 
+config RISCV_ALTERNATIVE
+       bool
+       depends on !XIP_KERNEL
+       help
+         This Kconfig allows the kernel to automatically patch the
+         errata required by the execution platform at run time. The
+         code patching is performed once in the boot stages. It means
+         that the overhead from this mechanism is just taken once.
+
+config RISCV_ALTERNATIVE_EARLY
+       bool
+       depends on RISCV_ALTERNATIVE
+       help
+         Allows early patching of the kernel for special errata
+
 config RISCV_ISA_C
        bool "Emit compressed instructions when building Linux"
        default y
@@ -336,6 +358,19 @@ config RISCV_ISA_C
 
           If you don't know what to do here, say Y.
 
+config RISCV_ISA_SVPBMT
+       bool "SVPBMT extension support"
+       depends on 64BIT && MMU
+       select RISCV_ALTERNATIVE
+       default y
+       help
+          Adds support to dynamically detect the presence of the SVPBMT extension
+          (Supervisor-mode: page-based memory types) and enable its usage.
+
+          The SVPBMT extension is only available on 64-bit CPUs.
+
+          If you don't know what to do here, say Y.
+
 config FPU
        bool "FPU support"
        default y
@@ -361,7 +396,7 @@ config RISCV_SBI_V01
 config RISCV_BOOT_SPINWAIT
        bool "Spinwait booting method"
        depends on SMP
-       default y
+       default y if RISCV_SBI_V01 || RISCV_M_MODE
        help
          This enables support for booting Linux via spinwait method. In the
          spinwait method, all cores randomly jump to Linux. One of the cores
@@ -372,6 +407,12 @@ config RISCV_BOOT_SPINWAIT
          rely on ordered booting via SBI HSM extension which gets chosen
          dynamically at runtime if the firmware supports it.
 
+         Since spinwait is incompatible with sparse hart IDs, it requires
+         NR_CPUS be large enough to contain the physical hart ID of the first
+         hart to enter Linux.
+
+         If unsure what to do here, say N.
+
 config KEXEC
        bool "Kexec system call"
        select KEXEC_CORE
@@ -385,6 +426,26 @@ config KEXEC
 
          The name comes from the similarity to the exec system call.
 
+config KEXEC_FILE
+       bool "kexec file based systmem call"
+       select KEXEC_CORE
+       select KEXEC_ELF
+       select HAVE_IMA_KEXEC if IMA
+       depends on 64BIT
+       help
+         This is the new version of the kexec system call. It is file
+         based and takes file descriptors for the kernel and initramfs
+         as its arguments, as opposed to the list of segments accepted
+         by the previous system call.
+
+         If you don't know what to do here, say Y.
+
+config ARCH_HAS_KEXEC_PURGATORY
+       def_bool KEXEC_FILE
+       select BUILD_BIN2C
+       depends on CRYPTO=y
+       depends on CRYPTO_SHA256=y
+
 config CRASH_DUMP
        bool "Build kdump crash kernel"
        help
@@ -396,6 +457,18 @@ config CRASH_DUMP
 
          For more details see Documentation/admin-guide/kdump/kdump.rst
 
+config COMPAT
+       bool "Kernel support for 32-bit U-mode"
+       default 64BIT
+       depends on 64BIT && MMU
+       help
+         This option enables support for a 32-bit U-mode running under a 64-bit
+         kernel at S-mode. riscv32-specific components such as system calls,
+         the user helper functions (vdso), signal rt_frame functions and the
+         ptrace interface are handled appropriately by the kernel.
+
+         If you want to execute 32-bit userspace applications, say Y.
+
 endmenu
 
 menu "Boot options"
index 0aacd70..ebfcd5c 100644 (file)
@@ -1,18 +1,9 @@
 menu "CPU errata selection"
 
-config RISCV_ERRATA_ALTERNATIVE
-       bool "RISC-V alternative scheme"
-       depends on !XIP_KERNEL
-       default y
-       help
-         This Kconfig allows the kernel to automatically patch the
-         errata required by the execution platform at run time. The
-         code patching is performed once in the boot stages. It means
-         that the overhead from this mechanism is just taken once.
-
 config ERRATA_SIFIVE
        bool "SiFive errata"
-       depends on RISCV_ERRATA_ALTERNATIVE
+       depends on !XIP_KERNEL
+       select RISCV_ALTERNATIVE
        help
          All SiFive errata Kconfig depend on this Kconfig. Disabling
          this Kconfig will disable all SiFive errata. Please say "Y"
@@ -42,4 +33,25 @@ config ERRATA_SIFIVE_CIP_1200
 
          If you don't know what to do here, say "Y".
 
+config ERRATA_THEAD
+       bool "T-HEAD errata"
+       select RISCV_ALTERNATIVE
+       help
+         All T-HEAD errata Kconfig depend on this Kconfig. Disabling
+         this Kconfig will disable all T-HEAD errata. Please say "Y"
+         here if your platform uses T-HEAD CPU cores.
+
+         Otherwise, please say "N" here to avoid unnecessary overhead.
+
+config ERRATA_THEAD_PBMT
+       bool "Apply T-Head memory type errata"
+       depends on ERRATA_THEAD && 64BIT
+       select RISCV_ALTERNATIVE_EARLY
+       default y
+       help
+         This will apply the memory type errata to handle the non-standard
+         memory type bits in page table entries on T-Head SoCs.
+
+         If you don't know what to do here, say "Y".
+
 endmenu
index f6ef358..85670dc 100644 (file)
@@ -14,7 +14,6 @@ config SOC_SIFIVE
        select CLK_SIFIVE
        select CLK_SIFIVE_PRCI
        select SIFIVE_PLIC
-       select RISCV_ERRATA_ALTERNATIVE if !XIP_KERNEL
        select ERRATA_SIFIVE if !XIP_KERNEL
        help
          This enables support for SiFive SoC platform hardware.
index 2b93ca9..34cf8a5 100644 (file)
@@ -103,21 +103,23 @@ endif
 
 head-y := arch/riscv/kernel/head.o
 
-core-$(CONFIG_RISCV_ERRATA_ALTERNATIVE) += arch/riscv/errata/
-core-$(CONFIG_KVM) += arch/riscv/kvm/
-
 libs-y += arch/riscv/lib/
 libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
 
 PHONY += vdso_install
 vdso_install:
        $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
+       $(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
+               $(build)=arch/riscv/kernel/compat_vdso $@)
 
 ifeq ($(KBUILD_EXTMOD),)
 ifeq ($(CONFIG_MMU),y)
 prepare: vdso_prepare
 vdso_prepare: prepare0
        $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h
+       $(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
+               $(build)=arch/riscv/kernel/compat_vdso include/generated/compat_vdso-offsets.h)
+
 endif
 endif
 
@@ -153,3 +155,7 @@ PHONY += rv64_randconfig
 rv64_randconfig:
        $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/riscv/configs/64-bit.config \
                -f $(srctree)/Makefile randconfig
+
+PHONY += rv32_defconfig
+rv32_defconfig:
+       $(Q)$(MAKE) -f $(srctree)/Makefile defconfig 32-bit.config
index 90e66ad..0cea9f7 100644 (file)
@@ -4,3 +4,4 @@ Image.*
 loader
 loader.lds
 loader.bin
+xipImage
index 855c150..39aae7b 100644 (file)
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += microchip-mpfs-icicle-kit.dtb
+dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += mpfs-icicle-kit.dtb
+dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += mpfs-polarberry.dtb
 obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
@@ -2,6 +2,8 @@
 /* Copyright (c) 2020-2021 Microchip Technology Inc */
 
 / {
+       compatible = "microchip,mpfs-icicle-reference-rtlv2203", "microchip,mpfs";
+
        core_pwm0: pwm@41000000 {
                compatible = "microchip,corepwm-rtl-v4";
                reg = <0x0 0x41000000 0x0 0xF0>;
@@ -3,7 +3,8 @@
 
 /dts-v1/;
 
-#include "microchip-mpfs.dtsi"
+#include "mpfs.dtsi"
+#include "mpfs-icicle-kit-fabric.dtsi"
 
 /* Clock frequency (in Hz) of the rtcclk */
 #define RTCCLK_FREQ            1000000
        ddrc_cache_lo: memory@80000000 {
                device_type = "memory";
                reg = <0x0 0x80000000 0x0 0x2e000000>;
-               clocks = <&clkcfg CLK_DDRC>;
                status = "okay";
        };
 
        ddrc_cache_hi: memory@1000000000 {
                device_type = "memory";
                reg = <0x10 0x0 0x0 0x40000000>;
-               clocks = <&clkcfg CLK_DDRC>;
                status = "okay";
        };
 };
 
-&refclk {
-       clock-frequency = <125000000>;
+&core_pwm0 {
+       status = "okay";
 };
 
-&mmuart1 {
+&gpio2 {
+       interrupts = <53>, <53>, <53>, <53>,
+                    <53>, <53>, <53>, <53>,
+                    <53>, <53>, <53>, <53>,
+                    <53>, <53>, <53>, <53>,
+                    <53>, <53>, <53>, <53>,
+                    <53>, <53>, <53>, <53>,
+                    <53>, <53>, <53>, <53>,
+                    <53>, <53>, <53>, <53>;
        status = "okay";
 };
 
-&mmuart2 {
+&i2c0 {
        status = "okay";
 };
 
-&mmuart3 {
+&i2c1 {
        status = "okay";
 };
 
-&mmuart4 {
+&i2c2 {
        status = "okay";
 };
 
-&mmc {
+&mac0 {
+       phy-mode = "sgmii";
+       phy-handle = <&phy0>;
+       status = "okay";
+};
+
+&mac1 {
+       phy-mode = "sgmii";
+       phy-handle = <&phy1>;
        status = "okay";
 
+       phy1: ethernet-phy@9 {
+               reg = <9>;
+               ti,fifo-depth = <0x1>;
+       };
+
+       phy0: ethernet-phy@8 {
+               reg = <8>;
+               ti,fifo-depth = <0x1>;
+       };
+};
+
+&mbox {
+       status = "okay";
+};
+
+&mmc {
        bus-width = <4>;
        disable-wp;
        cap-sd-highspeed;
        sd-uhs-sdr25;
        sd-uhs-sdr50;
        sd-uhs-sdr104;
-};
-
-&spi0 {
        status = "okay";
 };
 
-&spi1 {
+&mmuart1 {
        status = "okay";
 };
 
-&qspi {
+&mmuart2 {
        status = "okay";
 };
 
-&i2c0 {
+&mmuart3 {
        status = "okay";
 };
 
-&i2c1 {
+&mmuart4 {
        status = "okay";
 };
 
-&i2c2 {
+&pcie {
        status = "okay";
 };
 
-&mac0 {
-       phy-mode = "sgmii";
-       phy-handle = <&phy0>;
-};
-
-&mac1 {
+&qspi {
        status = "okay";
-       phy-mode = "sgmii";
-       phy-handle = <&phy1>;
-       phy1: ethernet-phy@9 {
-               reg = <9>;
-               ti,fifo-depth = <0x1>;
-       };
-       phy0: ethernet-phy@8 {
-               reg = <8>;
-               ti,fifo-depth = <0x1>;
-       };
 };
 
-&gpio2 {
-       interrupts = <53>, <53>, <53>, <53>,
-                    <53>, <53>, <53>, <53>,
-                    <53>, <53>, <53>, <53>,
-                    <53>, <53>, <53>, <53>,
-                    <53>, <53>, <53>, <53>,
-                    <53>, <53>, <53>, <53>,
-                    <53>, <53>, <53>, <53>,
-                    <53>, <53>, <53>, <53>;
-       status = "okay";
+&refclk {
+       clock-frequency = <125000000>;
 };
 
 &rtc {
        status = "okay";
 };
 
-&usb {
+&spi0 {
        status = "okay";
-       dr_mode = "host";
 };
 
-&mbox {
+&spi1 {
        status = "okay";
 };
 
        status = "okay";
 };
 
-&pcie {
-       status = "okay";
-};
-
-&core_pwm0 {
+&usb {
        status = "okay";
+       dr_mode = "host";
 };
diff --git a/arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi
new file mode 100644 (file)
index 0000000..49380c4
--- /dev/null
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2020-2022 Microchip Technology Inc */
+
+/ {
+       fabric_clk3: fabric-clk3 {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <62500000>;
+       };
+
+       fabric_clk1: fabric-clk1 {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <125000000>;
+       };
+};
diff --git a/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts b/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts
new file mode 100644 (file)
index 0000000..82c93c8
--- /dev/null
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2020-2022 Microchip Technology Inc */
+
+/dts-v1/;
+
+#include "mpfs.dtsi"
+#include "mpfs-polarberry-fabric.dtsi"
+
+/* Clock frequency (in Hz) of the rtcclk */
+#define MTIMER_FREQ    1000000
+
+/ {
+       model = "Sundance PolarBerry";
+       compatible = "sundance,polarberry", "microchip,mpfs";
+
+       aliases {
+               ethernet0 = &mac1;
+               serial0 = &mmuart0;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+
+       cpus {
+               timebase-frequency = <MTIMER_FREQ>;
+       };
+
+       ddrc_cache_lo: memory@80000000 {
+               device_type = "memory";
+               reg = <0x0 0x80000000 0x0 0x2e000000>;
+       };
+
+       ddrc_cache_hi: memory@1000000000 {
+               device_type = "memory";
+               reg = <0x10 0x00000000 0x0 0xC0000000>;
+       };
+};
+
+/*
+ * phy0 is connected to mac0, but the port itself is on the (optional) carrier
+ * board.
+ */
+&mac0 {
+       phy-mode = "sgmii";
+       phy-handle = <&phy0>;
+       status = "disabled";
+};
+
+&mac1 {
+       phy-mode = "sgmii";
+       phy-handle = <&phy1>;
+       status = "okay";
+
+       phy1: ethernet-phy@5 {
+               reg = <5>;
+               ti,fifo-depth = <0x01>;
+       };
+
+       phy0: ethernet-phy@4 {
+               reg = <4>;
+               ti,fifo-depth = <0x01>;
+       };
+};
+
+&mbox {
+       status = "okay";
+};
+
+&mmc {
+       bus-width = <4>;
+       disable-wp;
+       cap-sd-highspeed;
+       cap-mmc-highspeed;
+       card-detect-delay = <200>;
+       mmc-ddr-1_8v;
+       mmc-hs200-1_8v;
+       sd-uhs-sdr12;
+       sd-uhs-sdr25;
+       sd-uhs-sdr50;
+       sd-uhs-sdr104;
+       status = "okay";
+};
+
+&mmuart0 {
+       status = "okay";
+};
+
+&refclk {
+       clock-frequency = <125000000>;
+};
+
+&rtc {
+       status = "okay";
+};
+
+&syscontroller {
+       status = "okay";
+};
similarity index 98%
rename from arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
rename to arch/riscv/boot/dts/microchip/mpfs.dtsi
index cf2f55e..8c32591 100644 (file)
@@ -3,7 +3,6 @@
 
 /dts-v1/;
 #include "dt-bindings/clock/microchip,mpfs-clock.h"
-#include "microchip-mpfs-fabric.dtsi"
 
 / {
        #address-cells = <2>;
                #clock-cells = <0>;
        };
 
+       syscontroller: syscontroller {
+               compatible = "microchip,mpfs-sys-controller";
+               mboxes = <&mbox 0>;
+       };
+
        soc {
                #address-cells = <2>;
                #size-cells = <2>;
                        #mbox-cells = <1>;
                        status = "disabled";
                };
-
-               syscontroller: syscontroller {
-                       compatible = "microchip,mpfs-sys-controller";
-                       mboxes = <&mbox 0>;
-               };
        };
 };
index 5c638fd..e3172d0 100644 (file)
                        status = "disabled";
                };
                dma: dma-controller@3000000 {
-                       compatible = "sifive,fu540-c000-pdma";
+                       compatible = "sifive,fu540-c000-pdma", "sifive,pdma0";
                        reg = <0x0 0x3000000 0x0 0x8000>;
                        interrupt-parent = <&plic0>;
                        interrupts = <23>, <24>, <25>, <26>, <27>, <28>, <29>,
                                     <30>;
+                       dma-channels = <4>;
                        #dma-cells = <1>;
                };
                uart1: serial@10011000 {
index b8f8740..a105596 100644 (file)
@@ -1,2 +1,2 @@
-obj-y  += alternative.o
 obj-$(CONFIG_ERRATA_SIFIVE) += sifive/
+obj-$(CONFIG_ERRATA_THEAD) += thead/
diff --git a/arch/riscv/errata/alternative.c b/arch/riscv/errata/alternative.c
deleted file mode 100644 (file)
index e8b4a0f..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * alternative runtime patching
- * inspired by the ARM64 and x86 version
- *
- * Copyright (C) 2021 Sifive.
- */
-
-#include <linux/init.h>
-#include <linux/cpu.h>
-#include <linux/uaccess.h>
-#include <asm/alternative.h>
-#include <asm/sections.h>
-#include <asm/vendorid_list.h>
-#include <asm/sbi.h>
-#include <asm/csr.h>
-
-static struct cpu_manufacturer_info_t {
-       unsigned long vendor_id;
-       unsigned long arch_id;
-       unsigned long imp_id;
-} cpu_mfr_info;
-
-static void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end,
-                                unsigned long archid,
-                                unsigned long impid) __initdata;
-
-static inline void __init riscv_fill_cpu_mfr_info(void)
-{
-#ifdef CONFIG_RISCV_M_MODE
-       cpu_mfr_info.vendor_id = csr_read(CSR_MVENDORID);
-       cpu_mfr_info.arch_id = csr_read(CSR_MARCHID);
-       cpu_mfr_info.imp_id = csr_read(CSR_MIMPID);
-#else
-       cpu_mfr_info.vendor_id = sbi_get_mvendorid();
-       cpu_mfr_info.arch_id = sbi_get_marchid();
-       cpu_mfr_info.imp_id = sbi_get_mimpid();
-#endif
-}
-
-static void __init init_alternative(void)
-{
-       riscv_fill_cpu_mfr_info();
-
-       switch (cpu_mfr_info.vendor_id) {
-#ifdef CONFIG_ERRATA_SIFIVE
-       case SIFIVE_VENDOR_ID:
-               vendor_patch_func = sifive_errata_patch_func;
-               break;
-#endif
-       default:
-               vendor_patch_func = NULL;
-       }
-}
-
-/*
- * This is called very early in the boot process (directly after we run
- * a feature detect on the boot CPU). No need to worry about other CPUs
- * here.
- */
-void __init apply_boot_alternatives(void)
-{
-       /* If called on non-boot cpu things could go wrong */
-       WARN_ON(smp_processor_id() != 0);
-
-       init_alternative();
-
-       if (!vendor_patch_func)
-               return;
-
-       vendor_patch_func((struct alt_entry *)__alt_start,
-                         (struct alt_entry *)__alt_end,
-                         cpu_mfr_info.arch_id, cpu_mfr_info.imp_id);
-}
-
index f5e5ae7..672f02b 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/string.h>
 #include <linux/bug.h>
 #include <asm/patch.h>
@@ -54,7 +55,8 @@ static struct errata_info_t errata_list[ERRATA_SIFIVE_NUMBER] = {
        },
 };
 
-static u32 __init sifive_errata_probe(unsigned long archid, unsigned long impid)
+static u32 __init_or_module sifive_errata_probe(unsigned long archid,
+                                               unsigned long impid)
 {
        int idx;
        u32 cpu_req_errata = 0;
@@ -66,7 +68,7 @@ static u32 __init sifive_errata_probe(unsigned long archid, unsigned long impid)
        return cpu_req_errata;
 }
 
-static void __init warn_miss_errata(u32 miss_errata)
+static void __init_or_module warn_miss_errata(u32 miss_errata)
 {
        int i;
 
@@ -79,14 +81,22 @@ static void __init warn_miss_errata(u32 miss_errata)
        pr_warn("----------------------------------------------------------------\n");
 }
 
-void __init sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
-                                    unsigned long archid, unsigned long impid)
+void __init_or_module sifive_errata_patch_func(struct alt_entry *begin,
+                                              struct alt_entry *end,
+                                              unsigned long archid,
+                                              unsigned long impid,
+                                              unsigned int stage)
 {
        struct alt_entry *alt;
-       u32 cpu_req_errata = sifive_errata_probe(archid, impid);
+       u32 cpu_req_errata;
        u32 cpu_apply_errata = 0;
        u32 tmp;
 
+       if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
+               return;
+
+       cpu_req_errata = sifive_errata_probe(archid, impid);
+
        for (alt = begin; alt < end; alt++) {
                if (alt->vendor_id != SIFIVE_VENDOR_ID)
                        continue;
diff --git a/arch/riscv/errata/thead/Makefile b/arch/riscv/errata/thead/Makefile
new file mode 100644 (file)
index 0000000..137e700
--- /dev/null
@@ -0,0 +1,11 @@
+ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
+CFLAGS_errata.o := -mcmodel=medany
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_errata.o = $(CC_FLAGS_FTRACE)
+endif
+ifdef CONFIG_KASAN
+KASAN_SANITIZE_errata.o := n
+endif
+endif
+
+obj-y += errata.o
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
new file mode 100644 (file)
index 0000000..e5d7527
--- /dev/null
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <asm/alternative.h>
+#include <asm/cacheflush.h>
+#include <asm/errata_list.h>
+#include <asm/patch.h>
+#include <asm/vendorid_list.h>
+
+struct errata_info {
+       char name[ERRATA_STRING_LENGTH_MAX];
+       bool (*check_func)(unsigned long arch_id, unsigned long impid);
+       unsigned int stage;
+};
+
+static bool errata_mt_check_func(unsigned long arch_id, unsigned long impid)
+{
+       if (arch_id != 0 || impid != 0)
+               return false;
+       return true;
+}
+
+static const struct errata_info errata_list[ERRATA_THEAD_NUMBER] = {
+       {
+               .name = "memory-types",
+               .stage = RISCV_ALTERNATIVES_EARLY_BOOT,
+               .check_func = errata_mt_check_func
+       },
+};
+
+static u32 thead_errata_probe(unsigned int stage, unsigned long archid, unsigned long impid)
+{
+       const struct errata_info *info;
+       u32 cpu_req_errata = 0;
+       int idx;
+
+       for (idx = 0; idx < ERRATA_THEAD_NUMBER; idx++) {
+               info = &errata_list[idx];
+
+               if ((stage == RISCV_ALTERNATIVES_MODULE ||
+                    info->stage == stage) && info->check_func(archid, impid))
+                       cpu_req_errata |= (1U << idx);
+       }
+
+       return cpu_req_errata;
+}
+
+void __init_or_module thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+                                             unsigned long archid, unsigned long impid,
+                                             unsigned int stage)
+{
+       struct alt_entry *alt;
+       u32 cpu_req_errata = thead_errata_probe(stage, archid, impid);
+       u32 tmp;
+
+       for (alt = begin; alt < end; alt++) {
+               if (alt->vendor_id != THEAD_VENDOR_ID)
+                       continue;
+               if (alt->errata_id >= ERRATA_THEAD_NUMBER)
+                       continue;
+
+               tmp = (1U << alt->errata_id);
+               if (cpu_req_errata & tmp) {
+                       /* For early-boot alternatives, the MMU isn't running yet */
+                       if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
+                               memcpy((void *)__pa_symbol(alt->old_ptr),
+                                      (void *)__pa_symbol(alt->alt_ptr), alt->alt_len);
+                       else
+                               patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
+               }
+       }
+
+       if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
+               local_flush_icache_all();
+}
index 67406c3..ec2f3f1 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef __ASM_ALTERNATIVE_MACROS_H
 #define __ASM_ALTERNATIVE_MACROS_H
 
-#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE
+#ifdef CONFIG_RISCV_ALTERNATIVE
 
 #ifdef __ASSEMBLY__
 
        .popsection
        .subsection 1
 888 :
+       .option push
+       .option norvc
+       .option norelax
        \new_c
+       .option pop
 889 :
-       .previous
        .org    . - (889b - 888b) + (887b - 886b)
        .org    . - (887b - 886b) + (889b - 888b)
+       .previous
        .endif
 .endm
 
 .macro __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable
 886 :
+       .option push
+       .option norvc
+       .option norelax
        \old_c
+       .option pop
 887 :
        ALT_NEW_CONTENT \vendor_id, \errata_id, \enable, \new_c
 .endm
 #define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
        __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)
 
+.macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
+                                 new_c_2, vendor_id_2, errata_id_2, enable_2
+886 :
+       .option push
+       .option norvc
+       .option norelax
+       \old_c
+       .option pop
+887 :
+       ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1
+       ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2
+.endm
+
+#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1,   \
+                                       CONFIG_k_1,                     \
+                                 new_c_2, vendor_id_2, errata_id_2,    \
+                                       CONFIG_k_2)                     \
+       __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1,   \
+                                       IS_ENABLED(CONFIG_k_1),         \
+                                  new_c_2, vendor_id_2, errata_id_2,   \
+                                       IS_ENABLED(CONFIG_k_2)
+
 #else /* !__ASSEMBLY__ */
 
 #include <asm/asm.h>
 #include <linux/stringify.h>
 
-#define ALT_ENTRY(oldptr, newptr, vendor_id, errata_id, newlen) \
-       RISCV_PTR " " oldptr "\n" \
-       RISCV_PTR " " newptr "\n" \
-       REG_ASM " " vendor_id "\n" \
-       REG_ASM " " newlen "\n" \
+#define ALT_ENTRY(oldptr, newptr, vendor_id, errata_id, newlen)                \
+       RISCV_PTR " " oldptr "\n"                                       \
+       RISCV_PTR " " newptr "\n"                                       \
+       REG_ASM " " vendor_id "\n"                                      \
+       REG_ASM " " newlen "\n"                                         \
        ".word " errata_id "\n"
 
-#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \
+#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)           \
        ".if " __stringify(enable) " == 1\n"                            \
        ".pushsection .alternative, \"a\"\n"                            \
        ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \
        ".popsection\n"                                                 \
        ".subsection 1\n"                                               \
        "888 :\n"                                                       \
+       ".option push\n"                                                \
+       ".option norvc\n"                                               \
+       ".option norelax\n"                                             \
        new_c "\n"                                                      \
+       ".option pop\n"                                                 \
        "889 :\n"                                                       \
-       ".previous\n"                                                   \
        ".org   . - (887b - 886b) + (889b - 888b)\n"                    \
        ".org   . - (889b - 888b) + (887b - 886b)\n"                    \
+       ".previous\n"                                                   \
        ".endif\n"
 
-#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable) \
-       "886 :\n"       \
-       old_c "\n"      \
-       "887 :\n"       \
+#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable)  \
+       "886 :\n"                                                       \
+       ".option push\n"                                                \
+       ".option norvc\n"                                               \
+       ".option norelax\n"                                             \
+       old_c "\n"                                                      \
+       ".option pop\n"                                                 \
+       "887 :\n"                                                       \
        ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)
 
 #define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
        __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))
 
+#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1,  \
+                                       enable_1,                       \
+                                  new_c_2, vendor_id_2, errata_id_2,   \
+                                       enable_2)                       \
+       "886 :\n"                                                       \
+       ".option push\n"                                                \
+       ".option norvc\n"                                               \
+       ".option norelax\n"                                             \
+       old_c "\n"                                                      \
+       ".option pop\n"                                                 \
+       "887 :\n"                                                       \
+       ALT_NEW_CONTENT(vendor_id_1, errata_id_1, enable_1, new_c_1)    \
+       ALT_NEW_CONTENT(vendor_id_2, errata_id_2, enable_2, new_c_2)
+
+#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1,   \
+                                       CONFIG_k_1,                     \
+                                 new_c_2, vendor_id_2, errata_id_2,    \
+                                       CONFIG_k_2)                     \
+       __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1,   \
+                                       IS_ENABLED(CONFIG_k_1),         \
+                                  new_c_2, vendor_id_2, errata_id_2,   \
+                                       IS_ENABLED(CONFIG_k_2))
+
 #endif /* __ASSEMBLY__ */
 
-#else /* !CONFIG_RISCV_ERRATA_ALTERNATIVE*/
+#else /* CONFIG_RISCV_ALTERNATIVE */
 #ifdef __ASSEMBLY__
 
 .macro __ALTERNATIVE_CFG old_c
 #define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
        __ALTERNATIVE_CFG old_c
 
+#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1,   \
+                                       CONFIG_k_1,                     \
+                                 new_c_2, vendor_id_2, errata_id_2,    \
+                                       CONFIG_k_2)                     \
+       __ALTERNATIVE_CFG old_c
+
 #else /* !__ASSEMBLY__ */
 
 #define __ALTERNATIVE_CFG(old_c)  \
 #define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
        __ALTERNATIVE_CFG(old_c)
 
+#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1,   \
+                                       CONFIG_k_1,                     \
+                                 new_c_2, vendor_id_2, errata_id_2,    \
+                                       CONFIG_k_2) \
+       __ALTERNATIVE_CFG(old_c)
+
 #endif /* __ASSEMBLY__ */
-#endif /* CONFIG_RISCV_ERRATA_ALTERNATIVE */
+#endif /* CONFIG_RISCV_ALTERNATIVE */
+
 /*
  * Usage:
  *   ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k)
  * this case, this vendor can create a new macro ALTERNATIVE_2() based
  * on the following sample code and then replace ALTERNATIVE() with
  * ALTERNATIVE_2() to append its customized content.
- *
- * .macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
- *                                   new_c_2, vendor_id_2, errata_id_2, enable_2
- * 886 :
- *      \old_c
- * 887 :
- *      ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1
- *      ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2
- * .endm
- *
- * #define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
- *                                   new_c_2, vendor_id_2, errata_id_2, CONFIG_k_2) \
- *        __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, IS_ENABLED(CONFIG_k_1), \
- *                                   new_c_2, vendor_id_2, errata_id_2, IS_ENABLED(CONFIG_k_2) \
- *
- * #define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
- *                                    new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) \
- *         _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
- *                                         new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2)
- *
  */
+#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1,         \
+                                       errata_id_1, CONFIG_k_1,        \
+                                  new_content_2, vendor_id_2,          \
+                                       errata_id_2, CONFIG_k_2)        \
+       _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1,     \
+                                           errata_id_1, CONFIG_k_1,    \
+                                       new_content_2, vendor_id_2,     \
+                                           errata_id_2, CONFIG_k_2)
+
 #endif
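
A rough sketch of how a caller is expected to invoke the new ALTERNATIVE_2()
macro; the operand values below are made up for illustration, while the
in-tree user is the ALT_SVPBMT() helper added to errata_list.h later in this
diff:

    unsigned long val;

    /*
     * The default sequence loads 0; either the Svpbmt cpufeature (vendor id 0)
     * or the T-Head PBMT erratum patches in its own constant at boot.
     * Both replacement sequences must assemble to the size of the default.
     */
    asm(ALTERNATIVE_2("li %0, 0",
                      "li %0, 1", 0, CPUFEATURE_SVPBMT,
                              CONFIG_RISCV_ISA_SVPBMT,
                      "li %0, 2", THEAD_VENDOR_ID, ERRATA_THEAD_PBMT,
                              CONFIG_ERRATA_THEAD_PBMT)
        : "=r" (val));
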
index e625d3c..6511dd7 100644 (file)
 
 #ifndef __ASSEMBLY__
 
+#ifdef CONFIG_RISCV_ALTERNATIVE
+
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <asm/hwcap.h>
 
+#define RISCV_ALTERNATIVES_BOOT                0 /* alternatives applied during regular boot */
+#define RISCV_ALTERNATIVES_MODULE      1 /* alternatives applied during module-init */
+#define RISCV_ALTERNATIVES_EARLY_BOOT  2 /* alternatives applied before mmu start */
+
 void __init apply_boot_alternatives(void);
+void __init apply_early_boot_alternatives(void);
+void apply_module_alternatives(void *start, size_t length);
 
 struct alt_entry {
        void *old_ptr;           /* address of original instruction or data */
@@ -33,7 +41,22 @@ struct errata_checkfunc_id {
 };
 
 void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
-                             unsigned long archid, unsigned long impid);
+                             unsigned long archid, unsigned long impid,
+                             unsigned int stage);
+void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+                            unsigned long archid, unsigned long impid,
+                            unsigned int stage);
+
+void riscv_cpufeature_patch_func(struct alt_entry *begin, struct alt_entry *end,
+                                unsigned int stage);
+
+#else /* CONFIG_RISCV_ALTERNATIVE */
+
+static inline void apply_boot_alternatives(void) { }
+static inline void apply_early_boot_alternatives(void) { }
+static inline void apply_module_alternatives(void *start, size_t length) { }
+
+#endif /* CONFIG_RISCV_ALTERNATIVE */
 
 #endif
 #endif
index 8c2549b..618d7c5 100644 (file)
 #error "Unexpected __SIZEOF_SHORT__"
 #endif
 
-#ifdef __ASSEMBLY__
-
-/* Common assembly source macros */
-
-#ifdef CONFIG_XIP_KERNEL
-.macro XIP_FIXUP_OFFSET reg
-       REG_L t0, _xip_fixup
-       add \reg, \reg, t0
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
-       la t1, __data_loc
-       REG_L t1, _xip_phys_offset
-       sub \reg, \reg, t1
-       add \reg, \reg, t0
-.endm
-_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
-_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
-#else
-.macro XIP_FIXUP_OFFSET reg
-.endm
-.macro XIP_FIXUP_FLASH_OFFSET reg
-.endm
-#endif /* CONFIG_XIP_KERNEL */
-
-#endif /* __ASSEMBLY__ */
-
 #endif /* _ASM_RISCV_ASM_H */
index ac9bdf4..0dfe9d8 100644 (file)
@@ -310,47 +310,129 @@ ATOMIC_OPS()
 #undef ATOMIC_OPS
 #undef ATOMIC_OP
 
-static __always_inline int arch_atomic_sub_if_positive(atomic_t *v, int offset)
+static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
+{
+       int prev, rc;
+
+       __asm__ __volatile__ (
+               "0:     lr.w      %[p],  %[c]\n"
+               "       bltz      %[p],  1f\n"
+               "       addi      %[rc], %[p], 1\n"
+               "       sc.w.rl   %[rc], %[rc], %[c]\n"
+               "       bnez      %[rc], 0b\n"
+               "       fence     rw, rw\n"
+               "1:\n"
+               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+               :
+               : "memory");
+       return !(prev < 0);
+}
+
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+
+static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
+{
+       int prev, rc;
+
+       __asm__ __volatile__ (
+               "0:     lr.w      %[p],  %[c]\n"
+               "       bgtz      %[p],  1f\n"
+               "       addi      %[rc], %[p], -1\n"
+               "       sc.w.rl   %[rc], %[rc], %[c]\n"
+               "       bnez      %[rc], 0b\n"
+               "       fence     rw, rw\n"
+               "1:\n"
+               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+               :
+               : "memory");
+       return !(prev > 0);
+}
+
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+
+static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
 {
        int prev, rc;
 
        __asm__ __volatile__ (
                "0:     lr.w     %[p],  %[c]\n"
-               "       sub      %[rc], %[p], %[o]\n"
+               "       addi     %[rc], %[p], -1\n"
                "       bltz     %[rc], 1f\n"
                "       sc.w.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-               : [o]"r" (offset)
+               :
                : "memory");
-       return prev - offset;
+       return prev - 1;
 }
 
-#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(v, 1)
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline s64 arch_atomic64_sub_if_positive(atomic64_t *v, s64 offset)
+static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
+{
+       s64 prev;
+       long rc;
+
+       __asm__ __volatile__ (
+               "0:     lr.d      %[p],  %[c]\n"
+               "       bltz      %[p],  1f\n"
+               "       addi      %[rc], %[p], 1\n"
+               "       sc.d.rl   %[rc], %[rc], %[c]\n"
+               "       bnez      %[rc], 0b\n"
+               "       fence     rw, rw\n"
+               "1:\n"
+               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+               :
+               : "memory");
+       return !(prev < 0);
+}
+
+#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
+
+static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
+{
+       s64 prev;
+       long rc;
+
+       __asm__ __volatile__ (
+               "0:     lr.d      %[p],  %[c]\n"
+               "       bgtz      %[p],  1f\n"
+               "       addi      %[rc], %[p], -1\n"
+               "       sc.d.rl   %[rc], %[rc], %[c]\n"
+               "       bnez      %[rc], 0b\n"
+               "       fence     rw, rw\n"
+               "1:\n"
+               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+               :
+               : "memory");
+       return !(prev > 0);
+}
+
+#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
+
+static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
        s64 prev;
        long rc;
 
        __asm__ __volatile__ (
                "0:     lr.d     %[p],  %[c]\n"
-               "       sub      %[rc], %[p], %[o]\n"
+               "       addi      %[rc], %[p], -1\n"
                "       bltz     %[rc], 1f\n"
                "       sc.d.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-               : [o]"r" (offset)
+               :
                : "memory");
-       return prev - offset;
+       return prev - 1;
 }
 
-#define arch_atomic64_dec_if_positive(v)       arch_atomic64_sub_if_positive(v, 1)
+#define arch_atomic64_dec_if_positive  arch_atomic64_dec_if_positive
 #endif
 
 #endif /* _ASM_RISCV_ATOMIC_H */
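
For context, a small sketch of how the reworked dec_if_positive primitive is
normally consumed through the generic atomic layer; the counter and function
names are hypothetical:

    #include <linux/atomic.h>

    static atomic_t demo_tokens = ATOMIC_INIT(4);    /* hypothetical token pool */

    /* Take one token unless the pool is already empty. */
    static bool demo_take_token(void)
    {
            /*
             * atomic_dec_if_positive() stores the decrement only when the old
             * value was greater than 0 and always returns old - 1, so a
             * negative result means nothing was taken.
             */
            return atomic_dec_if_positive(&demo_tokens) >= 0;
    }
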
index 36dc962..12debce 100644 (file)
 #define arch_cmpxchg_local(ptr, o, n)                                  \
        (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
 
-#define cmpxchg32(ptr, o, n)                                           \
-({                                                                     \
-       BUILD_BUG_ON(sizeof(*(ptr)) != 4);                              \
-       arch_cmpxchg((ptr), (o), (n));                                  \
-})
-
-#define cmpxchg32_local(ptr, o, n)                                     \
-({                                                                     \
-       BUILD_BUG_ON(sizeof(*(ptr)) != 4);                              \
-       arch_cmpxchg_relaxed((ptr), (o), (n))                           \
-})
-
 #define arch_cmpxchg64(ptr, o, n)                                      \
 ({                                                                     \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h
new file mode 100644 (file)
index 0000000..2ac955b
--- /dev/null
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_COMPAT_H
+#define __ASM_COMPAT_H
+
+#define COMPAT_UTS_MACHINE     "riscv\0\0"
+
+/*
+ * Architecture specific compatibility types
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <asm-generic/compat.h>
+
+static inline int is_compat_task(void)
+{
+       return test_thread_flag(TIF_32BIT);
+}
+
+struct compat_user_regs_struct {
+       compat_ulong_t pc;
+       compat_ulong_t ra;
+       compat_ulong_t sp;
+       compat_ulong_t gp;
+       compat_ulong_t tp;
+       compat_ulong_t t0;
+       compat_ulong_t t1;
+       compat_ulong_t t2;
+       compat_ulong_t s0;
+       compat_ulong_t s1;
+       compat_ulong_t a0;
+       compat_ulong_t a1;
+       compat_ulong_t a2;
+       compat_ulong_t a3;
+       compat_ulong_t a4;
+       compat_ulong_t a5;
+       compat_ulong_t a6;
+       compat_ulong_t a7;
+       compat_ulong_t s2;
+       compat_ulong_t s3;
+       compat_ulong_t s4;
+       compat_ulong_t s5;
+       compat_ulong_t s6;
+       compat_ulong_t s7;
+       compat_ulong_t s8;
+       compat_ulong_t s9;
+       compat_ulong_t s10;
+       compat_ulong_t s11;
+       compat_ulong_t t3;
+       compat_ulong_t t4;
+       compat_ulong_t t5;
+       compat_ulong_t t6;
+};
+
+static inline void regs_to_cregs(struct compat_user_regs_struct *cregs,
+                                struct pt_regs *regs)
+{
+       cregs->pc       = (compat_ulong_t) regs->epc;
+       cregs->ra       = (compat_ulong_t) regs->ra;
+       cregs->sp       = (compat_ulong_t) regs->sp;
+       cregs->gp       = (compat_ulong_t) regs->gp;
+       cregs->tp       = (compat_ulong_t) regs->tp;
+       cregs->t0       = (compat_ulong_t) regs->t0;
+       cregs->t1       = (compat_ulong_t) regs->t1;
+       cregs->t2       = (compat_ulong_t) regs->t2;
+       cregs->s0       = (compat_ulong_t) regs->s0;
+       cregs->s1       = (compat_ulong_t) regs->s1;
+       cregs->a0       = (compat_ulong_t) regs->a0;
+       cregs->a1       = (compat_ulong_t) regs->a1;
+       cregs->a2       = (compat_ulong_t) regs->a2;
+       cregs->a3       = (compat_ulong_t) regs->a3;
+       cregs->a4       = (compat_ulong_t) regs->a4;
+       cregs->a5       = (compat_ulong_t) regs->a5;
+       cregs->a6       = (compat_ulong_t) regs->a6;
+       cregs->a7       = (compat_ulong_t) regs->a7;
+       cregs->s2       = (compat_ulong_t) regs->s2;
+       cregs->s3       = (compat_ulong_t) regs->s3;
+       cregs->s4       = (compat_ulong_t) regs->s4;
+       cregs->s5       = (compat_ulong_t) regs->s5;
+       cregs->s6       = (compat_ulong_t) regs->s6;
+       cregs->s7       = (compat_ulong_t) regs->s7;
+       cregs->s8       = (compat_ulong_t) regs->s8;
+       cregs->s9       = (compat_ulong_t) regs->s9;
+       cregs->s10      = (compat_ulong_t) regs->s10;
+       cregs->s11      = (compat_ulong_t) regs->s11;
+       cregs->t3       = (compat_ulong_t) regs->t3;
+       cregs->t4       = (compat_ulong_t) regs->t4;
+       cregs->t5       = (compat_ulong_t) regs->t5;
+       cregs->t6       = (compat_ulong_t) regs->t6;
+};
+
+static inline void cregs_to_regs(struct compat_user_regs_struct *cregs,
+                                struct pt_regs *regs)
+{
+       regs->epc       = (unsigned long) cregs->pc;
+       regs->ra        = (unsigned long) cregs->ra;
+       regs->sp        = (unsigned long) cregs->sp;
+       regs->gp        = (unsigned long) cregs->gp;
+       regs->tp        = (unsigned long) cregs->tp;
+       regs->t0        = (unsigned long) cregs->t0;
+       regs->t1        = (unsigned long) cregs->t1;
+       regs->t2        = (unsigned long) cregs->t2;
+       regs->s0        = (unsigned long) cregs->s0;
+       regs->s1        = (unsigned long) cregs->s1;
+       regs->a0        = (unsigned long) cregs->a0;
+       regs->a1        = (unsigned long) cregs->a1;
+       regs->a2        = (unsigned long) cregs->a2;
+       regs->a3        = (unsigned long) cregs->a3;
+       regs->a4        = (unsigned long) cregs->a4;
+       regs->a5        = (unsigned long) cregs->a5;
+       regs->a6        = (unsigned long) cregs->a6;
+       regs->a7        = (unsigned long) cregs->a7;
+       regs->s2        = (unsigned long) cregs->s2;
+       regs->s3        = (unsigned long) cregs->s3;
+       regs->s4        = (unsigned long) cregs->s4;
+       regs->s5        = (unsigned long) cregs->s5;
+       regs->s6        = (unsigned long) cregs->s6;
+       regs->s7        = (unsigned long) cregs->s7;
+       regs->s8        = (unsigned long) cregs->s8;
+       regs->s9        = (unsigned long) cregs->s9;
+       regs->s10       = (unsigned long) cregs->s10;
+       regs->s11       = (unsigned long) cregs->s11;
+       regs->t3        = (unsigned long) cregs->t3;
+       regs->t4        = (unsigned long) cregs->t4;
+       regs->t5        = (unsigned long) cregs->t5;
+       regs->t6        = (unsigned long) cregs->t6;
+};
+
+#endif /* __ASM_COMPAT_H */
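
A brief sketch of how the helpers above are intended to fit together when a
rv32 task's register state is exported (for example through a compat regset);
the surrounding context is assumed, not taken from this series:

    struct compat_user_regs_struct cregs;

    /* Narrow the 64-bit pt_regs into the rv32 layout userspace expects. */
    if (is_compat_task())
            regs_to_cregs(&cregs, task_pt_regs(current));
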
index cc40521..6d85655 100644 (file)
 #define SR_SD          _AC(0x8000000000000000, UL) /* FS/XS dirty */
 #endif
 
+#ifdef CONFIG_64BIT
+#define SR_UXL         _AC(0x300000000, UL) /* XLEN mask for U-mode */
+#define SR_UXL_32      _AC(0x100000000, UL) /* XLEN = 32 for U-mode */
+#define SR_UXL_64      _AC(0x200000000, UL) /* XLEN = 64 for U-mode */
+#define SR_UXL_SHIFT   32
+#endif
+
 /* SATP flags */
 #ifndef CONFIG_64BIT
 #define SATP_PPN       _AC(0x003FFFFF, UL)
index f53c400..14fc734 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef _ASM_RISCV_ELF_H
 #define _ASM_RISCV_ELF_H
 
+#include <uapi/linux/elf.h>
+#include <linux/compat.h>
 #include <uapi/asm/elf.h>
 #include <asm/auxvec.h>
 #include <asm/byteorder.h>
  */
 #define ELF_ARCH       EM_RISCV
 
+#ifndef ELF_CLASS
 #ifdef CONFIG_64BIT
 #define ELF_CLASS      ELFCLASS64
 #else
 #define ELF_CLASS      ELFCLASS32
 #endif
+#endif
 
 #define ELF_DATA       ELFDATA2LSB
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
-#define elf_check_arch(x) ((x)->e_machine == EM_RISCV)
+#define elf_check_arch(x) (((x)->e_machine == EM_RISCV) && \
+                          ((x)->e_ident[EI_CLASS] == ELF_CLASS))
+
+extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
+#define compat_elf_check_arch  compat_elf_check_arch
 
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE      (PAGE_SIZE)
 #define ELF_ET_DYN_BASE                ((TASK_SIZE / 3) * 2)
 
 #ifdef CONFIG_64BIT
+#ifdef CONFIG_COMPAT
+#define STACK_RND_MASK         (test_thread_flag(TIF_32BIT) ? \
+                                0x7ff >> (PAGE_SHIFT - 12) : \
+                                0x3ffff >> (PAGE_SHIFT - 12))
+#else
 #define STACK_RND_MASK         (0x3ffff >> (PAGE_SHIFT - 12))
 #endif
+#endif
 /*
  * This yields a mask that user programs can use to figure out what
  * instruction set this CPU supports.  This could be done in user space,
@@ -60,11 +74,19 @@ extern unsigned long elf_hwcap;
  */
 #define ELF_PLATFORM   (NULL)
 
+#define COMPAT_ELF_PLATFORM    (NULL)
+
 #ifdef CONFIG_MMU
 #define ARCH_DLINFO                                            \
 do {                                                           \
+       /*                                                      \
+        * Note the intermediate (ulong) cast: converting       \
+        * current->mm->context.vdso straight to elf_addr_t     \
+        * triggers a pointer-to-integer cast warning for       \
+        * COMPAT ELFCLASS32.                                   \
+        */                                                     \
        NEW_AUX_ENT(AT_SYSINFO_EHDR,                            \
-               (elf_addr_t)current->mm->context.vdso);         \
+               (elf_addr_t)(ulong)current->mm->context.vdso);  \
        NEW_AUX_ENT(AT_L1I_CACHESIZE,                           \
                get_cache_size(1, CACHE_TYPE_INST));            \
        NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY,                       \
@@ -90,4 +112,28 @@ do {                                                        \
                *(struct user_regs_struct *)regs;       \
 } while (0);
 
+#ifdef CONFIG_COMPAT
+
+#define SET_PERSONALITY(ex)                                    \
+do {    if ((ex).e_ident[EI_CLASS] == ELFCLASS32)              \
+               set_thread_flag(TIF_32BIT);                     \
+       else                                                    \
+               clear_thread_flag(TIF_32BIT);                   \
+       if (personality(current->personality) != PER_LINUX32)   \
+               set_personality(PER_LINUX |                     \
+                       (current->personality & (~PER_MASK)));  \
+} while (0)
+
+#define COMPAT_ELF_ET_DYN_BASE         ((TASK_SIZE_32 / 3) * 2)
+
+/* rv32 registers */
+typedef compat_ulong_t                 compat_elf_greg_t;
+typedef compat_elf_greg_t              compat_elf_gregset_t[ELF_NGREG];
+
+extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+                                             int uses_interp);
+#define compat_arch_setup_additional_pages \
+                               compat_arch_setup_additional_pages
+
+#endif /* CONFIG_COMPAT */
 #endif /* _ASM_RISCV_ELF_H */
index 5f1046e..9e2888d 100644 (file)
 #define        ERRATA_SIFIVE_NUMBER 2
 #endif
 
+#ifdef CONFIG_ERRATA_THEAD
+#define        ERRATA_THEAD_PBMT 0
+#define        ERRATA_THEAD_NUMBER 1
+#endif
+
+#define        CPUFEATURE_SVPBMT 0
+#define        CPUFEATURE_NUMBER 1
+
 #ifdef __ASSEMBLY__
 
 #define ALT_INSN_FAULT(x)                                              \
@@ -34,6 +42,57 @@ asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID,     \
                ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200)  \
                : : "r" (addr) : "memory")
 
+/*
+ * _val is marked as "will be overwritten", so need to set it to 0
+ * in the default case.
+ */
+#define ALT_SVPBMT_SHIFT 61
+#define ALT_THEAD_PBMT_SHIFT 59
+#define ALT_SVPBMT(_val, prot)                                         \
+asm(ALTERNATIVE_2("li %0, 0\t\nnop",                                   \
+                 "li %0, %1\t\nslli %0,%0,%3", 0,                      \
+                       CPUFEATURE_SVPBMT, CONFIG_RISCV_ISA_SVPBMT,     \
+                 "li %0, %2\t\nslli %0,%0,%4", THEAD_VENDOR_ID,        \
+                       ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT)    \
+               : "=r"(_val)                                            \
+               : "I"(prot##_SVPBMT >> ALT_SVPBMT_SHIFT),               \
+                 "I"(prot##_THEAD >> ALT_THEAD_PBMT_SHIFT),            \
+                 "I"(ALT_SVPBMT_SHIFT),                                \
+                 "I"(ALT_THEAD_PBMT_SHIFT))
+
+#ifdef CONFIG_ERRATA_THEAD_PBMT
+/*
+ * IO/NOCACHE memory types are handled together with svpbmt,
+ * so on T-Head chips, check if no other memory type is set,
+ * and set the non-0 PMA type if applicable.
+ */
+#define ALT_THEAD_PMA(_val)                                            \
+asm volatile(ALTERNATIVE(                                              \
+       "nop\n\t"                                                       \
+       "nop\n\t"                                                       \
+       "nop\n\t"                                                       \
+       "nop\n\t"                                                       \
+       "nop\n\t"                                                       \
+       "nop\n\t"                                                       \
+       "nop",                                                          \
+       "li      t3, %2\n\t"                                            \
+       "slli    t3, t3, %4\n\t"                                        \
+       "and     t3, %0, t3\n\t"                                        \
+       "bne     t3, zero, 2f\n\t"                                      \
+       "li      t3, %3\n\t"                                            \
+       "slli    t3, t3, %4\n\t"                                        \
+       "or      %0, %0, t3\n\t"                                        \
+       "2:",  THEAD_VENDOR_ID,                                         \
+               ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT)            \
+       : "+r"(_val)                                                    \
+       : "0"(_val),                                                    \
+         "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT),              \
+         "I"(_PAGE_PMA_THEAD >> ALT_THEAD_PBMT_SHIFT),                 \
+         "I"(ALT_THEAD_PBMT_SHIFT))
+#else
+#define ALT_THEAD_PMA(_val)
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif
index 3cfece8..5c3e7b9 100644 (file)
@@ -45,8 +45,6 @@ enum fixed_addresses {
        __end_of_fixed_addresses
 };
 
-#define FIXMAP_PAGE_IO         PAGE_KERNEL
-
 #define __early_set_fixmap     __set_fixmap
 
 #define __late_set_fixmap      __set_fixmap
index 0734e42..4e24868 100644 (file)
@@ -52,6 +52,7 @@ extern unsigned long elf_hwcap;
  */
 enum riscv_isa_ext_id {
        RISCV_ISA_EXT_SSCOFPMF = RISCV_ISA_EXT_BASE,
+       RISCV_ISA_EXT_SVPBMT,
        RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX,
 };
 
index d6c2779..b538919 100644 (file)
@@ -4,7 +4,7 @@
 
 static inline bool arch_irq_work_has_interrupt(void)
 {
-       return true;
+       return IS_ENABLED(CONFIG_SMP);
 }
 extern void arch_irq_work_raise(void);
 #endif /* _ASM_RISCV_IRQ_WORK_H */
index e4e291d..eee260e 100644 (file)
@@ -53,4 +53,15 @@ typedef void (*riscv_kexec_method)(unsigned long first_ind_entry,
 
 extern riscv_kexec_method riscv_kexec_norelocate;
 
+#ifdef CONFIG_KEXEC_FILE
+extern const struct kexec_file_ops elf_kexec_ops;
+
+struct purgatory_info;
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+                                    Elf_Shdr *section,
+                                    const Elf_Shdr *relsec,
+                                    const Elf_Shdr *symtab);
+#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+#endif
+
 #endif
index 0099dc1..cedcf8e 100644 (file)
@@ -16,6 +16,7 @@ typedef struct {
        atomic_long_t id;
 #endif
        void *vdso;
+       void *vdso_info;
 #ifdef CONFIG_SMP
        /* A local icache flush is needed before user execution can resume. */
        cpumask_t icache_stale_mask;
index 5b2e79e..59ba1fb 100644 (file)
@@ -7,6 +7,7 @@
 #define _ASM_RISCV_PGTABLE_32_H
 
 #include <asm-generic/pgtable-nopmd.h>
+#include <linux/bits.h>
 #include <linux/const.h>
 
 /* Size of region mapped by a page global directory */
 
 #define MAX_POSSIBLE_PHYSMEM_BITS 34
 
+/*
+ * rv32 PTE format:
+ * | XLEN-1  10 | 9             8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ *       PFN      reserved for SW   D   A   G   U   X   W   R   V
+ */
+#define _PAGE_PFN_MASK  GENMASK(31, 10)
+
+#define _PAGE_NOCACHE          0
+#define _PAGE_IO               0
+#define _PAGE_MTMASK           0
+
+/* Set of bits to preserve across pte_modify() */
+#define _PAGE_CHG_MASK  (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
+                                         _PAGE_WRITE | _PAGE_EXEC |    \
+                                         _PAGE_USER | _PAGE_GLOBAL))
+
 #endif /* _ASM_RISCV_PGTABLE_32_H */
index ba2494c..5c2aba5 100644 (file)
@@ -6,7 +6,9 @@
 #ifndef _ASM_RISCV_PGTABLE_64_H
 #define _ASM_RISCV_PGTABLE_64_H
 
+#include <linux/bits.h>
 #include <linux/const.h>
+#include <asm/errata_list.h>
 
 extern bool pgtable_l4_enabled;
 extern bool pgtable_l5_enabled;
@@ -65,6 +67,71 @@ typedef struct {
 
 #define PTRS_PER_PMD    (PAGE_SIZE / sizeof(pmd_t))
 
+/*
+ * rv64 PTE format:
+ * | 63 | 62 61 | 60 54 | 53  10 | 9             8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ *   N      MT     RSV    PFN      reserved for SW   D   A   G   U   X   W   R   V
+ */
+#define _PAGE_PFN_MASK  GENMASK(53, 10)
+
+/*
+ * [62:61] Svpbmt Memory Type definitions:
+ *
+ *  00 - PMA    Normal Cacheable, No change to implied PMA memory type
+ *  01 - NC     Non-cacheable, idempotent, weakly-ordered Main Memory
+ *  10 - IO     Non-cacheable, non-idempotent, strongly-ordered I/O memory
+ *  11 - Rsvd   Reserved for future standard use
+ */
+#define _PAGE_NOCACHE_SVPBMT   (1UL << 61)
+#define _PAGE_IO_SVPBMT                (1UL << 62)
+#define _PAGE_MTMASK_SVPBMT    (_PAGE_NOCACHE_SVPBMT | _PAGE_IO_SVPBMT)
+
+/*
+ * [63:59] T-Head Memory Type definitions:
+ *
+ * 00000 - NC   Weakly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
+ * 01110 - PMA  Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable
+ * 10000 - IO   Strongly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
+ */
+#define _PAGE_PMA_THEAD                ((1UL << 62) | (1UL << 61) | (1UL << 60))
+#define _PAGE_NOCACHE_THEAD    0UL
+#define _PAGE_IO_THEAD         (1UL << 63)
+#define _PAGE_MTMASK_THEAD     (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))
+
+static inline u64 riscv_page_mtmask(void)
+{
+       u64 val;
+
+       ALT_SVPBMT(val, _PAGE_MTMASK);
+       return val;
+}
+
+static inline u64 riscv_page_nocache(void)
+{
+       u64 val;
+
+       ALT_SVPBMT(val, _PAGE_NOCACHE);
+       return val;
+}
+
+static inline u64 riscv_page_io(void)
+{
+       u64 val;
+
+       ALT_SVPBMT(val, _PAGE_IO);
+       return val;
+}
+
+#define _PAGE_NOCACHE          riscv_page_nocache()
+#define _PAGE_IO               riscv_page_io()
+#define _PAGE_MTMASK           riscv_page_mtmask()
+
+/* Set of bits to preserve across pte_modify() */
+#define _PAGE_CHG_MASK  (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
+                                         _PAGE_WRITE | _PAGE_EXEC |    \
+                                         _PAGE_USER | _PAGE_GLOBAL |   \
+                                         _PAGE_MTMASK))
+
 static inline int pud_present(pud_t pud)
 {
        return (pud_val(pud) & _PAGE_PRESENT);
@@ -113,12 +180,12 @@ static inline unsigned long _pud_pfn(pud_t pud)
 
 static inline pmd_t *pud_pgtable(pud_t pud)
 {
-       return (pmd_t *)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
+       return (pmd_t *)pfn_to_virt(__page_val_to_pfn(pud_val(pud)));
 }
 
 static inline struct page *pud_page(pud_t pud)
 {
-       return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
+       return pfn_to_page(__page_val_to_pfn(pud_val(pud)));
 }
 
 #define mm_p4d_folded  mm_p4d_folded
@@ -143,12 +210,16 @@ static inline bool mm_pud_folded(struct mm_struct *mm)
 
 static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
 {
-       return __pmd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+       unsigned long prot_val = pgprot_val(prot);
+
+       ALT_THEAD_PMA(prot_val);
+
+       return __pmd((pfn << _PAGE_PFN_SHIFT) | prot_val);
 }
 
 static inline unsigned long _pmd_pfn(pmd_t pmd)
 {
-       return pmd_val(pmd) >> _PAGE_PFN_SHIFT;
+       return __page_val_to_pfn(pmd_val(pmd));
 }
 
 #define mk_pmd(page, prot)    pfn_pmd(page_to_pfn(page), prot)
index a6b0c89..b9e13a8 100644 (file)
@@ -6,12 +6,6 @@
 #ifndef _ASM_RISCV_PGTABLE_BITS_H
 #define _ASM_RISCV_PGTABLE_BITS_H
 
-/*
- * PTE format:
- * | XLEN-1  10 | 9             8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
- *       PFN      reserved for SW   D   A   G   U   X   W   R   V
- */
-
 #define _PAGE_ACCESSED_OFFSET 6
 
 #define _PAGE_PRESENT   (1 << 0)
 
 #define _PAGE_PFN_SHIFT 10
 
-/* Set of bits to preserve across pte_modify() */
-#define _PAGE_CHG_MASK  (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
-                                         _PAGE_WRITE | _PAGE_EXEC |    \
-                                         _PAGE_USER | _PAGE_GLOBAL))
 /*
  * when all of R/W/X are zero, the PTE is a pointer to the next level
  * of the page table; otherwise, it is a leaf PTE.
index 4200dde..1d1be9d 100644 (file)
 #include <asm/tlbflush.h>
 #include <linux/mm_types.h>
 
+#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
+
 #ifdef CONFIG_64BIT
 #include <asm/pgtable-64.h>
 #else
@@ -179,11 +181,8 @@ extern struct pt_alloc_ops pt_ops __initdata;
 
 #define PAGE_TABLE             __pgprot(_PAGE_TABLE)
 
-/*
- * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
- * change the properties of memory regions.
- */
-#define _PAGE_IOREMAP _PAGE_KERNEL
+#define _PAGE_IOREMAP  ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
+#define PAGE_KERNEL_IO         __pgprot(_PAGE_IOREMAP)
 
 extern pgd_t swapper_pg_dir[];
 
@@ -253,7 +252,11 @@ static inline void pmd_clear(pmd_t *pmdp)
 
 static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
 {
-       return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+       unsigned long prot_val = pgprot_val(prot);
+
+       ALT_THEAD_PMA(prot_val);
+
+       return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
 }
 
 static inline unsigned long _pgd_pfn(pgd_t pgd)
@@ -263,12 +266,12 @@ static inline unsigned long _pgd_pfn(pgd_t pgd)
 
 static inline struct page *pmd_page(pmd_t pmd)
 {
-       return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
+       return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
 }
 
 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 {
-       return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
+       return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
 }
 
 static inline pte_t pmd_pte(pmd_t pmd)
@@ -284,7 +287,7 @@ static inline pte_t pud_pte(pud_t pud)
 /* Yields the page frame number (PFN) of a page table entry */
 static inline unsigned long pte_pfn(pte_t pte)
 {
-       return (pte_val(pte) >> _PAGE_PFN_SHIFT);
+       return __page_val_to_pfn(pte_val(pte));
 }
 
 #define pte_page(x)     pfn_to_page(pte_pfn(x))
@@ -292,7 +295,11 @@ static inline unsigned long pte_pfn(pte_t pte)
 /* Constructs a page table entry */
 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
 {
-       return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+       unsigned long prot_val = pgprot_val(prot);
+
+       ALT_THEAD_PMA(prot_val);
+
+       return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
 }
 
 #define mk_pte(page, prot)       pfn_pte(page_to_pfn(page), prot)
@@ -406,7 +413,11 @@ static inline int pmd_protnone(pmd_t pmd)
 /* Modify page protection bits */
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-       return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+       unsigned long newprot_val = pgprot_val(newprot);
+
+       ALT_THEAD_PMA(newprot_val);
+
+       return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
 }
 
 #define pgd_ERROR(e) \
@@ -539,6 +550,28 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
        return ptep_test_and_clear_young(vma, address, ptep);
 }
 
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+       unsigned long prot = pgprot_val(_prot);
+
+       prot &= ~_PAGE_MTMASK;
+       prot |= _PAGE_IO;
+
+       return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+       unsigned long prot = pgprot_val(_prot);
+
+       prot &= ~_PAGE_MTMASK;
+       prot |= _PAGE_NOCACHE;
+
+       return __pgprot(prot);
+}
+
 /*
  * THP functions
  */
@@ -761,8 +794,17 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
  * 63–48 all equal to bit 47, or else a page-fault exception will occur."
  */
 #ifdef CONFIG_64BIT
-#define TASK_SIZE      (PGDIR_SIZE * PTRS_PER_PGD / 2)
-#define TASK_SIZE_MIN  (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
+#define TASK_SIZE_64   (PGDIR_SIZE * PTRS_PER_PGD / 2)
+#define TASK_SIZE_MIN  (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
+
+#ifdef CONFIG_COMPAT
+#define TASK_SIZE_32   (_AC(0x80000000, UL) - PAGE_SIZE)
+#define TASK_SIZE      (test_thread_flag(TIF_32BIT) ? \
+                        TASK_SIZE_32 : TASK_SIZE_64)
+#else
+#define TASK_SIZE      TASK_SIZE_64
+#endif
+
 #else
 #define TASK_SIZE      FIXADDR_START
 #define TASK_SIZE_MIN  TASK_SIZE
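
The new __page_val_to_pfn() helper exists because, with Svpbmt, attribute bits now live above the PFN field, so a bare right shift by _PAGE_PFN_SHIFT would fold them into the returned PFN. A minimal standalone sketch of the difference, assuming the PFN field occupies PTE bits 53:10 (i.e. _PAGE_PFN_MASK covering bits 53:10, as in the privileged spec):

/* Sketch only; assumes the PFN field is PTE bits 53:10 (_PAGE_PFN_MASK). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_PFN_SHIFT 10
#define PAGE_PFN_MASK  (((1ULL << 54) - 1) & ~((1ULL << 10) - 1))   /* bits 53:10 */

int main(void)
{
        /* Hypothetical leaf PTE: PFN 0x80200, permission bits, Svpbmt IO (bit 62) */
        uint64_t pte = (0x80200ULL << PAGE_PFN_SHIFT) | (1ULL << 62) | 0xcf;

        printf("plain shift : 0x%llx\n",
               (unsigned long long)(pte >> PAGE_PFN_SHIFT));                    /* PBMT bit leaks into the PFN */
        printf("mask + shift: 0x%llx\n",
               (unsigned long long)((pte & PAGE_PFN_MASK) >> PAGE_PFN_SHIFT));  /* 0x80200 */
        return 0;
}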
index 0749924..21c8072 100644 (file)
 #define TASK_UNMAPPED_BASE     PAGE_ALIGN(TASK_SIZE / 3)
 
 #define STACK_TOP              TASK_SIZE
-#define STACK_TOP_MAX          STACK_TOP
+#ifdef CONFIG_64BIT
+#define STACK_TOP_MAX          TASK_SIZE_64
+#else
+#define STACK_TOP_MAX          TASK_SIZE
+#endif
 #define STACK_ALIGN            16
 
 #ifndef __ASSEMBLY__
diff --git a/arch/riscv/include/asm/signal32.h b/arch/riscv/include/asm/signal32.h
new file mode 100644 (file)
index 0000000..96dc569
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SIGNAL32_H
+#define __ASM_SIGNAL32_H
+
+#if IS_ENABLED(CONFIG_COMPAT)
+int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+                         struct pt_regs *regs);
+#else
+static inline
+int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+                         struct pt_regs *regs)
+{
+       return -1;
+}
+#endif
+
+#endif
index 7ac6a0e..384a63b 100644 (file)
@@ -16,6 +16,7 @@
 
 /* The array of function pointers for syscalls. */
 extern void * const sys_call_table[];
+extern void * const compat_sys_call_table[];
 
 /*
  * Only the low 32 bits of orig_r0 are meaningful, so we return int.
index 74d888c..78933ac 100644 (file)
@@ -97,6 +97,7 @@ struct thread_info {
 #define TIF_SECCOMP            8       /* syscall secure computing */
 #define TIF_NOTIFY_SIGNAL      9       /* signal notifications exist */
 #define TIF_UPROBE             10      /* uprobe breakpoint or singlestep */
+#define TIF_32BIT              11      /* compat-mode 32bit process */
 
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
index 6c31609..221630b 100644 (file)
@@ -9,7 +9,17 @@
  */
 
 #define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_MEMFD_SECRET
+
+#ifdef CONFIG_COMPAT
+#define __ARCH_WANT_COMPAT_TRUNCATE64
+#define __ARCH_WANT_COMPAT_FTRUNCATE64
+#define __ARCH_WANT_COMPAT_FALLOCATE
+#define __ARCH_WANT_COMPAT_PREAD64
+#define __ARCH_WANT_COMPAT_PWRITE64
+#define __ARCH_WANT_COMPAT_SYNC_FILE_RANGE
+#define __ARCH_WANT_COMPAT_READAHEAD
+#define __ARCH_WANT_COMPAT_FADVISE64_64
+#endif
 
 #include <uapi/asm/unistd.h>
 
index bc6f75f..af98142 100644 (file)
 
 #define VDSO_SYMBOL(base, name)                                                        \
        (void __user *)((unsigned long)(base) + __vdso_##name##_offset)
+
+#ifdef CONFIG_COMPAT
+#include <generated/compat_vdso-offsets.h>
+
+#define COMPAT_VDSO_SYMBOL(base, name)                                         \
+       (void __user *)((unsigned long)(base) + compat__vdso_##name##_offset)
+
+#endif /* CONFIG_COMPAT */
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* CONFIG_MMU */
index 9d93421..cb89af3 100644 (file)
@@ -6,5 +6,6 @@
 #define ASM_VENDOR_LIST_H
 
 #define SIFIVE_VENDOR_ID       0x489
+#define THEAD_VENDOR_ID                0x5b7
 
 #endif
diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h
new file mode 100644 (file)
index 0000000..d4ffc3c
--- /dev/null
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * XIP fixup macros, only useful in assembly.
+ */
+#ifndef _ASM_RISCV_XIP_FIXUP_H
+#define _ASM_RISCV_XIP_FIXUP_H
+
+#include <linux/pgtable.h>
+
+#ifdef CONFIG_XIP_KERNEL
+.macro XIP_FIXUP_OFFSET reg
+        REG_L t0, _xip_fixup
+        add \reg, \reg, t0
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+       la t1, __data_loc
+       REG_L t1, _xip_phys_offset
+       sub \reg, \reg, t1
+       add \reg, \reg, t0
+.endm
+
+_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
+#else
+.macro XIP_FIXUP_OFFSET reg
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+.endm
+#endif /* CONFIG_XIP_KERNEL */
+
+#endif
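
The two .dword constants above encode a simple address translation for XIP kernels: roughly speaking, code and read-only data execute in place from flash while writable data is copied to RAM early in boot, so adding _xip_fixup to a symbol's flash-relative address yields its RAM copy. A small arithmetic sketch with made-up CONFIG values (the real ones come from Kconfig and the kernel's XIP layout):

/*
 * Sketch only. The CONFIG_* and XIP_OFFSET values below are made up for
 * illustration; the real ones come from Kconfig and the XIP link layout.
 */
#include <stdio.h>

#define CONFIG_XIP_PHYS_ADDR  0x20000000UL   /* hypothetical flash base */
#define CONFIG_PHYS_RAM_BASE  0x80000000UL   /* hypothetical RAM base   */
#define XIP_OFFSET            0x00800000UL   /* hypothetical RO size    */

int main(void)
{
        unsigned long xip_fixup = CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET;

        /* A writable symbol linked just past the read-only (flash) portion */
        unsigned long linked = CONFIG_XIP_PHYS_ADDR + XIP_OFFSET + 0x1000;

        /* XIP_FIXUP_OFFSET: translate the linked address to its RAM copy */
        printf("RAM address: 0x%lx\n", linked + xip_fixup);   /* 0x80001000 */
        return 0;
}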
index 8062996..73d7cdd 100644 (file)
  * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
-#ifdef __LP64__
+#if defined(__LP64__) && !defined(__SYSCALL_COMPAT)
 #define __ARCH_WANT_NEW_STAT
 #define __ARCH_WANT_SET_GET_RLIMIT
 #endif /* __LP64__ */
 
 #define __ARCH_WANT_SYS_CLONE3
+#define __ARCH_WANT_MEMFD_SECRET
 
 #include <asm-generic/unistd.h>
 
index 87adbe4..c71d659 100644 (file)
@@ -14,10 +14,25 @@ ifdef CONFIG_KEXEC
 AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
 endif
 
+# cmodel=medany and notrace when patching early
+ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
+CFLAGS_alternative.o := -mcmodel=medany
+CFLAGS_cpufeature.o := -mcmodel=medany
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_alternative.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_cpufeature.o = $(CC_FLAGS_FTRACE)
+endif
+ifdef CONFIG_KASAN
+KASAN_SANITIZE_alternative.o := n
+KASAN_SANITIZE_cpufeature.o := n
+endif
+endif
+
 extra-y += head.o
 extra-y += vmlinux.lds
 
 obj-y  += soc.o
+obj-$(CONFIG_RISCV_ALTERNATIVE) += alternative.o
 obj-y  += cpu.o
 obj-y  += cpufeature.o
 obj-y  += entry.o
@@ -64,8 +79,12 @@ endif
 obj-$(CONFIG_HOTPLUG_CPU)      += cpu-hotplug.o
 obj-$(CONFIG_KGDB)             += kgdb.o
 obj-$(CONFIG_KEXEC)            += kexec_relocate.o crash_save_regs.o machine_kexec.o
+obj-$(CONFIG_KEXEC_FILE)       += elf_kexec.o machine_kexec_file.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump.o
 
 obj-$(CONFIG_JUMP_LABEL)       += jump_label.o
 
 obj-$(CONFIG_EFI)              += efi.o
+obj-$(CONFIG_COMPAT)           += compat_syscall_table.o
+obj-$(CONFIG_COMPAT)           += compat_signal.o
+obj-$(CONFIG_COMPAT)           += compat_vdso/
diff --git a/arch/riscv/kernel/alternative.c b/arch/riscv/kernel/alternative.c
new file mode 100644 (file)
index 0000000..c9d0d3c
--- /dev/null
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * alternative runtime patching
+ * inspired by the ARM64 and x86 version
+ *
+ * Copyright (C) 2021 Sifive.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/uaccess.h>
+#include <asm/alternative.h>
+#include <asm/sections.h>
+#include <asm/vendorid_list.h>
+#include <asm/sbi.h>
+#include <asm/csr.h>
+
+struct cpu_manufacturer_info_t {
+       unsigned long vendor_id;
+       unsigned long arch_id;
+       unsigned long imp_id;
+       void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end,
+                                 unsigned long archid, unsigned long impid,
+                                 unsigned int stage);
+};
+
+static void __init_or_module riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info)
+{
+#ifdef CONFIG_RISCV_M_MODE
+       cpu_mfr_info->vendor_id = csr_read(CSR_MVENDORID);
+       cpu_mfr_info->arch_id = csr_read(CSR_MARCHID);
+       cpu_mfr_info->imp_id = csr_read(CSR_MIMPID);
+#else
+       cpu_mfr_info->vendor_id = sbi_get_mvendorid();
+       cpu_mfr_info->arch_id = sbi_get_marchid();
+       cpu_mfr_info->imp_id = sbi_get_mimpid();
+#endif
+
+       switch (cpu_mfr_info->vendor_id) {
+#ifdef CONFIG_ERRATA_SIFIVE
+       case SIFIVE_VENDOR_ID:
+               cpu_mfr_info->vendor_patch_func = sifive_errata_patch_func;
+               break;
+#endif
+#ifdef CONFIG_ERRATA_THEAD
+       case THEAD_VENDOR_ID:
+               cpu_mfr_info->vendor_patch_func = thead_errata_patch_func;
+               break;
+#endif
+       default:
+               cpu_mfr_info->vendor_patch_func = NULL;
+       }
+}
+
+/*
+ * This is called very early in the boot process (directly after we run
+ * feature detection on the boot CPU). No need to worry about other CPUs
+ * here.
+ */
+static void __init_or_module _apply_alternatives(struct alt_entry *begin,
+                                                struct alt_entry *end,
+                                                unsigned int stage)
+{
+       struct cpu_manufacturer_info_t cpu_mfr_info;
+
+       riscv_fill_cpu_mfr_info(&cpu_mfr_info);
+
+       riscv_cpufeature_patch_func(begin, end, stage);
+
+       if (!cpu_mfr_info.vendor_patch_func)
+               return;
+
+       cpu_mfr_info.vendor_patch_func(begin, end,
+                                  cpu_mfr_info.arch_id,
+                                  cpu_mfr_info.imp_id,
+                                  stage);
+}
+
+void __init apply_boot_alternatives(void)
+{
+       /* If called on a non-boot CPU, things could go wrong */
+       WARN_ON(smp_processor_id() != 0);
+
+       _apply_alternatives((struct alt_entry *)__alt_start,
+                           (struct alt_entry *)__alt_end,
+                           RISCV_ALTERNATIVES_BOOT);
+}
+
+/*
+ * apply_early_boot_alternatives() is called from setup_vm() with MMU-off.
+ *
+ * The following requirements should be honoured for it to work correctly:
+ * 1) It should use PC-relative addressing for accessing kernel symbols.
+ *    To achieve this we always use GCC cmodel=medany.
+ * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
+ *    so disable compiler instrumentation when FTRACE is enabled.
+ *
+ * Currently, the above requirements are honoured by using custom CFLAGS
+ * for alternative.o in kernel/Makefile.
+ */
+void __init apply_early_boot_alternatives(void)
+{
+#ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
+       _apply_alternatives((struct alt_entry *)__alt_start,
+                           (struct alt_entry *)__alt_end,
+                           RISCV_ALTERNATIVES_EARLY_BOOT);
+#endif
+}
+
+#ifdef CONFIG_MODULES
+void apply_module_alternatives(void *start, size_t length)
+{
+       _apply_alternatives((struct alt_entry *)start,
+                           (struct alt_entry *)(start + length),
+                           RISCV_ALTERNATIVES_MODULE);
+}
+#endif
diff --git a/arch/riscv/kernel/compat_signal.c b/arch/riscv/kernel/compat_signal.c
new file mode 100644 (file)
index 0000000..6ec4e34
--- /dev/null
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/compat.h>
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/linkage.h>
+
+#include <asm/csr.h>
+#include <asm/signal32.h>
+#include <asm/switch_to.h>
+#include <asm/ucontext.h>
+#include <asm/vdso.h>
+
+#define COMPAT_DEBUG_SIG 0
+
+struct compat_sigcontext {
+       struct compat_user_regs_struct sc_regs;
+       union __riscv_fp_state sc_fpregs;
+};
+
+struct compat_ucontext {
+       compat_ulong_t          uc_flags;
+       struct compat_ucontext  *uc_link;
+       compat_stack_t          uc_stack;
+       sigset_t                uc_sigmask;
+       /* There's some padding here to allow sigset_t to be expanded in the
+        * future.  Though this is unlikely, other architectures put uc_sigmask
+        * at the end of this structure and explicitly state it can be
+        * expanded, so we didn't want to box ourselves in here. */
+       __u8              __unused[1024 / 8 - sizeof(sigset_t)];
+       /* We can't put uc_sigmask at the end of this structure because we need
+        * to be able to expand sigcontext in the future.  For example, the
+        * vector ISA extension will almost certainly add ISA state.  We want
+        * to ensure all user-visible ISA state can be saved and restored via a
+        * ucontext, so we're putting this at the end in order to allow for
+        * infinite extensibility.  Since we know this will be extended and we
+        * assume sigset_t won't be extended an extreme amount, we're
+        * prioritizing this. */
+       struct compat_sigcontext uc_mcontext;
+};
+
+struct compat_rt_sigframe {
+       struct compat_siginfo info;
+       struct compat_ucontext uc;
+};
+
+#ifdef CONFIG_FPU
+static long compat_restore_fp_state(struct pt_regs *regs,
+       union __riscv_fp_state __user *sc_fpregs)
+{
+       long err;
+       struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
+       size_t i;
+
+       err = __copy_from_user(&current->thread.fstate, state, sizeof(*state));
+       if (unlikely(err))
+               return err;
+
+       fstate_restore(current, regs);
+
+       /* We support no other extension state at this time. */
+       for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
+               u32 value;
+
+               err = __get_user(value, &sc_fpregs->q.reserved[i]);
+               if (unlikely(err))
+                       break;
+               if (value != 0)
+                       return -EINVAL;
+       }
+
+       return err;
+}
+
+static long compat_save_fp_state(struct pt_regs *regs,
+                         union __riscv_fp_state __user *sc_fpregs)
+{
+       long err;
+       struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
+       size_t i;
+
+       fstate_save(current, regs);
+       err = __copy_to_user(state, &current->thread.fstate, sizeof(*state));
+       if (unlikely(err))
+               return err;
+
+       /* We support no other extension state at this time. */
+       for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
+               err = __put_user(0, &sc_fpregs->q.reserved[i]);
+               if (unlikely(err))
+                       break;
+       }
+
+       return err;
+}
+#else
+#define compat_save_fp_state(task, regs) (0)
+#define compat_restore_fp_state(task, regs) (0)
+#endif
+
+static long compat_restore_sigcontext(struct pt_regs *regs,
+       struct compat_sigcontext __user *sc)
+{
+       long err;
+       struct compat_user_regs_struct cregs;
+
+       /* sc_regs is structured the same as the start of pt_regs */
+       err = __copy_from_user(&cregs, &sc->sc_regs, sizeof(sc->sc_regs));
+
+       cregs_to_regs(&cregs, regs);
+
+       /* Restore the floating-point state. */
+       if (has_fpu())
+               err |= compat_restore_fp_state(regs, &sc->sc_fpregs);
+       return err;
+}
+
+COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
+{
+       struct pt_regs *regs = current_pt_regs();
+       struct compat_rt_sigframe __user *frame;
+       struct task_struct *task;
+       sigset_t set;
+
+       /* Always make any pending restarted system calls return -EINTR */
+       current->restart_block.fn = do_no_restart_syscall;
+
+       frame = (struct compat_rt_sigframe __user *)regs->sp;
+
+       if (!access_ok(frame, sizeof(*frame)))
+               goto badframe;
+
+       if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+               goto badframe;
+
+       set_current_blocked(&set);
+
+       if (compat_restore_sigcontext(regs, &frame->uc.uc_mcontext))
+               goto badframe;
+
+       if (compat_restore_altstack(&frame->uc.uc_stack))
+               goto badframe;
+
+       return regs->a0;
+
+badframe:
+       task = current;
+       if (show_unhandled_signals) {
+               pr_info_ratelimited(
+                       "%s[%d]: bad frame in %s: frame=%p pc=%p sp=%p\n",
+                       task->comm, task_pid_nr(task), __func__,
+                       frame, (void *)regs->epc, (void *)regs->sp);
+       }
+       force_sig(SIGSEGV);
+       return 0;
+}
+
+static long compat_setup_sigcontext(struct compat_rt_sigframe __user *frame,
+       struct pt_regs *regs)
+{
+       struct compat_sigcontext __user *sc = &frame->uc.uc_mcontext;
+       struct compat_user_regs_struct cregs;
+       long err;
+
+       regs_to_cregs(&cregs, regs);
+
+       /* sc_regs is structured the same as the start of pt_regs */
+       err = __copy_to_user(&sc->sc_regs, &cregs, sizeof(sc->sc_regs));
+       /* Save the floating-point state. */
+       if (has_fpu())
+               err |= compat_save_fp_state(regs, &sc->sc_fpregs);
+       return err;
+}
+
+static inline void __user *compat_get_sigframe(struct ksignal *ksig,
+       struct pt_regs *regs, size_t framesize)
+{
+       unsigned long sp;
+       /* Default to using normal stack */
+       sp = regs->sp;
+
+       /*
+        * If we are on the alternate signal stack and would overflow it, don't.
+        * Return an always-bogus address instead so we will die with SIGSEGV.
+        */
+       if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
+               return (void __user __force *)(-1UL);
+
+       /* This is the X/Open sanctioned signal stack switching. */
+       sp = sigsp(sp, ksig) - framesize;
+
+       /* Align the stack frame. */
+       sp &= ~0xfUL;
+
+       return (void __user *)sp;
+}
+
+int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+       struct pt_regs *regs)
+{
+       struct compat_rt_sigframe __user *frame;
+       long err = 0;
+
+       frame = compat_get_sigframe(ksig, regs, sizeof(*frame));
+       if (!access_ok(frame, sizeof(*frame)))
+               return -EFAULT;
+
+       err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
+
+       /* Create the ucontext. */
+       err |= __put_user(0, &frame->uc.uc_flags);
+       err |= __put_user(NULL, &frame->uc.uc_link);
+       err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
+       err |= compat_setup_sigcontext(frame, regs);
+       err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+       if (err)
+               return -EFAULT;
+
+       regs->ra = (unsigned long)COMPAT_VDSO_SYMBOL(
+                       current->mm->context.vdso, rt_sigreturn);
+
+       /*
+        * Set up registers for signal handler.
+        * Registers that we don't modify keep the value they had from
+        * user-space at the time we took the signal.
+        * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+        * since some things rely on this (e.g. glibc's debug/segfault.c).
+        */
+       regs->epc = (unsigned long)ksig->ka.sa.sa_handler;
+       regs->sp = (unsigned long)frame;
+       regs->a0 = ksig->sig;                     /* a0: signal number */
+       regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */
+       regs->a2 = (unsigned long)(&frame->uc);   /* a2: ucontext pointer */
+
+#if COMPAT_DEBUG_SIG
+       pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n",
+               current->comm, task_pid_nr(current), ksig->sig,
+               (void *)regs->epc, (void *)regs->ra, frame);
+#endif
+
+       return 0;
+}
diff --git a/arch/riscv/kernel/compat_syscall_table.c b/arch/riscv/kernel/compat_syscall_table.c
new file mode 100644 (file)
index 0000000..651f2b0
--- /dev/null
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define __SYSCALL_COMPAT
+
+#include <linux/compat.h>
+#include <linux/syscalls.h>
+#include <asm-generic/mman-common.h>
+#include <asm-generic/syscalls.h>
+#include <asm/syscall.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call)      [nr] = (call),
+
+asmlinkage long compat_sys_rt_sigreturn(void);
+
+void * const compat_sys_call_table[__NR_syscalls] = {
+       [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
diff --git a/arch/riscv/kernel/compat_vdso/.gitignore b/arch/riscv/kernel/compat_vdso/.gitignore
new file mode 100644 (file)
index 0000000..19d83d8
--- /dev/null
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+compat_vdso.lds
diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile
new file mode 100644 (file)
index 0000000..260daf3
--- /dev/null
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for compat_vdso
+#
+
+# Symbols present in the compat_vdso
+compat_vdso-syms  = rt_sigreturn
+compat_vdso-syms += getcpu
+compat_vdso-syms += flush_icache
+
+COMPAT_CC := $(CC)
+COMPAT_LD := $(LD)
+
+COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
+COMPAT_LD_FLAGS := -melf32lriscv
+
+# Files to link into the compat_vdso
+obj-compat_vdso = $(patsubst %, %.o, $(compat_vdso-syms)) note.o
+
+# Build rules
+targets := $(obj-compat_vdso) compat_vdso.so compat_vdso.so.dbg compat_vdso.lds
+obj-compat_vdso := $(addprefix $(obj)/, $(obj-compat_vdso))
+
+obj-y += compat_vdso.o
+CPPFLAGS_compat_vdso.lds += -P -C -U$(ARCH)
+
+# Disable profiling and instrumentation for VDSO code
+GCOV_PROFILE := n
+KCOV_INSTRUMENT := n
+KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
+
+# Force dependency
+$(obj)/compat_vdso.o: $(obj)/compat_vdso.so
+
+# link rule for the .so file, .lds has to be first
+$(obj)/compat_vdso.so.dbg: $(obj)/compat_vdso.lds $(obj-compat_vdso) FORCE
+       $(call if_changed,compat_vdsold)
+LDFLAGS_compat_vdso.so.dbg = -shared -S -soname=linux-compat_vdso.so.1 \
+       --build-id=sha1 --hash-style=both --eh-frame-hdr
+
+$(obj-compat_vdso): %.o: %.S FORCE
+       $(call if_changed_dep,compat_vdsoas)
+
+# strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+       $(call if_changed,objcopy)
+
+# Generate VDSO offsets using helper script
+gen-compat_vdsosym := $(srctree)/$(src)/gen_compat_vdso_offsets.sh
+quiet_cmd_compat_vdsosym = VDSOSYM $@
+       cmd_compat_vdsosym = $(NM) $< | $(gen-compat_vdsosym) | LC_ALL=C sort > $@
+
+include/generated/compat_vdso-offsets.h: $(obj)/compat_vdso.so.dbg FORCE
+       $(call if_changed,compat_vdsosym)
+
+# actual build commands
+# The DSO images are built using a special linker script
+# Make sure only to export the intended __compat_vdso_xxx symbol offsets.
+quiet_cmd_compat_vdsold = VDSOLD  $@
+      cmd_compat_vdsold = $(COMPAT_LD) $(ld_flags) $(COMPAT_LD_FLAGS) -T $(filter-out FORCE,$^) -o $@.tmp && \
+                   $(OBJCOPY) $(patsubst %, -G __compat_vdso_%, $(compat_vdso-syms)) $@.tmp $@ && \
+                   rm $@.tmp
+
+# actual build commands
+quiet_cmd_compat_vdsoas = VDSOAS $@
+      cmd_compat_vdsoas = $(COMPAT_CC) $(a_flags) $(COMPAT_CC_FLAGS) -c -o $@ $<
+
+# install commands for the unstripped file
+quiet_cmd_compat_vdso_install = INSTALL $@
+      cmd_compat_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/compat_vdso/$@
+
+compat_vdso.so: $(obj)/compat_vdso.so.dbg
+       @mkdir -p $(MODLIB)/compat_vdso
+       $(call cmd,compat_vdso_install)
+
+compat_vdso_install: compat_vdso.so
diff --git a/arch/riscv/kernel/compat_vdso/compat_vdso.S b/arch/riscv/kernel/compat_vdso/compat_vdso.S
new file mode 100644 (file)
index 0000000..ffd6623
--- /dev/null
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#define        vdso_start      compat_vdso_start
+#define        vdso_end        compat_vdso_end
+
+#define        __VDSO_PATH     "arch/riscv/kernel/compat_vdso/compat_vdso.so"
+
+#include "../vdso/vdso.S"
diff --git a/arch/riscv/kernel/compat_vdso/compat_vdso.lds.S b/arch/riscv/kernel/compat_vdso/compat_vdso.lds.S
new file mode 100644 (file)
index 0000000..c7c9355
--- /dev/null
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include "../vdso/vdso.lds.S"
diff --git a/arch/riscv/kernel/compat_vdso/flush_icache.S b/arch/riscv/kernel/compat_vdso/flush_icache.S
new file mode 100644 (file)
index 0000000..523dd8b
--- /dev/null
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include "../vdso/flush_icache.S"
diff --git a/arch/riscv/kernel/compat_vdso/gen_compat_vdso_offsets.sh b/arch/riscv/kernel/compat_vdso/gen_compat_vdso_offsets.sh
new file mode 100755 (executable)
index 0000000..8ac070c
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+LC_ALL=C
+sed -n -e 's/^[0]\+\(0[0-9a-fA-F]*\) . \(__vdso_[a-zA-Z0-9_]*\)$/\#define compat\2_offset\t0x\1/p'
diff --git a/arch/riscv/kernel/compat_vdso/getcpu.S b/arch/riscv/kernel/compat_vdso/getcpu.S
new file mode 100644 (file)
index 0000000..10f463e
--- /dev/null
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include "../vdso/getcpu.S"
diff --git a/arch/riscv/kernel/compat_vdso/note.S b/arch/riscv/kernel/compat_vdso/note.S
new file mode 100644 (file)
index 0000000..b103129
--- /dev/null
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include "../vdso/note.S"
diff --git a/arch/riscv/kernel/compat_vdso/rt_sigreturn.S b/arch/riscv/kernel/compat_vdso/rt_sigreturn.S
new file mode 100644 (file)
index 0000000..884aada
--- /dev/null
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include "../vdso/rt_sigreturn.S"
index ccb6177..fba9e9f 100644 (file)
@@ -88,6 +88,7 @@ int riscv_of_parent_hartid(struct device_node *node)
  */
 static struct riscv_isa_ext_data isa_ext_arr[] = {
        __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
+       __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
        __RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX),
 };
 
@@ -138,6 +139,7 @@ static void print_mmu(struct seq_file *f)
 {
        char sv_type[16];
 
+#ifdef CONFIG_MMU
 #if defined(CONFIG_32BIT)
        strncpy(sv_type, "sv32", 5);
 #elif defined(CONFIG_64BIT)
@@ -148,6 +150,9 @@ static void print_mmu(struct seq_file *f)
        else
                strncpy(sv_type, "sv39", 5);
 #endif
+#else
+       strncpy(sv_type, "none", 5);
+#endif /* CONFIG_MMU */
        seq_printf(f, "mmu\t\t: %s\n", sv_type);
 }
 
index 1b2d42d..a6f62a6 100644 (file)
@@ -8,9 +8,15 @@
 
 #include <linux/bitmap.h>
 #include <linux/ctype.h>
+#include <linux/libfdt.h>
+#include <linux/module.h>
 #include <linux/of.h>
-#include <asm/processor.h>
+#include <asm/alternative.h>
+#include <asm/errata_list.h>
 #include <asm/hwcap.h>
+#include <asm/patch.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
 #include <asm/smp.h>
 #include <asm/switch_to.h>
 
@@ -192,6 +198,7 @@ void __init riscv_fill_hwcap(void)
                                set_bit(*ext - 'a', this_isa);
                        } else {
                                SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF);
+                               SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT);
                        }
 #undef SET_ISA_EXT_MAP
                }
@@ -206,11 +213,10 @@ void __init riscv_fill_hwcap(void)
                else
                        elf_hwcap = this_hwcap;
 
-               if (bitmap_weight(riscv_isa, RISCV_ISA_EXT_MAX))
-                       bitmap_and(riscv_isa, riscv_isa, this_isa, RISCV_ISA_EXT_MAX);
-               else
+               if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
                        bitmap_copy(riscv_isa, this_isa, RISCV_ISA_EXT_MAX);
-
+               else
+                       bitmap_and(riscv_isa, riscv_isa, this_isa, RISCV_ISA_EXT_MAX);
        }
 
        /* We don't support systems with F but without D, so mask those out
@@ -237,3 +243,74 @@ void __init riscv_fill_hwcap(void)
                static_branch_enable(&cpu_hwcap_fpu);
 #endif
 }
+
+#ifdef CONFIG_RISCV_ALTERNATIVE
+struct cpufeature_info {
+       char name[ERRATA_STRING_LENGTH_MAX];
+       bool (*check_func)(unsigned int stage);
+};
+
+static bool __init_or_module cpufeature_svpbmt_check_func(unsigned int stage)
+{
+#ifdef CONFIG_RISCV_ISA_SVPBMT
+       switch (stage) {
+       case RISCV_ALTERNATIVES_EARLY_BOOT:
+               return false;
+       default:
+               return riscv_isa_extension_available(NULL, SVPBMT);
+       }
+#endif
+
+       return false;
+}
+
+static const struct cpufeature_info __initdata_or_module
+cpufeature_list[CPUFEATURE_NUMBER] = {
+       {
+               .name = "svpbmt",
+               .check_func = cpufeature_svpbmt_check_func
+       },
+};
+
+static u32 __init_or_module cpufeature_probe(unsigned int stage)
+{
+       const struct cpufeature_info *info;
+       u32 cpu_req_feature = 0;
+       int idx;
+
+       for (idx = 0; idx < CPUFEATURE_NUMBER; idx++) {
+               info = &cpufeature_list[idx];
+
+               if (info->check_func(stage))
+                       cpu_req_feature |= (1U << idx);
+       }
+
+       return cpu_req_feature;
+}
+
+void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
+                                                 struct alt_entry *end,
+                                                 unsigned int stage)
+{
+       u32 cpu_req_feature = cpufeature_probe(stage);
+       u32 cpu_apply_feature = 0;
+       struct alt_entry *alt;
+       u32 tmp;
+
+       for (alt = begin; alt < end; alt++) {
+               if (alt->vendor_id != 0)
+                       continue;
+               if (alt->errata_id >= CPUFEATURE_NUMBER) {
+                       WARN(1, "This feature id:%d is not in kernel cpufeature list",
+                               alt->errata_id);
+                       continue;
+               }
+
+               tmp = (1U << alt->errata_id);
+               if (cpu_req_feature & tmp) {
+                       patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
+                       cpu_apply_feature |= tmp;
+               }
+       }
+}
+#endif
index 0241592..1aa5403 100644 (file)
@@ -65,7 +65,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
 
        if (md->attribute & EFI_MEMORY_RO) {
                val = pte_val(pte) & ~_PAGE_WRITE;
-               val = pte_val(pte) | _PAGE_READ;
+               val |= _PAGE_READ;
                pte = __pte(val);
        }
        if (md->attribute & EFI_MEMORY_XP) {
diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
new file mode 100644 (file)
index 0000000..9cb8509
--- /dev/null
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Load ELF vmlinux file for the kexec_file_load syscall.
+ *
+ * Copyright (C) 2021 Huawei Technologies Co, Ltd.
+ *
+ * Author: Liao Chang (liaochang1@huawei.com)
+ *
+ * Based on kexec-tools' kexec-elf-riscv.c, heavily modified
+ * for kernel.
+ */
+
+#define pr_fmt(fmt)    "kexec_image: " fmt
+
+#include <linux/elf.h>
+#include <linux/kexec.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/libfdt.h>
+#include <linux/types.h>
+#include <linux/memblock.h>
+#include <asm/setup.h>
+
+static int riscv_kexec_elf_load(struct kimage *image, struct elfhdr *ehdr,
+                               struct kexec_elf_info *elf_info, unsigned long old_pbase,
+                               unsigned long new_pbase)
+{
+       int i;
+       int ret = 0;
+       size_t size;
+       struct kexec_buf kbuf;
+       const struct elf_phdr *phdr;
+
+       kbuf.image = image;
+
+       for (i = 0; i < ehdr->e_phnum; i++) {
+               phdr = &elf_info->proghdrs[i];
+               if (phdr->p_type != PT_LOAD)
+                       continue;
+
+               size = phdr->p_filesz;
+               if (size > phdr->p_memsz)
+                       size = phdr->p_memsz;
+
+               kbuf.buffer = (void *) elf_info->buffer + phdr->p_offset;
+               kbuf.bufsz = size;
+               kbuf.buf_align = phdr->p_align;
+               kbuf.mem = phdr->p_paddr - old_pbase + new_pbase;
+               kbuf.memsz = phdr->p_memsz;
+               kbuf.top_down = false;
+               ret = kexec_add_buffer(&kbuf);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+/*
+ * Go through the available physical memory regions and find one that can
+ * hold an image of the specified size.
+ */
+static int elf_find_pbase(struct kimage *image, unsigned long kernel_len,
+                         struct elfhdr *ehdr, struct kexec_elf_info *elf_info,
+                         unsigned long *old_pbase, unsigned long *new_pbase)
+{
+       int i;
+       int ret;
+       struct kexec_buf kbuf;
+       const struct elf_phdr *phdr;
+       unsigned long lowest_paddr = ULONG_MAX;
+       unsigned long lowest_vaddr = ULONG_MAX;
+
+       for (i = 0; i < ehdr->e_phnum; i++) {
+               phdr = &elf_info->proghdrs[i];
+               if (phdr->p_type != PT_LOAD)
+                       continue;
+
+               if (lowest_paddr > phdr->p_paddr)
+                       lowest_paddr = phdr->p_paddr;
+
+               if (lowest_vaddr > phdr->p_vaddr)
+                       lowest_vaddr = phdr->p_vaddr;
+       }
+
+       kbuf.image = image;
+       kbuf.buf_min = lowest_paddr;
+       kbuf.buf_max = ULONG_MAX;
+       kbuf.buf_align = PAGE_SIZE;
+       kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+       kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE);
+       kbuf.top_down = false;
+       ret = arch_kexec_locate_mem_hole(&kbuf);
+       if (!ret) {
+               *old_pbase = lowest_paddr;
+               *new_pbase = kbuf.mem;
+               image->start = ehdr->e_entry - lowest_vaddr + kbuf.mem;
+       }
+       return ret;
+}
+
+static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
+{
+       unsigned int *nr_ranges = arg;
+
+       (*nr_ranges)++;
+       return 0;
+}
+
+static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
+{
+       struct crash_mem *cmem = arg;
+
+       cmem->ranges[cmem->nr_ranges].start = res->start;
+       cmem->ranges[cmem->nr_ranges].end = res->end;
+       cmem->nr_ranges++;
+
+       return 0;
+}
+
+static int prepare_elf_headers(void **addr, unsigned long *sz)
+{
+       struct crash_mem *cmem;
+       unsigned int nr_ranges;
+       int ret;
+
+       nr_ranges = 1; /* For exclusion of crashkernel region */
+       walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);
+
+       cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
+       if (!cmem)
+               return -ENOMEM;
+
+       cmem->max_nr_ranges = nr_ranges;
+       cmem->nr_ranges = 0;
+       ret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback);
+       if (ret)
+               goto out;
+
+       /* Exclude crashkernel region */
+       ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
+       if (!ret)
+               ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
+
+out:
+       kfree(cmem);
+       return ret;
+}
+
+static char *setup_kdump_cmdline(struct kimage *image, char *cmdline,
+                                unsigned long cmdline_len)
+{
+       int elfcorehdr_strlen;
+       char *cmdline_ptr;
+
+       cmdline_ptr = kzalloc(COMMAND_LINE_SIZE, GFP_KERNEL);
+       if (!cmdline_ptr)
+               return NULL;
+
+       elfcorehdr_strlen = sprintf(cmdline_ptr, "elfcorehdr=0x%lx ",
+               image->elf_load_addr);
+
+       if (elfcorehdr_strlen + cmdline_len > COMMAND_LINE_SIZE) {
+               pr_err("Appending elfcorehdr=<addr> exceeds cmdline size\n");
+               kfree(cmdline_ptr);
+               return NULL;
+       }
+
+       memcpy(cmdline_ptr + elfcorehdr_strlen, cmdline, cmdline_len);
+       /* Ensure it's nul terminated */
+       cmdline_ptr[COMMAND_LINE_SIZE - 1] = '\0';
+       return cmdline_ptr;
+}
+
+static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
+                           unsigned long kernel_len, char *initrd,
+                           unsigned long initrd_len, char *cmdline,
+                           unsigned long cmdline_len)
+{
+       int ret;
+       unsigned long old_kernel_pbase = ULONG_MAX;
+       unsigned long new_kernel_pbase = 0UL;
+       unsigned long initrd_pbase = 0UL;
+       unsigned long headers_sz;
+       unsigned long kernel_start;
+       void *fdt, *headers;
+       struct elfhdr ehdr;
+       struct kexec_buf kbuf;
+       struct kexec_elf_info elf_info;
+       char *modified_cmdline = NULL;
+
+       ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info);
+       if (ret)
+               return ERR_PTR(ret);
+
+       ret = elf_find_pbase(image, kernel_len, &ehdr, &elf_info,
+                            &old_kernel_pbase, &new_kernel_pbase);
+       if (ret)
+               goto out;
+       kernel_start = image->start;
+       pr_notice("The entry point of kernel at 0x%lx\n", image->start);
+
+       /* Add the kernel binary to the image */
+       ret = riscv_kexec_elf_load(image, &ehdr, &elf_info,
+                                  old_kernel_pbase, new_kernel_pbase);
+       if (ret)
+               goto out;
+
+       kbuf.image = image;
+       kbuf.buf_min = new_kernel_pbase + kernel_len;
+       kbuf.buf_max = ULONG_MAX;
+
+       /* Add elfcorehdr */
+       if (image->type == KEXEC_TYPE_CRASH) {
+               ret = prepare_elf_headers(&headers, &headers_sz);
+               if (ret) {
+                       pr_err("Preparing elf core header failed\n");
+                       goto out;
+               }
+
+               kbuf.buffer = headers;
+               kbuf.bufsz = headers_sz;
+               kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+               kbuf.memsz = headers_sz;
+               kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
+               kbuf.top_down = true;
+
+               ret = kexec_add_buffer(&kbuf);
+               if (ret) {
+                       vfree(headers);
+                       goto out;
+               }
+               image->elf_headers = headers;
+               image->elf_load_addr = kbuf.mem;
+               image->elf_headers_sz = headers_sz;
+
+               pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+                        image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
+
+               /* Set up the cmdline for the kdump kernel case */
+               modified_cmdline = setup_kdump_cmdline(image, cmdline,
+                                                      cmdline_len);
+               if (!modified_cmdline) {
+                       pr_err("Setting up cmdline for kdump kernel failed\n");
+                       ret = -EINVAL;
+                       goto out;
+               }
+               cmdline = modified_cmdline;
+       }
+
+#ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY
+       /* Add purgatory to the image */
+       kbuf.top_down = true;
+       kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+       ret = kexec_load_purgatory(image, &kbuf);
+       if (ret) {
+               pr_err("Error loading purgatory ret=%d\n", ret);
+               goto out;
+       }
+       ret = kexec_purgatory_get_set_symbol(image, "riscv_kernel_entry",
+                                            &kernel_start,
+                                            sizeof(kernel_start), 0);
+       if (ret)
+               pr_err("Error update purgatory ret=%d\n", ret);
+#endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */
+
+       /* Add the initrd to the image */
+       if (initrd != NULL) {
+               kbuf.buffer = initrd;
+               kbuf.bufsz = kbuf.memsz = initrd_len;
+               kbuf.buf_align = PAGE_SIZE;
+               kbuf.top_down = false;
+               kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+               ret = kexec_add_buffer(&kbuf);
+               if (ret)
+                       goto out;
+               initrd_pbase = kbuf.mem;
+               pr_notice("Loaded initrd at 0x%lx\n", initrd_pbase);
+       }
+
+       /* Add the DTB to the image */
+       fdt = of_kexec_alloc_and_setup_fdt(image, initrd_pbase,
+                                          initrd_len, cmdline, 0);
+       if (!fdt) {
+               pr_err("Error setting up the new device tree.\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       fdt_pack(fdt);
+       kbuf.buffer = fdt;
+       kbuf.bufsz = kbuf.memsz = fdt_totalsize(fdt);
+       kbuf.buf_align = PAGE_SIZE;
+       kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+       kbuf.top_down = true;
+       ret = kexec_add_buffer(&kbuf);
+       if (ret) {
+               pr_err("Error add DTB kbuf ret=%d\n", ret);
+               goto out_free_fdt;
+       }
+       pr_notice("Loaded device tree at 0x%lx\n", kbuf.mem);
+       goto out;
+
+out_free_fdt:
+       kvfree(fdt);
+out:
+       kfree(modified_cmdline);
+       kexec_free_elf_info(&elf_info);
+       return ret ? ERR_PTR(ret) : NULL;
+}
+
+#define RV_X(x, s, n)  (((x) >> (s)) & ((1 << (n)) - 1))
+#define RISCV_IMM_BITS 12
+#define RISCV_IMM_REACH (1LL << RISCV_IMM_BITS)
+#define RISCV_CONST_HIGH_PART(x) \
+       (((x) + (RISCV_IMM_REACH >> 1)) & ~(RISCV_IMM_REACH - 1))
+#define RISCV_CONST_LOW_PART(x) ((x) - RISCV_CONST_HIGH_PART(x))
+
+#define ENCODE_ITYPE_IMM(x) \
+       (RV_X(x, 0, 12) << 20)
+#define ENCODE_BTYPE_IMM(x) \
+       ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | \
+       (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31))
+#define ENCODE_UTYPE_IMM(x) \
+       (RV_X(x, 12, 20) << 12)
+#define ENCODE_JTYPE_IMM(x) \
+       ((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | \
+       (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31))
+#define ENCODE_CBTYPE_IMM(x) \
+       ((RV_X(x, 1, 2) << 3) | (RV_X(x, 3, 2) << 10) | (RV_X(x, 5, 1) << 2) | \
+       (RV_X(x, 6, 2) << 5) | (RV_X(x, 8, 1) << 12))
+#define ENCODE_CJTYPE_IMM(x) \
+       ((RV_X(x, 1, 3) << 3) | (RV_X(x, 4, 1) << 11) | (RV_X(x, 5, 1) << 2) | \
+       (RV_X(x, 6, 1) << 7) | (RV_X(x, 7, 1) << 6) | (RV_X(x, 8, 2) << 9) | \
+       (RV_X(x, 10, 1) << 8) | (RV_X(x, 11, 1) << 12))
+#define ENCODE_UJTYPE_IMM(x) \
+       (ENCODE_UTYPE_IMM(RISCV_CONST_HIGH_PART(x)) | \
+       (ENCODE_ITYPE_IMM(RISCV_CONST_LOW_PART(x)) << 32))
+#define ENCODE_UITYPE_IMM(x) \
+       (ENCODE_UTYPE_IMM(x) | (ENCODE_ITYPE_IMM(x) << 32))
+
+#define CLEAN_IMM(type, x) \
+       ((~ENCODE_##type##_IMM((uint64_t)(-1))) & (x))
+
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+                                    Elf_Shdr *section,
+                                    const Elf_Shdr *relsec,
+                                    const Elf_Shdr *symtab)
+{
+       const char *strtab, *name, *shstrtab;
+       const Elf_Shdr *sechdrs;
+       Elf_Rela *relas;
+       int i, r_type;
+
+       /* String & section header string table */
+       sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
+       strtab = (char *)pi->ehdr + sechdrs[symtab->sh_link].sh_offset;
+       shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;
+
+       relas = (void *)pi->ehdr + relsec->sh_offset;
+
+       for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
+               const Elf_Sym *sym;     /* symbol to relocate */
+               unsigned long addr;     /* final location after relocation */
+               unsigned long val;      /* relocated symbol value */
+               unsigned long sec_base; /* relocated section base */
+               void *loc;              /* tmp location to modify */
+
+               sym = (void *)pi->ehdr + symtab->sh_offset;
+               sym += ELF64_R_SYM(relas[i].r_info);
+
+               if (sym->st_name)
+                       name = strtab + sym->st_name;
+               else
+                       name = shstrtab + sechdrs[sym->st_shndx].sh_name;
+
+               loc = pi->purgatory_buf;
+               loc += section->sh_offset;
+               loc += relas[i].r_offset;
+
+               if (sym->st_shndx == SHN_ABS)
+                       sec_base = 0;
+               else if (sym->st_shndx >= pi->ehdr->e_shnum) {
+                       pr_err("Invalid section %d for symbol %s\n",
+                              sym->st_shndx, name);
+                       return -ENOEXEC;
+               } else
+                       sec_base = pi->sechdrs[sym->st_shndx].sh_addr;
+
+               val = sym->st_value;
+               val += sec_base;
+               val += relas[i].r_addend;
+
+               addr = section->sh_addr + relas[i].r_offset;
+
+               r_type = ELF64_R_TYPE(relas[i].r_info);
+
+               switch (r_type) {
+               case R_RISCV_BRANCH:
+                       *(u32 *)loc = CLEAN_IMM(BTYPE, *(u32 *)loc) |
+                                ENCODE_BTYPE_IMM(val - addr);
+                       break;
+               case R_RISCV_JAL:
+                       *(u32 *)loc = CLEAN_IMM(JTYPE, *(u32 *)loc) |
+                                ENCODE_JTYPE_IMM(val - addr);
+                       break;
+               /*
+                * There is no R_RISCV_PCREL_LO12_S, and an R_RISCV_PCREL_LO12_I
+                * sym is expected to sit next to its R_RISCV_PCREL_HI20 in the
+                * purgatory relsec, so handle it like an R_RISCV_CALL sym
+                * instead of searching the whole relsec.
+                */
+               case R_RISCV_PCREL_HI20:
+               case R_RISCV_CALL:
+                       *(u64 *)loc = CLEAN_IMM(UITYPE, *(u64 *)loc) |
+                                ENCODE_UJTYPE_IMM(val - addr);
+                       break;
+               case R_RISCV_RVC_BRANCH:
+                       *(u32 *)loc = CLEAN_IMM(CBTYPE, *(u32 *)loc) |
+                                ENCODE_CBTYPE_IMM(val - addr);
+                       break;
+               case R_RISCV_RVC_JUMP:
+                       *(u32 *)loc = CLEAN_IMM(CJTYPE, *(u32 *)loc) |
+                                ENCODE_CJTYPE_IMM(val - addr);
+                       break;
+               case R_RISCV_ADD32:
+                       *(u32 *)loc += val;
+                       break;
+               case R_RISCV_SUB32:
+                       *(u32 *)loc -= val;
+                       break;
+               /* Already applied together with its R_RISCV_PCREL_HI20 sym */
+               case R_RISCV_PCREL_LO12_I:
+               case R_RISCV_ALIGN:
+               case R_RISCV_RELAX:
+                       break;
+               default:
+                       pr_err("Unknown rela relocation: %d\n", r_type);
+                       return -ENOEXEC;
+               }
+       }
+       return 0;
+}
+
+const struct kexec_file_ops elf_kexec_ops = {
+       .probe = kexec_elf_probe,
+       .load  = elf_kexec_load,
+};
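
The ENCODE_*_IMM() helpers above scatter an immediate into the bit positions the RISC-V base and compressed ISAs use for each instruction format. As a quick sanity check (sketch only, with RV_X() and ENCODE_BTYPE_IMM() copied verbatim from the file above), the snippet below patches a branch offset into a beq instruction:

/* Sketch only: the two macros are copied verbatim from elf_kexec.c above. */
#include <stdint.h>
#include <stdio.h>

#define RV_X(x, s, n)  (((x) >> (s)) & ((1 << (n)) - 1))

#define ENCODE_BTYPE_IMM(x) \
        ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | \
        (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31))

int main(void)
{
        uint32_t beq_x0_x0 = 0x00000063;        /* beq x0, x0, 0 */
        int32_t  offset    = 0x20;              /* branch 32 bytes forward */

        /* imm[4:1] -> bits 11:8, imm[10:5] -> 30:25, imm[11] -> 7, imm[12] -> 31 */
        uint32_t insn = beq_x0_x0 | ENCODE_BTYPE_IMM(offset);

        printf("0x%08x\n", insn);               /* 0x02000063: beq x0, x0, 0x20 */
        return 0;
}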
index c8b9ce2..2e5b88c 100644 (file)
@@ -207,13 +207,27 @@ check_syscall_nr:
         * Syscall number held in a7.
         * If syscall number is above allowed value, redirect to ni_syscall.
         */
-       bgeu a7, t0, 1f
+       bgeu a7, t0, 3f
+#ifdef CONFIG_COMPAT
+       REG_L s0, PT_STATUS(sp)
+       srli s0, s0, SR_UXL_SHIFT
+       andi s0, s0, (SR_UXL >> SR_UXL_SHIFT)
+       li t0, (SR_UXL_32 >> SR_UXL_SHIFT)
+       sub t0, s0, t0
+       bnez t0, 1f
+
+       /* Call compat_syscall */
+       la s0, compat_sys_call_table
+       j 2f
+1:
+#endif
        /* Call syscall */
        la s0, sys_call_table
+2:
        slli t0, a7, RISCV_LGPTR
        add s0, s0, t0
        REG_L s0, 0(s0)
-1:
+3:
        jalr s0
 
 ret_from_syscall:
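
In C terms, the new entry.S hunk selects the syscall table from the UXL field that was saved in sstatus at trap time. A rough standalone equivalent (sketch only; the SR_UXL* values are assumed to match asm/csr.h, where UXL sits in bits 33:32 and 1 denotes a 32-bit user context):

/*
 * Sketch of the table selection done in entry.S above. SR_UXL* constants
 * are assumed to match asm/csr.h (UXL field in status bits 33:32).
 */
#include <stdint.h>
#include <stdio.h>

#define SR_UXL        0x300000000ULL
#define SR_UXL_32     0x100000000ULL
#define SR_UXL_SHIFT  32

typedef long (*syscall_fn_t)(void);

static long sys_hello(void)        { return 64; }   /* stand-in native handler */
static long compat_sys_hello(void) { return 32; }   /* stand-in compat handler */

static syscall_fn_t sys_call_table[]        = { sys_hello };
static syscall_fn_t compat_sys_call_table[] = { compat_sys_hello };

static syscall_fn_t *pick_table(uint64_t saved_status)
{
        /* Same test as the assembly: isolate UXL and compare against UXL_32 */
        if (((saved_status & SR_UXL) >> SR_UXL_SHIFT) == (SR_UXL_32 >> SR_UXL_SHIFT))
                return compat_sys_call_table;
        return sys_call_table;
}

int main(void)
{
        printf("%ld\n", pick_table(SR_UXL_32)[0]());        /* 32: compat table */
        printf("%ld\n", pick_table(0x200000000ULL)[0]());   /* 64: native table (UXL_64) */
        return 0;
}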
index 893b8bb..b865046 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/cpu_ops_sbi.h>
 #include <asm/hwcap.h>
 #include <asm/image.h>
+#include <asm/xip_fixup.h>
 #include "efi-header.S"
 
 __HEAD
@@ -297,6 +298,7 @@ clear_bss_done:
        REG_S a0, (a2)
 
        /* Initialize page tables and relocate to virtual addresses */
+       la tp, init_task
        la sp, init_thread_union + THREAD_SIZE
        XIP_FIXUP_OFFSET sp
 #ifdef CONFIG_BUILTIN_DTB
index cbef0fc..df8e245 100644 (file)
@@ -65,7 +65,9 @@ machine_kexec_prepare(struct kimage *image)
                if (image->segment[i].memsz <= sizeof(fdt))
                        continue;
 
-               if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt)))
+               if (image->file_mode)
+                       memcpy(&fdt, image->segment[i].buf, sizeof(fdt));
+               else if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt)))
                        continue;
 
                if (fdt_check_header(&fdt))
diff --git a/arch/riscv/kernel/machine_kexec_file.c b/arch/riscv/kernel/machine_kexec_file.c
new file mode 100644 (file)
index 0000000..b0bf8c1
--- /dev/null
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * kexec_file for riscv, use vmlinux as the dump-capture kernel image.
+ *
+ * Copyright (C) 2021 Huawei Technologies Co, Ltd.
+ *
+ * Author: Liao Chang (liaochang1@huawei.com)
+ */
+#include <linux/kexec.h>
+
+const struct kexec_file_ops * const kexec_file_loaders[] = {
+       &elf_kexec_ops,
+       NULL
+};
index c29cef9..91fe16b 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/vmalloc.h>
 #include <linux/sizes.h>
 #include <linux/pgtable.h>
+#include <asm/alternative.h>
 #include <asm/sections.h>
 
 /*
@@ -427,3 +428,31 @@ void *module_alloc(unsigned long size)
                                    __builtin_return_address(0));
 }
 #endif
+
+static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+                                   const Elf_Shdr *sechdrs,
+                                   const char *name)
+{
+       const Elf_Shdr *s, *se;
+       const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+       for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+               if (strcmp(name, secstrs + s->sh_name) == 0)
+                       return s;
+       }
+
+       return NULL;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+                   const Elf_Shdr *sechdrs,
+                   struct module *me)
+{
+       const Elf_Shdr *s;
+
+       s = find_section(hdr, sechdrs, ".alternative");
+       if (s)
+               apply_module_alternatives((void *)s->sh_addr, s->sh_size);
+
+       return 0;
+}
index 504b496..ceb9eba 100644 (file)
@@ -84,6 +84,34 @@ void show_regs(struct pt_regs *regs)
                dump_backtrace(regs, NULL, KERN_DEFAULT);
 }
 
+#ifdef CONFIG_COMPAT
+static bool compat_mode_supported __read_mostly;
+
+bool compat_elf_check_arch(Elf32_Ehdr *hdr)
+{
+       return compat_mode_supported &&
+              hdr->e_machine == EM_RISCV &&
+              hdr->e_ident[EI_CLASS] == ELFCLASS32;
+}
+
+static int __init compat_mode_detect(void)
+{
+       unsigned long tmp = csr_read(CSR_STATUS);
+
+       csr_write(CSR_STATUS, (tmp & ~SR_UXL) | SR_UXL_32);
+       compat_mode_supported =
+                       (csr_read(CSR_STATUS) & SR_UXL) == SR_UXL_32;
+
+       csr_write(CSR_STATUS, tmp);
+
+       pr_info("riscv: ELF compat mode %s",
+                       compat_mode_supported ? "supported" : "failed");
+
+       return 0;
+}
+early_initcall(compat_mode_detect);
+#endif
+
 void start_thread(struct pt_regs *regs, unsigned long pc,
        unsigned long sp)
 {
@@ -98,6 +126,15 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
        }
        regs->epc = pc;
        regs->sp = sp;
+
+#ifdef CONFIG_64BIT
+       regs->status &= ~SR_UXL;
+
+       if (is_compat_task())
+               regs->status |= SR_UXL_32;
+       else
+               regs->status |= SR_UXL_64;
+#endif
 }
 
 void flush_thread(void)
@@ -120,13 +157,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
        return 0;
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
-               struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long usp = args->stack;
+       unsigned long tls = args->tls;
        struct pt_regs *childregs = task_pt_regs(p);
 
        /* p->thread holds context to be restored by __switch_to() */
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                /* Kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->gp = gp_in_global;
@@ -134,8 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
                childregs->status = SR_PP | SR_PIE;
 
                p->thread.ra = (unsigned long)ret_from_kernel_thread;
-               p->thread.s[0] = usp; /* fn */
-               p->thread.s[1] = arg;
+               p->thread.s[0] = (unsigned long)args->fn;
+               p->thread.s[1] = (unsigned long)args->fn_arg;
        } else {
                *childregs = *(current_pt_regs());
                if (usp) /* User fork */
index 793c7da..2ae8280 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/thread_info.h>
 #include <asm/switch_to.h>
 #include <linux/audit.h>
+#include <linux/compat.h>
 #include <linux/ptrace.h>
 #include <linux/elf.h>
 #include <linux/regset.h>
@@ -110,11 +111,6 @@ static const struct user_regset_view riscv_user_native_view = {
        .n = ARRAY_SIZE(riscv_user_regset),
 };
 
-const struct user_regset_view *task_user_regset_view(struct task_struct *task)
-{
-       return &riscv_user_native_view;
-}
-
 struct pt_regs_offset {
        const char *name;
        int offset;
@@ -272,3 +268,84 @@ __visible void do_syscall_trace_exit(struct pt_regs *regs)
                trace_sys_exit(regs, regs_return_value(regs));
 #endif
 }
+
+#ifdef CONFIG_COMPAT
+static int compat_riscv_gpr_get(struct task_struct *target,
+                               const struct user_regset *regset,
+                               struct membuf to)
+{
+       struct compat_user_regs_struct cregs;
+
+       regs_to_cregs(&cregs, task_pt_regs(target));
+
+       return membuf_write(&to, &cregs,
+                           sizeof(struct compat_user_regs_struct));
+}
+
+static int compat_riscv_gpr_set(struct task_struct *target,
+                               const struct user_regset *regset,
+                               unsigned int pos, unsigned int count,
+                               const void *kbuf, const void __user *ubuf)
+{
+       int ret;
+       struct compat_user_regs_struct cregs;
+
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &cregs, 0, -1);
+
+       cregs_to_regs(&cregs, task_pt_regs(target));
+
+       return ret;
+}
+
+static const struct user_regset compat_riscv_user_regset[] = {
+       [REGSET_X] = {
+               .core_note_type = NT_PRSTATUS,
+               .n = ELF_NGREG,
+               .size = sizeof(compat_elf_greg_t),
+               .align = sizeof(compat_elf_greg_t),
+               .regset_get = compat_riscv_gpr_get,
+               .set = compat_riscv_gpr_set,
+       },
+#ifdef CONFIG_FPU
+       [REGSET_F] = {
+               .core_note_type = NT_PRFPREG,
+               .n = ELF_NFPREG,
+               .size = sizeof(elf_fpreg_t),
+               .align = sizeof(elf_fpreg_t),
+               .regset_get = riscv_fpr_get,
+               .set = riscv_fpr_set,
+       },
+#endif
+};
+
+static const struct user_regset_view compat_riscv_user_native_view = {
+       .name = "riscv",
+       .e_machine = EM_RISCV,
+       .regsets = compat_riscv_user_regset,
+       .n = ARRAY_SIZE(compat_riscv_user_regset),
+};
+
+long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+                       compat_ulong_t caddr, compat_ulong_t cdata)
+{
+       long ret = -EIO;
+
+       switch (request) {
+       default:
+               ret = compat_ptrace_request(child, request, caddr, cdata);
+               break;
+       }
+
+       return ret;
+}
+#endif /* CONFIG_COMPAT */
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+#ifdef CONFIG_COMPAT
+       if (test_tsk_thread_flag(task, TIF_32BIT))
+               return &compat_riscv_user_native_view;
+       else
+#endif
+               return &riscv_user_native_view;
+               return &riscv_user_native_view;
+}
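
Seen from userspace, the regset-view switch above means a tracer reading NT_PRSTATUS from a TIF_32BIT tracee gets the compat (32-bit) register layout. A hedged userspace sketch using the standard PTRACE_GETREGSET interface; the rv32_user_regs layout is an illustrative stand-in for compat_user_regs_struct, not copied from a kernel header.

#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <elf.h>        /* NT_PRSTATUS */

/* Assumed layout: 32 slots of 32 bits, pc first, matching compat_elf_gregset_t. */
struct rv32_user_regs { unsigned int regs[32]; };

static int dump_compat_gprs(pid_t pid)
{
        struct rv32_user_regs gprs;
        struct iovec iov = { .iov_base = &gprs, .iov_len = sizeof(gprs) };

        /* For a 32-bit tracee the kernel serves this from compat_riscv_user_native_view. */
        if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) == -1)
                return -1;

        printf("pc = 0x%x\n", gprs.regs[0]);
        return 0;
}
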
index 9c842c4..9122885 100644
@@ -23,16 +23,12 @@ void machine_restart(char *cmd)
 
 void machine_halt(void)
 {
-       if (pm_power_off != NULL)
-               pm_power_off();
-       else
-               default_power_off();
+       do_kernel_power_off();
+       default_power_off();
 }
 
 void machine_power_off(void)
 {
-       if (pm_power_off != NULL)
-               pm_power_off();
-       else
-               default_power_off();
+       do_kernel_power_off();
+       default_power_off();
 }
index 834eb65..f0f36a4 100644
@@ -21,6 +21,7 @@
 #include <linux/efi.h>
 #include <linux/crash_dump.h>
 
+#include <asm/alternative.h>
 #include <asm/cpu_ops.h>
 #include <asm/early_ioremap.h>
 #include <asm/pgtable.h>
@@ -189,7 +190,7 @@ static void __init init_resources(void)
                res = &mem_res[res_idx--];
 
                res->name = "Reserved";
-               res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+               res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE;
                res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
 
@@ -214,7 +215,7 @@ static void __init init_resources(void)
 
                if (unlikely(memblock_is_nomap(region))) {
                        res->name = "Reserved";
-                       res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+                       res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE;
                } else {
                        res->name = "System RAM";
                        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
@@ -295,6 +296,7 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
        riscv_fill_hwcap();
+       apply_boot_alternatives();
 }
 
 static int __init topology_init(void)
index 9f4e59f..38b05ca 100644
@@ -6,6 +6,7 @@
  * Copyright (C) 2012 Regents of the University of California
  */
 
+#include <linux/compat.h>
 #include <linux/signal.h>
 #include <linux/uaccess.h>
 #include <linux/syscalls.h>
@@ -14,6 +15,7 @@
 
 #include <asm/ucontext.h>
 #include <asm/vdso.h>
+#include <asm/signal32.h>
 #include <asm/switch_to.h>
 #include <asm/csr.h>
 
@@ -261,7 +263,10 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
        rseq_signal_deliver(ksig, regs);
 
        /* Set up the stack frame */
-       ret = setup_rt_frame(ksig, oldset, regs);
+       if (is_compat_task())
+               ret = compat_setup_rt_frame(ksig, oldset, regs);
+       else
+               ret = setup_rt_frame(ksig, oldset, regs);
 
        signal_setup_done(ret, ksig, 0);
 }
index 622f226..f1e4948 100644
@@ -32,7 +32,6 @@
 #include <asm/sections.h>
 #include <asm/sbi.h>
 #include <asm/smp.h>
-#include <asm/alternative.h>
 
 #include "head.h"
 
@@ -41,9 +40,6 @@ static DECLARE_COMPLETION(cpu_running);
 void __init smp_prepare_boot_cpu(void)
 {
        init_cpu_topology();
-#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE
-       apply_boot_alternatives();
-#endif
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
index 4b07b80..aafcca5 100644
@@ -8,6 +8,7 @@
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/csr.h>
+#include <asm/xip_fixup.h>
 
        .text
        .altmacro
index 12f8a7f..9c0194f 100644
@@ -33,7 +33,9 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
 {
        return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
 }
-#else
+#endif
+
+#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
 SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
        unsigned long, prot, unsigned long, flags,
        unsigned long, fd, off_t, offset)
@@ -44,7 +46,7 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
         */
        return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
 }
-#endif /* !CONFIG_64BIT */
+#endif
 
 /*
  * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
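
In the mmap2 wrapper above, the trailing 12 is the shift applied to the offset, so mmap2 takes its file offset in 4096-byte units rather than bytes. A hedged userspace sketch for a 32-bit or compat caller (the only configurations where __NR_mmap2 exists, matching the new #if guard); ordinary code should just call mmap(), which hides this conversion.

#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static void *map_at_byte_offset(int fd, size_t len, uint64_t byte_off)
{
        /* byte_off is assumed to be a multiple of 4096 */
        return (void *)syscall(__NR_mmap2, NULL, len, PROT_READ,
                               MAP_PRIVATE, fd, (unsigned long)(byte_off >> 12));
}
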
index fe92e11..b404265 100644
@@ -86,7 +86,7 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
        }
 }
 
-#if defined (CONFIG_XIP_KERNEL) && defined (CONFIG_RISCV_ERRATA_ALTERNATIVE)
+#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
 #define __trap_section         __section(".xip.traps")
 #else
 #define __trap_section
index a9436a6..69b05b6 100644
@@ -23,6 +23,9 @@ struct vdso_data {
 #endif
 
 extern char vdso_start[], vdso_end[];
+#ifdef CONFIG_COMPAT
+extern char compat_vdso_start[], compat_vdso_end[];
+#endif
 
 enum vvar_pages {
        VVAR_DATA_PAGE_OFFSET,
@@ -30,6 +33,11 @@ enum vvar_pages {
        VVAR_NR_PAGES,
 };
 
+enum rv_vdso_map {
+       RV_VDSO_MAP_VVAR,
+       RV_VDSO_MAP_VDSO,
+};
+
 #define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)
 
 /*
@@ -52,12 +60,6 @@ struct __vdso_info {
        struct vm_special_mapping *cm;
 };
 
-static struct __vdso_info vdso_info __ro_after_init = {
-       .name = "vdso",
-       .vdso_code_start = vdso_start,
-       .vdso_code_end = vdso_end,
-};
-
 static int vdso_mremap(const struct vm_special_mapping *sm,
                       struct vm_area_struct *new_vma)
 {
@@ -66,37 +68,33 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
        return 0;
 }
 
-static int __init __vdso_init(void)
+static void __init __vdso_init(struct __vdso_info *vdso_info)
 {
        unsigned int i;
        struct page **vdso_pagelist;
        unsigned long pfn;
 
-       if (memcmp(vdso_info.vdso_code_start, "\177ELF", 4)) {
-               pr_err("vDSO is not a valid ELF object!\n");
-               return -EINVAL;
-       }
+       if (memcmp(vdso_info->vdso_code_start, "\177ELF", 4))
+               panic("vDSO is not a valid ELF object!\n");
 
-       vdso_info.vdso_pages = (
-               vdso_info.vdso_code_end -
-               vdso_info.vdso_code_start) >>
+       vdso_info->vdso_pages = (
+               vdso_info->vdso_code_end -
+               vdso_info->vdso_code_start) >>
                PAGE_SHIFT;
 
-       vdso_pagelist = kcalloc(vdso_info.vdso_pages,
+       vdso_pagelist = kcalloc(vdso_info->vdso_pages,
                                sizeof(struct page *),
                                GFP_KERNEL);
        if (vdso_pagelist == NULL)
-               return -ENOMEM;
+               panic("vDSO kcalloc failed!\n");
 
        /* Grab the vDSO code pages. */
-       pfn = sym_to_pfn(vdso_info.vdso_code_start);
+       pfn = sym_to_pfn(vdso_info->vdso_code_start);
 
-       for (i = 0; i < vdso_info.vdso_pages; i++)
+       for (i = 0; i < vdso_info->vdso_pages; i++)
                vdso_pagelist[i] = pfn_to_page(pfn + i);
 
-       vdso_info.cm->pages = vdso_pagelist;
-
-       return 0;
+       vdso_info->cm->pages = vdso_pagelist;
 }
 
 #ifdef CONFIG_TIME_NS
@@ -116,13 +114,14 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 {
        struct mm_struct *mm = task->mm;
        struct vm_area_struct *vma;
+       struct __vdso_info *vdso_info = mm->context.vdso_info;
 
        mmap_read_lock(mm);
 
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                unsigned long size = vma->vm_end - vma->vm_start;
 
-               if (vma_is_special_mapping(vma, vdso_info.dm))
+               if (vma_is_special_mapping(vma, vdso_info->dm))
                        zap_page_range(vma, vma->vm_start, size);
        }
 
@@ -187,12 +186,27 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
        return vmf_insert_pfn(vma, vmf->address, pfn);
 }
 
-enum rv_vdso_map {
-       RV_VDSO_MAP_VVAR,
-       RV_VDSO_MAP_VDSO,
+static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
+       [RV_VDSO_MAP_VVAR] = {
+               .name   = "[vvar]",
+               .fault = vvar_fault,
+       },
+       [RV_VDSO_MAP_VDSO] = {
+               .name   = "[vdso]",
+               .mremap = vdso_mremap,
+       },
 };
 
-static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
+static struct __vdso_info vdso_info __ro_after_init = {
+       .name = "vdso",
+       .vdso_code_start = vdso_start,
+       .vdso_code_end = vdso_end,
+       .dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
+       .cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
+};
+
+#ifdef CONFIG_COMPAT
+static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = {
        [RV_VDSO_MAP_VVAR] = {
                .name   = "[vvar]",
                .fault = vvar_fault,
@@ -203,25 +217,37 @@ static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
        },
 };
 
+static struct __vdso_info compat_vdso_info __ro_after_init = {
+       .name = "compat_vdso",
+       .vdso_code_start = compat_vdso_start,
+       .vdso_code_end = compat_vdso_end,
+       .dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR],
+       .cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO],
+};
+#endif
+
 static int __init vdso_init(void)
 {
-       vdso_info.dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR];
-       vdso_info.cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO];
+       __vdso_init(&vdso_info);
+#ifdef CONFIG_COMPAT
+       __vdso_init(&compat_vdso_info);
+#endif
 
-       return __vdso_init();
+       return 0;
 }
 arch_initcall(vdso_init);
 
 static int __setup_additional_pages(struct mm_struct *mm,
                                    struct linux_binprm *bprm,
-                                   int uses_interp)
+                                   int uses_interp,
+                                   struct __vdso_info *vdso_info)
 {
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
        void *ret;
 
        BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
 
-       vdso_text_len = vdso_info.vdso_pages << PAGE_SHIFT;
+       vdso_text_len = vdso_info->vdso_pages << PAGE_SHIFT;
        /* Be sure to map the data page */
        vdso_mapping_len = vdso_text_len + VVAR_SIZE;
 
@@ -232,16 +258,18 @@ static int __setup_additional_pages(struct mm_struct *mm,
        }
 
        ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
-               (VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info.dm);
+               (VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info->dm);
        if (IS_ERR(ret))
                goto up_fail;
 
        vdso_base += VVAR_SIZE;
        mm->context.vdso = (void *)vdso_base;
+       mm->context.vdso_info = (void *)vdso_info;
+
        ret =
           _install_special_mapping(mm, vdso_base, vdso_text_len,
                (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
-               vdso_info.cm);
+               vdso_info->cm);
 
        if (IS_ERR(ret))
                goto up_fail;
@@ -253,6 +281,24 @@ up_fail:
        return PTR_ERR(ret);
 }
 
+#ifdef CONFIG_COMPAT
+int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+                                      int uses_interp)
+{
+       struct mm_struct *mm = current->mm;
+       int ret;
+
+       if (mmap_write_lock_killable(mm))
+               return -EINTR;
+
+       ret = __setup_additional_pages(mm, bprm, uses_interp,
+                                                       &compat_vdso_info);
+       mmap_write_unlock(mm);
+
+       return ret;
+}
+#endif
+
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
        struct mm_struct *mm = current->mm;
@@ -261,7 +307,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
        if (mmap_write_lock_killable(mm))
                return -EINTR;
 
-       ret = __setup_additional_pages(mm, bprm, uses_interp);
+       ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso_info);
        mmap_write_unlock(mm);
 
        return ret;
index df22224..83f1c89 100644
@@ -7,12 +7,16 @@
 #include <linux/linkage.h>
 #include <asm/page.h>
 
+#ifndef __VDSO_PATH
+#define __VDSO_PATH "arch/riscv/kernel/vdso/vdso.so"
+#endif
+
        __PAGE_ALIGNED_DATA
 
        .globl vdso_start, vdso_end
        .balign PAGE_SIZE
 vdso_start:
-       .incbin "arch/riscv/kernel/vdso/vdso.so"
+       .incbin __VDSO_PATH
        .balign PAGE_SIZE
 vdso_end:
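
Making the .incbin path overridable above is presumably what allows a compat vDSO wrapper to reuse this file with different symbol names and a different prebuilt blob. A sketch of such a wrapper, assuming the compat_vdso_start/compat_vdso_end symbols declared in the vdso.h hunk earlier; the directory and file name here are guesses, not taken from this diff.

#define vdso_start      compat_vdso_start
#define vdso_end        compat_vdso_end
#define __VDSO_PATH     "arch/riscv/kernel/compat_vdso/compat_vdso.so"

#include "../vdso/vdso.S"
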
 
index 4e9efbe..40694f0 100644
@@ -102,9 +102,9 @@ static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code
 static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
 {
        pgd_t *pgd, *pgd_k;
-       pud_t *pud, *pud_k;
-       p4d_t *p4d, *p4d_k;
-       pmd_t *pmd, *pmd_k;
+       pud_t *pud_k;
+       p4d_t *p4d_k;
+       pmd_t *pmd_k;
        pte_t *pte_k;
        int index;
        unsigned long pfn;
@@ -132,14 +132,12 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
        }
        set_pgd(pgd, *pgd_k);
 
-       p4d = p4d_offset(pgd, addr);
        p4d_k = p4d_offset(pgd_k, addr);
        if (!p4d_present(*p4d_k)) {
                no_context(regs, addr);
                return;
        }
 
-       pud = pud_offset(p4d, addr);
        pud_k = pud_offset(p4d_k, addr);
        if (!pud_present(*pud_k)) {
                no_context(regs, addr);
@@ -150,13 +148,11 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
         * Since the vmalloc area is global, it is unnecessary
         * to copy individual PTEs
         */
-       pmd = pmd_offset(pud, addr);
        pmd_k = pmd_offset(pud_k, addr);
        if (!pmd_present(*pmd_k)) {
                no_context(regs, addr);
                return;
        }
-       set_pmd(pmd, *pmd_k);
 
        /*
         * Make sure the actual PTE exists as well to
index 180d6a3..d466ec6 100644
@@ -76,38 +76,74 @@ static void __init zone_sizes_init(void)
 }
 
 #if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
+
+#define LOG2_SZ_1K  ilog2(SZ_1K)
+#define LOG2_SZ_1M  ilog2(SZ_1M)
+#define LOG2_SZ_1G  ilog2(SZ_1G)
+#define LOG2_SZ_1T  ilog2(SZ_1T)
+
 static inline void print_mlk(char *name, unsigned long b, unsigned long t)
 {
        pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
-                 (((t) - (b)) >> 10));
+                 (((t) - (b)) >> LOG2_SZ_1K));
 }
 
 static inline void print_mlm(char *name, unsigned long b, unsigned long t)
 {
        pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
-                 (((t) - (b)) >> 20));
+                 (((t) - (b)) >> LOG2_SZ_1M));
+}
+
+static inline void print_mlg(char *name, unsigned long b, unsigned long t)
+{
+       pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld GB)\n", name, b, t,
+                  (((t) - (b)) >> LOG2_SZ_1G));
+}
+
+#ifdef CONFIG_64BIT
+static inline void print_mlt(char *name, unsigned long b, unsigned long t)
+{
+       pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld TB)\n", name, b, t,
+                  (((t) - (b)) >> LOG2_SZ_1T));
+}
+#else
+#define print_mlt(n, b, t) do {} while (0)
+#endif
+
+static inline void print_ml(char *name, unsigned long b, unsigned long t)
+{
+       unsigned long diff = t - b;
+
+       if (IS_ENABLED(CONFIG_64BIT) && (diff >> LOG2_SZ_1T) >= 10)
+               print_mlt(name, b, t);
+       else if ((diff >> LOG2_SZ_1G) >= 10)
+               print_mlg(name, b, t);
+       else if ((diff >> LOG2_SZ_1M) >= 10)
+               print_mlm(name, b, t);
+       else
+               print_mlk(name, b, t);
 }
 
 static void __init print_vm_layout(void)
 {
        pr_notice("Virtual kernel memory layout:\n");
-       print_mlk("fixmap", (unsigned long)FIXADDR_START,
-                 (unsigned long)FIXADDR_TOP);
-       print_mlm("pci io", (unsigned long)PCI_IO_START,
-                 (unsigned long)PCI_IO_END);
-       print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
-                 (unsigned long)VMEMMAP_END);
-       print_mlm("vmalloc", (unsigned long)VMALLOC_START,
-                 (unsigned long)VMALLOC_END);
-       print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
-                 (unsigned long)high_memory);
+       print_ml("fixmap", (unsigned long)FIXADDR_START,
+               (unsigned long)FIXADDR_TOP);
+       print_ml("pci io", (unsigned long)PCI_IO_START,
+               (unsigned long)PCI_IO_END);
+       print_ml("vmemmap", (unsigned long)VMEMMAP_START,
+               (unsigned long)VMEMMAP_END);
+       print_ml("vmalloc", (unsigned long)VMALLOC_START,
+               (unsigned long)VMALLOC_END);
+       print_ml("lowmem", (unsigned long)PAGE_OFFSET,
+               (unsigned long)high_memory);
        if (IS_ENABLED(CONFIG_64BIT)) {
 #ifdef CONFIG_KASAN
-               print_mlm("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END);
+               print_ml("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END);
 #endif
 
-               print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
-                         (unsigned long)ADDRESS_SPACE_END);
+               print_ml("kernel", (unsigned long)KERNEL_LINK_ADDR,
+                        (unsigned long)ADDRESS_SPACE_END);
        }
 }
 #else
@@ -578,9 +614,9 @@ static void __init create_p4d_mapping(p4d_t *p4dp,
        create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
 #define fixmap_pgd_next                ((uintptr_t)fixmap_pte)
 #define early_dtb_pgd_next     ((uintptr_t)early_dtb_pmd)
-#define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot)
-#define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot)
-#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot)
+#define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
+#define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
+#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 void __init create_pgd_mapping(pgd_t *pgdp,
@@ -671,7 +707,7 @@ static __init pgprot_t pgprot_from_va(uintptr_t va)
 }
 #endif /* CONFIG_STRICT_KERNEL_RWX */
 
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
 static void __init disable_pgtable_l5(void)
 {
        pgtable_l5_enabled = false;
@@ -843,7 +879,7 @@ static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
  * MMU is not enabled, the page tables are allocated directly using
  * early_pmd/pud/p4d and the address returned is the physical one.
  */
-void __init pt_ops_set_early(void)
+static void __init pt_ops_set_early(void)
 {
        pt_ops.alloc_pte = alloc_pte_early;
        pt_ops.get_pte_virt = get_pte_virt_early;
@@ -865,7 +901,7 @@ void __init pt_ops_set_early(void)
  * Note that this is called with MMU disabled, hence kernel_mapping_pa_to_va,
  * but it will be used as described above.
  */
-void __init pt_ops_set_fixmap(void)
+static void __init pt_ops_set_fixmap(void)
 {
        pt_ops.alloc_pte = kernel_mapping_pa_to_va((uintptr_t)alloc_pte_fixmap);
        pt_ops.get_pte_virt = kernel_mapping_pa_to_va((uintptr_t)get_pte_virt_fixmap);
@@ -883,7 +919,7 @@ void __init pt_ops_set_fixmap(void)
  * MMU is enabled and page table setup is complete, so from now, we can use
  * generic page allocation functions to setup page table.
  */
-void __init pt_ops_set_late(void)
+static void __init pt_ops_set_late(void)
 {
        pt_ops.alloc_pte = alloc_pte_late;
        pt_ops.get_pte_virt = get_pte_virt_late;
@@ -947,6 +983,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
        BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
 #endif
 
+       apply_early_boot_alternatives();
        pt_ops_set_early();
 
        /* Setup early PGD for fixmap */
diff --git a/arch/riscv/purgatory/.gitignore b/arch/riscv/purgatory/.gitignore
new file mode 100644
index 0000000..38d7d1b
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+purgatory.chk
+purgatory.ro
+kexec-purgatory.c
diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile
new file mode 100644
index 0000000..d4df200
--- /dev/null
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: GPL-2.0
+OBJECT_FILES_NON_STANDARD := y
+
+purgatory-y := purgatory.o sha256.o entry.o string.o ctype.o memcpy.o memset.o
+
+targets += $(purgatory-y)
+PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
+
+$(obj)/string.o: $(srctree)/lib/string.c FORCE
+       $(call if_changed_rule,cc_o_c)
+
+$(obj)/ctype.o: $(srctree)/lib/ctype.c FORCE
+       $(call if_changed_rule,cc_o_c)
+
+$(obj)/memcpy.o: $(srctree)/arch/riscv/lib/memcpy.S FORCE
+       $(call if_changed_rule,as_o_S)
+
+$(obj)/memset.o: $(srctree)/arch/riscv/lib/memset.S FORCE
+       $(call if_changed_rule,as_o_S)
+
+$(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
+       $(call if_changed_rule,cc_o_c)
+
+CFLAGS_sha256.o := -D__DISABLE_EXPORTS
+CFLAGS_string.o := -D__DISABLE_EXPORTS
+CFLAGS_ctype.o := -D__DISABLE_EXPORTS
+
+# When linking purgatory.ro with -r unresolved symbols are not checked,
+# also link a purgatory.chk binary without -r to check for unresolved symbols.
+PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib
+LDFLAGS_purgatory.ro := -r $(PURGATORY_LDFLAGS)
+LDFLAGS_purgatory.chk := $(PURGATORY_LDFLAGS)
+targets += purgatory.ro purgatory.chk
+
+# Sanitizer, etc. runtimes are unavailable and cannot be linked here.
+GCOV_PROFILE   := n
+KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
+KCSAN_SANITIZE := n
+KCOV_INSTRUMENT := n
+
+# These are adjustments to the compiler flags used for objects that
+# make up the standalone purgatory.ro
+
+PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
+PURGATORY_CFLAGS := -mcmodel=medany -ffreestanding -fno-zero-initialized-in-bss
+PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
+PURGATORY_CFLAGS += -fno-stack-protector -g0
+
+# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
+# in turn leaves some undefined symbols like __fentry__ in purgatory and not
+# sure how to relocate those.
+ifdef CONFIG_FUNCTION_TRACER
+PURGATORY_CFLAGS_REMOVE                += $(CC_FLAGS_FTRACE)
+endif
+
+ifdef CONFIG_STACKPROTECTOR
+PURGATORY_CFLAGS_REMOVE                += -fstack-protector
+endif
+
+ifdef CONFIG_STACKPROTECTOR_STRONG
+PURGATORY_CFLAGS_REMOVE                += -fstack-protector-strong
+endif
+
+CFLAGS_REMOVE_purgatory.o      += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_purgatory.o             += $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_sha256.o         += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_sha256.o                        += $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_string.o         += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_string.o                        += $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_ctype.o          += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_ctype.o                 += $(PURGATORY_CFLAGS)
+
+AFLAGS_REMOVE_entry.o          += -Wa,-gdwarf-2
+AFLAGS_REMOVE_memcpy.o         += -Wa,-gdwarf-2
+AFLAGS_REMOVE_memset.o         += -Wa,-gdwarf-2
+
+$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+               $(call if_changed,ld)
+
+$(obj)/purgatory.chk: $(obj)/purgatory.ro FORCE
+               $(call if_changed,ld)
+
+targets += kexec-purgatory.c
+
+quiet_cmd_bin2c = BIN2C   $@
+      cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
+
+$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro $(obj)/purgatory.chk FORCE
+       $(call if_changed,bin2c)
+
+obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += kexec-purgatory.o
diff --git a/arch/riscv/purgatory/entry.S b/arch/riscv/purgatory/entry.S
new file mode 100644
index 0000000..0194f45
--- /dev/null
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * purgatory: Runs between two kernels
+ *
+ * Copyright (C) 2022 Huawei Technologies Co, Ltd.
+ *
+ * Author: Li Zhengyu (lizhengyu3@huawei.com)
+ *
+ */
+
+.macro size, sym:req
+       .size \sym, . - \sym
+.endm
+
+.text
+
+.globl purgatory_start
+purgatory_start:
+
+       lla     sp, .Lstack
+       mv      s0, a0  /* The hartid of the current hart */
+       mv      s1, a1  /* Phys address of the FDT image */
+
+       jal     purgatory
+
+       /* Start new image. */
+       mv      a0, s0
+       mv      a1, s1
+       ld      a2, riscv_kernel_entry
+       jr      a2
+
+size purgatory_start
+
+.align 4
+       .rept   256
+       .quad   0
+       .endr
+.Lstack:
+
+.data
+
+.globl riscv_kernel_entry
+riscv_kernel_entry:
+       .quad   0
+size riscv_kernel_entry
+
+.end
diff --git a/arch/riscv/purgatory/purgatory.c b/arch/riscv/purgatory/purgatory.c
new file mode 100644
index 0000000..80596ab
--- /dev/null
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * purgatory: Runs between two kernels
+ *
+ * Copyright (C) 2022 Huawei Technologies Co, Ltd.
+ *
+ * Author: Li Zhengyu (lizhengyu3@huawei.com)
+ *
+ */
+
+#include <linux/purgatory.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/string.h>
+
+u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(".kexec-purgatory");
+
+struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(".kexec-purgatory");
+
+static int verify_sha256_digest(void)
+{
+       struct kexec_sha_region *ptr, *end;
+       struct sha256_state ss;
+       u8 digest[SHA256_DIGEST_SIZE];
+
+       sha256_init(&ss);
+       end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions);
+       for (ptr = purgatory_sha_regions; ptr < end; ptr++)
+               sha256_update(&ss, (uint8_t *)(ptr->start), ptr->len);
+       sha256_final(&ss, digest);
+       if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)) != 0)
+               return 1;
+       return 0;
+}
+
+/* workaround for a warning with -Wmissing-prototypes */
+void purgatory(void);
+
+void purgatory(void)
+{
+       if (verify_sha256_digest())
+               for (;;)
+                       /* loop forever */
+                       ;
+}
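
purgatory_sha256_digest and purgatory_sha_regions above are placed in .kexec-purgatory so the kexec_file loader can patch in the expected digest and region list before control reaches purgatory(). A sketch of the digest computation the loading side has to mirror, reusing the lib/crypto sha256 helpers already used above; the function name and calling context are illustrative, not the actual kexec_file code.

static void example_fill_expected_digest(const struct kexec_sha_region *regions,
                                         unsigned int nr_regions,
                                         u8 digest[SHA256_DIGEST_SIZE])
{
        struct sha256_state ss;
        unsigned int i;

        sha256_init(&ss);
        for (i = 0; i < nr_regions; i++)
                sha256_update(&ss, (const u8 *)regions[i].start, regions[i].len);
        sha256_final(&ss, digest);
        /* This is the value verify_sha256_digest() compares against. */
}
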
index 5886e03..b1a88f6 100644
@@ -418,9 +418,6 @@ config COMPAT
          (and some other stuff like libraries and such) is needed for
          executing 31 bit applications.  It is safe to say "Y".
 
-config SYSVIPC_COMPAT
-       def_bool y if COMPAT && SYSVIPC
-
 config SMP
        def_bool y
 
@@ -735,11 +732,11 @@ config VFIO_AP
        depends on S390_AP_IOMMU && VFIO_MDEV && KVM
        depends on ZCRYPT
        help
-               This driver grants access to Adjunct Processor (AP) devices
-               via the VFIO mediated device interface.
+         This driver grants access to Adjunct Processor (AP) devices
+         via the VFIO mediated device interface.
 
-               To compile this driver as a module, choose M here: the module
-               will be called vfio_ap.
+         To compile this driver as a module, choose M here: the module
+         will be called vfio_ap.
 
 endmenu
 
index e94a2a7..c4300ea 100644
@@ -14,9 +14,9 @@ config DEBUG_ENTRY
          If unsure, say N.
 
 config CIO_INJECT
-       bool "CIO Inject interfaces"
-       depends on DEBUG_KERNEL && DEBUG_FS
-       help
-       This option provides a debugging facility to inject certain artificial events
-       and instruction responses to the CIO layer of Linux kernel. The newly created
-       debugfs user-interfaces will be at /sys/kernel/debug/s390/cio/*
+       bool "CIO Inject interfaces"
+       depends on DEBUG_KERNEL && DEBUG_FS
+       help
+         This option provides a debugging facility to inject certain artificial events
+         and instruction responses to the CIO layer of Linux kernel. The newly created
+         debugfs user-interfaces will be at /sys/kernel/debug/s390/cio/*
index 54c7536..1023e9d 100644
@@ -701,7 +701,7 @@ static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
                                             unsigned int nbytes)
 {
        gw->walk_bytes_remain -= nbytes;
-       scatterwalk_unmap(&gw->walk);
+       scatterwalk_unmap(gw->walk_ptr);
        scatterwalk_advance(&gw->walk, nbytes);
        scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
        gw->walk_ptr = NULL;
@@ -776,7 +776,7 @@ static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
                goto out;
        }
 
-       scatterwalk_unmap(&gw->walk);
+       scatterwalk_unmap(gw->walk_ptr);
        gw->walk_ptr = NULL;
 
        gw->ptr = gw->buf;
index f24d959..b74f107 100644
@@ -3,12 +3,24 @@
 #define __ASM_EXTABLE_H
 
 #include <linux/stringify.h>
+#include <linux/bits.h>
 #include <asm/asm-const.h>
 
-#define EX_TYPE_NONE   0
-#define EX_TYPE_FIXUP  1
-#define EX_TYPE_BPF    2
-#define EX_TYPE_UACCESS        3
+#define EX_TYPE_NONE           0
+#define EX_TYPE_FIXUP          1
+#define EX_TYPE_BPF            2
+#define EX_TYPE_UA_STORE       3
+#define EX_TYPE_UA_LOAD_MEM    4
+#define EX_TYPE_UA_LOAD_REG    5
+
+#define EX_DATA_REG_ERR_SHIFT  0
+#define EX_DATA_REG_ERR                GENMASK(3, 0)
+
+#define EX_DATA_REG_ADDR_SHIFT 4
+#define EX_DATA_REG_ADDR       GENMASK(7, 4)
+
+#define EX_DATA_LEN_SHIFT      8
+#define EX_DATA_LEN            GENMASK(11, 8)
 
 #define __EX_TABLE(_section, _fault, _target, _type)                   \
        stringify_in_c(.section _section,"a";)                          \
        stringify_in_c(.short   0;)                                     \
        stringify_in_c(.previous)
 
-#define __EX_TABLE_UA(_section, _fault, _target, _type, _reg)          \
-       stringify_in_c(.section _section,"a";)                          \
-       stringify_in_c(.align   4;)                                     \
-       stringify_in_c(.long    (_fault) - .;)                          \
-       stringify_in_c(.long    (_target) - .;)                         \
-       stringify_in_c(.short   (_type);)                               \
-       stringify_in_c(.macro extable_reg reg;)                         \
-       stringify_in_c(.set .Lfound, 0;)                                \
-       stringify_in_c(.set .Lregnr, 0;)                                \
-       stringify_in_c(.irp rs,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15;) \
-       stringify_in_c(.ifc "\reg", "%%\rs";)                           \
-       stringify_in_c(.set .Lfound, 1;)                                \
-       stringify_in_c(.short .Lregnr;)                                 \
-       stringify_in_c(.endif;)                                         \
-       stringify_in_c(.set .Lregnr, .Lregnr+1;)                        \
-       stringify_in_c(.endr;)                                          \
-       stringify_in_c(.ifne (.Lfound != 1);)                           \
-       stringify_in_c(.error "extable_reg: bad register argument";)    \
-       stringify_in_c(.endif;)                                         \
-       stringify_in_c(.endm;)                                          \
-       stringify_in_c(extable_reg _reg;)                               \
-       stringify_in_c(.purgem extable_reg;)                            \
+#define __EX_TABLE_UA(_section, _fault, _target, _type, _regerr, _regaddr, _len)\
+       stringify_in_c(.section _section,"a";)                                  \
+       stringify_in_c(.align   4;)                                             \
+       stringify_in_c(.long    (_fault) - .;)                                  \
+       stringify_in_c(.long    (_target) - .;)                                 \
+       stringify_in_c(.short   (_type);)                                       \
+       stringify_in_c(.macro   extable_reg regerr, regaddr;)                   \
+       stringify_in_c(.set     .Lfound, 0;)                                    \
+       stringify_in_c(.set     .Lcurr, 0;)                                     \
+       stringify_in_c(.irp     rs,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15;)      \
+       stringify_in_c(         .ifc    "\regerr", "%%r\rs";)                   \
+       stringify_in_c(                 .set    .Lfound, 1;)                    \
+       stringify_in_c(                 .set    .Lregerr, .Lcurr;)              \
+       stringify_in_c(         .endif;)                                        \
+       stringify_in_c(         .set    .Lcurr, .Lcurr+1;)                      \
+       stringify_in_c(.endr;)                                                  \
+       stringify_in_c(.ifne    (.Lfound != 1);)                                \
+       stringify_in_c(         .error  "extable_reg: bad register argument1";) \
+       stringify_in_c(.endif;)                                                 \
+       stringify_in_c(.set     .Lfound, 0;)                                    \
+       stringify_in_c(.set     .Lcurr, 0;)                                     \
+       stringify_in_c(.irp     rs,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15;)      \
+       stringify_in_c(         .ifc    "\regaddr", "%%r\rs";)                  \
+       stringify_in_c(                 .set    .Lfound, 1;)                    \
+       stringify_in_c(                 .set    .Lregaddr, .Lcurr;)             \
+       stringify_in_c(         .endif;)                                        \
+       stringify_in_c(         .set    .Lcurr, .Lcurr+1;)                      \
+       stringify_in_c(.endr;)                                                  \
+       stringify_in_c(.ifne    (.Lfound != 1);)                                \
+       stringify_in_c(         .error  "extable_reg: bad register argument2";) \
+       stringify_in_c(.endif;)                                                 \
+       stringify_in_c(.short   .Lregerr << EX_DATA_REG_ERR_SHIFT |             \
+                               .Lregaddr << EX_DATA_REG_ADDR_SHIFT |           \
+                               _len << EX_DATA_LEN_SHIFT;)                     \
+       stringify_in_c(.endm;)                                                  \
+       stringify_in_c(extable_reg _regerr,_regaddr;)                           \
+       stringify_in_c(.purgem  extable_reg;)                                   \
        stringify_in_c(.previous)
 
 #define EX_TABLE(_fault, _target)                                      \
        __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FIXUP)
+
 #define EX_TABLE_AMODE31(_fault, _target)                              \
        __EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP)
-#define EX_TABLE_UA(_fault, _target, _reg)                             \
-       __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UACCESS, _reg)
+
+#define EX_TABLE_UA_STORE(_fault, _target, _regerr)                    \
+       __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0)
+
+#define EX_TABLE_UA_LOAD_MEM(_fault, _target, _regerr, _regmem, _len)  \
+       __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len)
+
+#define EX_TABLE_UA_LOAD_REG(_fault, _target, _regerr, _regzero)       \
+       __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0)
 
 #endif /* __ASM_EXTABLE_H */
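
The EX_TABLE_UA_* entries above now pack an error register, an address/target register and a length into one 16-bit data word instead of recording a single register. A hedged sketch of how a fixup handler could unpack that word with FIELD_GET(); the exception_table_entry layout and the name of its data member are assumptions here, only the EX_DATA_* masks come from this hunk.

#include <linux/bitfield.h>     /* FIELD_GET() */

static void example_decode_ua_data(unsigned short data)
{
        unsigned int reg_err  = FIELD_GET(EX_DATA_REG_ERR, data);  /* gpr that receives -EFAULT */
        unsigned int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, data); /* gpr with the address, or the register to clear */
        unsigned int len      = FIELD_GET(EX_DATA_LEN, data);      /* bytes to zero for EX_TYPE_UA_LOAD_MEM */

        (void)reg_err; (void)reg_addr; (void)len;
}
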
index 7d6fe81..a386070 100644
 #define compat_mode_t  compat_mode_t
 typedef u16            compat_mode_t;
 
+#define __compat_uid_t __compat_uid_t
+typedef u16            __compat_uid_t;
+typedef u16            __compat_gid_t;
+
+#define compat_dev_t   compat_dev_t
+typedef u16            compat_dev_t;
+
+#define compat_ipc_pid_t compat_ipc_pid_t
+typedef u16             compat_ipc_pid_t;
+
+#define compat_statfs  compat_statfs
+
 #include <asm-generic/compat.h>
 
 #define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p( \
@@ -30,15 +42,9 @@ typedef u16          compat_mode_t;
                         PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \
                         PSW32_ASC_PRIMARY)
 
-#define COMPAT_USER_HZ         100
 #define COMPAT_UTS_MACHINE     "s390\0\0\0\0"
 
-typedef u16            __compat_uid_t;
-typedef u16            __compat_gid_t;
-typedef u16            compat_dev_t;
 typedef u16            compat_nlink_t;
-typedef u16            compat_ipc_pid_t;
-typedef __kernel_fsid_t        compat_fsid_t;
 
 typedef struct {
        u32 mask;
@@ -79,26 +85,6 @@ struct compat_stat {
        u32             __unused5;
 };
 
-struct compat_flock {
-       short           l_type;
-       short           l_whence;
-       compat_off_t    l_start;
-       compat_off_t    l_len;
-       compat_pid_t    l_pid;
-};
-
-#define F_GETLK64       12
-#define F_SETLK64       13
-#define F_SETLKW64      14    
-
-struct compat_flock64 {
-       short           l_type;
-       short           l_whence;
-       compat_loff_t   l_start;
-       compat_loff_t   l_len;
-       compat_pid_t    l_pid;
-};
-
 struct compat_statfs {
        u32             f_type;
        u32             f_bsize;
@@ -129,10 +115,6 @@ struct compat_statfs64 {
        u32             f_spare[4];
 };
 
-#define COMPAT_RLIM_INFINITY           0xffffffff
-
-#define COMPAT_OFF_T_MAX       0x7fffffff
-
 /*
  * A pointer passed in from user mode. This should not
  * be used for syscall parameters, just declare them
@@ -155,61 +137,4 @@ static inline int is_compat_task(void)
 
 #endif
 
-struct compat_ipc64_perm {
-       compat_key_t key;
-       __compat_uid32_t uid;
-       __compat_gid32_t gid;
-       __compat_uid32_t cuid;
-       __compat_gid32_t cgid;
-       compat_mode_t mode;
-       unsigned short __pad1;
-       unsigned short seq;
-       unsigned short __pad2;
-       unsigned int __unused1;
-       unsigned int __unused2;
-};
-
-struct compat_semid64_ds {
-       struct compat_ipc64_perm sem_perm;
-       compat_ulong_t sem_otime;
-       compat_ulong_t sem_otime_high;
-       compat_ulong_t sem_ctime;
-       compat_ulong_t sem_ctime_high;
-       compat_ulong_t sem_nsems;
-       compat_ulong_t __unused1;
-       compat_ulong_t __unused2;
-};
-
-struct compat_msqid64_ds {
-       struct compat_ipc64_perm msg_perm;
-       compat_ulong_t msg_stime;
-       compat_ulong_t msg_stime_high;
-       compat_ulong_t msg_rtime;
-       compat_ulong_t msg_rtime_high;
-       compat_ulong_t msg_ctime;
-       compat_ulong_t msg_ctime_high;
-       compat_ulong_t msg_cbytes;
-       compat_ulong_t msg_qnum;
-       compat_ulong_t msg_qbytes;
-       compat_pid_t   msg_lspid;
-       compat_pid_t   msg_lrpid;
-       compat_ulong_t __unused1;
-       compat_ulong_t __unused2;
-};
-
-struct compat_shmid64_ds {
-       struct compat_ipc64_perm shm_perm;
-       compat_size_t  shm_segsz;
-       compat_ulong_t shm_atime;
-       compat_ulong_t shm_atime_high;
-       compat_ulong_t shm_dtime;
-       compat_ulong_t shm_dtime_high;
-       compat_ulong_t shm_ctime;
-       compat_ulong_t shm_ctime_high;
-       compat_pid_t   shm_cpid;
-       compat_pid_t   shm_lpid;
-       compat_ulong_t shm_nattch;
-       compat_ulong_t __unused1;
-       compat_ulong_t __unused2;
-};
 #endif /* _ASM_S390X_COMPAT_H */
index 63098df..649ecdc 100644
@@ -31,7 +31,7 @@
 #define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
 
 /* Allocate control page with GFP_DMA */
-#define KEXEC_CONTROL_MEMORY_GFP GFP_DMA
+#define KEXEC_CONTROL_MEMORY_GFP (GFP_DMA | __GFP_NORETRY)
 
 /* Maximum address we can use for the crash control pages */
 #define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
deleted file mode 100644
index 5209f22..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * livepatch.h - s390-specific Kernel Live Patching Core
- *
- *  Copyright (c) 2013-2015 SUSE
- *   Authors: Jiri Kosina
- *           Vojtech Pavlik
- *           Jiri Slaby
- */
-
-#ifndef ASM_LIVEPATCH_H
-#define ASM_LIVEPATCH_H
-
-#include <linux/ftrace.h>
-#include <asm/ptrace.h>
-
-static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
-{
-       ftrace_instruction_pointer_set(fregs, ip);
-}
-
-#endif
index add764a..bd66f8e 100644
@@ -304,12 +304,6 @@ static __always_inline void __noreturn disabled_wait(void)
        while (1);
 }
 
-/*
- * Basic Program Check Handler.
- */
-extern void s390_base_pgm_handler(void);
-extern void (*s390_base_pgm_handler_fn)(struct pt_regs *regs);
-
 #define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
 
 extern int memcpy_real(void *, unsigned long, size_t);
index f850019..b23c658 100644
@@ -39,8 +39,15 @@ static inline bool on_stack(struct stack_info *info,
  * Kernel uses the packed stack layout (-mpacked-stack).
  */
 struct stack_frame {
-       unsigned long empty1[5];
-       unsigned int  empty2[8];
+       union {
+               unsigned long empty[9];
+               struct {
+                       unsigned long sie_control_block;
+                       unsigned long sie_savearea;
+                       unsigned long sie_reason;
+                       unsigned long sie_flags;
+               };
+       };
        unsigned long gprs[10];
        unsigned long back_chain;
 };
index 1f150a7..f4511e2 100644
@@ -3,7 +3,7 @@
  *  S390 version
  *    Copyright IBM Corp. 1999, 2000
  *    Author(s): Hartmut Penner (hp@de.ibm.com),
- *               Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *              Martin Schwidefsky (schwidefsky@de.ibm.com)
  *
  *  Derived from "include/asm-i386/uaccess.h"
  */
@@ -55,9 +55,6 @@ copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned lo
        return n;
 }
 
-int __put_user_bad(void) __attribute__((noreturn));
-int __get_user_bad(void) __attribute__((noreturn));
-
 union oac {
        unsigned int val;
        struct {
@@ -80,8 +77,14 @@ union oac {
        };
 };
 
-#define __put_get_user_asm(to, from, size, oac_spec)                   \
+int __noreturn __put_user_bad(void);
+
+#define __put_user_asm(to, from, size)                                 \
 ({                                                                     \
+       union oac __oac_spec = {                                        \
+               .oac1.as = PSW_BITS_AS_SECONDARY,                       \
+               .oac1.a = 1,                                            \
+       };                                                              \
        int __rc;                                                       \
                                                                        \
        asm volatile(                                                   \
@@ -89,26 +92,15 @@ union oac {
                "0:     mvcos   %[_to],%[_from],%[_size]\n"             \
                "1:     xr      %[rc],%[rc]\n"                          \
                "2:\n"                                                  \
-               EX_TABLE_UA(0b,2b,%[rc]) EX_TABLE_UA(1b,2b,%[rc])       \
+               EX_TABLE_UA_STORE(0b, 2b, %[rc])                        \
+               EX_TABLE_UA_STORE(1b, 2b, %[rc])                        \
                : [rc] "=&d" (__rc), [_to] "+Q" (*(to))                 \
                : [_size] "d" (size), [_from] "Q" (*(from)),            \
-                 [spec] "d" (oac_spec.val)                             \
+                 [spec] "d" (__oac_spec.val)                           \
                : "cc", "0");                                           \
        __rc;                                                           \
 })
 
-#define __put_user_asm(to, from, size)                         \
-       __put_get_user_asm(to, from, size, ((union oac) {       \
-               .oac1.as = PSW_BITS_AS_SECONDARY,               \
-               .oac1.a = 1                                     \
-       }))
-
-#define __get_user_asm(to, from, size)                         \
-       __put_get_user_asm(to, from, size, ((union oac) {       \
-               .oac2.as = PSW_BITS_AS_SECONDARY,               \
-               .oac2.a = 1                                     \
-       }))                                                     \
-
 static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
 {
        int rc;
@@ -141,6 +133,31 @@ static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned lon
        return rc;
 }
 
+int __noreturn __get_user_bad(void);
+
+#define __get_user_asm(to, from, size)                                 \
+({                                                                     \
+       union oac __oac_spec = {                                        \
+               .oac2.as = PSW_BITS_AS_SECONDARY,                       \
+               .oac2.a = 1,                                            \
+       };                                                              \
+       int __rc;                                                       \
+                                                                       \
+       asm volatile(                                                   \
+               "       lr      0,%[spec]\n"                            \
+               "0:     mvcos   0(%[_to]),%[_from],%[_size]\n"          \
+               "1:     xr      %[rc],%[rc]\n"                          \
+               "2:\n"                                                  \
+               EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize])  \
+               EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize])  \
+               : [rc] "=&d" (__rc), "=Q" (*(to))                       \
+               : [_size] "d" (size), [_from] "Q" (*(from)),            \
+                 [spec] "d" (__oac_spec.val), [_to] "a" (to),          \
+                 [_ksize] "K" (size)                                   \
+               : "cc", "0");                                           \
+       __rc;                                                           \
+})
+
 static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
 {
        int rc;
@@ -177,77 +194,77 @@ static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsign
  * These are the main single-value transfer routines.  They automatically
  * use the right size if we just have the right pointer type.
  */
-#define __put_user(x, ptr) \
-({                                                             \
-       __typeof__(*(ptr)) __x = (x);                           \
-       int __pu_err = -EFAULT;                                 \
-        __chk_user_ptr(ptr);                                    \
-       switch (sizeof (*(ptr))) {                              \
-       case 1:                                                 \
-       case 2:                                                 \
-       case 4:                                                 \
-       case 8:                                                 \
-               __pu_err = __put_user_fn(&__x, ptr,             \
-                                        sizeof(*(ptr)));       \
-               break;                                          \
-       default:                                                \
-               __put_user_bad();                               \
-               break;                                          \
-       }                                                       \
-       __builtin_expect(__pu_err, 0);                          \
+#define __put_user(x, ptr)                                             \
+({                                                                     \
+       __typeof__(*(ptr)) __x = (x);                                   \
+       int __pu_err = -EFAULT;                                         \
+                                                                       \
+       __chk_user_ptr(ptr);                                            \
+       switch (sizeof(*(ptr))) {                                       \
+       case 1:                                                         \
+       case 2:                                                         \
+       case 4:                                                         \
+       case 8:                                                         \
+               __pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr)));    \
+               break;                                                  \
+       default:                                                        \
+               __put_user_bad();                                       \
+               break;                                                  \
+       }                                                               \
+       __builtin_expect(__pu_err, 0);                                  \
 })
 
-#define put_user(x, ptr)                                       \
-({                                                             \
-       might_fault();                                          \
-       __put_user(x, ptr);                                     \
+#define put_user(x, ptr)                                               \
+({                                                                     \
+       might_fault();                                                  \
+       __put_user(x, ptr);                                             \
 })
 
-
-#define __get_user(x, ptr)                                     \
-({                                                             \
-       int __gu_err = -EFAULT;                                 \
-       __chk_user_ptr(ptr);                                    \
-       switch (sizeof(*(ptr))) {                               \
-       case 1: {                                               \
-               unsigned char __x = 0;                          \
-               __gu_err = __get_user_fn(&__x, ptr,             \
-                                        sizeof(*(ptr)));       \
-               (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
-               break;                                          \
-       };                                                      \
-       case 2: {                                               \
-               unsigned short __x = 0;                         \
-               __gu_err = __get_user_fn(&__x, ptr,             \
-                                        sizeof(*(ptr)));       \
-               (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
-               break;                                          \
-       };                                                      \
-       case 4: {                                               \
-               unsigned int __x = 0;                           \
-               __gu_err = __get_user_fn(&__x, ptr,             \
-                                        sizeof(*(ptr)));       \
-               (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
-               break;                                          \
-       };                                                      \
-       case 8: {                                               \
-               unsigned long long __x = 0;                     \
-               __gu_err = __get_user_fn(&__x, ptr,             \
-                                        sizeof(*(ptr)));       \
-               (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
-               break;                                          \
-       };                                                      \
-       default:                                                \
-               __get_user_bad();                               \
-               break;                                          \
-       }                                                       \
-       __builtin_expect(__gu_err, 0);                          \
+#define __get_user(x, ptr)                                             \
+({                                                                     \
+       int __gu_err = -EFAULT;                                         \
+                                                                       \
+       __chk_user_ptr(ptr);                                            \
+       switch (sizeof(*(ptr))) {                                       \
+       case 1: {                                                       \
+               unsigned char __x;                                      \
+                                                                       \
+               __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));    \
+               (x) = *(__force __typeof__(*(ptr)) *)&__x;              \
+               break;                                                  \
+       };                                                              \
+       case 2: {                                                       \
+               unsigned short __x;                                     \
+                                                                       \
+               __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));    \
+               (x) = *(__force __typeof__(*(ptr)) *)&__x;              \
+               break;                                                  \
+       };                                                              \
+       case 4: {                                                       \
+               unsigned int __x;                                       \
+                                                                       \
+               __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));    \
+               (x) = *(__force __typeof__(*(ptr)) *)&__x;              \
+               break;                                                  \
+       };                                                              \
+       case 8: {                                                       \
+               unsigned long __x;                                      \
+                                                                       \
+               __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));    \
+               (x) = *(__force __typeof__(*(ptr)) *)&__x;              \
+               break;                                                  \
+       };                                                              \
+       default:                                                        \
+               __get_user_bad();                                       \
+               break;                                                  \
+       }                                                               \
+       __builtin_expect(__gu_err, 0);                                  \
 })
 
-#define get_user(x, ptr)                                       \
-({                                                             \
-       might_fault();                                          \
-       __get_user(x, ptr);                                     \
+#define get_user(x, ptr)                                               \
+({                                                                     \
+       might_fault();                                                  \
+       __get_user(x, ptr);                                             \
 })
 
 /*
@@ -278,19 +295,20 @@ int __noreturn __put_kernel_bad(void);
        int __rc;                                                       \
                                                                        \
        asm volatile(                                                   \
-               "0:   " insn "  %2,%1\n"                                \
-               "1:     xr      %0,%0\n"                                \
+               "0:   " insn "  %[_val],%[_to]\n"                       \
+               "1:     xr      %[rc],%[rc]\n"                          \
                "2:\n"                                                  \
-               EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0)             \
-               : "=d" (__rc), "+Q" (*(to))                             \
-               : "d" (val)                                             \
+               EX_TABLE_UA_STORE(0b, 2b, %[rc])                        \
+               EX_TABLE_UA_STORE(1b, 2b, %[rc])                        \
+               : [rc] "=d" (__rc), [_to] "+Q" (*(to))                  \
+               : [_val] "d" (val)                                      \
                : "cc");                                                \
        __rc;                                                           \
 })
 
 #define __put_kernel_nofault(dst, src, type, err_label)                        \
 do {                                                                   \
-       u64 __x = (u64)(*((type *)(src)));                              \
+       unsigned long __x = (unsigned long)(*((type *)(src)));          \
        int __pk_err;                                                   \
                                                                        \
        switch (sizeof(type)) {                                         \
@@ -321,12 +339,13 @@ int __noreturn __get_kernel_bad(void);
        int __rc;                                                       \
                                                                        \
        asm volatile(                                                   \
-               "0:   " insn "  %1,%2\n"                                \
-               "1:     xr      %0,%0\n"                                \
+               "0:   " insn "  %[_val],%[_from]\n"                     \
+               "1:     xr      %[rc],%[rc]\n"                          \
                "2:\n"                                                  \
-               EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0)             \
-               : "=d" (__rc), "+d" (val)                               \
-               : "Q" (*(from))                                         \
+               EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val])            \
+               EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val])            \
+               : [rc] "=d" (__rc), [_val] "=d" (val)                   \
+               : [_from] "Q" (*(from))                                 \
                : "cc");                                                \
        __rc;                                                           \
 })
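
The __put_kernel_asm()/__get_kernel_asm() changes also switch the inline assembly from positional operands (%0, %1, %2) to named ones ([rc], [_val], [_from]), which keeps the templates readable as operands are added. A purely illustrative x86-64 user-space example of the named-operand syntax (not the s390 instructions used above):

#include <stdio.h>

static long sk_add(long a, long b)
{
        long out = a;

        /* Named operands: [out] and [addend] instead of %0 and %1. */
        asm("add %[addend], %[out]"
            : [out] "+r" (out)
            : [addend] "r" (b));
        return out;
}

int main(void)
{
        printf("%ld\n", sk_add(40, 2));   /* prints 42 */
        return 0;
}
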
@@ -337,28 +356,28 @@ do {                                                                      \
                                                                        \
        switch (sizeof(type)) {                                         \
        case 1: {                                                       \
-               u8 __x = 0;                                             \
+               unsigned char __x;                                      \
                                                                        \
                __gk_err = __get_kernel_asm(__x, (type *)(src), "ic");  \
                *((type *)(dst)) = (type)__x;                           \
                break;                                                  \
        };                                                              \
        case 2: {                                                       \
-               u16 __x = 0;                                            \
+               unsigned short __x;                                     \
                                                                        \
                __gk_err = __get_kernel_asm(__x, (type *)(src), "lh");  \
                *((type *)(dst)) = (type)__x;                           \
                break;                                                  \
        };                                                              \
        case 4: {                                                       \
-               u32 __x = 0;                                            \
+               unsigned int __x;                                       \
                                                                        \
                __gk_err = __get_kernel_asm(__x, (type *)(src), "l");   \
                *((type *)(dst)) = (type)__x;                           \
                break;                                                  \
        };                                                              \
        case 8: {                                                       \
-               u64 __x = 0;                                            \
+               unsigned long __x;                                      \
                                                                        \
                __gk_err = __get_kernel_asm(__x, (type *)(src), "lg");  \
                *((type *)(dst)) = (type)__x;                           \
index 9e9f75e..4260bc5 100644 (file)
@@ -28,6 +28,7 @@
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
 # ifdef CONFIG_COMPAT
+#   define __ARCH_WANT_COMPAT_STAT
 #   define __ARCH_WANT_SYS_TIME32
 #   define __ARCH_WANT_SYS_UTIME32
 # endif
index 5851041..27d6b3c 100644 (file)
@@ -33,7 +33,7 @@ CFLAGS_stacktrace.o   += -fno-optimize-sibling-calls
 CFLAGS_dumpstack.o     += -fno-optimize-sibling-calls
 CFLAGS_unwind_bc.o     += -fno-optimize-sibling-calls
 
-obj-y  := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
+obj-y  := traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o
 obj-y  += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
 obj-y  += debug.o irq.o ipl.o dis.o diag.o vdso.o
 obj-y  += sysinfo.o lgr.o os_info.o machine_kexec.o
index 7c74f0e..d8ce965 100644 (file)
@@ -32,6 +32,22 @@ int main(void)
        /* pt_regs offsets */
        OFFSET(__PT_PSW, pt_regs, psw);
        OFFSET(__PT_GPRS, pt_regs, gprs);
+       OFFSET(__PT_R0, pt_regs, gprs[0]);
+       OFFSET(__PT_R1, pt_regs, gprs[1]);
+       OFFSET(__PT_R2, pt_regs, gprs[2]);
+       OFFSET(__PT_R3, pt_regs, gprs[3]);
+       OFFSET(__PT_R4, pt_regs, gprs[4]);
+       OFFSET(__PT_R5, pt_regs, gprs[5]);
+       OFFSET(__PT_R6, pt_regs, gprs[6]);
+       OFFSET(__PT_R7, pt_regs, gprs[7]);
+       OFFSET(__PT_R8, pt_regs, gprs[8]);
+       OFFSET(__PT_R9, pt_regs, gprs[9]);
+       OFFSET(__PT_R10, pt_regs, gprs[10]);
+       OFFSET(__PT_R11, pt_regs, gprs[11]);
+       OFFSET(__PT_R12, pt_regs, gprs[12]);
+       OFFSET(__PT_R13, pt_regs, gprs[13]);
+       OFFSET(__PT_R14, pt_regs, gprs[14]);
+       OFFSET(__PT_R15, pt_regs, gprs[15]);
        OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2);
        OFFSET(__PT_FLAGS, pt_regs, flags);
        OFFSET(__PT_CR1, pt_regs, cr1);
@@ -41,11 +57,11 @@ int main(void)
        /* stack_frame offsets */
        OFFSET(__SF_BACKCHAIN, stack_frame, back_chain);
        OFFSET(__SF_GPRS, stack_frame, gprs);
-       OFFSET(__SF_EMPTY, stack_frame, empty1[0]);
-       OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[1]);
-       OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[2]);
-       OFFSET(__SF_SIE_REASON, stack_frame, empty1[3]);
-       OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[4]);
+       OFFSET(__SF_EMPTY, stack_frame, empty[0]);
+       OFFSET(__SF_SIE_CONTROL, stack_frame, sie_control_block);
+       OFFSET(__SF_SIE_SAVEAREA, stack_frame, sie_savearea);
+       OFFSET(__SF_SIE_REASON, stack_frame, sie_reason);
+       OFFSET(__SF_SIE_FLAGS, stack_frame, sie_flags);
        DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
        BLANK();
        /* idle data offsets */
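
The asm-offsets.c additions above let assembly code address individual pt_regs registers through generated __PT_R0..__PT_R15 constants instead of the hand-maintained equations removed from entry.S further down. The kernel produces such constants by compiling a small C file at build time and post-processing its output; the printf-based DEFINE()/OFFSET() macros below are a simplified stand-in for that mechanism:

#include <stddef.h>
#include <stdio.h>

struct sk_pt_regs {
        unsigned long psw[2];
        unsigned long gprs[16];
};

/* Emit one "#define NAME value" line per structure offset. */
#define DEFINE(sym, val) \
        printf("#define %s %lu\n", #sym, (unsigned long)(val))
#define OFFSET(sym, str, mem) \
        DEFINE(sym, offsetof(struct str, mem))

int main(void)
{
        OFFSET(__PT_GPRS, sk_pt_regs, gprs);
        OFFSET(__PT_R0, sk_pt_regs, gprs[0]);
        OFFSET(__PT_R15, sk_pt_regs, gprs[15]);
        return 0;
}
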
index 08cc86a..432c8c9 100644 (file)
@@ -149,7 +149,7 @@ static __init void setup_topology(void)
        topology_max_mnest = max_mnest;
 }
 
-static void early_pgm_check_handler(struct pt_regs *regs)
+void __do_early_pgm_check(struct pt_regs *regs)
 {
        if (!fixup_exception(regs))
                disabled_wait();
@@ -159,12 +159,11 @@ static noinline __init void setup_lowcore_early(void)
 {
        psw_t psw;
 
-       psw.addr = (unsigned long)s390_base_pgm_handler;
+       psw.addr = (unsigned long)early_pgm_check_handler;
        psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
        if (IS_ENABLED(CONFIG_KASAN))
                psw.mask |= PSW_MASK_DAT;
        S390_lowcore.program_new_psw = psw;
-       s390_base_pgm_handler_fn = early_pgm_check_handler;
        S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
 }
 
similarity index 52%
rename from arch/s390/kernel/base.S
rename to arch/s390/kernel/earlypgm.S
index 172c23c..f521c6d 100644 (file)
@@ -1,23 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- *  arch/s390/kernel/base.S
- *
  *    Copyright IBM Corp. 2006, 2007
  *    Author(s): Michael Holzheu <holzheu@de.ibm.com>
  */
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
-#include <asm/nospec-insn.h>
-#include <asm/ptrace.h>
 
-       GEN_BR_THUNK %r9
-       GEN_BR_THUNK %r14
-
-__PT_R0 = __PT_GPRS
-__PT_R8 = __PT_GPRS + 64
-
-ENTRY(s390_base_pgm_handler)
+ENTRY(early_pgm_check_handler)
        stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
        aghi    %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
        la      %r11,STACK_FRAME_OVERHEAD(%r15)
@@ -26,25 +16,8 @@ ENTRY(s390_base_pgm_handler)
        mvc     __PT_PSW(16,%r11),__LC_PGM_OLD_PSW
        mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
        lgr     %r2,%r11
-       larl    %r1,s390_base_pgm_handler_fn
-       lg      %r9,0(%r1)
-       ltgr    %r9,%r9
-       jz      1f
-       BASR_EX %r14,%r9
+       brasl   %r14,__do_early_pgm_check
        mvc     __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
        lmg     %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
        lpswe   __LC_RETURN_PSW
-1:     larl    %r13,disabled_wait_psw
-       lpswe   0(%r13)
-ENDPROC(s390_base_pgm_handler)
-
-       .align  8
-disabled_wait_psw:
-       .quad   0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler
-
-       .section .bss
-       .align 8
-       .globl s390_base_pgm_handler_fn
-s390_base_pgm_handler_fn:
-       .quad   0
-       .previous
+ENDPROC(early_pgm_check_handler)
index df41132..d2a1f2f 100644 (file)
 #include <asm/export.h>
 #include <asm/nospec-insn.h>
 
-__PT_R0      = __PT_GPRS
-__PT_R1      = __PT_GPRS + 8
-__PT_R2      = __PT_GPRS + 16
-__PT_R3      = __PT_GPRS + 24
-__PT_R4      = __PT_GPRS + 32
-__PT_R5      = __PT_GPRS + 40
-__PT_R6      = __PT_GPRS + 48
-__PT_R7      = __PT_GPRS + 56
-__PT_R8      = __PT_GPRS + 64
-__PT_R9      = __PT_GPRS + 72
-__PT_R10     = __PT_GPRS + 80
-__PT_R11     = __PT_GPRS + 88
-__PT_R12     = __PT_GPRS + 96
-__PT_R13     = __PT_GPRS + 104
-__PT_R14     = __PT_GPRS + 112
-__PT_R15     = __PT_GPRS + 120
-
 STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
@@ -268,6 +251,10 @@ ENTRY(sie64a)
        BPEXIT  __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 .Lsie_entry:
        sie     0(%r14)
+# Let the next instruction be NOP to avoid triggering a machine check
+# and handling it in a guest as a result of the instruction execution.
+       nopr    7
+.Lsie_leave:
        BPOFF
        BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 .Lsie_skip:
@@ -564,7 +551,7 @@ ENTRY(mcck_int_handler)
        jno     .Lmcck_panic
 #if IS_ENABLED(CONFIG_KVM)
        OUTSIDE %r9,.Lsie_gmap,.Lsie_done,6f
-       OUTSIDE %r9,.Lsie_entry,.Lsie_skip,4f
+       OUTSIDE %r9,.Lsie_entry,.Lsie_leave,4f
        oi      __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
        j       5f
 4:     CHKSTG  .Lmcck_panic
index 56e5e37..995ec74 100644 (file)
@@ -17,10 +17,12 @@ void ext_int_handler(void);
 void io_int_handler(void);
 void mcck_int_handler(void);
 void restart_int_handler(void);
+void early_pgm_check_handler(void);
 
 void __ret_from_fork(struct task_struct *prev, struct pt_regs *regs);
 void __do_pgm_check(struct pt_regs *regs);
 void __do_syscall(struct pt_regs *regs, int per_trap);
+void __do_early_pgm_check(struct pt_regs *regs);
 
 void do_protection_exception(struct pt_regs *regs);
 void do_dat_exception(struct pt_regs *regs);
index ea7729b..c27321c 100644 (file)
@@ -30,7 +30,7 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
        if (!stack)
                return NULL;
 
-       return (struct kvm_s390_sie_block *) stack->empty1[0];
+       return (struct kvm_s390_sie_block *)stack->sie_control_block;
 }
 
 static bool is_in_guest(struct pt_regs *regs)
index 71d86f7..89949b9 100644 (file)
@@ -94,9 +94,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
        return 0;
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
-               unsigned long arg, struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long new_stackp = args->stack;
+       unsigned long tls = args->tls;
        struct fake_frame
        {
                struct stack_frame sf;
@@ -130,15 +132,15 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
        frame->sf.gprs[9] = (unsigned long)frame;
 
        /* Store access registers to kernel stack of new process. */
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                /* kernel thread */
                memset(&frame->childregs, 0, sizeof(struct pt_regs));
                frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
                                PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
                frame->childregs.psw.addr =
                                (unsigned long)__ret_from_fork;
-               frame->childregs.gprs[9] = new_stackp; /* function */
-               frame->childregs.gprs[10] = arg;
+               frame->childregs.gprs[9] = (unsigned long)args->fn;
+               frame->childregs.gprs[10] = (unsigned long)args->fn_arg;
                frame->childregs.orig_gpr2 = -1;
                frame->childregs.last_break = 1;
                return 0;
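
Several architectures in this series (s390, sh, sparc, um) receive the same copy_thread() conversion: the argument list collapses into a single struct kernel_clone_args pointer, and a kernel thread is recognized by a non-NULL args->fn instead of PF_KTHREAD/PF_IO_WORKER flag tests. A trimmed illustration of the shape of that interface, not the full kernel definition:

#include <stdio.h>

struct sketch_clone_args {
        unsigned long flags;
        unsigned long stack;
        unsigned long tls;
        int (*fn)(void *);      /* non-NULL only for kernel threads */
        void *fn_arg;
};

static int sketch_copy_thread(const struct sketch_clone_args *args)
{
        if (args->fn) {
                /* kernel thread: child starts in fn(fn_arg) on fresh regs */
                printf("kernel thread, fn_arg=%p\n", args->fn_arg);
                return 0;
        }
        /* user clone/fork: copy parent registers, apply new stack and TLS */
        printf("user clone, stack=%#lx tls=%#lx\n", args->stack, args->tls);
        return 0;
}

static int demo_fn(void *arg) { (void)arg; return 0; }

int main(void)
{
        struct sketch_clone_args kthread = { .fn = demo_fn, .fn_arg = NULL };
        struct sketch_clone_args uclone  = { .stack = 0x7fff0000, .tls = 0x1000 };

        sketch_copy_thread(&kthread);
        sketch_copy_thread(&uclone);
        return 0;
}
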
index 76ad640..8fcb561 100644 (file)
@@ -1332,8 +1332,7 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm,
                mutex_unlock(&kvm->lock);
                return -EBUSY;
        }
-       bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
-                   KVM_S390_VM_CPU_FEAT_NR_BITS);
+       bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
        mutex_unlock(&kvm->lock);
        VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
                         data.feat[0],
@@ -1504,8 +1503,7 @@ static int kvm_s390_get_processor_feat(struct kvm *kvm,
 {
        struct kvm_s390_vm_cpu_feat data;
 
-       bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
-                   KVM_S390_VM_CPU_FEAT_NR_BITS);
+       bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
        if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
                return -EFAULT;
        VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
@@ -1520,9 +1518,7 @@ static int kvm_s390_get_machine_feat(struct kvm *kvm,
 {
        struct kvm_s390_vm_cpu_feat data;
 
-       bitmap_copy((unsigned long *) data.feat,
-                   kvm_s390_available_cpu_feat,
-                   KVM_S390_VM_CPU_FEAT_NR_BITS);
+       bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
        if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
                return -EFAULT;
        VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
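
The KVM hunks above replace bitmap_copy() casts with the 64-bit array conversion helpers, so the unsigned-long-based in-kernel bitmap is translated explicitly to and from the u64 feat[] array exposed to userspace. A simplified user-space stand-in for the to-array direction (not the kernel implementation of bitmap_to_arr64()):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void sk_bitmap_to_arr64(uint64_t *dst, const unsigned long *src,
                               unsigned int nbits)
{
        unsigned int i;

        memset(dst, 0, ((nbits + 63) / 64) * sizeof(*dst));
        for (i = 0; i < nbits; i++)
                if (src[i / (8 * sizeof(*src))] &
                    (1UL << (i % (8 * sizeof(*src)))))
                        dst[i / 64] |= (uint64_t)1 << (i % 64);
}

int main(void)
{
        unsigned long feat_bitmap[4] = { 0 };
        uint64_t feat_arr[2] = { 0 };

        feat_bitmap[0] |= 1UL << 3;              /* pretend feature bit 3 */
        sk_bitmap_to_arr64(feat_arr, feat_bitmap, 128);
        printf("0x%016llx\n", (unsigned long long)feat_arr[0]);
        return 0;
}
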
index 8ac8ad2..1e4d218 100644 (file)
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#include <linux/bitfield.h>
 #include <linux/extable.h>
+#include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/panic.h>
 #include <asm/asm-extable.h>
@@ -24,9 +26,34 @@ static bool ex_handler_fixup(const struct exception_table_entry *ex, struct pt_r
        return true;
 }
 
-static bool ex_handler_uaccess(const struct exception_table_entry *ex, struct pt_regs *regs)
+static bool ex_handler_ua_store(const struct exception_table_entry *ex, struct pt_regs *regs)
 {
-       regs->gprs[ex->data] = -EFAULT;
+       unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+
+       regs->gprs[reg_err] = -EFAULT;
+       regs->psw.addr = extable_fixup(ex);
+       return true;
+}
+
+static bool ex_handler_ua_load_mem(const struct exception_table_entry *ex, struct pt_regs *regs)
+{
+       unsigned int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
+       unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+       size_t len = FIELD_GET(EX_DATA_LEN, ex->data);
+
+       regs->gprs[reg_err] = -EFAULT;
+       memset((void *)regs->gprs[reg_addr], 0, len);
+       regs->psw.addr = extable_fixup(ex);
+       return true;
+}
+
+static bool ex_handler_ua_load_reg(const struct exception_table_entry *ex, struct pt_regs *regs)
+{
+       unsigned int reg_zero = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
+       unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+
+       regs->gprs[reg_err] = -EFAULT;
+       regs->gprs[reg_zero] = 0;
        regs->psw.addr = extable_fixup(ex);
        return true;
 }
@@ -43,8 +70,12 @@ bool fixup_exception(struct pt_regs *regs)
                return ex_handler_fixup(ex, regs);
        case EX_TYPE_BPF:
                return ex_handler_bpf(ex, regs);
-       case EX_TYPE_UACCESS:
-               return ex_handler_uaccess(ex, regs);
+       case EX_TYPE_UA_STORE:
+               return ex_handler_ua_store(ex, regs);
+       case EX_TYPE_UA_LOAD_MEM:
+               return ex_handler_ua_load_mem(ex, regs);
+       case EX_TYPE_UA_LOAD_REG:
+               return ex_handler_ua_load_reg(ex, regs);
        }
        panic("invalid exception table entry");
 }
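
The new extable handlers above pull the registers to modify out of bit fields packed into ex->data with FIELD_GET(). The sketch below shows the unpacking idea with a portable stand-in macro; the SK_DATA_* masks are invented for illustration and do not reflect the real EX_DATA_* layout:

#include <stdio.h>
#include <stdint.h>

#define SK_DATA_REG_ERR   0x000fu        /* bits 0-3  */
#define SK_DATA_REG_ADDR  0x00f0u        /* bits 4-7  */
#define SK_DATA_LEN       0xff00u        /* bits 8-15 */

/* Minimal stand-in for the kernel's FIELD_GET(): divide by the mask's LSB. */
#define sk_field_get(mask, val)  (((val) & (mask)) / ((mask) & -(mask)))

int main(void)
{
        uint16_t data = (12u << 8) | (5u << 4) | 2u;  /* len=12, addr=r5, err=r2 */

        printf("err=r%u addr=r%u len=%u\n",
               (unsigned)sk_field_get(SK_DATA_REG_ERR, data),
               (unsigned)sk_field_get(SK_DATA_REG_ADDR, data),
               (unsigned)sk_field_get(SK_DATA_LEN, data));
        return 0;
}
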
index 1ac7391..b8ae4a4 100644 (file)
@@ -2608,6 +2608,18 @@ static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
        return 0;
 }
 
+/*
+ * Give a chance to schedule after setting a storage key on 256 pages.
+ * We only hold the mm lock, which is a rwsem, and the kvm srcu.
+ * Both can sleep.
+ */
+static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr,
+                                 unsigned long next, struct mm_walk *walk)
+{
+       cond_resched();
+       return 0;
+}
+
 static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
                                      unsigned long hmask, unsigned long next,
                                      struct mm_walk *walk)
@@ -2630,12 +2642,14 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
        end = start + HPAGE_SIZE - 1;
        __storage_key_init_range(start, end);
        set_bit(PG_arch_1, &page->flags);
+       cond_resched();
        return 0;
 }
 
 static const struct mm_walk_ops enable_skey_walk_ops = {
        .hugetlb_entry          = __s390_enable_skey_hugetlb,
        .pte_entry              = __s390_enable_skey_pte,
+       .pmd_entry              = __s390_enable_skey_pmd,
 };
 
 int s390_enable_skey(void)
index 697df02..4909dcd 100644 (file)
@@ -748,7 +748,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
        pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
        ptev = pte_val(*ptep);
        if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
-               page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
+               page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
        pgste_set_unlock(ptep, pgste);
        preempt_enable();
 }
index ca01286..a808843 100644 (file)
@@ -92,9 +92,11 @@ void release_thread(struct task_struct *dead_task)
 asmlinkage void ret_from_fork(void);
 asmlinkage void ret_from_kernel_thread(void);
 
-int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
-               struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long usp = args->stack;
+       unsigned long tls = args->tls;
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs;
 
@@ -114,11 +116,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
 
        childregs = task_pt_regs(p);
        p->thread.sp = (unsigned long) childregs;
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                memset(childregs, 0, sizeof(struct pt_regs));
                p->thread.pc = (unsigned long) ret_from_kernel_thread;
-               childregs->regs[4] = arg;
-               childregs->regs[5] = usp;
+               childregs->regs[4] = (unsigned long) args->fn_arg;
+               childregs->regs[5] = (unsigned long) args->fn;
                childregs->sr = SR_MD;
 #if defined(CONFIG_SH_FPU)
                childregs->sr |= SR_FD;
index 5c33f03..e8eeedc 100644 (file)
@@ -46,8 +46,7 @@ static void native_machine_shutdown(void)
 
 static void native_machine_power_off(void)
 {
-       if (pm_power_off)
-               pm_power_off();
+       do_kernel_power_off();
 }
 
 static void native_machine_halt(void)
index 85b5736..ba449c4 100644 (file)
@@ -489,9 +489,4 @@ config COMPAT
        select ARCH_WANT_OLD_COMPAT_IPC
        select COMPAT_OLD_SIGACTION
 
-config SYSVIPC_COMPAT
-       bool
-       depends on COMPAT && SYSVIPC
-       default y
-
 source "drivers/sbus/char/Kconfig"
index bd949fc..e4382d2 100644 (file)
@@ -9,17 +9,25 @@
 #define compat_mode_t  compat_mode_t
 typedef u16            compat_mode_t;
 
+#define __compat_uid_t __compat_uid_t
+typedef u16            __compat_uid_t;
+typedef u16            __compat_gid_t;
+
+#define compat_dev_t   compat_dev_t
+typedef u16            compat_dev_t;
+
+#define compat_ipc_pid_t compat_ipc_pid_t
+typedef u16             compat_ipc_pid_t;
+
+#define compat_ipc64_perm compat_ipc64_perm
+
+#define COMPAT_RLIM_INFINITY 0x7fffffff
+
 #include <asm-generic/compat.h>
 
-#define COMPAT_USER_HZ         100
 #define COMPAT_UTS_MACHINE     "sparc\0\0"
 
-typedef u16            __compat_uid_t;
-typedef u16            __compat_gid_t;
-typedef u16            compat_dev_t;
 typedef s16            compat_nlink_t;
-typedef u16            compat_ipc_pid_t;
-typedef __kernel_fsid_t        compat_fsid_t;
 
 struct compat_stat {
        compat_dev_t    st_dev;
@@ -75,46 +83,7 @@ struct compat_stat64 {
        unsigned int    __unused5;
 };
 
-struct compat_flock {
-       short           l_type;
-       short           l_whence;
-       compat_off_t    l_start;
-       compat_off_t    l_len;
-       compat_pid_t    l_pid;
-       short           __unused;
-};
-
-#define F_GETLK64      12
-#define F_SETLK64      13
-#define F_SETLKW64     14
-
-struct compat_flock64 {
-       short           l_type;
-       short           l_whence;
-       compat_loff_t   l_start;
-       compat_loff_t   l_len;
-       compat_pid_t    l_pid;
-       short           __unused;
-};
-
-struct compat_statfs {
-       int             f_type;
-       int             f_bsize;
-       int             f_blocks;
-       int             f_bfree;
-       int             f_bavail;
-       int             f_files;
-       int             f_ffree;
-       compat_fsid_t   f_fsid;
-       int             f_namelen;      /* SunOS ignores this field. */
-       int             f_frsize;
-       int             f_flags;
-       int             f_spare[4];
-};
-
-#define COMPAT_RLIM_INFINITY 0x7fffffff
-
-#define COMPAT_OFF_T_MAX       0x7fffffff
+#define __ARCH_COMPAT_FLOCK_PAD                short __unused;
 
 struct compat_ipc64_perm {
        compat_key_t key;
index 1e66278..d6bc767 100644 (file)
@@ -46,6 +46,7 @@
 #define __ARCH_WANT_SYS_TIME
 #define __ARCH_WANT_SYS_UTIME
 #define __ARCH_WANT_COMPAT_SYS_SENDFILE
+#define __ARCH_WANT_COMPAT_STAT
 #endif
 
 #ifdef __32bit_syscall_numbers__
index e03d6f8..47f5413 100644 (file)
@@ -11,8 +11,8 @@ struct stat {
        __kernel_ino_t st_ino;
        __kernel_mode_t st_mode;
        short   st_nlink;
-       __kernel_uid_t st_uid;
-       __kernel_gid_t st_gid;
+       __kernel_uid32_t st_uid;
+       __kernel_gid32_t st_gid;
        unsigned int st_rdev;
        long    st_size;
        long    st_atime;
index ce5ad5d..4321322 100644 (file)
@@ -2,15 +2,12 @@
 #ifndef _UAPI_SPARC_TERMBITS_H
 #define _UAPI_SPARC_TERMBITS_H
 
-#include <linux/posix_types.h>
-
-typedef unsigned char   cc_t;
-typedef unsigned int    speed_t;
+#include <asm-generic/termbits-common.h>
 
 #if defined(__sparc__) && defined(__arch64__)
-typedef unsigned int    tcflag_t;
+typedef unsigned int   tcflag_t;
 #else
-typedef unsigned long   tcflag_t;
+typedef unsigned long  tcflag_t;
 #endif
 
 #define NCC 8
@@ -61,21 +58,19 @@ struct ktermios {
 };
 
 /* c_cc characters */
-#define VINTR    0
-#define VQUIT    1
-#define VERASE   2
-#define VKILL    3
-#define VEOF     4
-#define VEOL     5
-#define VEOL2    6
-#define VSWTC    7
-#define VSTART   8
-#define VSTOP    9
-
-
+#define VINTR     0
+#define VQUIT     1
+#define VERASE    2
+#define VKILL     3
+#define VEOF      4
+#define VEOL      5
+#define VEOL2     6
+#define VSWTC     7
+#define VSTART    8
+#define VSTOP     9
 
 #define VSUSP    10
-#define VDSUSP   11  /* SunOS POSIX nicety I do believe... */
+#define VDSUSP   11            /* SunOS POSIX nicety I do believe... */
 #define VREPRINT 12
 #define VDISCARD 13
 #define VWERASE  14
@@ -90,121 +85,83 @@ struct ktermios {
 #endif
 
 /* c_iflag bits */
-#define IGNBRK 0x00000001
-#define BRKINT 0x00000002
-#define IGNPAR 0x00000004
-#define PARMRK 0x00000008
-#define INPCK  0x00000010
-#define ISTRIP 0x00000020
-#define INLCR  0x00000040
-#define IGNCR  0x00000080
-#define ICRNL  0x00000100
-#define IUCLC  0x00000200
-#define IXON   0x00000400
-#define IXANY  0x00000800
-#define IXOFF  0x00001000
-#define IMAXBEL        0x00002000
-#define IUTF8   0x00004000
+#define IUCLC  0x0200
+#define IXON   0x0400
+#define IXOFF  0x1000
+#define IMAXBEL        0x2000
+#define IUTF8   0x4000
 
 /* c_oflag bits */
-#define OPOST  0x00000001
-#define OLCUC  0x00000002
-#define ONLCR  0x00000004
-#define OCRNL  0x00000008
-#define ONOCR  0x00000010
-#define ONLRET 0x00000020
-#define OFILL  0x00000040
-#define OFDEL  0x00000080
-#define NLDLY  0x00000100
-#define   NL0  0x00000000
-#define   NL1  0x00000100
-#define CRDLY  0x00000600
-#define   CR0  0x00000000
-#define   CR1  0x00000200
-#define   CR2  0x00000400
-#define   CR3  0x00000600
-#define TABDLY 0x00001800
-#define   TAB0 0x00000000
-#define   TAB1 0x00000800
-#define   TAB2 0x00001000
-#define   TAB3 0x00001800
-#define   XTABS        0x00001800
-#define BSDLY  0x00002000
-#define   BS0  0x00000000
-#define   BS1  0x00002000
-#define VTDLY  0x00004000
-#define   VT0  0x00000000
-#define   VT1  0x00004000
-#define FFDLY  0x00008000
-#define   FF0  0x00000000
-#define   FF1  0x00008000
-#define PAGEOUT 0x00010000  /* SUNOS specific */
-#define WRAP    0x00020000  /* SUNOS specific */
+#define OLCUC  0x00002
+#define ONLCR  0x00004
+#define NLDLY  0x00100
+#define   NL0  0x00000
+#define   NL1  0x00100
+#define CRDLY  0x00600
+#define   CR0  0x00000
+#define   CR1  0x00200
+#define   CR2  0x00400
+#define   CR3  0x00600
+#define TABDLY 0x01800
+#define   TAB0 0x00000
+#define   TAB1 0x00800
+#define   TAB2 0x01000
+#define   TAB3 0x01800
+#define   XTABS        0x01800
+#define BSDLY  0x02000
+#define   BS0  0x00000
+#define   BS1  0x02000
+#define VTDLY  0x04000
+#define   VT0  0x00000
+#define   VT1  0x04000
+#define FFDLY  0x08000
+#define   FF0  0x00000
+#define   FF1  0x08000
+#define PAGEOUT 0x10000                        /* SUNOS specific */
+#define WRAP    0x20000                        /* SUNOS specific */
 
 /* c_cflag bit meaning */
-#define CBAUD    0x0000100f
-#define  B0      0x00000000   /* hang up */
-#define  B50     0x00000001
-#define  B75     0x00000002
-#define  B110    0x00000003
-#define  B134    0x00000004
-#define  B150    0x00000005
-#define  B200    0x00000006
-#define  B300    0x00000007
-#define  B600    0x00000008
-#define  B1200   0x00000009
-#define  B1800   0x0000000a
-#define  B2400   0x0000000b
-#define  B4800   0x0000000c
-#define  B9600   0x0000000d
-#define  B19200          0x0000000e
-#define  B38400          0x0000000f
-#define EXTA      B19200
-#define EXTB      B38400
-#define  CSIZE    0x00000030
-#define   CS5    0x00000000
-#define   CS6    0x00000010
-#define   CS7    0x00000020
-#define   CS8    0x00000030
-#define CSTOPB   0x00000040
-#define CREAD    0x00000080
-#define PARENB   0x00000100
-#define PARODD   0x00000200
-#define HUPCL    0x00000400
-#define CLOCAL   0x00000800
-#define CBAUDEX   0x00001000
+#define CBAUD          0x0000100f
+#define CSIZE          0x00000030
+#define   CS5          0x00000000
+#define   CS6          0x00000010
+#define   CS7          0x00000020
+#define   CS8          0x00000030
+#define CSTOPB         0x00000040
+#define CREAD          0x00000080
+#define PARENB         0x00000100
+#define PARODD         0x00000200
+#define HUPCL          0x00000400
+#define CLOCAL         0x00000800
+#define CBAUDEX                0x00001000
 /* We'll never see these speeds with the Zilogs, but for completeness... */
-#define  BOTHER   0x00001000
-#define  B57600   0x00001001
-#define  B115200  0x00001002
-#define  B230400  0x00001003
-#define  B460800  0x00001004
+#define BOTHER         0x00001000
+#define     B57600     0x00001001
+#define    B115200     0x00001002
+#define    B230400     0x00001003
+#define    B460800     0x00001004
 /* This is what we can do with the Zilogs. */
-#define  B76800   0x00001005
+#define     B76800     0x00001005
 /* This is what we can do with the SAB82532. */
-#define  B153600  0x00001006
-#define  B307200  0x00001007
-#define  B614400  0x00001008
-#define  B921600  0x00001009
+#define    B153600     0x00001006
+#define    B307200     0x00001007
+#define    B614400     0x00001008
+#define    B921600     0x00001009
 /* And these are the rest... */
-#define  B500000  0x0000100a
-#define  B576000  0x0000100b
-#define B1000000  0x0000100c
-#define B1152000  0x0000100d
-#define B1500000  0x0000100e
-#define B2000000  0x0000100f
+#define    B500000     0x0000100a
+#define    B576000     0x0000100b
+#define   B1000000     0x0000100c
+#define   B1152000     0x0000100d
+#define   B1500000     0x0000100e
+#define   B2000000     0x0000100f
 /* These have totally bogus values and nobody uses them
    so far. Later on we'd have to use say 0x10000x and
    adjust CBAUD constant and drivers accordingly.
-#define B2500000  0x00001010
-#define B3000000  0x00001011
-#define B3500000  0x00001012
-#define B4000000  0x00001013  */
-#define CIBAUD   0x100f0000  /* input baud rate (not used) */
-#define CMSPAR   0x40000000  /* mark or space (stick) parity */
-#define CRTSCTS          0x80000000  /* flow control */
-
-#define IBSHIFT          16            /* Shift from CBAUD to CIBAUD */
+#define   B2500000     0x00001010
+#define   B3000000     0x00001011
+#define   B3500000     0x00001012
+#define   B4000000     0x00001013 */
+#define CIBAUD         0x100f0000      /* input baud rate (not used) */
 
 /* c_lflag bits */
 #define ISIG   0x00000001
@@ -219,7 +176,7 @@ struct ktermios {
 #define ECHOCTL        0x00000200
 #define ECHOPRT        0x00000400
 #define ECHOKE 0x00000800
-#define DEFECHO 0x00001000  /* SUNOS thing, what is it? */
+#define DEFECHO 0x00001000             /* SUNOS thing, what is it? */
 #define FLUSHO 0x00002000
 #define PENDIN 0x00004000
 #define IEXTEN 0x00008000
@@ -244,21 +201,9 @@ struct ktermios {
 /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
 #define TIOCSER_TEMT    0x01   /* Transmitter physically empty */
 
-
-/* tcflow() and TCXONC use these */
-#define        TCOOFF          0
-#define        TCOON           1
-#define        TCIOFF          2
-#define        TCION           3
-
-/* tcflush() and TCFLSH use these */
-#define        TCIFLUSH        0
-#define        TCOFLUSH        1
-#define        TCIOFLUSH       2
-
 /* tcsetattr uses these */
-#define        TCSANOW         0
-#define        TCSADRAIN       1
-#define        TCSAFLUSH       2
+#define TCSANOW                0
+#define TCSADRAIN      1
+#define TCSAFLUSH      2
 
 #endif /* _UAPI_SPARC_TERMBITS_H */
index 88c0c14..33b0215 100644 (file)
@@ -259,9 +259,11 @@ clone_stackframe(struct sparc_stackf __user *dst,
 extern void ret_from_fork(void);
 extern void ret_from_kernel_thread(void);
 
-int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
-               struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long sp = args->stack;
+       unsigned long tls = args->tls;
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs, *regs = current_pt_regs();
        char *new_stack;
@@ -296,13 +298,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
        ti->ksp = (unsigned long) new_stack;
        p->thread.kregs = childregs;
 
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                extern int nwindows;
                unsigned long psr;
                memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
                ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8);
-               childregs->u_regs[UREG_G1] = sp; /* function */
-               childregs->u_regs[UREG_G2] = arg;
+               childregs->u_regs[UREG_G1] = (unsigned long) args->fn;
+               childregs->u_regs[UREG_G2] = (unsigned long) args->fn_arg;
                psr = childregs->psr = get_psr();
                ti->kpsr = psr | PSR_PIL;
                ti->kwim = 1 << (((psr & PSR_CWP) + 1) % nwindows);
index 9a2ceb0..6335b69 100644 (file)
@@ -564,9 +564,11 @@ barf:
  * Parent -->  %o0 == childs  pid, %o1 == 0
  * Child  -->  %o0 == parents pid, %o1 == 1
  */
-int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
-               struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long sp = args->stack;
+       unsigned long tls = args->tls;
        struct thread_info *t = task_thread_info(p);
        struct pt_regs *regs = current_pt_regs();
        struct sparc_stackf *parent_sf;
@@ -584,12 +586,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
                                       sizeof(struct sparc_stackf));
        t->fpsaved[0] = 0;
 
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                memset(child_trap_frame, 0, child_stack_sz);
                __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] = 
                        (current_pt_regs()->tstate + 1) & TSTATE_CWP;
-               t->kregs->u_regs[UREG_G1] = sp; /* function */
-               t->kregs->u_regs[UREG_G2] = arg;
+               t->kregs->u_regs[UREG_G1] = (unsigned long) args->fn;
+               t->kregs->u_regs[UREG_G2] = (unsigned long) args->fn_arg;
                return 0;
        }
 
index e8983d0..4ec22e1 100644 (file)
@@ -6,6 +6,7 @@ config UML
        bool
        default y
        select ARCH_EPHEMERAL_INODES
+       select ARCH_HAS_GCOV_PROFILE_ALL
        select ARCH_HAS_KCOV
        select ARCH_HAS_STRNCPY_FROM_USER
        select ARCH_HAS_STRNLEN_USER
index f145842..914da77 100644 (file)
@@ -64,6 +64,13 @@ config XTERM_CHAN
          its own xterm.
          It is safe to say 'Y' here.
 
+config XTERM_CHAN_DEFAULT_EMULATOR
+       string "xterm channel default terminal emulator"
+       depends on XTERM_CHAN
+       default "xterm"
+       help
+         This option allows changing the default terminal emulator.
+
 config NOCONFIG_CHAN
        bool
        default !(XTERM_CHAN && TTY_CHAN && PTY_CHAN && PORT_CHAN && NULL_CHAN)
@@ -231,6 +238,14 @@ config UML_NET_DAEMON
 
          If unsure, say N.
 
+config UML_NET_DAEMON_DEFAULT_SOCK
+       string "Default socket for daemon transport"
+       default "/tmp/uml.ctl"
+       depends on UML_NET_DAEMON
+       help
+         This option allows setting the default socket for the daemon
+         transport; normally it defaults to /tmp/uml.ctl.
+
 config UML_NET_VECTOR
        bool "Vector I/O high performance network devices"
        depends on UML_NET
index 803666e..e1dc429 100644 (file)
@@ -70,4 +70,6 @@ obj-$(CONFIG_UML_PCI_OVER_VIRTIO) += virt-pci.o
 USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o pcap_user.o vde_user.o vector_user.o
 CFLAGS_null.o = -DDEV_NULL=$(DEV_NULL_PATH)
 
+CFLAGS_xterm.o += '-DCONFIG_XTERM_CHAN_DEFAULT_EMULATOR="$(CONFIG_XTERM_CHAN_DEFAULT_EMULATOR)"'
+
 include arch/um/scripts/Makefile.rules
index 6299705..26a702a 100644 (file)
@@ -133,7 +133,7 @@ static void line_timer_cb(struct work_struct *work)
        struct line *line = container_of(work, struct line, task.work);
 
        if (!line->throttled)
-               chan_interrupt(line, line->driver->read_irq);
+               chan_interrupt(line, line->read_irq);
 }
 
 int enable_chan(struct line *line)
@@ -195,9 +195,9 @@ void free_irqs(void)
                chan = list_entry(ele, struct chan, free_list);
 
                if (chan->input && chan->enabled)
-                       um_free_irq(chan->line->driver->read_irq, chan);
+                       um_free_irq(chan->line->read_irq, chan);
                if (chan->output && chan->enabled)
-                       um_free_irq(chan->line->driver->write_irq, chan);
+                       um_free_irq(chan->line->write_irq, chan);
                chan->enabled = 0;
        }
 }
@@ -215,9 +215,9 @@ static void close_one_chan(struct chan *chan, int delay_free_irq)
                spin_unlock_irqrestore(&irqs_to_free_lock, flags);
        } else {
                if (chan->input && chan->enabled)
-                       um_free_irq(chan->line->driver->read_irq, chan);
+                       um_free_irq(chan->line->read_irq, chan);
                if (chan->output && chan->enabled)
-                       um_free_irq(chan->line->driver->write_irq, chan);
+                       um_free_irq(chan->line->write_irq, chan);
                chan->enabled = 0;
        }
        if (chan->ops->close != NULL)
index 6040817..25727ed 100644 (file)
@@ -220,7 +220,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
                       unsigned long *stack_out)
 {
        struct winch_data data;
-       int fds[2], n, err;
+       int fds[2], n, err, pid;
        char c;
 
        err = os_pipe(fds, 1, 1);
@@ -238,8 +238,9 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
         * problem with /dev/net/tun, which if held open by this
         * thread, prevents the TUN/TAP device from being reused.
         */
-       err = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
-       if (err < 0) {
+       pid = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
+       if (pid < 0) {
+               err = pid;
                printk(UM_KERN_ERR "fork of winch_thread failed - errno = %d\n",
                       -err);
                goto out_close;
@@ -263,7 +264,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
                goto out_close;
        }
 
-       return err;
+       return pid;
 
  out_close:
        close(fds[1]);
index fd24026..afde1e8 100644 (file)
@@ -65,7 +65,7 @@ static int daemon_setup(char *str, char **mac_out, void *data)
 
        *init = ((struct daemon_init)
                { .sock_type            = "unix",
-                 .ctl_sock             = "/tmp/uml.ctl" });
+                 .ctl_sock             = CONFIG_UML_NET_DAEMON_DEFAULT_SOCK });
 
        remain = split_if_spec(str, mac_out, &init->sock_type, &init->ctl_sock,
                               NULL);
index 8febf95..02b0bef 100644 (file)
@@ -139,7 +139,7 @@ static int flush_buffer(struct line *line)
                count = line->buffer + LINE_BUFSIZE - line->head;
 
                n = write_chan(line->chan_out, line->head, count,
-                              line->driver->write_irq);
+                              line->write_irq);
                if (n < 0)
                        return n;
                if (n == count) {
@@ -156,7 +156,7 @@ static int flush_buffer(struct line *line)
 
        count = line->tail - line->head;
        n = write_chan(line->chan_out, line->head, count,
-                      line->driver->write_irq);
+                      line->write_irq);
 
        if (n < 0)
                return n;
@@ -195,7 +195,7 @@ int line_write(struct tty_struct *tty, const unsigned char *buf, int len)
                ret = buffer_data(line, buf, len);
        else {
                n = write_chan(line->chan_out, buf, len,
-                              line->driver->write_irq);
+                              line->write_irq);
                if (n < 0) {
                        ret = n;
                        goto out_up;
@@ -215,7 +215,7 @@ void line_throttle(struct tty_struct *tty)
 {
        struct line *line = tty->driver_data;
 
-       deactivate_chan(line->chan_in, line->driver->read_irq);
+       deactivate_chan(line->chan_in, line->read_irq);
        line->throttled = 1;
 }
 
@@ -224,7 +224,7 @@ void line_unthrottle(struct tty_struct *tty)
        struct line *line = tty->driver_data;
 
        line->throttled = 0;
-       chan_interrupt(line, line->driver->read_irq);
+       chan_interrupt(line, line->read_irq);
 }
 
 static irqreturn_t line_write_interrupt(int irq, void *data)
@@ -260,19 +260,23 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
        int err;
 
        if (input) {
-               err = um_request_irq(driver->read_irq, fd, IRQ_READ,
-                                    line_interrupt, IRQF_SHARED,
+               err = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ,
+                                    line_interrupt, 0,
                                     driver->read_irq_name, data);
                if (err < 0)
                        return err;
+
+               line->read_irq = err;
        }
 
        if (output) {
-               err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
-                                    line_write_interrupt, IRQF_SHARED,
+               err = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_WRITE,
+                                    line_write_interrupt, 0,
                                     driver->write_irq_name, data);
                if (err < 0)
                        return err;
+
+               line->write_irq = err;
        }
 
        return 0;
index bdb16b9..f15be75 100644 (file)
@@ -23,9 +23,7 @@ struct line_driver {
        const short minor_start;
        const short type;
        const short subtype;
-       const int read_irq;
        const char *read_irq_name;
-       const int write_irq;
        const char *write_irq_name;
        struct mc_device mc;
        struct tty_driver *driver;
@@ -35,6 +33,8 @@ struct line {
        struct tty_port port;
        int valid;
 
+       int read_irq, write_irq;
+
        char *init_str;
        struct list_head chan_list;
        struct chan *chan_in, *chan_out;
index 41eae2e..8514966 100644 (file)
@@ -47,9 +47,7 @@ static struct line_driver driver = {
        .minor_start            = 64,
        .type                   = TTY_DRIVER_TYPE_SERIAL,
        .subtype                = 0,
-       .read_irq               = SSL_IRQ,
        .read_irq_name          = "ssl",
-       .write_irq              = SSL_WRITE_IRQ,
        .write_irq_name         = "ssl-write",
        .mc  = {
                .list           = LIST_HEAD_INIT(driver.mc.list),
index e8b762f..489d5a7 100644 (file)
@@ -53,9 +53,7 @@ static struct line_driver driver = {
        .minor_start            = 0,
        .type                   = TTY_DRIVER_TYPE_CONSOLE,
        .subtype                = SYSTEM_TYPE_CONSOLE,
-       .read_irq               = CONSOLE_IRQ,
        .read_irq_name          = "console",
-       .write_irq              = CONSOLE_WRITE_IRQ,
        .write_irq_name         = "console-write",
        .mc  = {
                .list           = LIST_HEAD_INIT(driver.mc.list),
index ba562d6..82ff378 100644 (file)
@@ -63,6 +63,7 @@ struct virtio_uml_device {
 
        u8 config_changed_irq:1;
        uint64_t vq_irq_vq_map;
+       int recv_rc;
 };
 
 struct virtio_uml_vq_info {
@@ -148,14 +149,6 @@ static int vhost_user_recv(struct virtio_uml_device *vu_dev,
 
        rc = vhost_user_recv_header(fd, msg);
 
-       if (rc == -ECONNRESET && vu_dev->registered) {
-               struct virtio_uml_platform_data *pdata;
-
-               pdata = vu_dev->pdata;
-
-               virtio_break_device(&vu_dev->vdev);
-               schedule_work(&pdata->conn_broken_wk);
-       }
        if (rc)
                return rc;
        size = msg->header.size;
@@ -164,6 +157,21 @@ static int vhost_user_recv(struct virtio_uml_device *vu_dev,
        return full_read(fd, &msg->payload, size, false);
 }
 
+static void vhost_user_check_reset(struct virtio_uml_device *vu_dev,
+                                  int rc)
+{
+       struct virtio_uml_platform_data *pdata = vu_dev->pdata;
+
+       if (rc != -ECONNRESET)
+               return;
+
+       if (!vu_dev->registered)
+               return;
+
+       virtio_break_device(&vu_dev->vdev);
+       schedule_work(&pdata->conn_broken_wk);
+}
+
 static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
                                struct vhost_user_msg *msg,
                                size_t max_payload_size)
@@ -171,8 +179,10 @@ static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
        int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
                                 max_payload_size, true);
 
-       if (rc)
+       if (rc) {
+               vhost_user_check_reset(vu_dev, rc);
                return rc;
+       }
 
        if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
                return -EPROTO;
@@ -369,6 +379,7 @@ static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
                                 sizeof(msg.msg.payload) +
                                 sizeof(msg.extra_payload));
 
+       vu_dev->recv_rc = rc;
        if (rc)
                return IRQ_NONE;
 
@@ -412,7 +423,9 @@ static irqreturn_t vu_req_interrupt(int irq, void *data)
        if (!um_irq_timetravel_handler_used())
                ret = vu_req_read_message(vu_dev, NULL);
 
-       if (vu_dev->vq_irq_vq_map) {
+       if (vu_dev->recv_rc) {
+               vhost_user_check_reset(vu_dev, vu_dev->recv_rc);
+       } else if (vu_dev->vq_irq_vq_map) {
                struct virtqueue *vq;
 
                virtio_device_for_each_vq((&vu_dev->vdev), vq) {
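
In the virtio_uml hunks above, connection-reset handling moves out of the receive path: vhost_user_recv() only reports an error code, vu_req_read_message() records it in vu_dev->recv_rc, and vu_req_interrupt() later decides whether to break the device. A minimal sketch of that record-then-check structure, with made-up names:

#include <errno.h>
#include <stdio.h>

struct sk_device {
        int recv_rc;    /* last receive status, like vu_dev->recv_rc */
        int broken;
};

static int sk_recv(struct sk_device *dev, int simulated_rc)
{
        dev->recv_rc = simulated_rc;    /* just remember the outcome */
        return simulated_rc;
}

static void sk_check_reset(struct sk_device *dev)
{
        if (dev->recv_rc == -ECONNRESET)
                dev->broken = 1;        /* stand-in for virtio_break_device() */
}

static void sk_interrupt(struct sk_device *dev)
{
        sk_recv(dev, -ECONNRESET);      /* pretend the peer went away */
        sk_check_reset(dev);            /* reset is handled here, once */
        printf("broken=%d\n", dev->broken);
}

int main(void)
{
        struct sk_device dev = { 0, 0 };

        sk_interrupt(&dev);
        return 0;
}
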
index 87ca4a4..6918de5 100644 (file)
@@ -42,7 +42,7 @@ static void *xterm_init(char *str, int device, const struct chan_opts *opts)
 }
 
 /* Only changed by xterm_setup, which is a setup */
-static char *terminal_emulator = "xterm";
+static char *terminal_emulator = CONFIG_XTERM_CHAN_DEFAULT_EMULATOR;
 static char *title_switch = "-T";
 static char *exec_switch = "-e";
 
@@ -79,8 +79,9 @@ __uml_setup("xterm=", xterm_setup,
 "    respectively.  The title switch must have the form '<switch> title',\n"
 "    not '<switch>=title'.  Similarly, the exec switch must have the form\n"
 "    '<switch> command arg1 arg2 ...'.\n"
-"    The default values are 'xterm=xterm,-T,-e'.  Values for gnome-terminal\n"
-"    are 'xterm=gnome-terminal,-t,-x'.\n\n"
+"    The default values are 'xterm=" CONFIG_XTERM_CHAN_DEFAULT_EMULATOR
+     ",-T,-e'.\n"
+"    Values for gnome-terminal are 'xterm=gnome-terminal,-t,-x'.\n\n"
 );
 
 static int xterm_open(int input, int output, int primary, void *d,
index f1f3f52..b2d834a 100644 (file)
@@ -4,6 +4,7 @@ generic-y += bug.h
 generic-y += compat.h
 generic-y += current.h
 generic-y += device.h
+generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += exec.h
 generic-y += extable.h
index e187c78..749dfe8 100644 (file)
@@ -4,19 +4,15 @@
 
 #define TIMER_IRQ              0
 #define UMN_IRQ                        1
-#define CONSOLE_IRQ            2
-#define CONSOLE_WRITE_IRQ      3
-#define UBD_IRQ                        4
-#define UM_ETH_IRQ             5
-#define SSL_IRQ                        6
-#define SSL_WRITE_IRQ          7
-#define ACCEPT_IRQ             8
-#define MCONSOLE_IRQ           9
-#define WINCH_IRQ              10
-#define SIGIO_WRITE_IRQ        11
-#define TELNETD_IRQ            12
-#define XTERM_IRQ              13
-#define RANDOM_IRQ             14
+#define UBD_IRQ                        2
+#define UM_ETH_IRQ             3
+#define ACCEPT_IRQ             4
+#define MCONSOLE_IRQ           5
+#define WINCH_IRQ              6
+#define SIGIO_WRITE_IRQ        7
+#define TELNETD_IRQ            8
+#define XTERM_IRQ              9
+#define RANDOM_IRQ             10
 
 #ifdef CONFIG_UML_NET_VECTOR
 
index 1395cbd..c7b4b49 100644 (file)
@@ -60,6 +60,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTORE_SIGMASK    7
 #define TIF_NOTIFY_RESUME      8
 #define TIF_SECCOMP            9       /* secure computing */
+#define TIF_SINGLESTEP         10      /* single stepping userspace */
 
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
@@ -68,5 +69,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_MEMDIE            (1 << TIF_MEMDIE)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
+#define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
 
 #endif
index c85e40c..58938d7 100644 (file)
@@ -43,7 +43,7 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
 {
        PT_REGS_IP(regs) = eip;
        PT_REGS_SP(regs) = esp;
-       current->ptrace &= ~PT_DTRACE;
+       clear_thread_flag(TIF_SINGLESTEP);
 #ifdef SUBARCH_EXECVE1
        SUBARCH_EXECVE1(regs->regs);
 #endif
index 8050468..80b90b1 100644 (file)
@@ -154,16 +154,17 @@ void fork_handler(void)
        userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long sp,
-               unsigned long arg, struct task_struct * p, unsigned long tls)
+int copy_thread(struct task_struct * p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long sp = args->stack;
+       unsigned long tls = args->tls;
        void (*handler)(void);
-       int kthread = current->flags & (PF_KTHREAD | PF_IO_WORKER);
        int ret = 0;
 
        p->thread = (struct thread_struct) INIT_THREAD;
 
-       if (!kthread) {
+       if (!args->fn) {
                memcpy(&p->thread.regs.regs, current_pt_regs(),
                       sizeof(p->thread.regs.regs));
                PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
@@ -175,14 +176,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
                arch_copy_thread(&current->thread.arch, &p->thread.arch);
        } else {
                get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
-               p->thread.request.u.thread.proc = (int (*)(void *))sp;
-               p->thread.request.u.thread.arg = (void *)arg;
+               p->thread.request.u.thread.proc = args->fn;
+               p->thread.request.u.thread.arg = args->fn_arg;
                handler = new_thread_handler;
        }
 
        new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
 
-       if (!kthread) {
+       if (!args->fn) {
                clear_flushed_tls(p);
 
                /*
@@ -335,7 +336,7 @@ int singlestepping(void * t)
 {
        struct task_struct *task = t ? t : current;
 
-       if (!(task->ptrace & PT_DTRACE))
+       if (!test_thread_flag(TIF_SINGLESTEP))
                return 0;
 
        if (task->thread.singlestep_syscall)
index bfaf6ab..5154b27 100644 (file)
@@ -11,7 +11,7 @@
 
 void user_enable_single_step(struct task_struct *child)
 {
-       child->ptrace |= PT_DTRACE;
+       set_tsk_thread_flag(child, TIF_SINGLESTEP);
        child->thread.singlestep_syscall = 0;
 
 #ifdef SUBARCH_SET_SINGLESTEPPING
@@ -21,7 +21,7 @@ void user_enable_single_step(struct task_struct *child)
 
 void user_disable_single_step(struct task_struct *child)
 {
-       child->ptrace &= ~PT_DTRACE;
+       clear_tsk_thread_flag(child, TIF_SINGLESTEP);
        child->thread.singlestep_syscall = 0;
 
 #ifdef SUBARCH_SET_SINGLESTEPPING
@@ -120,7 +120,7 @@ static void send_sigtrap(struct uml_pt_regs *regs, int error_code)
 }
 
 /*
- * XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and
+ * XXX Check TIF_SINGLESTEP for singlestepping check and
  * PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check
  */
 int syscall_trace_enter(struct pt_regs *regs)
@@ -144,7 +144,7 @@ void syscall_trace_leave(struct pt_regs *regs)
        audit_syscall_exit(regs);
 
        /* Fake a debug trap */
-       if (ptraced & PT_DTRACE)
+       if (test_thread_flag(TIF_SINGLESTEP))
                send_sigtrap(&regs->regs, 0);
 
        if (!test_thread_flag(TIF_SYSCALL_TRACE))
index 88cd9b5..ae4658f 100644 (file)
@@ -53,7 +53,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
        unsigned long sp;
        int err;
 
-       if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
+       if (test_thread_flag(TIF_SINGLESTEP) && (current->ptrace & PT_PTRACED))
                singlestep = 1;
 
        /* Did we come from a system call? */
@@ -128,7 +128,7 @@ void do_signal(struct pt_regs *regs)
         * on the host.  The tracing thread will check this flag and
         * PTRACE_SYSCALL if necessary.
         */
-       if (current->ptrace & PT_DTRACE)
+       if (test_thread_flag(TIF_SINGLESTEP))
                current->thread.singlestep_syscall =
                        is_syscall(PT_REGS_IP(&current->thread.regs));
 
index cf531fb..9783ebc 100644 (file)
@@ -41,11 +41,11 @@ config FORCE_DYNAMIC_FTRACE
        depends on FUNCTION_TRACER
        select DYNAMIC_FTRACE
        help
-        We keep the static function tracing (!DYNAMIC_FTRACE) around
-        in order to test the non static function tracing in the
-        generic code, as other architectures still use it. But we
-        only need to keep it around for x86_64. No need to keep it
-        for x86_32. For x86_32, force DYNAMIC_FTRACE. 
+         We keep the static function tracing (!DYNAMIC_FTRACE) around
+         in order to test the non static function tracing in the
+         generic code, as other architectures still use it. But we
+         only need to keep it around for x86_64. No need to keep it
+         for x86_32. For x86_32, force DYNAMIC_FTRACE.
 #
 # Arch settings
 #
@@ -258,6 +258,7 @@ config X86
        select HAVE_PREEMPT_DYNAMIC_CALL
        select HAVE_RSEQ
        select HAVE_SYSCALL_TRACEPOINTS
+       select HAVE_UACCESS_VALIDATION          if HAVE_OBJTOOL
        select HAVE_UNSTABLE_SCHED_CLOCK
        select HAVE_USER_RETURN_NOTIFIER
        select HAVE_GENERIC_VDSO
@@ -393,9 +394,9 @@ config CC_HAS_SANE_STACKPROTECTOR
        default $(success,$(srctree)/scripts/gcc-x86_64-has-stack-protector.sh $(CC)) if 64BIT
        default $(success,$(srctree)/scripts/gcc-x86_32-has-stack-protector.sh $(CC))
        help
-          We have to make sure stack protector is unconditionally disabled if
-          the compiler produces broken code or if it does not let us control
-          the segment on 32-bit kernels.
+         We have to make sure stack protector is unconditionally disabled if
+         the compiler produces broken code or if it does not let us control
+         the segment on 32-bit kernels.
 
 menu "Processor type and features"
 
@@ -531,7 +532,7 @@ config X86_EXTENDED_PLATFORM
 
          If you have one of these systems, or if you want to build a
          generic distribution kernel, say Y here - otherwise say N.
-endif
+endif # X86_32
 
 if X86_64
 config X86_EXTENDED_PLATFORM
@@ -550,7 +551,7 @@ config X86_EXTENDED_PLATFORM
 
          If you have one of these systems, or if you want to build a
          generic distribution kernel, say Y here - otherwise say N.
-endif
+endif # X86_64
 # This is an alphabetically sorted list of 64 bit extended platforms
 # Please maintain the alphabetic order if and when there are additions
 config X86_NUMACHIP
@@ -598,9 +599,9 @@ config X86_GOLDFISH
        bool "Goldfish (Virtual Platform)"
        depends on X86_EXTENDED_PLATFORM
        help
-        Enable support for the Goldfish virtual platform used primarily
-        for Android development. Unless you are building for the Android
-        Goldfish emulator say N here.
+         Enable support for the Goldfish virtual platform used primarily
+         for Android development. Unless you are building for the Android
+         Goldfish emulator say N here.
 
 config X86_INTEL_CE
        bool "CE4100 TV platform"
@@ -899,7 +900,7 @@ config INTEL_TDX_GUEST
          memory contents and CPU state. TDX guests are protected from
          some attacks from the VMM.
 
-endif #HYPERVISOR_GUEST
+endif # HYPERVISOR_GUEST
 
 source "arch/x86/Kconfig.cpu"
 
@@ -1166,16 +1167,16 @@ config X86_MCE_INTEL
        prompt "Intel MCE features"
        depends on X86_MCE && X86_LOCAL_APIC
        help
-          Additional support for intel specific MCE features such as
-          the thermal monitor.
+         Additional support for intel specific MCE features such as
+         the thermal monitor.
 
 config X86_MCE_AMD
        def_bool y
        prompt "AMD MCE features"
        depends on X86_MCE && X86_LOCAL_APIC && AMD_NB
        help
-          Additional support for AMD specific MCE features such as
-          the DRAM Error Threshold.
+         Additional support for AMD specific MCE features such as
+         the DRAM Error Threshold.
 
 config X86_ANCIENT_MCE
        bool "Support for old Pentium 5 / WinChip machine checks"
@@ -1253,18 +1254,18 @@ config X86_VSYSCALL_EMULATION
        default y
        depends on X86_64
        help
-        This enables emulation of the legacy vsyscall page.  Disabling
-        it is roughly equivalent to booting with vsyscall=none, except
-        that it will also disable the helpful warning if a program
-        tries to use a vsyscall.  With this option set to N, offending
-        programs will just segfault, citing addresses of the form
-        0xffffffffff600?00.
+         This enables emulation of the legacy vsyscall page.  Disabling
+         it is roughly equivalent to booting with vsyscall=none, except
+         that it will also disable the helpful warning if a program
+         tries to use a vsyscall.  With this option set to N, offending
+         programs will just segfault, citing addresses of the form
+         0xffffffffff600?00.
 
-        This option is required by many programs built before 2013, and
-        care should be used even with newer programs if set to N.
+         This option is required by many programs built before 2013, and
+         care should be used even with newer programs if set to N.
 
-        Disabling this option saves about 7K of kernel size and
-        possibly 4K of additional runtime pagetable memory.
+         Disabling this option saves about 7K of kernel size and
+         possibly 4K of additional runtime pagetable memory.
 
 config X86_IOPL_IOPERM
        bool "IOPERM and IOPL Emulation"
@@ -1357,17 +1358,16 @@ config MICROCODE_AMD
          If you select this option, microcode patch loading support for AMD
          processors will be enabled.
 
-config MICROCODE_OLD_INTERFACE
-       bool "Ancient loading interface (DEPRECATED)"
+config MICROCODE_LATE_LOADING
+       bool "Late microcode loading (DANGEROUS)"
        default n
        depends on MICROCODE
        help
-         DO NOT USE THIS! This is the ancient /dev/cpu/microcode interface
-         which was used by userspace tools like iucode_tool and microcode.ctl.
-         It is inadequate because it runs too late to be able to properly
-         load microcode on a machine and it needs special tools. Instead, you
-         should've switched to the early loading method with the initrd or
-         builtin microcode by now: Documentation/x86/microcode.rst
+         Loading microcode late, when the system is up and executing
+         instructions, is a tricky business and should be avoided if possible.
+         Just synchronizing all cores and SMT threads is a fragile dance which
+         does not guarantee that cores will not soft-lock after the loading.
+         Therefore, use this at your own risk. Late loading taints the kernel too.
 
 config X86_MSR
        tristate "/dev/cpu/*/msr - Model-specific register support"
@@ -2001,15 +2001,15 @@ config EFI_MIXED
        bool "EFI mixed-mode support"
        depends on EFI_STUB && X86_64
        help
-          Enabling this feature allows a 64-bit kernel to be booted
-          on a 32-bit firmware, provided that your CPU supports 64-bit
-          mode.
+         Enabling this feature allows a 64-bit kernel to be booted
+         on a 32-bit firmware, provided that your CPU supports 64-bit
+         mode.
 
-          Note that it is not possible to boot a mixed-mode enabled
-          kernel via the EFI boot stub - a bootloader that supports
-          the EFI handover protocol must be used.
+         Note that it is not possible to boot a mixed-mode enabled
+         kernel via the EFI boot stub - a bootloader that supports
+         the EFI handover protocol must be used.
 
-          If unsure, say N.
+         If unsure, say N.
 
 source "kernel/Kconfig.hz"
 
@@ -2234,16 +2234,16 @@ config RANDOMIZE_MEMORY
        select DYNAMIC_MEMORY_LAYOUT
        default RANDOMIZE_BASE
        help
-          Randomizes the base virtual address of kernel memory sections
-          (physical memory mapping, vmalloc & vmemmap). This security feature
-          makes exploits relying on predictable memory locations less reliable.
+         Randomizes the base virtual address of kernel memory sections
+         (physical memory mapping, vmalloc & vmemmap). This security feature
+         makes exploits relying on predictable memory locations less reliable.
 
-          The order of allocations remains unchanged. Entropy is generated in
-          the same way as RANDOMIZE_BASE. Current implementation in the optimal
-          configuration have in average 30,000 different possible virtual
-          addresses for each memory section.
+         The order of allocations remains unchanged. Entropy is generated in
+         the same way as RANDOMIZE_BASE. Current implementation in the optimal
+         configuration have in average 30,000 different possible virtual
+         addresses for each memory section.
 
-          If unsure, say Y.
+         If unsure, say Y.
 
 config RANDOMIZE_MEMORY_PHYSICAL_PADDING
        hex "Physical memory mapping padding" if EXPERT
@@ -2253,12 +2253,12 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
        range 0x1 0x40 if MEMORY_HOTPLUG
        range 0x0 0x40
        help
-          Define the padding in terabytes added to the existing physical
-          memory size during kernel memory randomization. It is useful
-          for memory hotplug support but reduces the entropy available for
-          address randomization.
+         Define the padding in terabytes added to the existing physical
+         memory size during kernel memory randomization. It is useful
+         for memory hotplug support but reduces the entropy available for
+         address randomization.
 
-          If unsure, leave at the default value.
+         If unsure, leave at the default value.
 
 config HOTPLUG_CPU
        def_bool y
@@ -2605,7 +2605,6 @@ source "drivers/idle/Kconfig"
 
 endmenu
 
-
 menu "Bus options (PCI etc.)"
 
 choice
@@ -2829,7 +2828,6 @@ config AMD_NB
 
 endmenu
 
-
 menu "Binary Emulations"
 
 config IA32_EMULATION
@@ -2867,18 +2865,12 @@ config COMPAT
        def_bool y
        depends on IA32_EMULATION || X86_X32_ABI
 
-if COMPAT
 config COMPAT_FOR_U64_ALIGNMENT
        def_bool y
-
-config SYSVIPC_COMPAT
-       def_bool y
-       depends on SYSVIPC
-endif
+       depends on COMPAT
 
 endmenu
 
-
 config HAVE_ATOMIC_IOMAP
        def_bool y
        depends on X86_32
index d872a75..340399f 100644 (file)
@@ -73,20 +73,19 @@ config DEBUG_TLBFLUSH
        bool "Set upper limit of TLB entries to flush one-by-one"
        depends on DEBUG_KERNEL
        help
+         X86-only for now.
 
-       X86-only for now.
+         This option allows the user to tune the amount of TLB entries the
+         kernel flushes one-by-one instead of doing a full TLB flush. In
+         certain situations, the former is cheaper. This is controlled by the
+         tlb_flushall_shift knob under /sys/kernel/debug/x86. If you set it
+         to -1, the code flushes the whole TLB unconditionally. Otherwise,
+         for positive values of it, the kernel will use single TLB entry
+         invalidating instructions according to the following formula:
 
-       This option allows the user to tune the amount of TLB entries the
-       kernel flushes one-by-one instead of doing a full TLB flush. In
-       certain situations, the former is cheaper. This is controlled by the
-       tlb_flushall_shift knob under /sys/kernel/debug/x86. If you set it
-       to -1, the code flushes the whole TLB unconditionally. Otherwise,
-       for positive values of it, the kernel will use single TLB entry
-       invalidating instructions according to the following formula:
+         flush_entries <= active_tlb_entries / 2^tlb_flushall_shift
 
-       flush_entries <= active_tlb_entries / 2^tlb_flushall_shift
-
-       If in doubt, say "N".
+         If in doubt, say "N".
 
 config IOMMU_DEBUG
        bool "Enable IOMMU debugging"
@@ -119,10 +118,10 @@ config X86_DECODER_SELFTEST
        depends on DEBUG_KERNEL && INSTRUCTION_DECODER
        depends on !COMPILE_TEST
        help
-        Perform x86 instruction decoder selftests at build time.
-        This option is useful for checking the sanity of x86 instruction
-        decoder code.
-        If unsure, say "N".
+         Perform x86 instruction decoder selftests at build time.
+         This option is useful for checking the sanity of x86 instruction
+         decoder code.
+         If unsure, say "N".
 
 choice
        prompt "IO delay type"
index 0352e45..f912d77 100644 (file)
@@ -163,7 +163,7 @@ extra_header_fields:
        .long   0x200                           # SizeOfHeaders
        .long   0                               # CheckSum
        .word   IMAGE_SUBSYSTEM_EFI_APPLICATION # Subsystem (EFI application)
-#ifdef CONFIG_DXE_MEM_ATTRIBUTES
+#ifdef CONFIG_EFI_DXE_MEM_ATTRIBUTES
        .word   IMAGE_DLL_CHARACTERISTICS_NX_COMPAT     # DllCharacteristics
 #else
        .word   0                               # DllCharacteristics
index 09c5696..dabdf3d 100644 (file)
@@ -6,24 +6,24 @@ config PERF_EVENTS_INTEL_UNCORE
        depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
        default y
        help
-       Include support for Intel uncore performance events. These are
-       available on NehalemEX and more modern processors.
+         Include support for Intel uncore performance events. These are
+         available on NehalemEX and more modern processors.
 
 config PERF_EVENTS_INTEL_RAPL
        tristate "Intel/AMD rapl performance events"
        depends on PERF_EVENTS && (CPU_SUP_INTEL || CPU_SUP_AMD) && PCI
        default y
        help
-       Include support for Intel and AMD rapl performance events for power
-       monitoring on modern processors.
+         Include support for Intel and AMD rapl performance events for power
+         monitoring on modern processors.
 
 config PERF_EVENTS_INTEL_CSTATE
        tristate "Intel cstate performance events"
        depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
        default y
        help
-       Include support for Intel cstate performance events for power
-       monitoring on modern processors.
+         Include support for Intel cstate performance events for power
+         monitoring on modern processors.
 
 config PERF_EVENTS_AMD_POWER
        depends on PERF_EVENTS && CPU_SUP_AMD
index 955ae91..45024ab 100644 (file)
@@ -276,7 +276,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
        INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
        INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
        INTEL_EVENT_CONSTRAINT(0x32, 0xf),      /* SW_PREFETCH_ACCESS.* */
-       INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
+       INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
        INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_TOTAL */
        INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff),  /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
index 20fd0ac..b1221da 100644 (file)
 #define compat_mode_t  compat_mode_t
 typedef u16            compat_mode_t;
 
+#define __compat_uid_t __compat_uid_t
+typedef u16            __compat_uid_t;
+typedef u16            __compat_gid_t;
+
+#define compat_dev_t   compat_dev_t
+typedef u16            compat_dev_t;
+
+#define compat_ipc_pid_t compat_ipc_pid_t
+typedef u16             compat_ipc_pid_t;
+
+#define compat_statfs  compat_statfs
+
 #include <asm-generic/compat.h>
 
-#define COMPAT_USER_HZ         100
 #define COMPAT_UTS_MACHINE     "i686\0\0"
 
-typedef u16            __compat_uid_t;
-typedef u16            __compat_gid_t;
-typedef u16            compat_dev_t;
 typedef u16            compat_nlink_t;
-typedef u16            compat_ipc_pid_t;
-typedef __kernel_fsid_t        compat_fsid_t;
 
 struct compat_stat {
        u32             st_dev;
@@ -48,29 +54,11 @@ struct compat_stat {
        u32             __unused5;
 };
 
-struct compat_flock {
-       short           l_type;
-       short           l_whence;
-       compat_off_t    l_start;
-       compat_off_t    l_len;
-       compat_pid_t    l_pid;
-};
-
-#define F_GETLK64      12      /*  using 'struct flock64' */
-#define F_SETLK64      13
-#define F_SETLKW64     14
-
 /*
- * IA32 uses 4 byte alignment for 64 bit quantities,
- * so we need to pack this structure.
+ * IA32 uses 4 byte alignment for 64 bit quantities, so we need to pack the
+ * compat flock64 structure.
  */
-struct compat_flock64 {
-       short           l_type;
-       short           l_whence;
-       compat_loff_t   l_start;
-       compat_loff_t   l_len;
-       compat_pid_t    l_pid;
-} __attribute__((packed));
+#define __ARCH_NEED_COMPAT_FLOCK64_PACKED
 
 struct compat_statfs {
        int             f_type;
@@ -87,68 +75,6 @@ struct compat_statfs {
        int             f_spare[4];
 };
 
-#define COMPAT_RLIM_INFINITY           0xffffffff
-
-#define COMPAT_OFF_T_MAX       0x7fffffff
-
-struct compat_ipc64_perm {
-       compat_key_t key;
-       __compat_uid32_t uid;
-       __compat_gid32_t gid;
-       __compat_uid32_t cuid;
-       __compat_gid32_t cgid;
-       unsigned short mode;
-       unsigned short __pad1;
-       unsigned short seq;
-       unsigned short __pad2;
-       compat_ulong_t unused1;
-       compat_ulong_t unused2;
-};
-
-struct compat_semid64_ds {
-       struct compat_ipc64_perm sem_perm;
-       compat_ulong_t sem_otime;
-       compat_ulong_t sem_otime_high;
-       compat_ulong_t sem_ctime;
-       compat_ulong_t sem_ctime_high;
-       compat_ulong_t sem_nsems;
-       compat_ulong_t __unused3;
-       compat_ulong_t __unused4;
-};
-
-struct compat_msqid64_ds {
-       struct compat_ipc64_perm msg_perm;
-       compat_ulong_t msg_stime;
-       compat_ulong_t msg_stime_high;
-       compat_ulong_t msg_rtime;
-       compat_ulong_t msg_rtime_high;
-       compat_ulong_t msg_ctime;
-       compat_ulong_t msg_ctime_high;
-       compat_ulong_t msg_cbytes;
-       compat_ulong_t msg_qnum;
-       compat_ulong_t msg_qbytes;
-       compat_pid_t   msg_lspid;
-       compat_pid_t   msg_lrpid;
-       compat_ulong_t __unused4;
-       compat_ulong_t __unused5;
-};
-
-struct compat_shmid64_ds {
-       struct compat_ipc64_perm shm_perm;
-       compat_size_t  shm_segsz;
-       compat_ulong_t shm_atime;
-       compat_ulong_t shm_atime_high;
-       compat_ulong_t shm_dtime;
-       compat_ulong_t shm_dtime_high;
-       compat_ulong_t shm_ctime;
-       compat_ulong_t shm_ctime_high;
-       compat_pid_t   shm_cpid;
-       compat_pid_t   shm_lpid;
-       compat_ulong_t shm_nattch;
-       compat_ulong_t __unused4;
-       compat_ulong_t __unused5;
-};
-
 #ifdef CONFIG_X86_X32_ABI
 #define COMPAT_USE_64BIT_TIME \
        (!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
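
The removed ia32 definitions move to the generic compat header; the one x86 peculiarity left behind is that the i386 ABI aligns 64-bit members to 4 bytes, which is what __ARCH_NEED_COMPAT_FLOCK64_PACKED communicates. A standalone, user-space illustration of the layout difference (struct names are hypothetical, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    struct flock64_natural {        /* 64-bit natural alignment         */
            short   l_type;
            short   l_whence;
            int64_t l_start;        /* padded up to offset 8            */
            int64_t l_len;
            int32_t l_pid;
    };

    struct flock64_ia32 {           /* ia32 layout: l_start at offset 4 */
            short   l_type;
            short   l_whence;
            int64_t l_start;
            int64_t l_len;
            int32_t l_pid;
    } __attribute__((packed));

    int main(void)
    {
            /* Typically prints 32 vs 24 bytes on x86-64. */
            printf("natural: %zu bytes, packed ia32: %zu bytes\n",
                   sizeof(struct flock64_natural), sizeof(struct flock64_ia32));
            return 0;
    }
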
index 66d3e3b..ea34cc3 100644 (file)
@@ -54,7 +54,7 @@ extern const char * const x86_power_flags[32];
 extern const char * const x86_bug_flags[NBUGINTS*32];
 
 #define test_cpu_cap(c, bit)                                           \
-        test_bit(bit, (unsigned long *)((c)->x86_capability))
+        arch_test_bit(bit, (unsigned long *)((c)->x86_capability))
 
 /*
  * There are 32 bits/features in each mask word.  The high bits
index bed74a0..71943dc 100644 (file)
@@ -270,6 +270,8 @@ static inline u32 efi64_convert_status(efi_status_t status)
        return (u32)(status | (u64)status >> 32);
 }
 
+#define __efi64_split(val)             (val) & U32_MAX, (u64)(val) >> 32
+
 #define __efi64_argmap_free_pages(addr, size)                          \
        ((addr), 0, (size))
 
@@ -317,6 +319,13 @@ static inline u32 efi64_convert_status(efi_status_t status)
 #define __efi64_argmap_hash_log_extend_event(prot, fl, addr, size, ev) \
        ((prot), (fl), 0ULL, (u64)(addr), 0ULL, (u64)(size), 0ULL, ev)
 
+/* DXE services */
+#define __efi64_argmap_get_memory_space_descriptor(phys, desc) \
+       (__efi64_split(phys), (desc))
+
+#define __efi64_argmap_set_memory_space_descriptor(phys, size, flags) \
+       (__efi64_split(phys), __efi64_split(size), __efi64_split(flags))
+
 /*
  * The macros below handle the plumbing for the argument mapping. To add a
  * mapping for a specific EFI method, simply define a macro
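
__efi64_split() turns one 64-bit argument into the (low 32, high 32) pair that the 32-bit EFI DXE/runtime services expect when thunked from a 64-bit kernel, and the two new DXE argmap macros are just that split applied per parameter. A standalone illustration of the arithmetic only; the thunking plumbing itself is out of scope here:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t phys = 0x123456789abcULL;
            uint32_t lo = (uint32_t)(phys & 0xffffffffu);  /* (val) & U32_MAX  */
            uint32_t hi = (uint32_t)(phys >> 32);          /* (u64)(val) >> 32 */

            printf("lo=0x%08x hi=0x%08x\n", lo, hi);       /* lo=0x56789abc hi=0x00001234 */
            return 0;
    }
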
index 155c991..eeed395 100644 (file)
@@ -42,9 +42,13 @@ extern int ex_get_fixup_type(unsigned long ip);
 extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
 
 #ifdef CONFIG_X86_MCE
-extern void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr);
+extern void __noreturn ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr);
 #else
-static inline void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr) { }
+static inline void __noreturn ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr)
+{
+       for (;;)
+               cpu_relax();
+}
 #endif
 
 #if defined(CONFIG_BPF_JIT) && defined(CONFIG_X86_64)
index 99a8820..b2486b2 100644 (file)
@@ -11,7 +11,7 @@
 
 extern void save_fpregs_to_fpstate(struct fpu *fpu);
 extern void fpu__drop(struct fpu *fpu);
-extern int  fpu_clone(struct task_struct *dst, unsigned long clone_flags);
+extern int  fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal);
 extern void fpu_flush_thread(void);
 
 /*
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
deleted file mode 100644 (file)
index 7c5cc66..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * livepatch.h - x86-specific Kernel Live Patching Core
- *
- * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
- * Copyright (C) 2014 SUSE
- */
-
-#ifndef _ASM_X86_LIVEPATCH_H
-#define _ASM_X86_LIVEPATCH_H
-
-#include <asm/setup.h>
-#include <linux/ftrace.h>
-
-static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
-{
-       ftrace_instruction_pointer_set(fregs, ip);
-}
-
-#endif /* _ASM_X86_LIVEPATCH_H */
index 91d0f93..356308c 100644 (file)
@@ -559,7 +559,7 @@ static __always_inline void native_swapgs(void)
 #endif
 }
 
-static inline unsigned long current_top_of_stack(void)
+static __always_inline unsigned long current_top_of_stack(void)
 {
        /*
         *  We can't read directly from tss.sp0: sp0 on x86_32 is special in
@@ -569,7 +569,7 @@ static inline unsigned long current_top_of_stack(void)
        return this_cpu_read_stable(cpu_current_top_of_stack);
 }
 
-static inline bool on_thread_stack(void)
+static __always_inline bool on_thread_stack(void)
 {
        return (unsigned long)(current_top_of_stack() -
                               current_stack_pointer) < THREAD_SIZE;
index b5f0d2f..c08eb0f 100644 (file)
@@ -78,13 +78,13 @@ static inline void update_task_stack(struct task_struct *task)
 }
 
 static inline void kthread_frame_init(struct inactive_task_frame *frame,
-                                     unsigned long fun, unsigned long arg)
+                                     int (*fun)(void *), void *arg)
 {
-       frame->bx = fun;
+       frame->bx = (unsigned long)fun;
 #ifdef CONFIG_X86_32
-       frame->di = arg;
+       frame->di = (unsigned long)arg;
 #else
-       frame->r12 = arg;
+       frame->r12 = (unsigned long)arg;
 #endif
 }
 
index 80e9d52..761173c 100644 (file)
@@ -22,6 +22,7 @@
 #  include <asm/unistd_32_ia32.h>
 #  define __ARCH_WANT_SYS_TIME
 #  define __ARCH_WANT_SYS_UTIME
+#  define __ARCH_WANT_COMPAT_STAT
 #  define __ARCH_WANT_COMPAT_SYS_PREADV64
 #  define __ARCH_WANT_COMPAT_SYS_PWRITEV64
 #  define __ARCH_WANT_COMPAT_SYS_PREADV64V2
index 1fc67df..fa9ec20 100644 (file)
@@ -347,9 +347,6 @@ unsigned long arbitrary_virt_to_mfn(void *vaddr);
 void make_lowmem_page_readonly(void *vaddr);
 void make_lowmem_page_readwrite(void *vaddr);
 
-#define xen_remap(cookie, size) ioremap((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
 static inline bool xen_arch_need_swiotlb(struct device *dev,
                                         phys_addr_t phys,
                                         dma_addr_t dev_addr)
index 2e91427..c296cb1 100644 (file)
@@ -2222,6 +2222,7 @@ void cpu_init_secondary(void)
 }
 #endif
 
+#ifdef CONFIG_MICROCODE_LATE_LOADING
 /*
  * The microcode loader calls this upon late microcode load to recheck features,
  * only when microcode has been updated. Caller holds microcode_mutex and CPU
@@ -2251,6 +2252,7 @@ void microcode_check(void)
        pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
        pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
 }
+#endif
 
 /*
  * Invoked from core CPU hotplug code after hotplug operations
index 239ff5f..ad57e0e 100644 (file)
@@ -373,101 +373,10 @@ static int apply_microcode_on_target(int cpu)
        return ret;
 }
 
-#ifdef CONFIG_MICROCODE_OLD_INTERFACE
-static int do_microcode_update(const void __user *buf, size_t size)
-{
-       int error = 0;
-       int cpu;
-
-       for_each_online_cpu(cpu) {
-               struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-               enum ucode_state ustate;
-
-               if (!uci->valid)
-                       continue;
-
-               ustate = microcode_ops->request_microcode_user(cpu, buf, size);
-               if (ustate == UCODE_ERROR) {
-                       error = -1;
-                       break;
-               } else if (ustate == UCODE_NEW) {
-                       apply_microcode_on_target(cpu);
-               }
-       }
-
-       return error;
-}
-
-static int microcode_open(struct inode *inode, struct file *file)
-{
-       return capable(CAP_SYS_RAWIO) ? stream_open(inode, file) : -EPERM;
-}
-
-static ssize_t microcode_write(struct file *file, const char __user *buf,
-                              size_t len, loff_t *ppos)
-{
-       ssize_t ret = -EINVAL;
-       unsigned long nr_pages = totalram_pages();
-
-       if ((len >> PAGE_SHIFT) > nr_pages) {
-               pr_err("too much data (max %ld pages)\n", nr_pages);
-               return ret;
-       }
-
-       cpus_read_lock();
-       mutex_lock(&microcode_mutex);
-
-       if (do_microcode_update(buf, len) == 0)
-               ret = (ssize_t)len;
-
-       if (ret > 0)
-               perf_check_microcode();
-
-       mutex_unlock(&microcode_mutex);
-       cpus_read_unlock();
-
-       return ret;
-}
-
-static const struct file_operations microcode_fops = {
-       .owner                  = THIS_MODULE,
-       .write                  = microcode_write,
-       .open                   = microcode_open,
-       .llseek         = no_llseek,
-};
-
-static struct miscdevice microcode_dev = {
-       .minor                  = MICROCODE_MINOR,
-       .name                   = "microcode",
-       .nodename               = "cpu/microcode",
-       .fops                   = &microcode_fops,
-};
-
-static int __init microcode_dev_init(void)
-{
-       int error;
-
-       error = misc_register(&microcode_dev);
-       if (error) {
-               pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR);
-               return error;
-       }
-
-       return 0;
-}
-
-static void __exit microcode_dev_exit(void)
-{
-       misc_deregister(&microcode_dev);
-}
-#else
-#define microcode_dev_init()   0
-#define microcode_dev_exit()   do { } while (0)
-#endif
-
 /* fake device for request_firmware */
 static struct platform_device  *microcode_pdev;
 
+#ifdef CONFIG_MICROCODE_LATE_LOADING
 /*
  * Late loading dance. Why the heavy-handed stomp_machine effort?
  *
@@ -584,6 +493,9 @@ static int microcode_reload_late(void)
 {
        int ret;
 
+       pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
+       pr_err("You should switch to early loading, if possible.\n");
+
        atomic_set(&late_cpus_in,  0);
        atomic_set(&late_cpus_out, 0);
 
@@ -632,9 +544,14 @@ put:
        if (ret == 0)
                ret = size;
 
+       add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+
        return ret;
 }
 
+static DEVICE_ATTR_WO(reload);
+#endif
+
 static ssize_t version_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
 {
@@ -651,7 +568,6 @@ static ssize_t pf_show(struct device *dev,
        return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
 }
 
-static DEVICE_ATTR_WO(reload);
 static DEVICE_ATTR(version, 0444, version_show, NULL);
 static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL);
 
@@ -804,7 +720,9 @@ static int mc_cpu_down_prep(unsigned int cpu)
 }
 
 static struct attribute *cpu_root_microcode_attrs[] = {
+#ifdef CONFIG_MICROCODE_LATE_LOADING
        &dev_attr_reload.attr,
+#endif
        NULL
 };
 
@@ -838,10 +756,7 @@ static int __init microcode_init(void)
 
        cpus_read_lock();
        mutex_lock(&microcode_mutex);
-
        error = subsys_interface_register(&mc_cpu_interface);
-       if (!error)
-               perf_check_microcode();
        mutex_unlock(&microcode_mutex);
        cpus_read_unlock();
 
@@ -856,10 +771,6 @@ static int __init microcode_init(void)
                goto out_driver;
        }
 
-       error = microcode_dev_init();
-       if (error)
-               goto out_ucode_group;
-
        register_syscore_ops(&mc_syscore_ops);
        cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
                                  mc_cpu_starting, NULL);
@@ -870,10 +781,6 @@ static int __init microcode_init(void)
 
        return 0;
 
- out_ucode_group:
-       sysfs_remove_group(&cpu_subsys.dev_root->kobj,
-                          &cpu_root_microcode_group);
-
  out_driver:
        cpus_read_lock();
        mutex_lock(&microcode_mutex);
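
With the old /dev/cpu/microcode character device gone, the only remaining trigger for a late update is the sysfs 'reload' attribute, now compiled in only under CONFIG_MICROCODE_LATE_LOADING. A user-space sketch of poking that attribute; the sysfs path is an assumption based on the "microcode" group registered above, and writing to it requires root and taints the kernel:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Path assumed from the cpu-subsystem "microcode" attribute group. */
            int fd = open("/sys/devices/system/cpu/microcode/reload", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "1", 1) != 1)
                    perror("write");
            close(fd);
            return 0;
    }
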
index 3c24e61..19876eb 100644 (file)
@@ -152,7 +152,7 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
 
        page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
 
-       ret = sgx_encl_get_backing(encl, page_index, &b);
+       ret = sgx_encl_lookup_backing(encl, page_index, &b);
        if (ret)
                return ret;
 
@@ -718,7 +718,7 @@ static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
  *   0 on success,
  *   -errno otherwise.
  */
-int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
+static int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
                         struct sgx_backing *backing)
 {
        pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
@@ -743,6 +743,107 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
        return 0;
 }
 
+/*
+ * When called from ksgxd, returns the mem_cgroup of a struct mm stored
+ * in the enclave's mm_list. When not called from ksgxd, just returns
+ * the mem_cgroup of the current task.
+ */
+static struct mem_cgroup *sgx_encl_get_mem_cgroup(struct sgx_encl *encl)
+{
+       struct mem_cgroup *memcg = NULL;
+       struct sgx_encl_mm *encl_mm;
+       int idx;
+
+       /*
+        * If called from normal task context, return the mem_cgroup
+        * of the current task's mm. The remainder of the handling is for
+        * ksgxd.
+        */
+       if (!current_is_ksgxd())
+               return get_mem_cgroup_from_mm(current->mm);
+
+       /*
+        * Search the enclave's mm_list to find an mm associated with
+        * this enclave to charge the allocation to.
+        */
+       idx = srcu_read_lock(&encl->srcu);
+
+       list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
+               if (!mmget_not_zero(encl_mm->mm))
+                       continue;
+
+               memcg = get_mem_cgroup_from_mm(encl_mm->mm);
+
+               mmput_async(encl_mm->mm);
+
+               break;
+       }
+
+       srcu_read_unlock(&encl->srcu, idx);
+
+       /*
+        * In the rare case that there isn't an mm associated with
+        * the enclave, set memcg to the current active mem_cgroup.
+        * This will be the root mem_cgroup if there is no active
+        * mem_cgroup.
+        */
+       if (!memcg)
+               return get_mem_cgroup_from_mm(NULL);
+
+       return memcg;
+}
+
+/**
+ * sgx_encl_alloc_backing() - allocate a new backing storage page
+ * @encl:      an enclave pointer
+ * @page_index:        enclave page index
+ * @backing:   data for accessing backing storage for the page
+ *
+ * When called from ksgxd, sets the active memcg from one of the
+ * mms in the enclave's mm_list prior to any backing page allocation,
+ * in order to ensure that shmem page allocations are charged to the
+ * enclave.
+ *
+ * Return:
+ *   0 on success,
+ *   -errno otherwise.
+ */
+int sgx_encl_alloc_backing(struct sgx_encl *encl, unsigned long page_index,
+                          struct sgx_backing *backing)
+{
+       struct mem_cgroup *encl_memcg = sgx_encl_get_mem_cgroup(encl);
+       struct mem_cgroup *memcg = set_active_memcg(encl_memcg);
+       int ret;
+
+       ret = sgx_encl_get_backing(encl, page_index, backing);
+
+       set_active_memcg(memcg);
+       mem_cgroup_put(encl_memcg);
+
+       return ret;
+}
+
+/**
+ * sgx_encl_lookup_backing() - retrieve an existing backing storage page
+ * @encl:      an enclave pointer
+ * @page_index:        enclave page index
+ * @backing:   data for accessing backing storage for the page
+ *
+ * Retrieve a backing page for loading data back into an EPC page with ELDU.
+ * It is the caller's responsibility to ensure that it is appropriate to use
+ * sgx_encl_lookup_backing() rather than sgx_encl_alloc_backing(). If lookup is
+ * not used correctly, this will cause an allocation which is not accounted for.
+ *
+ * Return:
+ *   0 on success,
+ *   -errno otherwise.
+ */
+int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index,
+                          struct sgx_backing *backing)
+{
+       return sgx_encl_get_backing(encl, page_index, backing);
+}
+
 /**
  * sgx_encl_put_backing() - Unpin the backing storage
  * @backing:   data for accessing backing storage for the page
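
The backing-store API is split so that paths which may allocate shmem pages (reclaim/write-back) charge them to the enclave's memcg via sgx_encl_alloc_backing(), while the ELDU reload path only looks up pages that must already exist. A reduced caller-side sketch of the intended split, with locking and the actual EPC copies elided; write_back_page/load_page_back are illustrative names, types come from the SGX headers above:

    /* struct sgx_encl / struct sgx_backing from arch/x86/kernel/cpu/sgx/encl.h */

    static int write_back_page(struct sgx_encl *encl, unsigned long page_index)
    {
            struct sgx_backing b;
            int ret;

            /* Reclaim path: may allocate shmem, so charge the enclave memcg. */
            ret = sgx_encl_alloc_backing(encl, page_index, &b);
            if (ret)
                    return ret;
            /* ... encrypt and copy the EPC page into b ... */
            sgx_encl_put_backing(&b);
            return 0;
    }

    static int load_page_back(struct sgx_encl *encl, unsigned long page_index)
    {
            struct sgx_backing b;
            int ret;

            /* ELDU path: the backing page must already exist, so only look it up. */
            ret = sgx_encl_lookup_backing(encl, page_index, &b);
            if (ret)
                    return ret;
            /* ... ELDU from b back into an EPC page ... */
            sgx_encl_put_backing(&b);
            return 0;
    }
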
index d44e737..332ef35 100644 (file)
@@ -103,10 +103,13 @@ static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
 int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
                     unsigned long end, unsigned long vm_flags);
 
+bool current_is_ksgxd(void);
 void sgx_encl_release(struct kref *ref);
 int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
-int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
-                        struct sgx_backing *backing);
+int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index,
+                           struct sgx_backing *backing);
+int sgx_encl_alloc_backing(struct sgx_encl *encl, unsigned long page_index,
+                          struct sgx_backing *backing);
 void sgx_encl_put_backing(struct sgx_backing *backing);
 int sgx_encl_test_and_clear_young(struct mm_struct *mm,
                                  struct sgx_encl_page *page);
index ab4ec54..a78652d 100644 (file)
@@ -313,7 +313,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
        sgx_encl_put_backing(backing);
 
        if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) {
-               ret = sgx_encl_get_backing(encl, PFN_DOWN(encl->size),
+               ret = sgx_encl_alloc_backing(encl, PFN_DOWN(encl->size),
                                           &secs_backing);
                if (ret)
                        goto out;
@@ -384,7 +384,7 @@ static void sgx_reclaim_pages(void)
                page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
 
                mutex_lock(&encl_page->encl->lock);
-               ret = sgx_encl_get_backing(encl_page->encl, page_index, &backing[i]);
+               ret = sgx_encl_alloc_backing(encl_page->encl, page_index, &backing[i]);
                if (ret) {
                        mutex_unlock(&encl_page->encl->lock);
                        goto skip;
@@ -475,6 +475,11 @@ static bool __init sgx_page_reclaimer_init(void)
        return true;
 }
 
+bool current_is_ksgxd(void)
+{
+       return current == ksgxd_tsk;
+}
+
 static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid)
 {
        struct sgx_numa_node *node = &sgx_numa_nodes[nid];
index 0fdc807..0531d6a 100644 (file)
@@ -556,7 +556,7 @@ static inline void fpu_inherit_perms(struct fpu *dst_fpu)
 }
 
 /* Clone current's FPU state on fork */
-int fpu_clone(struct task_struct *dst, unsigned long clone_flags)
+int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal)
 {
        struct fpu *src_fpu = &current->thread.fpu;
        struct fpu *dst_fpu = &dst->thread.fpu;
@@ -579,7 +579,7 @@ int fpu_clone(struct task_struct *dst, unsigned long clone_flags)
         * No FPU state inheritance for kernel threads and IO
         * worker threads.
         */
-       if (dst->flags & (PF_KTHREAD | PF_IO_WORKER)) {
+       if (minimal) {
                /* Clear out the minimal state */
                memcpy(&dst_fpu->fpstate->regs, &init_fpstate.regs,
                       init_fpstate_copy_size());
index 566bb8e..0611fd8 100644 (file)
@@ -376,9 +376,6 @@ void machine_kexec(struct kimage *image)
 #ifdef CONFIG_KEXEC_FILE
 void *arch_kexec_kernel_image_load(struct kimage *image)
 {
-       vfree(image->elf_headers);
-       image->elf_headers = NULL;
-
        if (!image->fops || !image->fops->load)
                return ERR_PTR(-ENOEXEC);
 
@@ -514,6 +511,15 @@ overflow:
               (int)ELF64_R_TYPE(rel[i].r_info), value);
        return -ENOEXEC;
 }
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+       vfree(image->elf_headers);
+       image->elf_headers = NULL;
+       image->elf_headers_sz = 0;
+
+       return kexec_image_post_load_cleanup_default(image);
+}
 #endif /* CONFIG_KEXEC_FILE */
 
 static int
index 58fb48d..9b2772b 100644 (file)
@@ -131,9 +131,11 @@ static int set_new_tls(struct task_struct *p, unsigned long tls)
                return do_set_thread_area_64(p, ARCH_SET_FS, tls);
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
-               struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long sp = args->stack;
+       unsigned long tls = args->tls;
        struct inactive_task_frame *frame;
        struct fork_frame *fork_frame;
        struct pt_regs *childregs;
@@ -171,13 +173,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
        frame->flags = X86_EFLAGS_FIXED;
 #endif
 
-       fpu_clone(p, clone_flags);
+       fpu_clone(p, clone_flags, args->fn);
 
        /* Kernel thread ? */
        if (unlikely(p->flags & PF_KTHREAD)) {
                p->thread.pkru = pkru_get_init_value();
                memset(childregs, 0, sizeof(struct pt_regs));
-               kthread_frame_init(frame, sp, arg);
+               kthread_frame_init(frame, args->fn, args->fn_arg);
                return 0;
        }
 
@@ -193,10 +195,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
        if (sp)
                childregs->sp = sp;
 
-       if (unlikely(p->flags & PF_IO_WORKER)) {
+       if (unlikely(args->fn)) {
                /*
-                * An IO thread is a user space thread, but it doesn't
-                * return to ret_after_fork().
+                * A user space thread, but it doesn't return to
+                * ret_after_fork().
                 *
                 * In order to indicate that to tools like gdb,
                 * we reset the stack and instruction pointers.
@@ -206,7 +208,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
                 */
                childregs->sp = 0;
                childregs->ip = 0;
-               kthread_frame_init(frame, sp, arg);
+               kthread_frame_init(frame, args->fn, args->fn_arg);
                return 0;
        }
 
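
copy_thread() now receives a single struct kernel_clone_args, and a kernel thread or io-worker is recognised by args->fn being non-NULL instead of by PF_KTHREAD/PF_IO_WORKER. The fields this hunk relies on, shown as an illustrative partial view rather than the full kernel definition:

    #include <linux/types.h>

    /* Partial, illustrative view only -- fields inferred from the call sites
     * above; the real definition lives in <linux/sched/task.h>. */
    struct kernel_clone_args_view {
            u64           flags;       /* clone flags                           */
            unsigned long stack;       /* new user stack pointer, if any        */
            unsigned long tls;         /* TLS pointer/descriptor                */
            int         (*fn)(void *); /* non-NULL for kernel and io threads    */
            void         *fn_arg;      /* argument handed to ->fn               */
    };
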
index fa700b4..c3636ea 100644 (file)
@@ -739,10 +739,10 @@ static void native_machine_halt(void)
 
 static void native_machine_power_off(void)
 {
-       if (pm_power_off) {
+       if (kernel_can_power_off()) {
                if (!reboot_force)
                        machine_shutdown();
-               pm_power_off();
+               do_kernel_power_off();
        }
        /* A fallback in case there is no PM info available */
        tboot_shutdown(TB_SHUTDOWN_HALT);
index 249981b..3ebb853 100644 (file)
@@ -903,18 +903,18 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_CMDLINE_BOOL
 #ifdef CONFIG_CMDLINE_OVERRIDE
-       strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+       strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
 #else
        if (builtin_cmdline[0]) {
                /* append boot loader cmdline to builtin */
                strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
                strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
-               strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+               strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
        }
 #endif
 #endif
 
-       strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+       strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;
 
        /*
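
The switch from strlcpy() to strscpy() changes the return convention: strlcpy() returns the full source length even on truncation (and always walks the whole source), whereas strscpy() returns the number of bytes copied, or -E2BIG when the destination was too small. A short kernel-context sketch of checking for truncation; copy_cmdline is an illustrative name:

    #include <linux/string.h>
    #include <linux/types.h>

    /* Returns true when src fit into dst, false when it was truncated. */
    static bool copy_cmdline(char *dst, const char *src, size_t sz)
    {
            return strscpy(dst, src, sz) >= 0;    /* -E2BIG on truncation */
    }
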
index 0f3c307..8e2b255 100644 (file)
@@ -180,8 +180,7 @@ void set_task_blockstep(struct task_struct *task, bool on)
         *
         * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
         * task is current or it can't be running, otherwise we can race
-        * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
-        * PTRACE_KILL is not safe.
+        * with __switch_to_xtra(). We rely on ptrace_freeze_traced().
         */
        local_irq_disable();
        debugctl = get_debugctlmsr();
index a0702b6..e2e95a6 100644 (file)
@@ -90,7 +90,7 @@ static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
 {
        struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
        struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
-       int auto_eoi_old, auto_eoi_new;
+       bool auto_eoi_old, auto_eoi_new;
 
        if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
                return;
@@ -100,16 +100,16 @@ static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
        else
                __clear_bit(vector, synic->vec_bitmap);
 
-       auto_eoi_old = bitmap_weight(synic->auto_eoi_bitmap, 256);
+       auto_eoi_old = !bitmap_empty(synic->auto_eoi_bitmap, 256);
 
        if (synic_has_vector_auto_eoi(synic, vector))
                __set_bit(vector, synic->auto_eoi_bitmap);
        else
                __clear_bit(vector, synic->auto_eoi_bitmap);
 
-       auto_eoi_new = bitmap_weight(synic->auto_eoi_bitmap, 256);
+       auto_eoi_new = !bitmap_empty(synic->auto_eoi_bitmap, 256);
 
-       if (!!auto_eoi_old == !!auto_eoi_new)
+       if (auto_eoi_old == auto_eoi_new)
                return;
 
        if (!enable_apicv)
@@ -1855,7 +1855,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
                all_cpus = flush_ex.hv_vp_set.format !=
                        HV_GENERIC_SET_SPARSE_4K;
 
-               if (hc->var_cnt != bitmap_weight((unsigned long *)&valid_bank_mask, 64))
+               if (hc->var_cnt != hweight64(valid_bank_mask))
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;
 
                if (all_cpus)
@@ -1956,7 +1956,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
                valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
                all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
 
-               if (hc->var_cnt != bitmap_weight((unsigned long *)&valid_bank_mask, 64))
+               if (hc->var_cnt != hweight64(valid_bank_mask))
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;
 
                if (all_cpus)
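
valid_bank_mask is a single u64, so counting its set bits with bitmap_weight() over 64 bits is equivalent to hweight64(), and the enable/disable decision in synic_update_vector() only needs an empty/non-empty test. A standalone illustration of the equivalent operations:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t valid_bank_mask = 0x105ULL;    /* banks 0, 2 and 8 set */
            /* hweight64() computes the same population count that
             * bitmap_weight() would for a 64-bit map. */
            unsigned int weight = (unsigned int)__builtin_popcountll(valid_bank_mask);
            bool any_set = valid_bank_mask != 0;    /* !bitmap_empty(..., 64) */

            printf("weight=%u any_set=%d\n", weight, any_set);
            return 0;
    }
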
index f5aeade..a07e8cd 100644 (file)
@@ -6219,7 +6219,7 @@ static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
        int size = PAGE_SIZE << L1D_CACHE_ORDER;
 
        /*
-        * This code is only executed when the the flush mode is 'cond' or
+        * This code is only executed when the flush mode is 'cond' or
         * 'always'
         */
        if (static_branch_likely(&vmx_l1d_flush_cond)) {
index b81ef4f..e9473c7 100644 (file)
@@ -11937,7 +11937,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
        if (current->mm == kvm->mm) {
                /*
                 * Free memory regions allocated on behalf of userspace,
-                * unless the the memory map has changed due to process exit
+                * unless the memory map has changed due to process exit
                 * or fd copying.
                 */
                mutex_lock(&kvm->slots_lock);
index cb290a2..39c5246 100644 (file)
@@ -1240,8 +1240,8 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct,
 void __ref vmemmap_free(unsigned long start, unsigned long end,
                struct vmem_altmap *altmap)
 {
-       VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
-       VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));
+       VM_BUG_ON(!PAGE_ALIGNED(start));
+       VM_BUG_ON(!PAGE_ALIGNED(end));
 
        remove_pagetable(start, end, false, altmap);
 }
@@ -1605,8 +1605,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 {
        int err;
 
-       VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
-       VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));
+       VM_BUG_ON(!PAGE_ALIGNED(start));
+       VM_BUG_ON(!PAGE_ALIGNED(end));
 
        if (end - start < PAGES_PER_SECTION * sizeof(struct page))
                err = vmemmap_populate_basepages(start, end, node, NULL);
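
PAGE_ALIGNED(x) is the page-size special case of IS_ALIGNED(), so the two assertions are equivalent and the shorter form states the intent. A standalone illustration of the underlying check, with a 4 KiB page size assumed for the demo:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SIZE 4096UL

    /* What IS_ALIGNED(addr, PAGE_SIZE) / PAGE_ALIGNED(addr) boil down to. */
    static bool page_aligned(uintptr_t addr)
    {
            return (addr & (DEMO_PAGE_SIZE - 1)) == 0;
    }

    int main(void)
    {
            printf("%d %d\n", page_aligned(0x200000), page_aligned(0x200010));
            return 0;
    }
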
index 854dd81..9ffe2ba 100644 (file)
@@ -8,7 +8,7 @@
  * The below thunking functions are only used after ExitBootServices()
  * has been called. This simplifies things considerably as compared with
  * the early EFI thunking because we can leave all the kernel state
- * intact (GDT, IDT, etc) and simply invoke the the 32-bit EFI runtime
+ * intact (GDT, IDT, etc) and simply invoke the 32-bit EFI runtime
  * services from __KERNEL32_CS. This means we can continue to service
  * interrupts across an EFI mixed mode call.
  *
index 3ee234b..255a44d 100644 (file)
@@ -23,9 +23,11 @@ static long write_ldt_entry(struct mm_id *mm_idp, int func,
 {
        long res;
        void *stub_addr;
+
+       BUILD_BUG_ON(sizeof(*desc) % sizeof(long));
+
        res = syscall_stub_data(mm_idp, (unsigned long *)desc,
-                               (sizeof(*desc) + sizeof(long) - 1) &
-                                   ~(sizeof(long) - 1),
+                               sizeof(*desc) / sizeof(long),
                                addr, &stub_addr);
        if (!res) {
                unsigned long args[] = { func,
index ca85d14..f33a442 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/pci.h>
 #include <linux/gfp.h>
 #include <linux/edd.h>
+#include <linux/reboot.h>
 
 #include <xen/xen.h>
 #include <xen/events.h>
@@ -1069,8 +1070,7 @@ static void xen_machine_halt(void)
 
 static void xen_machine_power_off(void)
 {
-       if (pm_power_off)
-               pm_power_off();
+       do_kernel_power_off();
        xen_reboot(SHUTDOWN_poweroff);
 }
 
index 7e38292..68e0e2f 100644 (file)
@@ -263,10 +263,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
  * involved.  Much simpler to just not copy those live frames across.
  */
 
-int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
-               unsigned long thread_fn_arg, struct task_struct *p,
-               unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
+       unsigned long clone_flags = args->flags;
+       unsigned long usp_thread_fn = args->stack;
+       unsigned long tls = args->tls;
        struct pt_regs *childregs = task_pt_regs(p);
 
 #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
@@ -286,7 +287,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
 #error Unsupported Xtensa ABI
 #endif
 
-       if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (!args->fn) {
                struct pt_regs *regs = current_pt_regs();
                unsigned long usp = usp_thread_fn ?
                        usp_thread_fn : regs->areg[1];
@@ -338,15 +339,15 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
                 * Window underflow will load registers from the
                 * spill slots on the stack on return from _switch_to.
                 */
-               SPILL_SLOT(childregs, 2) = usp_thread_fn;
-               SPILL_SLOT(childregs, 3) = thread_fn_arg;
+               SPILL_SLOT(childregs, 2) = (unsigned long)args->fn;
+               SPILL_SLOT(childregs, 3) = (unsigned long)args->fn_arg;
 #elif defined(__XTENSA_CALL0_ABI__)
                /*
                 * a12 = thread_fn, a13 = thread_fn arg.
                 * _switch_to epilogue will load registers from the stack.
                 */
-               ((unsigned long *)p->thread.sp)[0] = usp_thread_fn;
-               ((unsigned long *)p->thread.sp)[1] = thread_fn_arg;
+               ((unsigned long *)p->thread.sp)[0] = (unsigned long)args->fn;
+               ((unsigned long *)p->thread.sp)[1] = (unsigned long)args->fn_arg;
 #else
 #error Unsupported Xtensa ABI
 #endif
index 22cdaa6..f294771 100644 (file)
@@ -224,12 +224,12 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 
 void user_enable_single_step(struct task_struct *child)
 {
-       child->ptrace |= PT_SINGLESTEP;
+       set_tsk_thread_flag(child, TIF_SINGLESTEP);
 }
 
 void user_disable_single_step(struct task_struct *child)
 {
-       child->ptrace &= ~PT_SINGLESTEP;
+       clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 }
 
 /*
index c9ffd42..876d5df 100644 (file)
@@ -472,7 +472,7 @@ static void do_signal(struct pt_regs *regs)
                /* Set up the stack frame */
                ret = setup_frame(&ksig, sigmask_to_save(), regs);
                signal_setup_done(ret, &ksig, 0);
-               if (current->ptrace & PT_SINGLESTEP)
+               if (test_thread_flag(TIF_SINGLESTEP))
                        task_pt_regs(current)->icountlevel = 1;
 
                return;
@@ -498,7 +498,7 @@ static void do_signal(struct pt_regs *regs)
        /* If there's no signal to deliver, we just restore the saved mask.  */
        restore_saved_sigmask();
 
-       if (current->ptrace & PT_SINGLESTEP)
+       if (test_thread_flag(TIF_SINGLESTEP))
                task_pt_regs(current)->icountlevel = 1;
        return;
 }
index a3893d8..f92d022 100644 (file)
@@ -722,6 +722,7 @@ static void bio_alloc_cache_destroy(struct bio_set *bs)
                bio_alloc_cache_prune(cache, -1U);
        }
        free_percpu(bs->cache);
+       bs->cache = NULL;
 }
 
 /**
@@ -1366,10 +1367,12 @@ void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
                struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
                struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
                unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
-               void *src_buf;
+               void *src_buf = bvec_kmap_local(&src_bv);
+               void *dst_buf = bvec_kmap_local(&dst_bv);
 
-               src_buf = bvec_kmap_local(&src_bv);
-               memcpy_to_bvec(&dst_bv, src_buf);
+               memcpy(dst_buf, src_buf, bytes);
+
+               kunmap_local(dst_buf);
                kunmap_local(src_buf);
 
                bio_advance_iter_single(src, src_iter, bytes);
index 40161a3..764e740 100644 (file)
@@ -1974,12 +1974,8 @@ EXPORT_SYMBOL_GPL(bio_associate_blkg);
  */
 void bio_clone_blkg_association(struct bio *dst, struct bio *src)
 {
-       if (src->bi_blkg) {
-               if (dst->bi_blkg)
-                       blkg_put(dst->bi_blkg);
-               blkg_get(src->bi_blkg);
-               dst->bi_blkg = src->bi_blkg;
-       }
+       if (src->bi_blkg)
+               bio_associate_blkg_from_css(dst, bio_blkcg_css(src));
 }
 EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
 
index 80fa73c..06ff5bb 100644 (file)
@@ -939,7 +939,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 
        blk_flush_plug(current->plug, false);
 
-       if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
+       if (bio_queue_enter(bio))
                return 0;
        if (queue_is_mq(q)) {
                ret = blk_mq_poll(q, cookie, iob, flags);
index 18c68d8..56ed48d 100644 (file)
@@ -54,13 +54,8 @@ static ssize_t blk_ia_range_sysfs_show(struct kobject *kobj,
                container_of(attr, struct blk_ia_range_sysfs_entry, attr);
        struct blk_independent_access_range *iar =
                container_of(kobj, struct blk_independent_access_range, kobj);
-       ssize_t ret;
 
-       mutex_lock(&iar->queue->sysfs_lock);
-       ret = entry->show(iar, buf);
-       mutex_unlock(&iar->queue->sysfs_lock);
-
-       return ret;
+       return entry->show(iar, buf);
 }
 
 static const struct sysfs_ops blk_ia_range_sysfs_ops = {
index 5b676c7..9568bf8 100644 (file)
@@ -87,7 +87,17 @@ struct iolatency_grp;
 struct blk_iolatency {
        struct rq_qos rqos;
        struct timer_list timer;
-       atomic_t enabled;
+
+       /*
+        * ->enabled is the master enable switch gating the throttling logic and
+        * inflight tracking. The number of cgroups which have iolat enabled is
+        * tracked in ->enable_cnt, and ->enable is flipped on/off accordingly
+        * from ->enable_work with the request_queue frozen. For details, See
+        * blkiolatency_enable_work_fn().
+        */
+       bool enabled;
+       atomic_t enable_cnt;
+       struct work_struct enable_work;
 };
 
 static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
@@ -95,11 +105,6 @@ static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
        return container_of(rqos, struct blk_iolatency, rqos);
 }
 
-static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
-{
-       return atomic_read(&blkiolat->enabled) > 0;
-}
-
 struct child_latency_info {
        spinlock_t lock;
 
@@ -464,7 +469,7 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
        struct blkcg_gq *blkg = bio->bi_blkg;
        bool issue_as_root = bio_issue_as_root_blkg(bio);
 
-       if (!blk_iolatency_enabled(blkiolat))
+       if (!blkiolat->enabled)
                return;
 
        while (blkg && blkg->parent) {
@@ -594,7 +599,6 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
        u64 window_start;
        u64 now;
        bool issue_as_root = bio_issue_as_root_blkg(bio);
-       bool enabled = false;
        int inflight = 0;
 
        blkg = bio->bi_blkg;
@@ -605,8 +609,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
        if (!iolat)
                return;
 
-       enabled = blk_iolatency_enabled(iolat->blkiolat);
-       if (!enabled)
+       if (!iolat->blkiolat->enabled)
                return;
 
        now = ktime_to_ns(ktime_get());
@@ -645,6 +648,7 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
 
        del_timer_sync(&blkiolat->timer);
+       flush_work(&blkiolat->enable_work);
        blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
        kfree(blkiolat);
 }
@@ -716,6 +720,44 @@ next:
        rcu_read_unlock();
 }
 
+/**
+ * blkiolatency_enable_work_fn - Enable or disable iolatency on the device
+ * @work: enable_work of the blk_iolatency of interest
+ *
+ * iolatency needs to keep track of the number of in-flight IOs per cgroup. This
+ * is relatively expensive as it involves walking up the hierarchy twice for
+ * every IO. Thus, if iolatency is not enabled in any cgroup for the device, we
+ * want to disable the in-flight tracking.
+ *
+ * We have to make sure that the counting is balanced - we don't want to leak
+ * the in-flight counts by disabling accounting in the completion path while IOs
+ * are in flight. This is achieved by ensuring that no IO is in flight by
+ * freezing the queue while flipping ->enabled. As this requires a sleepable
+ * context, ->enabled flipping is punted to this work function.
+ */
+static void blkiolatency_enable_work_fn(struct work_struct *work)
+{
+       struct blk_iolatency *blkiolat = container_of(work, struct blk_iolatency,
+                                                     enable_work);
+       bool enabled;
+
+       /*
+        * There can only be one instance of this function running for @blkiolat
+        * and it's guaranteed to be executed at least once after the latest
+        * ->enable_cnt modification. Acting on the latest ->enable_cnt is
+        * sufficient.
+        *
+        * Also, we know @blkiolat is safe to access as ->enable_work is flushed
+        * in blkcg_iolatency_exit().
+        */
+       enabled = atomic_read(&blkiolat->enable_cnt);
+       if (enabled != blkiolat->enabled) {
+               blk_mq_freeze_queue(blkiolat->rqos.q);
+               blkiolat->enabled = enabled;
+               blk_mq_unfreeze_queue(blkiolat->rqos.q);
+       }
+}
+
 int blk_iolatency_init(struct request_queue *q)
 {
        struct blk_iolatency *blkiolat;
@@ -741,17 +783,15 @@ int blk_iolatency_init(struct request_queue *q)
        }
 
        timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
+       INIT_WORK(&blkiolat->enable_work, blkiolatency_enable_work_fn);
 
        return 0;
 }
 
-/*
- * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
- * return 0.
- */
-static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
 {
        struct iolatency_grp *iolat = blkg_to_lat(blkg);
+       struct blk_iolatency *blkiolat = iolat->blkiolat;
        u64 oldval = iolat->min_lat_nsec;
 
        iolat->min_lat_nsec = val;
@@ -759,13 +799,15 @@ static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
        iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
                                    BLKIOLATENCY_MAX_WIN_SIZE);
 
-       if (!oldval && val)
-               return 1;
+       if (!oldval && val) {
+               if (atomic_inc_return(&blkiolat->enable_cnt) == 1)
+                       schedule_work(&blkiolat->enable_work);
+       }
        if (oldval && !val) {
                blkcg_clear_delay(blkg);
-               return -1;
+               if (atomic_dec_return(&blkiolat->enable_cnt) == 0)
+                       schedule_work(&blkiolat->enable_work);
        }
-       return 0;
 }
 
 static void iolatency_clear_scaling(struct blkcg_gq *blkg)
@@ -797,7 +839,6 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
        u64 lat_val = 0;
        u64 oldval;
        int ret;
-       int enable = 0;
 
        ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
        if (ret)
@@ -832,41 +873,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
        blkg = ctx.blkg;
        oldval = iolat->min_lat_nsec;
 
-       enable = iolatency_set_min_lat_nsec(blkg, lat_val);
-       if (enable) {
-               if (!blk_get_queue(blkg->q)) {
-                       ret = -ENODEV;
-                       goto out;
-               }
-
-               blkg_get(blkg);
-       }
-
-       if (oldval != iolat->min_lat_nsec) {
+       iolatency_set_min_lat_nsec(blkg, lat_val);
+       if (oldval != iolat->min_lat_nsec)
                iolatency_clear_scaling(blkg);
-       }
-
        ret = 0;
 out:
        blkg_conf_finish(&ctx);
-       if (ret == 0 && enable) {
-               struct iolatency_grp *tmp = blkg_to_lat(blkg);
-               struct blk_iolatency *blkiolat = tmp->blkiolat;
-
-               blk_mq_freeze_queue(blkg->q);
-
-               if (enable == 1)
-                       atomic_inc(&blkiolat->enabled);
-               else if (enable == -1)
-                       atomic_dec(&blkiolat->enabled);
-               else
-                       WARN_ON_ONCE(1);
-
-               blk_mq_unfreeze_queue(blkg->q);
-
-               blkg_put(blkg);
-               blk_put_queue(blkg->q);
-       }
        return ret ?: nbytes;
 }
 
@@ -1005,14 +1017,8 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
 {
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
-       struct blk_iolatency *blkiolat = iolat->blkiolat;
-       int ret;
 
-       ret = iolatency_set_min_lat_nsec(blkg, 0);
-       if (ret == 1)
-               atomic_inc(&blkiolat->enabled);
-       if (ret == -1)
-               atomic_dec(&blkiolat->enabled);
+       iolatency_set_min_lat_nsec(blkg, 0);
        iolatency_clear_scaling(blkg);
 }
 
index 68ac23d..2dcd738 100644 (file)
@@ -228,7 +228,6 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
-               BUG_ON(tag >= tags->nr_reserved_tags);
                sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
        }
 }
index ae116b7..e9bf950 100644 (file)
@@ -133,7 +133,8 @@ static bool blk_mq_check_inflight(struct request *rq, void *priv,
 {
        struct mq_inflight *mi = priv;
 
-       if ((!mi->part->bd_partno || rq->part == mi->part) &&
+       if (rq->part && blk_do_io_stat(rq) &&
+           (!mi->part->bd_partno || rq->part == mi->part) &&
            blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
                mi->inflight[rq_data_dir(rq)]++;
 
@@ -1151,24 +1152,6 @@ void blk_mq_start_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_start_request);
 
-/**
- * blk_end_sync_rq - executes a completion event on a request
- * @rq: request to complete
- * @error: end I/O status of the request
- */
-static void blk_end_sync_rq(struct request *rq, blk_status_t error)
-{
-       struct completion *waiting = rq->end_io_data;
-
-       rq->end_io_data = (void *)(uintptr_t)error;
-
-       /*
-        * complete last, if this is a stack request the process (and thus
-        * the rq pointer) could be invalid right after this complete()
-        */
-       complete(waiting);
-}
-
 /*
  * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
  * queues. This is important for md arrays to benefit from merging
@@ -1203,33 +1186,10 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
        plug->rq_count++;
 }
 
-static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
-               rq_end_io_fn *done, bool use_plug)
-{
-       WARN_ON(irqs_disabled());
-       WARN_ON(!blk_rq_is_passthrough(rq));
-
-       rq->end_io = done;
-
-       blk_account_io_start(rq);
-
-       if (use_plug && current->plug) {
-               blk_add_rq_to_plug(current->plug, rq);
-               return;
-       }
-       /*
-        * don't check dying flag for MQ because the request won't
-        * be reused after dying flag is set
-        */
-       blk_mq_sched_insert_request(rq, at_head, true, false);
-}
-
-
 /**
  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
  * @rq:                request to insert
  * @at_head:    insert request at head or tail of queue
- * @done:      I/O completion handler
  *
  * Description:
  *    Insert a fully prepared request at the back of the I/O scheduler queue
@@ -1238,13 +1198,32 @@ static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
  * Note:
  *    This function will invoke @done directly if the queue is dead.
  */
-void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
+void blk_execute_rq_nowait(struct request *rq, bool at_head)
 {
-       __blk_execute_rq_nowait(rq, at_head, done, true);
+       WARN_ON(irqs_disabled());
+       WARN_ON(!blk_rq_is_passthrough(rq));
 
+       blk_account_io_start(rq);
+       if (current->plug)
+               blk_add_rq_to_plug(current->plug, rq);
+       else
+               blk_mq_sched_insert_request(rq, at_head, true, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
+struct blk_rq_wait {
+       struct completion done;
+       blk_status_t ret;
+};
+
+static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+{
+       struct blk_rq_wait *wait = rq->end_io_data;
+
+       wait->ret = ret;
+       complete(&wait->done);
+}
+
 static bool blk_rq_is_poll(struct request *rq)
 {
        if (!rq->mq_hctx)
@@ -1276,30 +1255,37 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
  */
 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 {
-       DECLARE_COMPLETION_ONSTACK(wait);
-       unsigned long hang_check;
+       struct blk_rq_wait wait = {
+               .done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
+       };
+
+       WARN_ON(irqs_disabled());
+       WARN_ON(!blk_rq_is_passthrough(rq));
 
-       /*
-        * iopoll requires request to be submitted to driver, so can't
-        * use plug
-        */
        rq->end_io_data = &wait;
-       __blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq,
-                       !blk_rq_is_poll(rq));
-
-       /* Prevent hang_check timer from firing at us during very long I/O */
-       hang_check = sysctl_hung_task_timeout_secs;
-
-       if (blk_rq_is_poll(rq))
-               blk_rq_poll_completion(rq, &wait);
-       else if (hang_check)
-               while (!wait_for_completion_io_timeout(&wait,
-                               hang_check * (HZ/2)))
-                       ;
-       else
-               wait_for_completion_io(&wait);
+       rq->end_io = blk_end_sync_rq;
+
+       blk_account_io_start(rq);
+       blk_mq_sched_insert_request(rq, at_head, true, false);
+
+       if (blk_rq_is_poll(rq)) {
+               blk_rq_poll_completion(rq, &wait.done);
+       } else {
+               /*
+                * Prevent hang_check timer from firing at us during very long
+                * I/O
+                */
+               unsigned long hang_check = sysctl_hung_task_timeout_secs;
+
+               if (hang_check)
+                       while (!wait_for_completion_io_timeout(&wait.done,
+                                       hang_check * (HZ/2)))
+                               ;
+               else
+                       wait_for_completion_io(&wait.done);
+       }
 
-       return (blk_status_t)(uintptr_t)rq->end_io_data;
+       return wait.ret;
 }
 EXPORT_SYMBOL(blk_execute_rq);
 
@@ -2174,8 +2160,7 @@ static bool blk_mq_has_sqsched(struct request_queue *q)
  */
 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
 {
-       struct blk_mq_hw_ctx *hctx;
-
+       struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
        /*
         * If the IO scheduler does not respect hardware queues when
         * dispatching, we just don't bother with multiple HW queues and
@@ -2183,8 +2168,8 @@ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
         * just causes lock contention inside the scheduler and pointless cache
         * bouncing.
         */
-       hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
-                                    raw_smp_processor_id());
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx);
+
        if (!blk_mq_hctx_stopped(hctx))
                return hctx;
        return NULL;
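
The blk_execute_rq() rework above drops the old trick of smuggling the status back through rq->end_io_data and instead hands the callback one on-stack struct blk_rq_wait carrying both the completion and the returned blk_status_t, filled in by blk_end_sync_rq() before the waiter is woken. A small userspace sketch of that "bundle the wait object and the result slot" pattern follows; the pthread scaffolding and fake_driver() are illustrative assumptions, not block-layer code.

/* Sketch of the on-stack wait-struct pattern: the waiter passes a single
 * stack object holding both the condition and the result slot, and the
 * completion callback stores the result before signalling. */
#include <pthread.h>
#include <stdio.h>

struct rq_wait {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
	int             ret;    /* plays the role of blk_status_t */
};

/* Analogue of blk_end_sync_rq(): store the status, then complete. */
static void end_sync(struct rq_wait *wait, int status)
{
	pthread_mutex_lock(&wait->lock);
	wait->ret = status;
	wait->done = 1;
	pthread_cond_signal(&wait->cond);
	pthread_mutex_unlock(&wait->lock);
}

static void *fake_driver(void *arg)
{
	end_sync(arg, 0);               /* "request" completes with status 0 */
	return NULL;
}

int main(void)
{
	struct rq_wait wait = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, fake_driver, &wait);
	pthread_mutex_lock(&wait.lock);
	while (!wait.done)
		pthread_cond_wait(&wait.cond, &wait.lock);
	pthread_mutex_unlock(&wait.lock);
	pthread_join(t, NULL);

	printf("status=%d\n", wait.ret);   /* the value the callback stored */
	return 0;
}
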
index 36532b9..27205ae 100644 (file)
@@ -385,6 +385,8 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
 
        if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN))
                return -EINVAL;
+       if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
+               return -EINVAL;
        if (disk->open_partitions)
                return -EBUSY;
 
index 8d6cd5d..b6a172d 100644 (file)
@@ -107,6 +107,8 @@ source "drivers/usb/Kconfig"
 
 source "drivers/mmc/Kconfig"
 
+source "drivers/ufs/Kconfig"
+
 source "drivers/memstick/Kconfig"
 
 source "drivers/leds/Kconfig"
@@ -225,8 +227,6 @@ source "drivers/mux/Kconfig"
 
 source "drivers/opp/Kconfig"
 
-source "drivers/visorbus/Kconfig"
-
 source "drivers/siox/Kconfig"
 
 source "drivers/slimbus/Kconfig"
@@ -239,4 +239,6 @@ source "drivers/most/Kconfig"
 
 source "drivers/peci/Kconfig"
 
+source "drivers/hte/Kconfig"
+
 endmenu
index f735c49..9a30842 100644 (file)
@@ -128,6 +128,7 @@ obj-$(CONFIG_PM_OPP)                += opp/
 obj-$(CONFIG_CPU_FREQ)         += cpufreq/
 obj-$(CONFIG_CPU_IDLE)         += cpuidle/
 obj-y                          += mmc/
+obj-y                          += ufs/
 obj-$(CONFIG_MEMSTICK)         += memstick/
 obj-$(CONFIG_NEW_LEDS)         += leds/
 obj-$(CONFIG_INFINIBAND)       += infiniband/
@@ -181,10 +182,10 @@ obj-$(CONFIG_FPGA)                += fpga/
 obj-$(CONFIG_FSI)              += fsi/
 obj-$(CONFIG_TEE)              += tee/
 obj-$(CONFIG_MULTIPLEXER)      += mux/
-obj-$(CONFIG_UNISYS_VISORBUS)  += visorbus/
 obj-$(CONFIG_SIOX)             += siox/
 obj-$(CONFIG_GNSS)             += gnss/
 obj-$(CONFIG_INTERCONNECT)     += interconnect/
 obj-$(CONFIG_COUNTER)          += counter/
 obj-$(CONFIG_MOST)             += most/
 obj-$(CONFIG_PECI)             += peci/
+obj-$(CONFIG_HTE)              += hte/
index cd02996..868c47b 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0+
 /* fakekey.c
- * Functions for simulating keypresses.
+ * Functions for simulating key presses.
  *
  * Copyright (C) 2010 the Speakup Team
  */
@@ -78,7 +78,7 @@ void speakup_fake_down_arrow(void)
 }
 
 /*
- * Are we handling a simulated keypress on the current CPU?
+ * Are we handling a simulated key press on the current CPU?
  * Returns a boolean.
  */
 bool speakup_fake_key_pressed(void)
index 53580bd..3418ea3 100644 (file)
@@ -59,7 +59,7 @@ const struct old_serial_port *spk_serial_init(int index)
        }
        ser = rs_table + index;
 
-       /*      Divisor, bytesize and parity */
+       /*      Divisor, byte size and parity */
        quot = ser->baud_base / baud;
        cval = cflag & (CSIZE | CSTOPB);
 #if defined(__powerpc__) || defined(__alpha__)
index 023172c..a55b607 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 1998-99  Kirk Reiser.
  * Copyright (C) 2003 David Borowski.
  *
- * this code is specificly written as a driver for the speakup screenreview
+ * this code is specifically written as a driver for the speakup screenreview
  * package and is not a general device driver.
  * This driver is for the Aicom Acent PC internal synthesizer.
  */
index 3a863dc..2697c51 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 1998-99  Kirk Reiser.
  * Copyright (C) 2003 David Borowski.
  *
- * this code is specificly written as a driver for the speakup screenreview
+ * this code is specifically written as a driver for the speakup screenreview
  * package and is not a general device driver.
  */
 
index cd63581..c84a7e0 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 1998-99  Kirk Reiser.
  * Copyright (C) 2003 David Borowski.
  *
- * this code is specificly written as a driver for the speakup screenreview
+ * this code is specifically written as a driver for the speakup screenreview
  * package and is not a general device driver.
  */
 #include <linux/jiffies.h>
index a0c3b8a..4d16d60 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 1998-99  Kirk Reiser.
  * Copyright (C) 2003 David Borowski.
  *
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
  * s not a general device driver.
  */
 #include "spk_priv.h"
index 76dfa3f..b8103eb 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 1998-99  Kirk Reiser.
  * Copyright (C) 2003 David Borowski.
  *
- * this code is specificly written as a driver for the speakup screenreview
+ * this code is specifically written as a driver for the speakup screenreview
  * package and is not a general device driver.
  */
 #include "spk_priv.h"
index 092cfd0..eaebf62 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 1998-99  Kirk Reiser.
  * Copyright (C) 2003 David Borowski.
  *
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
  * s not a general device driver.
  */
 #include <linux/jiffies.h>
index 78ca498..2a7e8d7 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 1998-99  Kirk Reiser.
  * Copyright (C) 2003 David Borowski.
  *
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
  * s not a general device driver.
  */
 #include <linux/unistd.h>
index a9dd5c4..6f01e01 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 1998-99  Kirk Reiser.
  * Copyright (C) 2003 David Borowski.
  *
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
  * package it's not a general device driver.
  * This driver is for the RC Systems DoubleTalk PC internal synthesizer.
  */
index 63c2f29..34f11cd 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright (C) 2003 David Borowski.
  * Copyright (C) 2007 Samuel Thibault.
  *
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
  * s not a general device driver.
  */
 #include "spk_priv.h"
index 1618be8..f61b62f 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Copyright (C) 2003 David Borowski.
  *
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
  * package it's not a general device driver.
  * This driver is for the Keynote Gold internal synthesizer.
  */
index 3e59b38..f885cfa 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 1998-99  Kirk Reiser.
  * Copyright (C) 2003 David Borowski.
  *
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
  * s not a general device driver.
  */
 #include "speakup.h"
index 19824e7..99f1d4a 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2003  Kirk Reiser.
  *
- * this code is specificly written as a driver for the speakup screenreview
+ * this code is specifically written as a driver for the speakup screenreview
  * package and is not a general device driver.
  */
 
@@ -397,6 +397,7 @@ static int softsynth_probe(struct spk_synth *synth)
        synthu_device.name = "softsynthu";
        synthu_device.fops = &softsynthu_fops;
        if (misc_register(&synthu_device)) {
+               misc_deregister(&synth_device);
                pr_warn("Couldn't initialize miscdevice /dev/softsynthu.\n");
                return -ENODEV;
        }
index bd3d8dc..5e3bb3a 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 1998-99  Kirk Reiser.
  * Copyright (C) 2003 David Borowski.
  *
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
  * s not a general device driver.
  */
 #include "spk_priv.h"
index a7326f2..9e78134 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 1998-99  Kirk Reiser.
  * Copyright (C) 2003 David Borowski.
  *
- * specificly written as a driver for the speakup screenreview
+ * specifically written as a driver for the speakup screenreview
  * s not a general device driver.
  */
 #include "spk_priv.h"
index db487ff..c29e41b 100644 (file)
@@ -32,7 +32,6 @@ MODULE_AUTHOR("Paul Diefenbaugh");
 MODULE_DESCRIPTION("ACPI AC Adapter Driver");
 MODULE_LICENSE("GPL");
 
-
 static int acpi_ac_add(struct acpi_device *device);
 static int acpi_ac_remove(struct acpi_device *device);
 static void acpi_ac_notify(struct acpi_device *device, u32 event);
@@ -125,6 +124,7 @@ static int get_ac_property(struct power_supply *psy,
        default:
                return -EINVAL;
        }
+
        return 0;
 }
 
@@ -286,6 +286,7 @@ static int acpi_ac_resume(struct device *dev)
                return 0;
        if (old_state != ac->state)
                kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
+
        return 0;
 }
 #else
@@ -296,7 +297,6 @@ static int acpi_ac_remove(struct acpi_device *device)
 {
        struct acpi_ac *ac = NULL;
 
-
        if (!device || !acpi_driver_data(device))
                return -EINVAL;
 
index 990ff5b..e07782b 100644 (file)
@@ -1707,24 +1707,23 @@ static int acpi_video_resume(struct notifier_block *nb,
        int i;
 
        switch (val) {
-       case PM_HIBERNATION_PREPARE:
-       case PM_SUSPEND_PREPARE:
-       case PM_RESTORE_PREPARE:
-               return NOTIFY_DONE;
-       }
-
-       video = container_of(nb, struct acpi_video_bus, pm_nb);
-
-       dev_info(&video->device->dev, "Restoring backlight state\n");
+       case PM_POST_HIBERNATION:
+       case PM_POST_SUSPEND:
+       case PM_POST_RESTORE:
+               video = container_of(nb, struct acpi_video_bus, pm_nb);
+
+               dev_info(&video->device->dev, "Restoring backlight state\n");
+
+               for (i = 0; i < video->attached_count; i++) {
+                       video_device = video->attached_array[i].bind_info;
+                       if (video_device && video_device->brightness)
+                               acpi_video_device_lcd_set_level(video_device,
+                                               video_device->brightness->curr);
+               }
 
-       for (i = 0; i < video->attached_count; i++) {
-               video_device = video->attached_array[i].bind_info;
-               if (video_device && video_device->brightness)
-                       acpi_video_device_lcd_set_level(video_device,
-                                       video_device->brightness->curr);
+               return NOTIFY_OK;
        }
-
-       return NOTIFY_OK;
+       return NOTIFY_DONE;
 }
 
 static acpi_status
index dc208f5..306513f 100644 (file)
@@ -52,7 +52,6 @@ static bool battery_driver_registered;
 static int battery_bix_broken_package;
 static int battery_notification_delay_ms;
 static int battery_ac_is_broken;
-static int battery_quirk_notcharging;
 static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -216,10 +215,8 @@ static int acpi_battery_get_property(struct power_supply *psy,
                        val->intval = POWER_SUPPLY_STATUS_CHARGING;
                else if (acpi_battery_is_charged(battery))
                        val->intval = POWER_SUPPLY_STATUS_FULL;
-               else if (battery_quirk_notcharging)
-                       val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
                else
-                       val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+                       val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
                break;
        case POWER_SUPPLY_PROP_PRESENT:
                val->intval = acpi_battery_present(battery);
@@ -1105,12 +1102,6 @@ battery_ac_is_broken_quirk(const struct dmi_system_id *d)
        return 0;
 }
 
-static int __init battery_quirk_not_charging(const struct dmi_system_id *d)
-{
-       battery_quirk_notcharging = 1;
-       return 0;
-}
-
 static const struct dmi_system_id bat_dmi_table[] __initconst = {
        {
                /* NEC LZ750/LS */
@@ -1140,19 +1131,6 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
                },
        },
        {
-               /*
-                * On Lenovo ThinkPads the BIOS specification defines
-                * a state when the bits for charging and discharging
-                * are both set to 0. That state is "Not Charging".
-                */
-               .callback = battery_quirk_not_charging,
-               .ident = "Lenovo ThinkPad",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad"),
-               },
-       },
-       {
                /* Microsoft Surface Go 3 */
                .callback = battery_notification_delay_quirk,
                .matches = {
index 3b299b2..903528f 100644 (file)
@@ -315,7 +315,7 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
                goto end;
        }
 
-       /* wait for completion and check for PCC errro bit */
+       /* wait for completion and check for PCC error bit */
        ret = check_pcc_chan(pcc_ss_id, true);
 
        if (pcc_ss_data->pcc_mrtt)
index c0da24c..4919e7a 100644 (file)
@@ -151,6 +151,7 @@ static int pch_fivr_remove(struct platform_device *pdev)
 static const struct acpi_device_id pch_fivr_device_ids[] = {
        {"INTC1045", 0},
        {"INTC1049", 0},
+       {"INTC1064", 0},
        {"INTC10A3", 0},
        {"", 0},
 };
index 407b89d..86561ed 100644 (file)
@@ -232,6 +232,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
        {"INTC1050", 0},
        {"INTC1060", 0},
        {"INTC1061", 0},
+       {"INTC1065", 0},
+       {"INTC1066", 0},
        {"INTC10A4", 0},
        {"INTC10A5", 0},
        {"", 0},
index 42a5563..b7113fa 100644 (file)
@@ -27,6 +27,7 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
        {"INT3532"},
        {"INTC1040"},
        {"INTC1041"},
+       {"INTC1042"},
        {"INTC1043"},
        {"INTC1044"},
        {"INTC1045"},
@@ -37,6 +38,11 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
        {"INTC1050"},
        {"INTC1060"},
        {"INTC1061"},
+       {"INTC1062"},
+       {"INTC1063"},
+       {"INTC1064"},
+       {"INTC1065"},
+       {"INTC1066"},
        {"INTC10A0"},
        {"INTC10A1"},
        {"INTC10A2"},
index 4472852..e7b4b4e 100644 (file)
@@ -14,6 +14,7 @@
        {"INT3404", }, /* Fan */ \
        {"INTC1044", }, /* Fan for Tiger Lake generation */ \
        {"INTC1048", }, /* Fan for Alder Lake generation */ \
+       {"INTC1063", }, /* Fan for Meteor Lake generation */ \
        {"INTC10A2", }, /* Fan for Raptor Lake generation */ \
        {"PNP0C0B", } /* Generic ACPI fan */
 
index ef10480..8d76911 100644 (file)
@@ -79,17 +79,17 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
 
 static int find_child_checks(struct acpi_device *adev, bool check_children)
 {
-       bool sta_present = true;
        unsigned long long sta;
        acpi_status status;
 
+       if (check_children && list_empty(&adev->children))
+               return -ENODEV;
+
        status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
        if (status == AE_NOT_FOUND)
-               sta_present = false;
-       else if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
-               return -ENODEV;
+               return FIND_CHILD_MIN_SCORE;
 
-       if (check_children && list_empty(&adev->children))
+       if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
                return -ENODEV;
 
        /*
@@ -99,8 +99,10 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
         * matched going forward.  [This means a second spec violation in a row,
         * so whatever we do here is best effort anyway.]
         */
-       return sta_present && !adev->pnp.type.platform_id ?
-                       FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
+       if (adev->pnp.type.platform_id)
+               return FIND_CHILD_MIN_SCORE;
+
+       return FIND_CHILD_MAX_SCORE;
 }
 
 struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
index 7a70c4b..3269a88 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/io-64-nonatomic-lo-hi.h>
 
 #include "acpica/accommon.h"
-#include "acpica/acnamesp.h"
 #include "internal.h"
 
 /* Definitions for ACPI_DEBUG_PRINT() */
@@ -1496,91 +1495,6 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
 }
 EXPORT_SYMBOL(acpi_check_region);
 
-static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
-                                             void *_res, void **return_value)
-{
-       struct acpi_mem_space_context **mem_ctx;
-       union acpi_operand_object *handler_obj;
-       union acpi_operand_object *region_obj2;
-       union acpi_operand_object *region_obj;
-       struct resource *res = _res;
-       acpi_status status;
-
-       region_obj = acpi_ns_get_attached_object(handle);
-       if (!region_obj)
-               return AE_OK;
-
-       handler_obj = region_obj->region.handler;
-       if (!handler_obj)
-               return AE_OK;
-
-       if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
-               return AE_OK;
-
-       if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
-               return AE_OK;
-
-       region_obj2 = acpi_ns_get_secondary_object(region_obj);
-       if (!region_obj2)
-               return AE_OK;
-
-       mem_ctx = (void *)&region_obj2->extra.region_context;
-
-       if (!(mem_ctx[0]->address >= res->start &&
-             mem_ctx[0]->address < res->end))
-               return AE_OK;
-
-       status = handler_obj->address_space.setup(region_obj,
-                                                 ACPI_REGION_DEACTIVATE,
-                                                 NULL, (void **)mem_ctx);
-       if (ACPI_SUCCESS(status))
-               region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
-
-       return status;
-}
-
-/**
- * acpi_release_memory - Release any mappings done to a memory region
- * @handle: Handle to namespace node
- * @res: Memory resource
- * @level: A level that terminates the search
- *
- * Walks through @handle and unmaps all SystemMemory Operation Regions that
- * overlap with @res and that have already been activated (mapped).
- *
- * This is a helper that allows drivers to place special requirements on memory
- * region that may overlap with operation regions, primarily allowing them to
- * safely map the region as non-cached memory.
- *
- * The unmapped Operation Regions will be automatically remapped next time they
- * are called, so the drivers do not need to do anything else.
- */
-acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
-                               u32 level)
-{
-       acpi_status status;
-
-       if (!(res->flags & IORESOURCE_MEM))
-               return AE_TYPE;
-
-       status = acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
-                                    acpi_deactivate_mem_region, NULL,
-                                    res, NULL);
-       if (ACPI_FAILURE(status))
-               return status;
-
-       /*
-        * Wait for all of the mappings queued up for removal by
-        * acpi_deactivate_mem_region() to actually go away.
-        */
-       synchronize_rcu();
-       rcu_barrier();
-       flush_scheduled_work();
-
-       return AE_OK;
-}
-EXPORT_SYMBOL_GPL(acpi_release_memory);
-
 /*
  * Let drivers know whether the resource checks are effective
  */
index e9c84d0..6a5572a 100644 (file)
 #define ACPI_IDLE_STATE_START  (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)
 
 static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
-module_param(max_cstate, uint, 0000);
-static unsigned int nocst __read_mostly;
-module_param(nocst, uint, 0000);
-static int bm_check_disable __read_mostly;
-module_param(bm_check_disable, uint, 0000);
+module_param(max_cstate, uint, 0400);
+static bool nocst __read_mostly;
+module_param(nocst, bool, 0400);
+static bool bm_check_disable __read_mostly;
+module_param(bm_check_disable, bool, 0400);
 
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
index 3147702..04ea156 100644 (file)
@@ -1035,20 +1035,22 @@ static void acpi_sleep_hibernate_setup(void)
 static inline void acpi_sleep_hibernate_setup(void) {}
 #endif /* !CONFIG_HIBERNATION */
 
-static void acpi_power_off_prepare(void)
+static int acpi_power_off_prepare(struct sys_off_data *data)
 {
        /* Prepare to power off the system */
        acpi_sleep_prepare(ACPI_STATE_S5);
        acpi_disable_all_gpes();
        acpi_os_wait_events_complete();
+       return NOTIFY_DONE;
 }
 
-static void acpi_power_off(void)
+static int acpi_power_off(struct sys_off_data *data)
 {
        /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
        pr_debug("%s called\n", __func__);
        local_irq_disable();
        acpi_enter_sleep_state(ACPI_STATE_S5);
+       return NOTIFY_DONE;
 }
 
 int __init acpi_sleep_init(void)
@@ -1067,8 +1069,14 @@ int __init acpi_sleep_init(void)
 
        if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
                sleep_states[ACPI_STATE_S5] = 1;
-               pm_power_off_prepare = acpi_power_off_prepare;
-               pm_power_off = acpi_power_off;
+
+               register_sys_off_handler(SYS_OFF_MODE_POWER_OFF_PREPARE,
+                                        SYS_OFF_PRIO_FIRMWARE,
+                                        acpi_power_off_prepare, NULL);
+
+               register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+                                        SYS_OFF_PRIO_FIRMWARE,
+                                        acpi_power_off, NULL);
        } else {
                acpi_no_s5 = true;
        }
index 7e775ba..0e3ed5e 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 #include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/acpi.h>
+#include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
 
 #define to_amba_driver(d)      container_of(d, struct amba_driver, drv)
 
@@ -94,31 +98,11 @@ static ssize_t driver_override_store(struct device *_dev,
                                     const char *buf, size_t count)
 {
        struct amba_device *dev = to_amba_device(_dev);
-       char *driver_override, *old, *cp;
-
-       /* We need to keep extra room for a newline */
-       if (count >= (PAGE_SIZE - 1))
-               return -EINVAL;
-
-       driver_override = kstrndup(buf, count, GFP_KERNEL);
-       if (!driver_override)
-               return -ENOMEM;
-
-       cp = strchr(driver_override, '\n');
-       if (cp)
-               *cp = '\0';
-
-       device_lock(_dev);
-       old = dev->driver_override;
-       if (strlen(driver_override)) {
-               dev->driver_override = driver_override;
-       } else {
-               kfree(driver_override);
-               dev->driver_override = NULL;
-       }
-       device_unlock(_dev);
+       int ret;
 
-       kfree(old);
+       ret = driver_set_override(_dev, &dev->driver_override, buf, count);
+       if (ret)
+               return ret;
 
        return count;
 }
@@ -273,6 +257,36 @@ static void amba_shutdown(struct device *dev)
                drv->shutdown(to_amba_device(dev));
 }
 
+static int amba_dma_configure(struct device *dev)
+{
+       struct amba_driver *drv = to_amba_driver(dev->driver);
+       enum dev_dma_attr attr;
+       int ret = 0;
+
+       if (dev->of_node) {
+               ret = of_dma_configure(dev, dev->of_node, true);
+       } else if (has_acpi_companion(dev)) {
+               attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
+               ret = acpi_dma_configure(dev, attr);
+       }
+
+       if (!ret && !drv->driver_managed_dma) {
+               ret = iommu_device_use_default_domain(dev);
+               if (ret)
+                       arch_teardown_dma_ops(dev);
+       }
+
+       return ret;
+}
+
+static void amba_dma_cleanup(struct device *dev)
+{
+       struct amba_driver *drv = to_amba_driver(dev->driver);
+
+       if (!drv->driver_managed_dma)
+               iommu_device_unuse_default_domain(dev);
+}
+
 #ifdef CONFIG_PM
 /*
  * Hooks to provide runtime PM of the pclk (bus clock).  It is safe to
@@ -341,7 +355,8 @@ struct bus_type amba_bustype = {
        .probe          = amba_probe,
        .remove         = amba_remove,
        .shutdown       = amba_shutdown,
-       .dma_configure  = platform_dma_configure,
+       .dma_configure  = amba_dma_configure,
+       .dma_cleanup    = amba_dma_cleanup,
        .pm             = &amba_pm,
 };
 EXPORT_SYMBOL_GPL(amba_bustype);
index f3b639e..362c0de 100644 (file)
@@ -133,18 +133,45 @@ static int binder_set_stop_on_user_error(const char *val,
 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
        param_get_int, &binder_stop_on_user_error, 0644);
 
-#define binder_debug(mask, x...) \
-       do { \
-               if (binder_debug_mask & mask) \
-                       pr_info_ratelimited(x); \
-       } while (0)
+static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
+{
+       struct va_format vaf;
+       va_list args;
+
+       if (binder_debug_mask & mask) {
+               va_start(args, format);
+               vaf.va = &args;
+               vaf.fmt = format;
+               pr_info_ratelimited("%pV", &vaf);
+               va_end(args);
+       }
+}
+
+#define binder_txn_error(x...) \
+       binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
+
+static __printf(1, 2) void binder_user_error(const char *format, ...)
+{
+       struct va_format vaf;
+       va_list args;
+
+       if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
+               va_start(args, format);
+               vaf.va = &args;
+               vaf.fmt = format;
+               pr_info_ratelimited("%pV", &vaf);
+               va_end(args);
+       }
+
+       if (binder_stop_on_user_error)
+               binder_stop_on_user_error = 2;
+}
 
-#define binder_user_error(x...) \
+#define binder_set_extended_error(ee, _id, _command, _param) \
        do { \
-               if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
-                       pr_info_ratelimited(x); \
-               if (binder_stop_on_user_error) \
-                       binder_stop_on_user_error = 2; \
+               (ee)->id = _id; \
+               (ee)->command = _command; \
+               (ee)->param = _param; \
        } while (0)
 
 #define to_flat_binder_object(hdr) \
@@ -1481,6 +1508,8 @@ static void binder_free_txn_fixups(struct binder_transaction *t)
 
        list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
                fput(fixup->file);
+               if (fixup->target_fd >= 0)
+                       put_unused_fd(fixup->target_fd);
                list_del(&fixup->fixup_entry);
                kfree(fixup);
        }
@@ -1855,8 +1884,10 @@ static void binder_deferred_fd_close(int fd)
        if (!twcb)
                return;
        init_task_work(&twcb->twork, binder_do_fd_close);
-       close_fd_get_file(fd, &twcb->file);
+       twcb->file = close_fd_get_file(fd);
        if (twcb->file) {
+               // pin it until binder_do_fd_close(); see comments there
+               get_file(twcb->file);
                filp_close(twcb->file, current->files);
                task_work_add(current, &twcb->twork, TWA_RESUME);
        } else {
@@ -2220,6 +2251,7 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
        }
        fixup->file = file;
        fixup->offset = fd_offset;
+       fixup->target_fd = -1;
        trace_binder_transaction_fd_send(t, fd, fixup->offset);
        list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
 
@@ -2705,6 +2737,24 @@ static struct binder_node *binder_get_node_refs_for_txn(
        return target_node;
 }
 
+static void binder_set_txn_from_error(struct binder_transaction *t, int id,
+                                     uint32_t command, int32_t param)
+{
+       struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
+
+       if (!from) {
+               /* annotation for sparse */
+               __release(&from->proc->inner_lock);
+               return;
+       }
+
+       /* don't override existing errors */
+       if (from->ee.command == BR_OK)
+               binder_set_extended_error(&from->ee, id, command, param);
+       binder_inner_proc_unlock(from->proc);
+       binder_thread_dec_tmpref(from);
+}
+
 static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply,
@@ -2750,6 +2800,10 @@ static void binder_transaction(struct binder_proc *proc,
        e->offsets_size = tr->offsets_size;
        strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
 
+       binder_inner_proc_lock(proc);
+       binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
+       binder_inner_proc_unlock(proc);
+
        if (reply) {
                binder_inner_proc_lock(proc);
                in_reply_to = thread->transaction_stack;
@@ -2785,6 +2839,8 @@ static void binder_transaction(struct binder_proc *proc,
                if (target_thread == NULL) {
                        /* annotation for sparse */
                        __release(&target_thread->proc->inner_lock);
+                       binder_txn_error("%d:%d reply target not found\n",
+                               thread->pid, proc->pid);
                        return_error = BR_DEAD_REPLY;
                        return_error_line = __LINE__;
                        goto err_dead_binder;
@@ -2850,6 +2906,8 @@ static void binder_transaction(struct binder_proc *proc,
                        }
                }
                if (!target_node) {
+                       binder_txn_error("%d:%d cannot find target node\n",
+                               thread->pid, proc->pid);
                        /*
                         * return_error is set above
                         */
@@ -2859,6 +2917,8 @@ static void binder_transaction(struct binder_proc *proc,
                }
                e->to_node = target_node->debug_id;
                if (WARN_ON(proc == target_proc)) {
+                       binder_txn_error("%d:%d self transactions not allowed\n",
+                               thread->pid, proc->pid);
                        return_error = BR_FAILED_REPLY;
                        return_error_param = -EINVAL;
                        return_error_line = __LINE__;
@@ -2866,6 +2926,8 @@ static void binder_transaction(struct binder_proc *proc,
                }
                if (security_binder_transaction(proc->cred,
                                                target_proc->cred) < 0) {
+                       binder_txn_error("%d:%d transaction credentials failed\n",
+                               thread->pid, proc->pid);
                        return_error = BR_FAILED_REPLY;
                        return_error_param = -EPERM;
                        return_error_line = __LINE__;
@@ -2937,6 +2999,8 @@ static void binder_transaction(struct binder_proc *proc,
        /* TODO: reuse incoming transaction for reply */
        t = kzalloc(sizeof(*t), GFP_KERNEL);
        if (t == NULL) {
+               binder_txn_error("%d:%d cannot allocate transaction\n",
+                       thread->pid, proc->pid);
                return_error = BR_FAILED_REPLY;
                return_error_param = -ENOMEM;
                return_error_line = __LINE__;
@@ -2948,6 +3012,8 @@ static void binder_transaction(struct binder_proc *proc,
 
        tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
        if (tcomplete == NULL) {
+               binder_txn_error("%d:%d cannot allocate work for transaction\n",
+                       thread->pid, proc->pid);
                return_error = BR_FAILED_REPLY;
                return_error_param = -ENOMEM;
                return_error_line = __LINE__;
@@ -2994,6 +3060,8 @@ static void binder_transaction(struct binder_proc *proc,
                security_cred_getsecid(proc->cred, &secid);
                ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
                if (ret) {
+                       binder_txn_error("%d:%d failed to get security context\n",
+                               thread->pid, proc->pid);
                        return_error = BR_FAILED_REPLY;
                        return_error_param = ret;
                        return_error_line = __LINE__;
@@ -3002,7 +3070,8 @@ static void binder_transaction(struct binder_proc *proc,
                added_size = ALIGN(secctx_sz, sizeof(u64));
                extra_buffers_size += added_size;
                if (extra_buffers_size < added_size) {
-                       /* integer overflow of extra_buffers_size */
+                       binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
+                               thread->pid, proc->pid);
                        return_error = BR_FAILED_REPLY;
                        return_error_param = -EINVAL;
                        return_error_line = __LINE__;
@@ -3016,9 +3085,15 @@ static void binder_transaction(struct binder_proc *proc,
                tr->offsets_size, extra_buffers_size,
                !reply && (t->flags & TF_ONE_WAY), current->tgid);
        if (IS_ERR(t->buffer)) {
-               /*
-                * -ESRCH indicates VMA cleared. The target is dying.
-                */
+               char *s;
+
+               ret = PTR_ERR(t->buffer);
+               s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
+                       : (ret == -ENOSPC) ? ": no space left"
+                       : (ret == -ENOMEM) ? ": memory allocation failed"
+                       : "";
+               binder_txn_error("cannot allocate buffer%s", s);
+
                return_error_param = PTR_ERR(t->buffer);
                return_error = return_error_param == -ESRCH ?
                        BR_DEAD_REPLY : BR_FAILED_REPLY;
@@ -3101,6 +3176,8 @@ static void binder_transaction(struct binder_proc *proc,
                                                  t->buffer,
                                                  buffer_offset,
                                                  sizeof(object_offset))) {
+                       binder_txn_error("%d:%d copy offset from buffer failed\n",
+                               thread->pid, proc->pid);
                        return_error = BR_FAILED_REPLY;
                        return_error_param = -EINVAL;
                        return_error_line = __LINE__;
@@ -3159,6 +3236,8 @@ static void binder_transaction(struct binder_proc *proc,
                                                        t->buffer,
                                                        object_offset,
                                                        fp, sizeof(*fp))) {
+                               binder_txn_error("%d:%d translate binder failed\n",
+                                       thread->pid, proc->pid);
                                return_error = BR_FAILED_REPLY;
                                return_error_param = ret;
                                return_error_line = __LINE__;
@@ -3176,6 +3255,8 @@ static void binder_transaction(struct binder_proc *proc,
                                                        t->buffer,
                                                        object_offset,
                                                        fp, sizeof(*fp))) {
+                               binder_txn_error("%d:%d translate handle failed\n",
+                                       thread->pid, proc->pid);
                                return_error = BR_FAILED_REPLY;
                                return_error_param = ret;
                                return_error_line = __LINE__;
@@ -3196,6 +3277,8 @@ static void binder_transaction(struct binder_proc *proc,
                                                        t->buffer,
                                                        object_offset,
                                                        fp, sizeof(*fp))) {
+                               binder_txn_error("%d:%d translate fd failed\n",
+                                       thread->pid, proc->pid);
                                return_error = BR_FAILED_REPLY;
                                return_error_param = ret;
                                return_error_line = __LINE__;
@@ -3265,6 +3348,8 @@ static void binder_transaction(struct binder_proc *proc,
                                                                  object_offset,
                                                                  fda, sizeof(*fda));
                        if (ret) {
+                               binder_txn_error("%d:%d translate fd array failed\n",
+                                       thread->pid, proc->pid);
                                return_error = BR_FAILED_REPLY;
                                return_error_param = ret > 0 ? -EINVAL : ret;
                                return_error_line = __LINE__;
@@ -3292,6 +3377,8 @@ static void binder_transaction(struct binder_proc *proc,
                                (const void __user *)(uintptr_t)bp->buffer,
                                bp->length);
                        if (ret) {
+                               binder_txn_error("%d:%d deferred copy failed\n",
+                                       thread->pid, proc->pid);
                                return_error = BR_FAILED_REPLY;
                                return_error_param = ret;
                                return_error_line = __LINE__;
@@ -3315,6 +3402,8 @@ static void binder_transaction(struct binder_proc *proc,
                                                        t->buffer,
                                                        object_offset,
                                                        bp, sizeof(*bp))) {
+                               binder_txn_error("%d:%d failed to fixup parent\n",
+                                       thread->pid, proc->pid);
                                return_error = BR_FAILED_REPLY;
                                return_error_param = ret;
                                return_error_line = __LINE__;
@@ -3422,6 +3511,8 @@ static void binder_transaction(struct binder_proc *proc,
        return;
 
 err_dead_proc_or_thread:
+       binder_txn_error("%d:%d dead process or thread\n",
+               thread->pid, proc->pid);
        return_error_line = __LINE__;
        binder_dequeue_work(proc, tcomplete);
 err_translate_failed:
@@ -3457,21 +3548,26 @@ err_bad_call_stack:
 err_empty_call_stack:
 err_dead_binder:
 err_invalid_target_handle:
-       if (target_thread)
-               binder_thread_dec_tmpref(target_thread);
-       if (target_proc)
-               binder_proc_dec_tmpref(target_proc);
        if (target_node) {
                binder_dec_node(target_node, 1, 0);
                binder_dec_node_tmpref(target_node);
        }
 
        binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
-                    "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
-                    proc->pid, thread->pid, return_error, return_error_param,
+                    "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
+                    proc->pid, thread->pid, reply ? "reply" :
+                    (tr->flags & TF_ONE_WAY ? "async" : "call"),
+                    target_proc ? target_proc->pid : 0,
+                    target_thread ? target_thread->pid : 0,
+                    t_debug_id, return_error, return_error_param,
                     (u64)tr->data_size, (u64)tr->offsets_size,
                     return_error_line);
 
+       if (target_thread)
+               binder_thread_dec_tmpref(target_thread);
+       if (target_proc)
+               binder_proc_dec_tmpref(target_proc);
+
        {
                struct binder_transaction_log_entry *fe;
 
@@ -3491,10 +3587,16 @@ err_invalid_target_handle:
 
        BUG_ON(thread->return_error.cmd != BR_OK);
        if (in_reply_to) {
+               binder_set_txn_from_error(in_reply_to, t_debug_id,
+                               return_error, return_error_param);
                thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
                binder_enqueue_thread_work(thread, &thread->return_error.work);
                binder_send_failed_reply(in_reply_to, return_error);
        } else {
+               binder_inner_proc_lock(proc);
+               binder_set_extended_error(&thread->ee, t_debug_id,
+                               return_error, return_error_param);
+               binder_inner_proc_unlock(proc);
                thread->return_error.cmd = return_error;
                binder_enqueue_thread_work(thread, &thread->return_error.work);
        }
@@ -3984,7 +4086,7 @@ static int binder_thread_write(struct binder_proc *proc,
                } break;
 
                default:
-                       pr_err("%d:%d unknown command %d\n",
+                       pr_err("%d:%d unknown command %u\n",
                               proc->pid, thread->pid, cmd);
                        return -EINVAL;
                }
@@ -4075,10 +4177,9 @@ static int binder_wait_for_work(struct binder_thread *thread,
  * Now that we are in the context of the transaction target
  * process, we can allocate and install fds. Process the
  * list of fds to translate and fixup the buffer with the
- * new fds.
+ * new fds first and only then install the files.
  *
- * If we fail to allocate an fd, then free the resources by
- * fput'ing files that have not been processed and ksys_close'ing
+ * If we fail to allocate an fd, skip the install and release
  * any fds that have already been allocated.
  */
 static int binder_apply_fd_fixups(struct binder_proc *proc,
@@ -4095,41 +4196,31 @@ static int binder_apply_fd_fixups(struct binder_proc *proc,
                                     "failed fd fixup txn %d fd %d\n",
                                     t->debug_id, fd);
                        ret = -ENOMEM;
-                       break;
+                       goto err;
                }
                binder_debug(BINDER_DEBUG_TRANSACTION,
                             "fd fixup txn %d fd %d\n",
                             t->debug_id, fd);
                trace_binder_transaction_fd_recv(t, fd, fixup->offset);
-               fd_install(fd, fixup->file);
-               fixup->file = NULL;
+               fixup->target_fd = fd;
                if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
                                                fixup->offset, &fd,
                                                sizeof(u32))) {
                        ret = -EINVAL;
-                       break;
+                       goto err;
                }
        }
        list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
-               if (fixup->file) {
-                       fput(fixup->file);
-               } else if (ret) {
-                       u32 fd;
-                       int err;
-
-                       err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
-                                                           t->buffer,
-                                                           fixup->offset,
-                                                           sizeof(fd));
-                       WARN_ON(err);
-                       if (!err)
-                               binder_deferred_fd_close(fd);
-               }
+               fd_install(fixup->target_fd, fixup->file);
                list_del(&fixup->fixup_entry);
                kfree(fixup);
        }
 
        return ret;
+
+err:
+       binder_free_txn_fixups(t);
+       return ret;
 }
 
 static int binder_thread_read(struct binder_proc *proc,
@@ -4490,7 +4581,7 @@ retry:
                trace_binder_transaction_received(t);
                binder_stat_br(proc, thread, cmd);
                binder_debug(BINDER_DEBUG_TRANSACTION,
-                            "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
+                            "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
                             proc->pid, thread->pid,
                             (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
                                (cmd == BR_TRANSACTION_SEC_CTX) ?
@@ -4632,6 +4723,7 @@ static struct binder_thread *binder_get_thread_ilocked(
        thread->return_error.cmd = BR_OK;
        thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
        thread->reply_error.cmd = BR_OK;
+       thread->ee.command = BR_OK;
        INIT_LIST_HEAD(&new_thread->waiting_thread_node);
        return thread;
 }
@@ -5070,6 +5162,22 @@ static int binder_ioctl_get_freezer_info(
        return 0;
 }
 
+static int binder_ioctl_get_extended_error(struct binder_thread *thread,
+                                          void __user *ubuf)
+{
+       struct binder_extended_error ee;
+
+       binder_inner_proc_lock(thread->proc);
+       ee = thread->ee;
+       binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
+       binder_inner_proc_unlock(thread->proc);
+
+       if (copy_to_user(ubuf, &ee, sizeof(ee)))
+               return -EFAULT;
+
+       return 0;
+}
+
 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        int ret;
@@ -5278,6 +5386,11 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                binder_inner_proc_unlock(proc);
                break;
        }
+       case BINDER_GET_EXTENDED_ERROR:
+               ret = binder_ioctl_get_extended_error(thread, ubuf);
+               if (ret < 0)
+                       goto err;
+               break;
        default:
                ret = -EINVAL;
                goto err;
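
The BINDER_GET_EXTENDED_ERROR ioctl added above lets a client pull the id/command/param triple that binder_set_extended_error() recorded for its thread, and the handler clears the record after copying it out. A hedged userspace sketch of querying it is shown below; it assumes the companion uapi header update exports BINDER_GET_EXTENDED_ERROR and struct binder_extended_error with the id/command/param fields used in these hunks, and that /dev/binder is the device node in use.

/* Sketch only: fetch (and thereby clear) the last extended error recorded
 * for the calling thread, assuming the matching uapi header is installed. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binder.h>

int main(void)
{
	struct binder_extended_error ee;
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

	if (fd < 0) {
		perror("open /dev/binder");
		return 1;
	}

	if (ioctl(fd, BINDER_GET_EXTENDED_ERROR, &ee) < 0) {
		perror("BINDER_GET_EXTENDED_ERROR");
		close(fd);
		return 1;
	}

	printf("txn id=%u command=%u param=%d\n", ee.id, ee.command, ee.param);
	close(fd);
	return 0;
}
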
index 2ac1008..5649a03 100644 (file)
@@ -1175,14 +1175,11 @@ static void binder_alloc_clear_buf(struct binder_alloc *alloc,
                unsigned long size;
                struct page *page;
                pgoff_t pgoff;
-               void *kptr;
 
                page = binder_alloc_get_page(alloc, buffer,
                                             buffer_offset, &pgoff);
                size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
-               kptr = kmap(page) + pgoff;
-               memset(kptr, 0, size);
-               kunmap(page);
+               memset_page(page, pgoff, 0, size);
                bytes -= size;
                buffer_offset += size;
        }
@@ -1220,9 +1217,9 @@ binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
                page = binder_alloc_get_page(alloc, buffer,
                                             buffer_offset, &pgoff);
                size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
-               kptr = kmap(page) + pgoff;
+               kptr = kmap_local_page(page) + pgoff;
                ret = copy_from_user(kptr, from, size);
-               kunmap(page);
+               kunmap_local(kptr);
                if (ret)
                        return bytes - size + ret;
                bytes -= size;
@@ -1247,23 +1244,14 @@ static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
                unsigned long size;
                struct page *page;
                pgoff_t pgoff;
-               void *tmpptr;
-               void *base_ptr;
 
                page = binder_alloc_get_page(alloc, buffer,
                                             buffer_offset, &pgoff);
                size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
-               base_ptr = kmap_atomic(page);
-               tmpptr = base_ptr + pgoff;
                if (to_buffer)
-                       memcpy(tmpptr, ptr, size);
+                       memcpy_to_page(page, pgoff, ptr, size);
                else
-                       memcpy(ptr, tmpptr, size);
-               /*
-                * kunmap_atomic() takes care of flushing the cache
-                * if this device has VIVT cache arch
-                */
-               kunmap_atomic(base_ptr);
+                       memcpy_from_page(ptr, page, pgoff, size);
                bytes -= size;
                pgoff = 0;
                ptr = ptr + size;
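
Editorial note: the binder_alloc.c changes follow the kernel-wide move away from kmap()/kmap_atomic() for short-lived, per-page copies. kmap_local_page() replaces kmap() where a mapping is still needed, and the memset_page()/memcpy_to_page()/memcpy_from_page() helpers from <linux/highmem.h> fold the map, copy, cache flush and unmap into one call. A stand-alone sketch of the same pattern, using hypothetical helpers that are not part of the patch:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* One-call form: maps the page locally, copies, flushes the dcache, unmaps. */
static void example_fill_page(struct page *page, size_t offset,
                              const void *src, size_t len)
{
        memcpy_to_page(page, offset, src, len);
}

/* Open-coded equivalent using a local mapping (nestable, preemption-friendly). */
static void example_fill_page_open_coded(struct page *page, size_t offset,
                                         const void *src, size_t len)
{
        void *kaddr = kmap_local_page(page);

        memcpy(kaddr + offset, src, len);
        flush_dcache_page(page);
        kunmap_local(kaddr);
}
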
index d6b6b8c..8dc0bcc 100644 (file)
@@ -480,6 +480,8 @@ struct binder_proc {
  *                        (only accessed by this thread)
  * @reply_error:          transaction errors reported by target thread
  *                        (protected by @proc->inner_lock)
+ * @ee:                   extended error information from this thread
+ *                        (protected by @proc->inner_lock)
  * @wait:                 wait queue for thread work
  * @stats:                per-thread statistics
  *                        (atomics, no lock needed)
@@ -504,6 +506,7 @@ struct binder_thread {
        bool process_todo;
        struct binder_error return_error;
        struct binder_error reply_error;
+       struct binder_extended_error ee;
        wait_queue_head_t wait;
        struct binder_stats stats;
        atomic_t tmp_ref;
@@ -515,6 +518,7 @@ struct binder_thread {
  * @fixup_entry:          list entry
  * @file:                 struct file to be associated with new fd
  * @offset:               offset in buffer data to this fixup
+ * @target_fd:            fd to use by the target to install @file
  *
  * List element for fd fixups in a transaction. Since file
  * descriptors need to be allocated in the context of the
@@ -525,6 +529,7 @@ struct binder_txn_fd_fixup {
        struct list_head fixup_entry;
        struct file *file;
        size_t offset;
+       int target_fd;
 };
 
 struct binder_transaction {
index e3605cd..6c5e94f 100644 (file)
@@ -60,6 +60,7 @@ enum binderfs_stats_mode {
 
 struct binder_features {
        bool oneway_spam_detection;
+       bool extended_error;
 };
 
 static const struct constant_table binderfs_param_stats[] = {
@@ -75,6 +76,7 @@ static const struct fs_parameter_spec binderfs_fs_parameters[] = {
 
 static struct binder_features binder_features = {
        .oneway_spam_detection = true,
+       .extended_error = true,
 };
 
 static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
@@ -615,6 +617,12 @@ static int init_binder_features(struct super_block *sb)
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);
 
+       dentry = binderfs_create_file(dir, "extended_error",
+                                     &binder_features_fops,
+                                     &binder_features.extended_error);
+       if (IS_ERR(dentry))
+               return PTR_ERR(dentry);
+
        return 0;
 }
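
Editorial note: the new binderfs feature flag lets userspace test for BINDER_GET_EXTENDED_ERROR support before relying on it. A hedged userspace sketch follows; both the binderfs mount point and the features directory name below are assumptions, since only the extended_error file name appears in the hunk.

#include <stdio.h>

/* Returns 1 if the running kernel advertises extended binder errors. */
static int binder_has_extended_error(void)
{
        /* Path is an assumption: the binderfs mount point and directory may differ. */
        FILE *f = fopen("/dev/binderfs/features/extended_error", "re");
        int supported = 0;

        if (!f)
                return 0;
        if (fscanf(f, "%d", &supported) != 1)
                supported = 0;
        fclose(f);
        return supported;
}

int main(void)
{
        printf("extended_error: %s\n",
               binder_has_extended_error() ? "supported" : "not supported");
        return 0;
}
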
 
index 2448441..400e651 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/gpio/consumer.h>
 
 #include <scsi/scsi_host.h>
-#include <mach/palmld.h>
 
 #define DRV_NAME "pata_palmld"
 
@@ -63,7 +62,7 @@ static int palmld_pata_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        /* remap drive's physical memory address */
-       mem = devm_ioremap(dev, PALMLD_IDE_PHYS, 0x1000);
+       mem = devm_platform_ioremap_resource(pdev, 0);
        if (!mem)
                return -ENOMEM;
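
Editorial note: the pata_palmld conversion drops the <mach/palmld.h> dependency and takes the IDE window from the platform device's first memory resource instead of a hard-coded physical address. As an aside, devm_platform_ioremap_resource() reports failure through ERR_PTR(), so the idiomatic check is IS_ERR()/PTR_ERR() rather than a NULL test; a minimal probe sketch under that convention, with a hypothetical driver name:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        void __iomem *base;

        /* Looks up IORESOURCE_MEM 0 and ioremaps it with devm-managed cleanup. */
        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* ... program the device through base ... */
        return 0;
}

static struct platform_driver example_driver = {
        .probe = example_probe,
        .driver = {
                .name = "example-pata",        /* hypothetical */
        },
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");
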
 
index 02f7f13..83217d2 100644 (file)
@@ -25,6 +25,7 @@ obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
 obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
 obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += arch_topology.o
 obj-$(CONFIG_GENERIC_ARCH_NUMA) += arch_numa.o
+obj-$(CONFIG_ACPI) += physical_location.o
 
 obj-y                  += test/
 
index f73b836..579c851 100644 (file)
@@ -19,6 +19,9 @@
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/thermal_pressure.h>
+
 static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
 static struct cpumask scale_freq_counters_mask;
 static bool scale_freq_invariant;
@@ -195,6 +198,8 @@ void topology_update_thermal_pressure(const struct cpumask *cpus,
 
        th_pressure = max_capacity - capacity;
 
+       trace_thermal_pressure_update(cpu, th_pressure);
+
        for_each_cpu(cpu, cpus)
                WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
 }
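
Editorial note: the arch_topology change adds a thermal_pressure_update tracepoint that fires when topology_update_thermal_pressure() recomputes the pressure from a new frequency cap, making the value observable without scheduler instrumentation. A sketch of the caller side, as a cooling driver might use it; the second parameter is not shown in the hunk, so its name here is taken from the existing prototype and should be treated as an assumption:

#include <linux/arch_topology.h>
#include <linux/cpumask.h>

/*
 * Hypothetical cooling-driver snippet: report a new frequency cap for a set
 * of CPUs so the scheduler-visible thermal pressure is recomputed (and the
 * new tracepoint records the resulting pressure value).
 */
static void example_report_freq_cap(const struct cpumask *cpus,
                                    unsigned long capped_freq)
{
        topology_update_thermal_pressure(cpus, capped_freq);
}
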
index 2882af2..ab71403 100644 (file)
@@ -159,6 +159,7 @@ extern char *make_class_name(const char *name, struct kobject *kobj);
 extern int devres_release_all(struct device *dev);
 extern void device_block_probing(void);
 extern void device_unblock_probing(void);
+extern void deferred_probe_extend_timeout(void);
 
 /* /sys/devices directory */
 extern struct kset *devices_kset;
index 97936ec..7ca47e5 100644 (file)
@@ -617,7 +617,7 @@ int bus_add_driver(struct device_driver *drv)
        if (drv->bus->p->drivers_autoprobe) {
                error = driver_attach(drv);
                if (error)
-                       goto out_unregister;
+                       goto out_del_list;
        }
        module_add_driver(drv->owner, drv);
 
@@ -644,6 +644,8 @@ int bus_add_driver(struct device_driver *drv)
 
        return 0;
 
+out_del_list:
+       klist_del(&priv->knode_bus);
 out_unregister:
        kobject_put(&priv->kobj);
        /* drv->p is freed in driver_release()  */
index 2eede2e..7cd789c 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/dma-map-ops.h> /* for dma_default_coherent */
 
 #include "base.h"
+#include "physical_location.h"
 #include "power/power.h"
 
 #ifdef CONFIG_SYSFS_DEPRECATED
@@ -2649,8 +2650,17 @@ static int device_add_attrs(struct device *dev)
                        goto err_remove_dev_waiting_for_supplier;
        }
 
+       if (dev_add_physical_location(dev)) {
+               error = device_add_group(dev,
+                       &dev_attr_physical_location_group);
+               if (error)
+                       goto err_remove_dev_removable;
+       }
+
        return 0;
 
+ err_remove_dev_removable:
+       device_remove_file(dev, &dev_attr_removable);
  err_remove_dev_waiting_for_supplier:
        device_remove_file(dev, &dev_attr_waiting_for_supplier);
  err_remove_dev_online:
@@ -2672,6 +2682,11 @@ static void device_remove_attrs(struct device *dev)
        struct class *class = dev->class;
        const struct device_type *type = dev->type;
 
+       if (dev->physical_location) {
+               device_remove_group(dev, &dev_attr_physical_location_group);
+               kfree(dev->physical_location);
+       }
+
        device_remove_file(dev, &dev_attr_removable);
        device_remove_file(dev, &dev_attr_waiting_for_supplier);
        device_remove_file(dev, &dev_attr_online);
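
Editorial note: when ACPI location data is available, dev_add_physical_location() attaches an extra attribute group to the device, and device_remove_attrs() now tears it down and frees dev->physical_location on removal. Assuming the group directory is named physical_location (the group itself is defined in the new physical_location.h, which is not part of these hunks), a small userspace dumper can read it generically without knowing the individual attribute names:

#include <dirent.h>
#include <limits.h>
#include <stdio.h>

/* Usage: physloc /sys/devices/.../physical_location (path is an example). */
int main(int argc, char **argv)
{
        char path[PATH_MAX], buf[256];
        struct dirent *de;
        DIR *dir;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <sysfs-group-dir>\n", argv[0]);
                return 1;
        }

        dir = opendir(argv[1]);
        if (!dir)
                return 1;

        while ((de = readdir(dir))) {
                FILE *f;

                if (de->d_name[0] == '.')
                        continue;
                snprintf(path, sizeof(path), "%s/%s", argv[1], de->d_name);
                f = fopen(path, "re");
                if (!f)
                        continue;
                if (fgets(buf, sizeof(buf), f))
                        printf("%s: %s", de->d_name, buf);
                fclose(f);
        }
        closedir(dir);
        return 0;
}
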
index 3fc3b59..11b0fb6 100644 (file)
@@ -60,6 +60,7 @@ static bool initcalls_done;
 /* Save the async probe drivers' name from kernel cmdline */
 #define ASYNC_DRV_NAMES_MAX_LEN        256
 static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN];
+static bool async_probe_default;
 
 /*
  * In some cases, like suspend to RAM or hibernation, It might be reasonable
@@ -257,7 +258,6 @@ DEFINE_SHOW_ATTRIBUTE(deferred_devs);
 
 int driver_deferred_probe_timeout;
 EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
-static DECLARE_WAIT_QUEUE_HEAD(probe_timeout_waitqueue);
 
 static int __init deferred_probe_timeout_setup(char *str)
 {
@@ -274,10 +274,10 @@ __setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
  * @dev: device to check
  *
  * Return:
- * -ENODEV if initcalls have completed and modules are disabled.
- * -ETIMEDOUT if the deferred probe timeout was set and has expired
- *  and modules are enabled.
- * -EPROBE_DEFER in other cases.
+ * -ENODEV if initcalls have completed and modules are disabled.
+ * -ETIMEDOUT if the deferred probe timeout was set and has expired
+ *   and modules are enabled.
+ * -EPROBE_DEFER in other cases.
  *
  * Drivers or subsystems can opt-in to calling this function instead of directly
  * returning -EPROBE_DEFER.
@@ -312,10 +312,23 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
        list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
                dev_info(p->device, "deferred probe pending\n");
        mutex_unlock(&deferred_probe_mutex);
-       wake_up_all(&probe_timeout_waitqueue);
 }
 static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
 
+void deferred_probe_extend_timeout(void)
+{
+       /*
+        * If the work hasn't been queued yet or if the work expired, don't
+        * start a new one.
+        */
+       if (cancel_delayed_work(&deferred_probe_timeout_work)) {
+               schedule_delayed_work(&deferred_probe_timeout_work,
+                               driver_deferred_probe_timeout * HZ);
+               pr_debug("Extended deferred probe timeout by %d secs\n",
+                                       driver_deferred_probe_timeout);
+       }
+}
+
 /**
  * deferred_probe_initcall() - Enable probing of deferred devices
  *
@@ -671,6 +684,8 @@ sysfs_failed:
        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+       if (dev->bus && dev->bus->dma_cleanup)
+               dev->bus->dma_cleanup(dev);
 pinctrl_bind_failed:
        device_links_no_driver(dev);
        device_unbind_cleanup(dev);
@@ -716,9 +731,6 @@ int driver_probe_done(void)
  */
 void wait_for_device_probe(void)
 {
-       /* wait for probe timeout */
-       wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout);
-
        /* wait for the deferred probe workqueue to finish */
        flush_work(&deferred_probe_work);
 
@@ -797,7 +809,11 @@ static int driver_probe_device(struct device_driver *drv, struct device *dev)
 
 static inline bool cmdline_requested_async_probing(const char *drv_name)
 {
-       return parse_option_str(async_probe_drv_names, drv_name);
+       bool async_drv;
+
+       async_drv = parse_option_str(async_probe_drv_names, drv_name);
+
+       return (async_probe_default != async_drv);
 }
 
 /* The option format is "driver_async_probe=drv_name1,drv_name2,..." */
@@ -807,6 +823,8 @@ static int __init save_async_options(char *buf)
                pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
 
        strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
+       async_probe_default = parse_option_str(async_probe_drv_names, "*");
+
        return 1;
 }
 __setup("driver_async_probe=", save_async_options);
@@ -941,6 +959,7 @@ out_unlock:
 static int __device_attach(struct device *dev, bool allow_async)
 {
        int ret = 0;
+       bool async = false;
 
        device_lock(dev);
        if (dev->p->dead) {
@@ -979,7 +998,7 @@ static int __device_attach(struct device *dev, bool allow_async)
                         */
                        dev_dbg(dev, "scheduling asynchronous probe\n");
                        get_device(dev);
-                       async_schedule_dev(__device_attach_async_helper, dev);
+                       async = true;
                } else {
                        pm_request_idle(dev);
                }
@@ -989,6 +1008,8 @@ static int __device_attach(struct device *dev, bool allow_async)
        }
 out_unlock:
        device_unlock(dev);
+       if (async)
+               async_schedule_dev(__device_attach_async_helper, dev);
        return ret;
 }
 
@@ -1082,6 +1103,7 @@ static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
 
        __device_driver_lock(dev, dev->parent);
        drv = dev->p->async_driver;
+       dev->p->async_driver = NULL;
        ret = driver_probe_device(drv, dev);
        __device_driver_unlock(dev, dev->parent);
 
@@ -1128,7 +1150,7 @@ static int __driver_attach(struct device *dev, void *data)
                 */
                dev_dbg(dev, "probing driver %s asynchronously\n", drv->name);
                device_lock(dev);
-               if (!dev->driver) {
+               if (!dev->driver && !dev->p->async_driver) {
                        get_device(dev);
                        dev->p->async_driver = drv;
                        async_schedule_dev(__driver_attach_async_helper, dev);
@@ -1199,6 +1221,9 @@ static void __device_release_driver(struct device *dev, struct device *parent)
 
                device_remove(dev);
 
+               if (dev->bus && dev->bus->dma_cleanup)
+                       dev->bus->dma_cleanup(dev);
+
                device_links_driver_cleanup(dev);
                device_unbind_cleanup(dev);
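
Editorial note: several behavioural changes meet in these dd.c hunks. __device_attach() now only flags that an async probe is wanted and calls async_schedule_dev() after dropping the device lock; __driver_attach() skips devices that already have an async probe queued; the deferred-probe timeout is re-armed by deferred_probe_extend_timeout() (called from driver_register() in the next file); and driver_async_probe= gains a "*" wildcard that inverts the default, so driver_async_probe=*,drvA probes everything asynchronously except drvA. Drivers can also opt in per-driver; a minimal sketch with hypothetical names:

#include <linux/device/driver.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        /* Possibly slow initialisation that should not stall boot. */
        return 0;
}

static struct platform_driver example_async_driver = {
        .probe = example_probe,
        .driver = {
                .name = "example-async",        /* hypothetical */
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
};
module_platform_driver(example_async_driver);
MODULE_LICENSE("GPL");
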
 
index 8c0d33e..15a75af 100644 (file)
@@ -31,6 +31,75 @@ static struct device *next_device(struct klist_iter *i)
 }
 
 /**
+ * driver_set_override() - Helper to set or clear driver override.
+ * @dev: Device to change
+ * @override: Address of string to change (e.g. &device->driver_override);
+ *            The contents will be freed and hold newly allocated override.
+ * @s: NUL-terminated string, new driver name to force a match, pass empty
+ *     string to clear it ("" or "\n", where the latter is only for sysfs
+ *     interface).
+ * @len: length of @s
+ *
+ * Helper to set or clear driver override in a device, intended for the cases
+ * when the driver_override field is allocated by driver/bus code.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int driver_set_override(struct device *dev, const char **override,
+                       const char *s, size_t len)
+{
+       const char *new, *old;
+       char *cp;
+
+       if (!override || !s)
+               return -EINVAL;
+
+       /*
+        * The stored value will be used in sysfs show callback (sysfs_emit()),
+        * which has a length limit of PAGE_SIZE and adds a trailing newline.
+        * Thus we can store one character less to avoid truncation during sysfs
+        * show.
+        */
+       if (len >= (PAGE_SIZE - 1))
+               return -EINVAL;
+
+       if (!len) {
+               /* Empty string passed - clear override */
+               device_lock(dev);
+               old = *override;
+               *override = NULL;
+               device_unlock(dev);
+               kfree(old);
+
+               return 0;
+       }
+
+       cp = strnchr(s, len, '\n');
+       if (cp)
+               len = cp - s;
+
+       new = kstrndup(s, len, GFP_KERNEL);
+       if (!new)
+               return -ENOMEM;
+
+       device_lock(dev);
+       old = *override;
+       if (cp != s) {
+               *override = new;
+       } else {
+               /* "\n" passed - clear override */
+               kfree(new);
+               *override = NULL;
+       }
+       device_unlock(dev);
+
+       kfree(old);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(driver_set_override);
+
+/**
  * driver_for_each_device - Iterator for devices bound to a driver.
  * @drv: Driver we're iterating.
  * @start: Device to begin with
@@ -177,6 +246,7 @@ int driver_register(struct device_driver *drv)
                return ret;
        }
        kobject_uevent(&drv->p->kobj, KOBJ_ADD);
+       deferred_probe_extend_timeout();
 
        return ret;
 }
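
Editorial note: driver_set_override() gives buses one vetted implementation of the driver_override store logic (length capped below PAGE_SIZE, trailing newline handled, old string freed under the device lock), intended for buses that allocate driver_override themselves. A sketch of how a bus's sysfs store/show callbacks might use it; the wrapper structure and attribute below are hypothetical:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical bus-specific device carrying a driver_override string. */
struct example_bus_device {
        struct device dev;
        const char *driver_override;
};

#define to_example_bus_device(d) container_of(d, struct example_bus_device, dev)

static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct example_bus_device *edev = to_example_bus_device(dev);
        int ret;

        /* Frees any previous override and stores a newly allocated copy,
         * or clears the override when an empty string / "\n" is written. */
        ret = driver_set_override(dev, &edev->driver_override, buf, count);
        if (ret)
                return ret;

        return count;
}

static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct example_bus_device *edev = to_example_bus_device(dev);
        ssize_t len;

        device_lock(dev);
        len = sysfs_emit(buf, "%s\n", edev->driver_override);
        device_unlock(dev);
        return len;
}
static DEVICE_ATTR_RW(driver_override);
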
index 38f3b66..5166b32 100644 (file)
@@ -29,6 +29,9 @@ if FW_LOADER
 config FW_LOADER_PAGED_BUF
        bool
 
+config FW_LOADER_SYSFS
+       bool
+
 config EXTRA_FIRMWARE
        string "Build named firmware blobs into the kernel binary"
        help
@@ -72,6 +75,7 @@ config EXTRA_FIRMWARE_DIR
 
 config FW_LOADER_USER_HELPER
        bool "Enable the firmware sysfs fallback mechanism"
+       select FW_LOADER_SYSFS
        select FW_LOADER_PAGED_BUF
        help
          This option enables a sysfs loading facility to enable firmware
@@ -159,21 +163,34 @@ config FW_LOADER_USER_HELPER_FALLBACK
 
 config FW_LOADER_COMPRESS
        bool "Enable compressed firmware support"
-       select FW_LOADER_PAGED_BUF
-       select XZ_DEC
        help
          This option enables the support for loading compressed firmware
          files. The caller of firmware API receives the decompressed file
          content. The compressed file is loaded as a fallback, only after
          loading the raw file failed at first.
 
-         Currently only XZ-compressed files are supported, and they have to
-         be compressed with either none or crc32 integrity check type (pass
-         "-C crc32" option to xz command).
-
          Compressed firmware support does not apply to firmware images
          that are built into the kernel image (CONFIG_EXTRA_FIRMWARE).
 
+if FW_LOADER_COMPRESS
+config FW_LOADER_COMPRESS_XZ
+       bool "Enable XZ-compressed firmware support"
+       select FW_LOADER_PAGED_BUF
+       select XZ_DEC
+       default y
+       help
+         This option adds the support for XZ-compressed files.
+         The files have to be compressed with either none or crc32
+         integrity check type (pass "-C crc32" option to xz command).
+
+config FW_LOADER_COMPRESS_ZSTD
+       bool "Enable ZSTD-compressed firmware support"
+       select ZSTD_DECOMPRESS
+       help
+         This option adds the support for ZSTD-compressed files.
+
+endif # FW_LOADER_COMPRESS
+
 config FW_CACHE
        bool "Enable firmware caching during suspend"
        depends on PM_SLEEP
@@ -186,5 +203,19 @@ config FW_CACHE
 
          If unsure, say Y.
 
+config FW_UPLOAD
+       bool "Enable users to initiate firmware updates using sysfs"
+       select FW_LOADER_SYSFS
+       select FW_LOADER_PAGED_BUF
+       help
+         Enabling this option will allow device drivers to expose a persistent
+         sysfs interface that allows firmware updates to be initiated from
+         userspace. For example, FPGA based PCIe cards load firmware and FPGA
+         images from local FLASH when the card boots. The images in FLASH may
+         be updated with new images provided by the user. Enable this device
+         to support cards that rely on user-initiated updates for firmware files.
+
+         If unsure, say N.
+
 endif # FW_LOADER
 endmenu
index e878434..60d19f9 100644 (file)
@@ -6,5 +6,7 @@ obj-$(CONFIG_FW_LOADER) += firmware_class.o
 firmware_class-objs := main.o
 firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o
 firmware_class-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += fallback_platform.o
+firmware_class-$(CONFIG_FW_LOADER_SYSFS) += sysfs.o
+firmware_class-$(CONFIG_FW_UPLOAD) += sysfs_upload.o
 
 obj-y += builtin/
index 4afb0e9..bf68e39 100644 (file)
@@ -3,12 +3,9 @@
 #include <linux/types.h>
 #include <linux/kconfig.h>
 #include <linux/list.h>
-#include <linux/slab.h>
 #include <linux/security.h>
-#include <linux/highmem.h>
 #include <linux/umh.h>
 #include <linux/sysctl.h>
-#include <linux/vmalloc.h>
 #include <linux/module.h>
 
 #include "fallback.h"
  * firmware fallback mechanism
  */
 
-MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
-
-extern struct firmware_fallback_config fw_fallback_config;
-
-/* These getters are vetted to use int properly */
-static inline int __firmware_loading_timeout(void)
-{
-       return fw_fallback_config.loading_timeout;
-}
-
-/* These setters are vetted to use int properly */
-static void __fw_fallback_set_timeout(int timeout)
-{
-       fw_fallback_config.loading_timeout = timeout;
-}
-
 /*
  * use small loading timeout for caching devices' firmware because all these
  * firmware images have been loaded successfully at lease once, also system is
@@ -58,52 +39,11 @@ static long firmware_loading_timeout(void)
                __firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
 }
 
-static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
-{
-       return __fw_state_check(fw_priv, FW_STATUS_DONE);
-}
-
-static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
-{
-       return __fw_state_check(fw_priv, FW_STATUS_LOADING);
-}
-
 static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv,  long timeout)
 {
        return __fw_state_wait_common(fw_priv, timeout);
 }
 
-struct fw_sysfs {
-       bool nowait;
-       struct device dev;
-       struct fw_priv *fw_priv;
-       struct firmware *fw;
-};
-
-static struct fw_sysfs *to_fw_sysfs(struct device *dev)
-{
-       return container_of(dev, struct fw_sysfs, dev);
-}
-
-static void __fw_load_abort(struct fw_priv *fw_priv)
-{
-       /*
-        * There is a small window in which user can write to 'loading'
-        * between loading done/aborted and disappearance of 'loading'
-        */
-       if (fw_state_is_aborted(fw_priv) || fw_sysfs_done(fw_priv))
-               return;
-
-       fw_state_aborted(fw_priv);
-}
-
-static void fw_load_abort(struct fw_sysfs *fw_sysfs)
-{
-       struct fw_priv *fw_priv = fw_sysfs->fw_priv;
-
-       __fw_load_abort(fw_priv);
-}
-
 static LIST_HEAD(pending_fw_head);
 
 void kill_pending_fw_fallback_reqs(bool only_kill_custom)
@@ -120,376 +60,6 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom)
        mutex_unlock(&fw_lock);
 }
 
-static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
-                           char *buf)
-{
-       return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
-}
-
-/**
- * timeout_store() - set number of seconds to wait for firmware
- * @class: device class pointer
- * @attr: device attribute pointer
- * @buf: buffer to scan for timeout value
- * @count: number of bytes in @buf
- *
- *     Sets the number of seconds to wait for the firmware.  Once
- *     this expires an error will be returned to the driver and no
- *     firmware will be provided.
- *
- *     Note: zero means 'wait forever'.
- **/
-static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
-                            const char *buf, size_t count)
-{
-       int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
-
-       if (tmp_loading_timeout < 0)
-               tmp_loading_timeout = 0;
-
-       __fw_fallback_set_timeout(tmp_loading_timeout);
-
-       return count;
-}
-static CLASS_ATTR_RW(timeout);
-
-static struct attribute *firmware_class_attrs[] = {
-       &class_attr_timeout.attr,
-       NULL,
-};
-ATTRIBUTE_GROUPS(firmware_class);
-
-static void fw_dev_release(struct device *dev)
-{
-       struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
-
-       kfree(fw_sysfs);
-}
-
-static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
-{
-       if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
-               return -ENOMEM;
-       if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
-               return -ENOMEM;
-       if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
-               return -ENOMEM;
-
-       return 0;
-}
-
-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
-       struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
-       int err = 0;
-
-       mutex_lock(&fw_lock);
-       if (fw_sysfs->fw_priv)
-               err = do_firmware_uevent(fw_sysfs, env);
-       mutex_unlock(&fw_lock);
-       return err;
-}
-
-static struct class firmware_class = {
-       .name           = "firmware",
-       .class_groups   = firmware_class_groups,
-       .dev_uevent     = firmware_uevent,
-       .dev_release    = fw_dev_release,
-};
-
-int register_sysfs_loader(void)
-{
-       int ret = class_register(&firmware_class);
-
-       if (ret != 0)
-               return ret;
-       return register_firmware_config_sysctl();
-}
-
-void unregister_sysfs_loader(void)
-{
-       unregister_firmware_config_sysctl();
-       class_unregister(&firmware_class);
-}
-
-static ssize_t firmware_loading_show(struct device *dev,
-                                    struct device_attribute *attr, char *buf)
-{
-       struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
-       int loading = 0;
-
-       mutex_lock(&fw_lock);
-       if (fw_sysfs->fw_priv)
-               loading = fw_sysfs_loading(fw_sysfs->fw_priv);
-       mutex_unlock(&fw_lock);
-
-       return sysfs_emit(buf, "%d\n", loading);
-}
-
-/**
- * firmware_loading_store() - set value in the 'loading' control file
- * @dev: device pointer
- * @attr: device attribute pointer
- * @buf: buffer to scan for loading control value
- * @count: number of bytes in @buf
- *
- *     The relevant values are:
- *
- *      1: Start a load, discarding any previous partial load.
- *      0: Conclude the load and hand the data to the driver code.
- *     -1: Conclude the load with an error and discard any written data.
- **/
-static ssize_t firmware_loading_store(struct device *dev,
-                                     struct device_attribute *attr,
-                                     const char *buf, size_t count)
-{
-       struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
-       struct fw_priv *fw_priv;
-       ssize_t written = count;
-       int loading = simple_strtol(buf, NULL, 10);
-
-       mutex_lock(&fw_lock);
-       fw_priv = fw_sysfs->fw_priv;
-       if (fw_state_is_aborted(fw_priv))
-               goto out;
-
-       switch (loading) {
-       case 1:
-               /* discarding any previous partial load */
-               if (!fw_sysfs_done(fw_priv)) {
-                       fw_free_paged_buf(fw_priv);
-                       fw_state_start(fw_priv);
-               }
-               break;
-       case 0:
-               if (fw_sysfs_loading(fw_priv)) {
-                       int rc;
-
-                       /*
-                        * Several loading requests may be pending on
-                        * one same firmware buf, so let all requests
-                        * see the mapped 'buf->data' once the loading
-                        * is completed.
-                        * */
-                       rc = fw_map_paged_buf(fw_priv);
-                       if (rc)
-                               dev_err(dev, "%s: map pages failed\n",
-                                       __func__);
-                       else
-                               rc = security_kernel_post_load_data(fw_priv->data,
-                                               fw_priv->size,
-                                               LOADING_FIRMWARE, "blob");
-
-                       /*
-                        * Same logic as fw_load_abort, only the DONE bit
-                        * is ignored and we set ABORT only on failure.
-                        */
-                       if (rc) {
-                               fw_state_aborted(fw_priv);
-                               written = rc;
-                       } else {
-                               fw_state_done(fw_priv);
-                       }
-                       break;
-               }
-               fallthrough;
-       default:
-               dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
-               fallthrough;
-       case -1:
-               fw_load_abort(fw_sysfs);
-               break;
-       }
-out:
-       mutex_unlock(&fw_lock);
-       return written;
-}
-
-static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
-
-static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
-                          loff_t offset, size_t count, bool read)
-{
-       if (read)
-               memcpy(buffer, fw_priv->data + offset, count);
-       else
-               memcpy(fw_priv->data + offset, buffer, count);
-}
-
-static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
-                       loff_t offset, size_t count, bool read)
-{
-       while (count) {
-               void *page_data;
-               int page_nr = offset >> PAGE_SHIFT;
-               int page_ofs = offset & (PAGE_SIZE-1);
-               int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
-
-               page_data = kmap(fw_priv->pages[page_nr]);
-
-               if (read)
-                       memcpy(buffer, page_data + page_ofs, page_cnt);
-               else
-                       memcpy(page_data + page_ofs, buffer, page_cnt);
-
-               kunmap(fw_priv->pages[page_nr]);
-               buffer += page_cnt;
-               offset += page_cnt;
-               count -= page_cnt;
-       }
-}
-
-static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
-                                 struct bin_attribute *bin_attr,
-                                 char *buffer, loff_t offset, size_t count)
-{
-       struct device *dev = kobj_to_dev(kobj);
-       struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
-       struct fw_priv *fw_priv;
-       ssize_t ret_count;
-
-       mutex_lock(&fw_lock);
-       fw_priv = fw_sysfs->fw_priv;
-       if (!fw_priv || fw_sysfs_done(fw_priv)) {
-               ret_count = -ENODEV;
-               goto out;
-       }
-       if (offset > fw_priv->size) {
-               ret_count = 0;
-               goto out;
-       }
-       if (count > fw_priv->size - offset)
-               count = fw_priv->size - offset;
-
-       ret_count = count;
-
-       if (fw_priv->data)
-               firmware_rw_data(fw_priv, buffer, offset, count, true);
-       else
-               firmware_rw(fw_priv, buffer, offset, count, true);
-
-out:
-       mutex_unlock(&fw_lock);
-       return ret_count;
-}
-
-static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
-{
-       int err;
-
-       err = fw_grow_paged_buf(fw_sysfs->fw_priv,
-                               PAGE_ALIGN(min_size) >> PAGE_SHIFT);
-       if (err)
-               fw_load_abort(fw_sysfs);
-       return err;
-}
-
-/**
- * firmware_data_write() - write method for firmware
- * @filp: open sysfs file
- * @kobj: kobject for the device
- * @bin_attr: bin_attr structure
- * @buffer: buffer being written
- * @offset: buffer offset for write in total data store area
- * @count: buffer size
- *
- *     Data written to the 'data' attribute will be later handed to
- *     the driver as a firmware image.
- **/
-static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
-                                  struct bin_attribute *bin_attr,
-                                  char *buffer, loff_t offset, size_t count)
-{
-       struct device *dev = kobj_to_dev(kobj);
-       struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
-       struct fw_priv *fw_priv;
-       ssize_t retval;
-
-       if (!capable(CAP_SYS_RAWIO))
-               return -EPERM;
-
-       mutex_lock(&fw_lock);
-       fw_priv = fw_sysfs->fw_priv;
-       if (!fw_priv || fw_sysfs_done(fw_priv)) {
-               retval = -ENODEV;
-               goto out;
-       }
-
-       if (fw_priv->data) {
-               if (offset + count > fw_priv->allocated_size) {
-                       retval = -ENOMEM;
-                       goto out;
-               }
-               firmware_rw_data(fw_priv, buffer, offset, count, false);
-               retval = count;
-       } else {
-               retval = fw_realloc_pages(fw_sysfs, offset + count);
-               if (retval)
-                       goto out;
-
-               retval = count;
-               firmware_rw(fw_priv, buffer, offset, count, false);
-       }
-
-       fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
-out:
-       mutex_unlock(&fw_lock);
-       return retval;
-}
-
-static struct bin_attribute firmware_attr_data = {
-       .attr = { .name = "data", .mode = 0644 },
-       .size = 0,
-       .read = firmware_data_read,
-       .write = firmware_data_write,
-};
-
-static struct attribute *fw_dev_attrs[] = {
-       &dev_attr_loading.attr,
-       NULL
-};
-
-static struct bin_attribute *fw_dev_bin_attrs[] = {
-       &firmware_attr_data,
-       NULL
-};
-
-static const struct attribute_group fw_dev_attr_group = {
-       .attrs = fw_dev_attrs,
-       .bin_attrs = fw_dev_bin_attrs,
-};
-
-static const struct attribute_group *fw_dev_attr_groups[] = {
-       &fw_dev_attr_group,
-       NULL
-};
-
-static struct fw_sysfs *
-fw_create_instance(struct firmware *firmware, const char *fw_name,
-                  struct device *device, u32 opt_flags)
-{
-       struct fw_sysfs *fw_sysfs;
-       struct device *f_dev;
-
-       fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
-       if (!fw_sysfs) {
-               fw_sysfs = ERR_PTR(-ENOMEM);
-               goto exit;
-       }
-
-       fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
-       fw_sysfs->fw = firmware;
-       f_dev = &fw_sysfs->dev;
-
-       device_initialize(f_dev);
-       dev_set_name(f_dev, "%s", fw_name);
-       f_dev->parent = device;
-       f_dev->class = &firmware_class;
-       f_dev->groups = fw_dev_attr_groups;
-exit:
-       return fw_sysfs;
-}
-
 /**
  * fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism
  * @fw_sysfs: firmware sysfs information for the firmware to load
index 9f3055d..1441485 100644 (file)
@@ -6,29 +6,7 @@
 #include <linux/device.h>
 
 #include "firmware.h"
-
-/**
- * struct firmware_fallback_config - firmware fallback configuration settings
- *
- * Helps describe and fine tune the fallback mechanism.
- *
- * @force_sysfs_fallback: force the sysfs fallback mechanism to be used
- *     as if one had enabled CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y.
- *     Useful to help debug a CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
- *     functionality on a kernel where that config entry has been disabled.
- * @ignore_sysfs_fallback: force to disable the sysfs fallback mechanism.
- *     This emulates the behaviour as if we had set the kernel
- *     config CONFIG_FW_LOADER_USER_HELPER=n.
- * @old_timeout: for internal use
- * @loading_timeout: the timeout to wait for the fallback mechanism before
- *     giving up, in seconds.
- */
-struct firmware_fallback_config {
-       unsigned int force_sysfs_fallback;
-       unsigned int ignore_sysfs_fallback;
-       int old_timeout;
-       int loading_timeout;
-};
+#include "sysfs.h"
 
 #ifdef CONFIG_FW_LOADER_USER_HELPER
 int firmware_fallback_sysfs(struct firmware *fw, const char *name,
@@ -40,19 +18,6 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom);
 void fw_fallback_set_cache_timeout(void);
 void fw_fallback_set_default_timeout(void);
 
-int register_sysfs_loader(void);
-void unregister_sysfs_loader(void);
-#ifdef CONFIG_SYSCTL
-extern int register_firmware_config_sysctl(void);
-extern void unregister_firmware_config_sysctl(void);
-#else
-static inline int register_firmware_config_sysctl(void)
-{
-       return 0;
-}
-static inline void unregister_firmware_config_sysctl(void) { }
-#endif /* CONFIG_SYSCTL */
-
 #else /* CONFIG_FW_LOADER_USER_HELPER */
 static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
                                          struct device *device,
@@ -66,15 +31,6 @@ static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
 static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }
 static inline void fw_fallback_set_cache_timeout(void) { }
 static inline void fw_fallback_set_default_timeout(void) { }
-
-static inline int register_sysfs_loader(void)
-{
-       return 0;
-}
-
-static inline void unregister_sysfs_loader(void)
-{
-}
 #endif /* CONFIG_FW_LOADER_USER_HELPER */
 
 #ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
index 2889f44..fe77e91 100644 (file)
@@ -87,6 +87,7 @@ struct fw_priv {
 };
 
 extern struct mutex fw_lock;
+extern struct firmware_cache fw_cache;
 
 static inline bool __fw_state_check(struct fw_priv *fw_priv,
                                    enum fw_status status)
@@ -149,7 +150,22 @@ static inline void fw_state_done(struct fw_priv *fw_priv)
        __fw_state_set(fw_priv, FW_STATUS_DONE);
 }
 
+static inline bool fw_state_is_done(struct fw_priv *fw_priv)
+{
+       return __fw_state_check(fw_priv, FW_STATUS_DONE);
+}
+
+static inline bool fw_state_is_loading(struct fw_priv *fw_priv)
+{
+       return __fw_state_check(fw_priv, FW_STATUS_LOADING);
+}
+
+int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc,
+                        struct fw_priv **fw_priv, void *dbuf, size_t size,
+                        size_t offset, u32 opt_flags);
 int assign_fw(struct firmware *fw, struct device *device);
+void free_fw_priv(struct fw_priv *fw_priv);
+void fw_state_init(struct fw_priv *fw_priv);
 
 #ifdef CONFIG_FW_LOADER
 bool firmware_is_builtin(const struct firmware *fw);
index 406a907..ac3f34e 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/reboot.h>
 #include <linux/security.h>
+#include <linux/zstd.h>
 #include <linux/xz.h>
 
 #include <generated/utsrelease.h>
@@ -91,9 +92,9 @@ static inline struct fw_priv *to_fw_priv(struct kref *ref)
  * guarding for corner cases a global lock should be OK */
 DEFINE_MUTEX(fw_lock);
 
-static struct firmware_cache fw_cache;
+struct firmware_cache fw_cache;
 
-static void fw_state_init(struct fw_priv *fw_priv)
+void fw_state_init(struct fw_priv *fw_priv)
 {
        struct fw_state *fw_st = &fw_priv->fw_st;
 
@@ -163,13 +164,9 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
 }
 
 /* Returns 1 for batching firmware requests with the same name */
-static int alloc_lookup_fw_priv(const char *fw_name,
-                               struct firmware_cache *fwc,
-                               struct fw_priv **fw_priv,
-                               void *dbuf,
-                               size_t size,
-                               size_t offset,
-                               u32 opt_flags)
+int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc,
+                        struct fw_priv **fw_priv, void *dbuf, size_t size,
+                        size_t offset, u32 opt_flags)
 {
        struct fw_priv *tmp;
 
@@ -224,7 +221,7 @@ static void __free_fw_priv(struct kref *ref)
        kfree(fw_priv);
 }
 
-static void free_fw_priv(struct fw_priv *fw_priv)
+void free_fw_priv(struct fw_priv *fw_priv)
 {
        struct firmware_cache *fwc = fw_priv->fwc;
        spin_lock(&fwc->lock);
@@ -253,6 +250,8 @@ void fw_free_paged_buf(struct fw_priv *fw_priv)
        fw_priv->pages = NULL;
        fw_priv->page_array_size = 0;
        fw_priv->nr_pages = 0;
+       fw_priv->data = NULL;
+       fw_priv->size = 0;
 }
 
 int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed)
@@ -305,9 +304,73 @@ int fw_map_paged_buf(struct fw_priv *fw_priv)
 #endif
 
 /*
+ * ZSTD-compressed firmware support
+ */
+#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
+static int fw_decompress_zstd(struct device *dev, struct fw_priv *fw_priv,
+                             size_t in_size, const void *in_buffer)
+{
+       size_t len, out_size, workspace_size;
+       void *workspace, *out_buf;
+       zstd_dctx *ctx;
+       int err;
+
+       if (fw_priv->allocated_size) {
+               out_size = fw_priv->allocated_size;
+               out_buf = fw_priv->data;
+       } else {
+               zstd_frame_header params;
+
+               if (zstd_get_frame_header(&params, in_buffer, in_size) ||
+                   params.frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN) {
+                       dev_dbg(dev, "%s: invalid zstd header\n", __func__);
+                       return -EINVAL;
+               }
+               out_size = params.frameContentSize;
+               out_buf = vzalloc(out_size);
+               if (!out_buf)
+                       return -ENOMEM;
+       }
+
+       workspace_size = zstd_dctx_workspace_bound();
+       workspace = kvzalloc(workspace_size, GFP_KERNEL);
+       if (!workspace) {
+               err = -ENOMEM;
+               goto error;
+       }
+
+       ctx = zstd_init_dctx(workspace, workspace_size);
+       if (!ctx) {
+               dev_dbg(dev, "%s: failed to initialize context\n", __func__);
+               err = -EINVAL;
+               goto error;
+       }
+
+       len = zstd_decompress_dctx(ctx, out_buf, out_size, in_buffer, in_size);
+       if (zstd_is_error(len)) {
+               dev_dbg(dev, "%s: failed to decompress: %d\n", __func__,
+                       zstd_get_error_code(len));
+               err = -EINVAL;
+               goto error;
+       }
+
+       if (!fw_priv->allocated_size)
+               fw_priv->data = out_buf;
+       fw_priv->size = len;
+       err = 0;
+
+ error:
+       kvfree(workspace);
+       if (err && !fw_priv->allocated_size)
+               vfree(out_buf);
+       return err;
+}
+#endif /* CONFIG_FW_LOADER_COMPRESS_ZSTD */
+
+/*
  * XZ-compressed firmware support
  */
-#ifdef CONFIG_FW_LOADER_COMPRESS
+#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
 /* show an error and return the standard error code */
 static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret)
 {
@@ -401,7 +464,7 @@ static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
        else
                return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer);
 }
-#endif /* CONFIG_FW_LOADER_COMPRESS */
+#endif /* CONFIG_FW_LOADER_COMPRESS_XZ */
 
 /* direct firmware loading support */
 static char fw_path_para[256];
@@ -771,7 +834,12 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
        if (!(opt_flags & FW_OPT_PARTIAL))
                nondirect = true;
 
-#ifdef CONFIG_FW_LOADER_COMPRESS
+#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
+       if (ret == -ENOENT && nondirect)
+               ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
+                                                fw_decompress_zstd);
+#endif
+#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
        if (ret == -ENOENT && nondirect)
                ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
                                                 fw_decompress_xz);
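
Editorial note: with the Kconfig split, the loader now tries "<name>.zst" (if FW_LOADER_COMPRESS_ZSTD) and then "<name>.xz" (if FW_LOADER_COMPRESS_XZ) when the plain file is not found, handing the driver the decompressed image either way; fw_free_paged_buf() also resets data/size when the paged buffer is dropped. Consumers need no changes; a minimal request_firmware() sketch with a hypothetical firmware name:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

static int example_load_firmware(struct device *dev)
{
        const struct firmware *fw;
        int ret;

        /* Also matches example-fw.bin.zst / example-fw.bin.xz on disk. */
        ret = request_firmware(&fw, "example-fw.bin", dev);
        if (ret)
                return ret;

        /* fw->data / fw->size always hold the decompressed image. */
        /* ... push fw->data to the hardware ... */

        release_firmware(fw);
        return 0;
}
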
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
new file mode 100644 (file)
index 0000000..5b0b85b
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/security.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "sysfs.h"
+
+/*
+ * sysfs support for firmware loader
+ */
+
+void __fw_load_abort(struct fw_priv *fw_priv)
+{
+       /*
+        * There is a small window in which user can write to 'loading'
+        * between loading done/aborted and disappearance of 'loading'
+        */
+       if (fw_state_is_aborted(fw_priv) || fw_state_is_done(fw_priv))
+               return;
+
+       fw_state_aborted(fw_priv);
+}
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
+                           char *buf)
+{
+       return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
+}
+
+/**
+ * timeout_store() - set number of seconds to wait for firmware
+ * @class: device class pointer
+ * @attr: device attribute pointer
+ * @buf: buffer to scan for timeout value
+ * @count: number of bytes in @buf
+ *
+ *     Sets the number of seconds to wait for the firmware.  Once
+ *     this expires an error will be returned to the driver and no
+ *     firmware will be provided.
+ *
+ *     Note: zero means 'wait forever'.
+ **/
+static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
+                            const char *buf, size_t count)
+{
+       int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
+
+       if (tmp_loading_timeout < 0)
+               tmp_loading_timeout = 0;
+
+       __fw_fallback_set_timeout(tmp_loading_timeout);
+
+       return count;
+}
+static CLASS_ATTR_RW(timeout);
+
+static struct attribute *firmware_class_attrs[] = {
+       &class_attr_timeout.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(firmware_class);
+
+static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
+{
+       if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
+               return -ENOMEM;
+       if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
+               return -ENOMEM;
+       if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
+               return -ENOMEM;
+
+       return 0;
+}
+
+static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+       int err = 0;
+
+       mutex_lock(&fw_lock);
+       if (fw_sysfs->fw_priv)
+               err = do_firmware_uevent(fw_sysfs, env);
+       mutex_unlock(&fw_lock);
+       return err;
+}
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
+
+static void fw_dev_release(struct device *dev)
+{
+       struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+
+       if (fw_sysfs->fw_upload_priv) {
+               free_fw_priv(fw_sysfs->fw_priv);
+               kfree(fw_sysfs->fw_upload_priv);
+       }
+       kfree(fw_sysfs);
+}
+
+static struct class firmware_class = {
+       .name           = "firmware",
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+       .class_groups   = firmware_class_groups,
+       .dev_uevent     = firmware_uevent,
+#endif
+       .dev_release    = fw_dev_release,
+};
+
+int register_sysfs_loader(void)
+{
+       int ret = class_register(&firmware_class);
+
+       if (ret != 0)
+               return ret;
+       return register_firmware_config_sysctl();
+}
+
+void unregister_sysfs_loader(void)
+{
+       unregister_firmware_config_sysctl();
+       class_unregister(&firmware_class);
+}
+
+static ssize_t firmware_loading_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+       int loading = 0;
+
+       mutex_lock(&fw_lock);
+       if (fw_sysfs->fw_priv)
+               loading = fw_state_is_loading(fw_sysfs->fw_priv);
+       mutex_unlock(&fw_lock);
+
+       return sysfs_emit(buf, "%d\n", loading);
+}
+
+/**
+ * firmware_loading_store() - set value in the 'loading' control file
+ * @dev: device pointer
+ * @attr: device attribute pointer
+ * @buf: buffer to scan for loading control value
+ * @count: number of bytes in @buf
+ *
+ *     The relevant values are:
+ *
+ *      1: Start a load, discarding any previous partial load.
+ *      0: Conclude the load and hand the data to the driver code.
+ *     -1: Conclude the load with an error and discard any written data.
+ **/
+static ssize_t firmware_loading_store(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+       struct fw_priv *fw_priv;
+       ssize_t written = count;
+       int loading = simple_strtol(buf, NULL, 10);
+
+       mutex_lock(&fw_lock);
+       fw_priv = fw_sysfs->fw_priv;
+       if (fw_state_is_aborted(fw_priv) || fw_state_is_done(fw_priv))
+               goto out;
+
+       switch (loading) {
+       case 1:
+               /* discarding any previous partial load */
+               fw_free_paged_buf(fw_priv);
+               fw_state_start(fw_priv);
+               break;
+       case 0:
+               if (fw_state_is_loading(fw_priv)) {
+                       int rc;
+
+                       /*
+                        * Several loading requests may be pending on
+                        * one same firmware buf, so let all requests
+                        * see the mapped 'buf->data' once the loading
+                        * is completed.
+                        */
+                       rc = fw_map_paged_buf(fw_priv);
+                       if (rc)
+                               dev_err(dev, "%s: map pages failed\n",
+                                       __func__);
+                       else
+                               rc = security_kernel_post_load_data(fw_priv->data,
+                                                                   fw_priv->size,
+                                                                   LOADING_FIRMWARE,
+                                                                   "blob");
+
+                       /*
+                        * Same logic as fw_load_abort, only the DONE bit
+                        * is ignored and we set ABORT only on failure.
+                        */
+                       if (rc) {
+                               fw_state_aborted(fw_priv);
+                               written = rc;
+                       } else {
+                               fw_state_done(fw_priv);
+
+                               /*
+                                * If this is a user-initiated firmware upload
+                                * then start the upload in a worker thread now.
+                                */
+                               rc = fw_upload_start(fw_sysfs);
+                               if (rc)
+                                       written = rc;
+                       }
+                       break;
+               }
+               fallthrough;
+       default:
+               dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
+               fallthrough;
+       case -1:
+               fw_load_abort(fw_sysfs);
+               if (fw_sysfs->fw_upload_priv)
+                       fw_state_init(fw_sysfs->fw_priv);
+
+               break;
+       }
+out:
+       mutex_unlock(&fw_lock);
+       return written;
+}
+
+DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
+
+static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
+                            loff_t offset, size_t count, bool read)
+{
+       if (read)
+               memcpy(buffer, fw_priv->data + offset, count);
+       else
+               memcpy(fw_priv->data + offset, buffer, count);
+}
+
+static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
+                       loff_t offset, size_t count, bool read)
+{
+       while (count) {
+               void *page_data;
+               int page_nr = offset >> PAGE_SHIFT;
+               int page_ofs = offset & (PAGE_SIZE - 1);
+               int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
+
+               page_data = kmap(fw_priv->pages[page_nr]);
+
+               if (read)
+                       memcpy(buffer, page_data + page_ofs, page_cnt);
+               else
+                       memcpy(page_data + page_ofs, buffer, page_cnt);
+
+               kunmap(fw_priv->pages[page_nr]);
+               buffer += page_cnt;
+               offset += page_cnt;
+               count -= page_cnt;
+       }
+}
+
+static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
+                                 struct bin_attribute *bin_attr,
+                                 char *buffer, loff_t offset, size_t count)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+       struct fw_priv *fw_priv;
+       ssize_t ret_count;
+
+       mutex_lock(&fw_lock);
+       fw_priv = fw_sysfs->fw_priv;
+       if (!fw_priv || fw_state_is_done(fw_priv)) {
+               ret_count = -ENODEV;
+               goto out;
+       }
+       if (offset > fw_priv->size) {
+               ret_count = 0;
+               goto out;
+       }
+       if (count > fw_priv->size - offset)
+               count = fw_priv->size - offset;
+
+       ret_count = count;
+
+       if (fw_priv->data)
+               firmware_rw_data(fw_priv, buffer, offset, count, true);
+       else
+               firmware_rw(fw_priv, buffer, offset, count, true);
+
+out:
+       mutex_unlock(&fw_lock);
+       return ret_count;
+}
+
+static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
+{
+       int err;
+
+       err = fw_grow_paged_buf(fw_sysfs->fw_priv,
+                               PAGE_ALIGN(min_size) >> PAGE_SHIFT);
+       if (err)
+               fw_load_abort(fw_sysfs);
+       return err;
+}
+
+/**
+ * firmware_data_write() - write method for firmware
+ * @filp: open sysfs file
+ * @kobj: kobject for the device
+ * @bin_attr: bin_attr structure
+ * @buffer: buffer being written
+ * @offset: buffer offset for write in total data store area
+ * @count: buffer size
+ *
+ *     Data written to the 'data' attribute will be later handed to
+ *     the driver as a firmware image.
+ **/
+static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
+                                  struct bin_attribute *bin_attr,
+                                  char *buffer, loff_t offset, size_t count)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+       struct fw_priv *fw_priv;
+       ssize_t retval;
+
+       if (!capable(CAP_SYS_RAWIO))
+               return -EPERM;
+
+       mutex_lock(&fw_lock);
+       fw_priv = fw_sysfs->fw_priv;
+       if (!fw_priv || fw_state_is_done(fw_priv)) {
+               retval = -ENODEV;
+               goto out;
+       }
+
+       if (fw_priv->data) {
+               if (offset + count > fw_priv->allocated_size) {
+                       retval = -ENOMEM;
+                       goto out;
+               }
+               firmware_rw_data(fw_priv, buffer, offset, count, false);
+               retval = count;
+       } else {
+               retval = fw_realloc_pages(fw_sysfs, offset + count);
+               if (retval)
+                       goto out;
+
+               retval = count;
+               firmware_rw(fw_priv, buffer, offset, count, false);
+       }
+
+       fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
+out:
+       mutex_unlock(&fw_lock);
+       return retval;
+}
+
+static struct bin_attribute firmware_attr_data = {
+       .attr = { .name = "data", .mode = 0644 },
+       .size = 0,
+       .read = firmware_data_read,
+       .write = firmware_data_write,
+};
+
+static struct attribute *fw_dev_attrs[] = {
+       &dev_attr_loading.attr,
+#ifdef CONFIG_FW_UPLOAD
+       &dev_attr_cancel.attr,
+       &dev_attr_status.attr,
+       &dev_attr_error.attr,
+       &dev_attr_remaining_size.attr,
+#endif
+       NULL
+};
+
+static struct bin_attribute *fw_dev_bin_attrs[] = {
+       &firmware_attr_data,
+       NULL
+};
+
+static const struct attribute_group fw_dev_attr_group = {
+       .attrs = fw_dev_attrs,
+       .bin_attrs = fw_dev_bin_attrs,
+#ifdef CONFIG_FW_UPLOAD
+       .is_visible = fw_upload_is_visible,
+#endif
+};
+
+static const struct attribute_group *fw_dev_attr_groups[] = {
+       &fw_dev_attr_group,
+       NULL
+};
+
+struct fw_sysfs *
+fw_create_instance(struct firmware *firmware, const char *fw_name,
+                  struct device *device, u32 opt_flags)
+{
+       struct fw_sysfs *fw_sysfs;
+       struct device *f_dev;
+
+       fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
+       if (!fw_sysfs) {
+               fw_sysfs = ERR_PTR(-ENOMEM);
+               goto exit;
+       }
+
+       fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
+       fw_sysfs->fw = firmware;
+       f_dev = &fw_sysfs->dev;
+
+       device_initialize(f_dev);
+       dev_set_name(f_dev, "%s", fw_name);
+       f_dev->parent = device;
+       f_dev->class = &firmware_class;
+       f_dev->groups = fw_dev_attr_groups;
+exit:
+       return fw_sysfs;
+}
diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h
new file mode 100644 (file)
index 0000000..5d8ff16
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __FIRMWARE_SYSFS_H
+#define __FIRMWARE_SYSFS_H
+
+#include <linux/device.h>
+
+#include "firmware.h"
+
+MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
+
+extern struct firmware_fallback_config fw_fallback_config;
+extern struct device_attribute dev_attr_loading;
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+/**
+ * struct firmware_fallback_config - firmware fallback configuration settings
+ *
+ * Helps describe and fine tune the fallback mechanism.
+ *
+ * @force_sysfs_fallback: force the sysfs fallback mechanism to be used
+ *     as if one had enabled CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y.
+ *     Useful to help debug a CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+ *     functionality on a kernel where that config entry has been disabled.
+ * @ignore_sysfs_fallback: force to disable the sysfs fallback mechanism.
+ *     This emulates the behaviour as if we had set the kernel
+ *     config CONFIG_FW_LOADER_USER_HELPER=n.
+ * @old_timeout: for internal use
+ * @loading_timeout: the timeout to wait for the fallback mechanism before
+ *     giving up, in seconds.
+ */
+struct firmware_fallback_config {
+       unsigned int force_sysfs_fallback;
+       unsigned int ignore_sysfs_fallback;
+       int old_timeout;
+       int loading_timeout;
+};
+
+/* These getters are vetted to use int properly */
+static inline int __firmware_loading_timeout(void)
+{
+       return fw_fallback_config.loading_timeout;
+}
+
+/* These setters are vetted to use int properly */
+static inline void __fw_fallback_set_timeout(int timeout)
+{
+       fw_fallback_config.loading_timeout = timeout;
+}
+#endif
+
+#ifdef CONFIG_FW_LOADER_SYSFS
+int register_sysfs_loader(void);
+void unregister_sysfs_loader(void);
+#if defined(CONFIG_FW_LOADER_USER_HELPER) && defined(CONFIG_SYSCTL)
+int register_firmware_config_sysctl(void);
+void unregister_firmware_config_sysctl(void);
+#else
+static inline int register_firmware_config_sysctl(void)
+{
+       return 0;
+}
+
+static inline void unregister_firmware_config_sysctl(void) { }
+#endif /* CONFIG_FW_LOADER_USER_HELPER && CONFIG_SYSCTL */
+#else /* CONFIG_FW_LOADER_SYSFS */
+static inline int register_sysfs_loader(void)
+{
+       return 0;
+}
+
+static inline void unregister_sysfs_loader(void)
+{
+}
+#endif /* CONFIG_FW_LOADER_SYSFS */
+
+struct fw_sysfs {
+       bool nowait;
+       struct device dev;
+       struct fw_priv *fw_priv;
+       struct firmware *fw;
+       void *fw_upload_priv;
+};
+
+static inline struct fw_sysfs *to_fw_sysfs(struct device *dev)
+{
+       return container_of(dev, struct fw_sysfs, dev);
+}
+
+void __fw_load_abort(struct fw_priv *fw_priv);
+
+static inline void fw_load_abort(struct fw_sysfs *fw_sysfs)
+{
+       struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+
+       __fw_load_abort(fw_priv);
+}
+
+struct fw_sysfs *
+fw_create_instance(struct firmware *firmware, const char *fw_name,
+                  struct device *device, u32 opt_flags);
+
+#ifdef CONFIG_FW_UPLOAD
+extern struct device_attribute dev_attr_status;
+extern struct device_attribute dev_attr_error;
+extern struct device_attribute dev_attr_cancel;
+extern struct device_attribute dev_attr_remaining_size;
+
+int fw_upload_start(struct fw_sysfs *fw_sysfs);
+umode_t fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n);
+#else
+static inline int fw_upload_start(struct fw_sysfs *fw_sysfs)
+{
+       return 0;
+}
+#endif
+
+#endif /* __FIRMWARE_SYSFS_H */
diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
new file mode 100644 (file)
index 0000000..87044d5
--- /dev/null
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "sysfs_upload.h"
+
+/*
+ * Support for user-space to initiate a firmware upload to a device.
+ */
+
+static const char * const fw_upload_prog_str[] = {
+       [FW_UPLOAD_PROG_IDLE]         = "idle",
+       [FW_UPLOAD_PROG_RECEIVING]    = "receiving",
+       [FW_UPLOAD_PROG_PREPARING]    = "preparing",
+       [FW_UPLOAD_PROG_TRANSFERRING] = "transferring",
+       [FW_UPLOAD_PROG_PROGRAMMING]  = "programming"
+};
+
+static const char * const fw_upload_err_str[] = {
+       [FW_UPLOAD_ERR_NONE]         = "none",
+       [FW_UPLOAD_ERR_HW_ERROR]     = "hw-error",
+       [FW_UPLOAD_ERR_TIMEOUT]      = "timeout",
+       [FW_UPLOAD_ERR_CANCELED]     = "user-abort",
+       [FW_UPLOAD_ERR_BUSY]         = "device-busy",
+       [FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size",
+       [FW_UPLOAD_ERR_RW_ERROR]     = "read-write-error",
+       [FW_UPLOAD_ERR_WEAROUT]      = "flash-wearout",
+};
+
+static const char *fw_upload_progress(struct device *dev,
+                                     enum fw_upload_prog prog)
+{
+       const char *status = "unknown-status";
+
+       if (prog < FW_UPLOAD_PROG_MAX)
+               status = fw_upload_prog_str[prog];
+       else
+               dev_err(dev, "Invalid status during secure update: %d\n", prog);
+
+       return status;
+}
+
+static const char *fw_upload_error(struct device *dev,
+                                  enum fw_upload_err err_code)
+{
+       const char *error = "unknown-error";
+
+       if (err_code < FW_UPLOAD_ERR_MAX)
+               error = fw_upload_err_str[err_code];
+       else
+               dev_err(dev, "Invalid error code during secure update: %d\n",
+                       err_code);
+
+       return error;
+}
+
+static ssize_t
+status_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+
+       return sysfs_emit(buf, "%s\n", fw_upload_progress(dev, fwlp->progress));
+}
+DEVICE_ATTR_RO(status);
+
+static ssize_t
+error_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+       int ret;
+
+       mutex_lock(&fwlp->lock);
+
+       if (fwlp->progress != FW_UPLOAD_PROG_IDLE)
+               ret = -EBUSY;
+       else if (!fwlp->err_code)
+               ret = 0;
+       else
+               ret = sysfs_emit(buf, "%s:%s\n",
+                                fw_upload_progress(dev, fwlp->err_progress),
+                                fw_upload_error(dev, fwlp->err_code));
+
+       mutex_unlock(&fwlp->lock);
+
+       return ret;
+}
+DEVICE_ATTR_RO(error);
+
+static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
+                           const char *buf, size_t count)
+{
+       struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+       int ret = count;
+       bool cancel;
+
+       if (kstrtobool(buf, &cancel) || !cancel)
+               return -EINVAL;
+
+       mutex_lock(&fwlp->lock);
+       if (fwlp->progress == FW_UPLOAD_PROG_IDLE)
+               ret = -ENODEV;
+       else
+               fwlp->ops->cancel(fwlp->fw_upload);
+       mutex_unlock(&fwlp->lock);
+
+       return ret;
+}
+DEVICE_ATTR_WO(cancel);
+
+static ssize_t remaining_size_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+
+       return sysfs_emit(buf, "%u\n", fwlp->remaining_size);
+}
+DEVICE_ATTR_RO(remaining_size);
+
+umode_t
+fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+       struct fw_sysfs *fw_sysfs;
+
+       fw_sysfs = to_fw_sysfs(kobj_to_dev(kobj));
+
+       if (fw_sysfs->fw_upload_priv || attr == &dev_attr_loading.attr)
+               return attr->mode;
+
+       return 0;
+}
+
+static void fw_upload_update_progress(struct fw_upload_priv *fwlp,
+                                     enum fw_upload_prog new_progress)
+{
+       mutex_lock(&fwlp->lock);
+       fwlp->progress = new_progress;
+       mutex_unlock(&fwlp->lock);
+}
+
+static void fw_upload_set_error(struct fw_upload_priv *fwlp,
+                               enum fw_upload_err err_code)
+{
+       mutex_lock(&fwlp->lock);
+       fwlp->err_progress = fwlp->progress;
+       fwlp->err_code = err_code;
+       mutex_unlock(&fwlp->lock);
+}
+
+static void fw_upload_prog_complete(struct fw_upload_priv *fwlp)
+{
+       mutex_lock(&fwlp->lock);
+       fwlp->progress = FW_UPLOAD_PROG_IDLE;
+       mutex_unlock(&fwlp->lock);
+}
+
+static void fw_upload_main(struct work_struct *work)
+{
+       struct fw_upload_priv *fwlp;
+       struct fw_sysfs *fw_sysfs;
+       u32 written = 0, offset = 0;
+       enum fw_upload_err ret;
+       struct device *fw_dev;
+       struct fw_upload *fwl;
+
+       fwlp = container_of(work, struct fw_upload_priv, work);
+       fwl = fwlp->fw_upload;
+       fw_sysfs = (struct fw_sysfs *)fwl->priv;
+       fw_dev = &fw_sysfs->dev;
+
+       fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_PREPARING);
+       ret = fwlp->ops->prepare(fwl, fwlp->data, fwlp->remaining_size);
+       if (ret != FW_UPLOAD_ERR_NONE) {
+               fw_upload_set_error(fwlp, ret);
+               goto putdev_exit;
+       }
+
+       fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_TRANSFERRING);
+       while (fwlp->remaining_size) {
+               ret = fwlp->ops->write(fwl, fwlp->data, offset,
+                                       fwlp->remaining_size, &written);
+               if (ret != FW_UPLOAD_ERR_NONE || !written) {
+                       if (ret == FW_UPLOAD_ERR_NONE) {
+                               dev_warn(fw_dev, "write-op wrote zero data\n");
+                               ret = FW_UPLOAD_ERR_RW_ERROR;
+                       }
+                       fw_upload_set_error(fwlp, ret);
+                       goto done;
+               }
+
+               fwlp->remaining_size -= written;
+               offset += written;
+       }
+
+       fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_PROGRAMMING);
+       ret = fwlp->ops->poll_complete(fwl);
+       if (ret != FW_UPLOAD_ERR_NONE)
+               fw_upload_set_error(fwlp, ret);
+
+done:
+       if (fwlp->ops->cleanup)
+               fwlp->ops->cleanup(fwl);
+
+putdev_exit:
+       put_device(fw_dev->parent);
+
+       /*
+        * Note: fwlp->remaining_size is left unmodified here to provide
+        * additional information on errors. It will be reinitialized when
+        * the next firmware upload begins.
+        */
+       mutex_lock(&fw_lock);
+       fw_free_paged_buf(fw_sysfs->fw_priv);
+       fw_state_init(fw_sysfs->fw_priv);
+       mutex_unlock(&fw_lock);
+       fwlp->data = NULL;
+       fw_upload_prog_complete(fwlp);
+}
+
+/*
+ * Start a worker thread to upload data to the parent driver.
+ * Must be called with fw_lock held.
+ */
+int fw_upload_start(struct fw_sysfs *fw_sysfs)
+{
+       struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+       struct device *fw_dev = &fw_sysfs->dev;
+       struct fw_upload_priv *fwlp;
+
+       if (!fw_sysfs->fw_upload_priv)
+               return 0;
+
+       if (!fw_priv->size) {
+               fw_free_paged_buf(fw_priv);
+               fw_state_init(fw_sysfs->fw_priv);
+               return 0;
+       }
+
+       fwlp = fw_sysfs->fw_upload_priv;
+       mutex_lock(&fwlp->lock);
+
+       /* Do not interfere with an on-going fw_upload */
+       if (fwlp->progress != FW_UPLOAD_PROG_IDLE) {
+               mutex_unlock(&fwlp->lock);
+               return -EBUSY;
+       }
+
+       get_device(fw_dev->parent); /* released in fw_upload_main */
+
+       fwlp->progress = FW_UPLOAD_PROG_RECEIVING;
+       fwlp->err_code = 0;
+       fwlp->remaining_size = fw_priv->size;
+       fwlp->data = fw_priv->data;
+
+       pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
+                __func__, fw_priv->fw_name,
+                fw_priv, fw_priv->data,
+                (unsigned int)fw_priv->size);
+
+       queue_work(system_long_wq, &fwlp->work);
+       mutex_unlock(&fwlp->lock);
+
+       return 0;
+}
+
+/**
+ * firmware_upload_register() - register for the firmware upload sysfs API
+ * @module: kernel module of this device
+ * @parent: parent device instantiating firmware upload
+ * @name: firmware name to be associated with this device
+ * @ops: pointer to structure of firmware upload ops
+ * @dd_handle: pointer to parent driver private data
+ *
+ *     @name must be unique among all users of firmware upload. The firmware
+ *     sysfs files for this device will be found at /sys/class/firmware/@name.
+ *
+ *     Return: struct fw_upload pointer or ERR_PTR()
+ *
+ **/
+struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+                        const char *name, const struct fw_upload_ops *ops,
+                        void *dd_handle)
+{
+       u32 opt_flags = FW_OPT_NOCACHE;
+       struct fw_upload *fw_upload;
+       struct fw_upload_priv *fw_upload_priv;
+       struct fw_sysfs *fw_sysfs;
+       struct fw_priv *fw_priv;
+       struct device *fw_dev;
+       int ret;
+
+       if (!name || name[0] == '\0')
+               return ERR_PTR(-EINVAL);
+
+       if (!ops || !ops->cancel || !ops->prepare ||
+           !ops->write || !ops->poll_complete) {
+               dev_err(parent, "Attempt to register without all required ops\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (!try_module_get(module))
+               return ERR_PTR(-EFAULT);
+
+       fw_upload = kzalloc(sizeof(*fw_upload), GFP_KERNEL);
+       if (!fw_upload) {
+               ret = -ENOMEM;
+               goto exit_module_put;
+       }
+
+       fw_upload_priv = kzalloc(sizeof(*fw_upload_priv), GFP_KERNEL);
+       if (!fw_upload_priv) {
+               ret = -ENOMEM;
+               goto free_fw_upload;
+       }
+
+       fw_upload_priv->fw_upload = fw_upload;
+       fw_upload_priv->ops = ops;
+       mutex_init(&fw_upload_priv->lock);
+       fw_upload_priv->module = module;
+       fw_upload_priv->name = name;
+       fw_upload_priv->err_code = 0;
+       fw_upload_priv->progress = FW_UPLOAD_PROG_IDLE;
+       INIT_WORK(&fw_upload_priv->work, fw_upload_main);
+       fw_upload->dd_handle = dd_handle;
+
+       fw_sysfs = fw_create_instance(NULL, name, parent, opt_flags);
+       if (IS_ERR(fw_sysfs)) {
+               ret = PTR_ERR(fw_sysfs);
+               goto free_fw_upload_priv;
+       }
+       fw_upload->priv = fw_sysfs;
+       fw_sysfs->fw_upload_priv = fw_upload_priv;
+       fw_dev = &fw_sysfs->dev;
+
+       ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, NULL, 0, 0,
+                                  FW_OPT_NOCACHE);
+       if (ret != 0) {
+               if (ret > 0)
+                       ret = -EINVAL;
+               goto free_fw_sysfs;
+       }
+       fw_priv->is_paged_buf = true;
+       fw_sysfs->fw_priv = fw_priv;
+
+       ret = device_add(fw_dev);
+       if (ret) {
+               dev_err(fw_dev, "%s: device_register failed\n", __func__);
+               put_device(fw_dev);
+               goto exit_module_put;
+       }
+
+       return fw_upload;
+
+free_fw_sysfs:
+       kfree(fw_sysfs);
+
+free_fw_upload_priv:
+       kfree(fw_upload_priv);
+
+free_fw_upload:
+       kfree(fw_upload);
+
+exit_module_put:
+       module_put(module);
+
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(firmware_upload_register);
+
+/**
+ * firmware_upload_unregister() - Unregister firmware upload interface
+ * @fw_upload: pointer to struct fw_upload
+ **/
+void firmware_upload_unregister(struct fw_upload *fw_upload)
+{
+       struct fw_sysfs *fw_sysfs = fw_upload->priv;
+       struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
+
+       mutex_lock(&fw_upload_priv->lock);
+       if (fw_upload_priv->progress == FW_UPLOAD_PROG_IDLE) {
+               mutex_unlock(&fw_upload_priv->lock);
+               goto unregister;
+       }
+
+       fw_upload_priv->ops->cancel(fw_upload);
+       mutex_unlock(&fw_upload_priv->lock);
+
+       /* Ensure lower-level device-driver is finished */
+       flush_work(&fw_upload_priv->work);
+
+unregister:
+       device_unregister(&fw_sysfs->dev);
+       module_put(fw_upload_priv->module);
+}
+EXPORT_SYMBOL_GPL(firmware_upload_unregister);
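For orientation, here is a minimal sketch of a driver consuming the two exports above. Every my_* identifier is a placeholder invented for illustration; only firmware_upload_register()/firmware_upload_unregister(), struct fw_upload_ops and the FW_UPLOAD_ERR_* codes come from the interface added in this series, and the callback signatures mirror how fw_upload_main() invokes them.

/*
 * Illustrative consumer of the firmware upload API (not part of this series).
 * All my_* names are placeholders.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/module.h>

struct my_dev {
        struct device *dev;
        struct fw_upload *fwl;
};

static enum fw_upload_err my_prepare(struct fw_upload *fwl, const u8 *data, u32 size)
{
        struct my_dev *mdev = fwl->dd_handle;   /* dd_handle passed at registration */

        /* validate the whole image before any transfer starts */
        if (!size)
                return FW_UPLOAD_ERR_INVALID_SIZE;

        dev_dbg(mdev->dev, "preparing %u byte image\n", size);
        return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err my_write(struct fw_upload *fwl, const u8 *data,
                                   u32 offset, u32 size, u32 *written)
{
        /* push data[offset .. offset + size) to the device, report progress */
        *written = size;
        return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err my_poll_complete(struct fw_upload *fwl)
{
        /* wait for the device to finish programming the image */
        return FW_UPLOAD_ERR_NONE;
}

static void my_cancel(struct fw_upload *fwl)
{
        /* ask the device to abort the in-flight update */
}

static const struct fw_upload_ops my_fw_ops = {
        .prepare        = my_prepare,
        .write          = my_write,
        .poll_complete  = my_poll_complete,
        .cancel         = my_cancel,
};

static int my_register_upload(struct my_dev *mdev)
{
        mdev->fwl = firmware_upload_register(THIS_MODULE, mdev->dev,
                                             "my-image", &my_fw_ops, mdev);
        return PTR_ERR_OR_ZERO(mdev->fwl);
}

static void my_unregister_upload(struct my_dev *mdev)
{
        firmware_upload_unregister(mdev->fwl);
}

Once registered, the /sys/class/firmware/my-image/ node sketched earlier appears, and writing an image through loading/data kicks off fw_upload_start() and the worker above.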
diff --git a/drivers/base/firmware_loader/sysfs_upload.h b/drivers/base/firmware_loader/sysfs_upload.h
new file mode 100644 (file)
index 0000000..31931ff
--- /dev/null
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SYSFS_UPLOAD_H
+#define __SYSFS_UPLOAD_H
+
+#include <linux/device.h>
+
+#include "sysfs.h"
+
+/**
+ * enum fw_upload_prog - firmware upload progress codes
+ * @FW_UPLOAD_PROG_IDLE: there is no firmware upload in progress
+ * @FW_UPLOAD_PROG_RECEIVING: worker thread is receiving firmware data
+ * @FW_UPLOAD_PROG_PREPARING: target device is preparing for firmware upload
+ * @FW_UPLOAD_PROG_TRANSFERRING: data is being copied to the device
+ * @FW_UPLOAD_PROG_PROGRAMMING: device is performing the firmware update
+ * @FW_UPLOAD_PROG_MAX: Maximum progress code marker
+ */
+enum fw_upload_prog {
+       FW_UPLOAD_PROG_IDLE,
+       FW_UPLOAD_PROG_RECEIVING,
+       FW_UPLOAD_PROG_PREPARING,
+       FW_UPLOAD_PROG_TRANSFERRING,
+       FW_UPLOAD_PROG_PROGRAMMING,
+       FW_UPLOAD_PROG_MAX
+};
+
+struct fw_upload_priv {
+       struct fw_upload *fw_upload;
+       struct module *module;
+       const char *name;
+       const struct fw_upload_ops *ops;
+       struct mutex lock;                /* protect data structure contents */
+       struct work_struct work;
+       const u8 *data;                   /* pointer to update data */
+       u32 remaining_size;               /* size remaining to transfer */
+       enum fw_upload_prog progress;
+       enum fw_upload_prog err_progress; /* progress at time of failure */
+       enum fw_upload_err err_code;      /* security manager error code */
+};
+
+#endif /* __SYSFS_UPLOAD_H */
diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
new file mode 100644 (file)
index 0000000..87af641
--- /dev/null
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device physical location support
+ *
+ * Author: Won Chung <wonchung@google.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/sysfs.h>
+
+#include "physical_location.h"
+
+bool dev_add_physical_location(struct device *dev)
+{
+       struct acpi_pld_info *pld;
+       acpi_status status;
+
+       if (!has_acpi_companion(dev))
+               return false;
+
+       status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld);
+       if (ACPI_FAILURE(status))
+               return false;
+
+       dev->physical_location =
+               kzalloc(sizeof(*dev->physical_location), GFP_KERNEL);
+       if (!dev->physical_location)
+               return false;
+       dev->physical_location->panel = pld->panel;
+       dev->physical_location->vertical_position = pld->vertical_position;
+       dev->physical_location->horizontal_position = pld->horizontal_position;
+       dev->physical_location->dock = pld->dock;
+       dev->physical_location->lid = pld->lid;
+
+       ACPI_FREE(pld);
+       return true;
+}
+
+static ssize_t panel_show(struct device *dev, struct device_attribute *attr,
+       char *buf)
+{
+       const char *panel;
+
+       switch (dev->physical_location->panel) {
+       case DEVICE_PANEL_TOP:
+               panel = "top";
+               break;
+       case DEVICE_PANEL_BOTTOM:
+               panel = "bottom";
+               break;
+       case DEVICE_PANEL_LEFT:
+               panel = "left";
+               break;
+       case DEVICE_PANEL_RIGHT:
+               panel = "right";
+               break;
+       case DEVICE_PANEL_FRONT:
+               panel = "front";
+               break;
+       case DEVICE_PANEL_BACK:
+               panel = "back";
+               break;
+       default:
+               panel = "unknown";
+       }
+       return sysfs_emit(buf, "%s\n", panel);
+}
+static DEVICE_ATTR_RO(panel);
+
+static ssize_t vertical_position_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       const char *vertical_position;
+
+       switch (dev->physical_location->vertical_position) {
+       case DEVICE_VERT_POS_UPPER:
+               vertical_position = "upper";
+               break;
+       case DEVICE_VERT_POS_CENTER:
+               vertical_position = "center";
+               break;
+       case DEVICE_VERT_POS_LOWER:
+               vertical_position = "lower";
+               break;
+       default:
+               vertical_position = "unknown";
+       }
+       return sysfs_emit(buf, "%s\n", vertical_position);
+}
+static DEVICE_ATTR_RO(vertical_position);
+
+static ssize_t horizontal_position_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       const char *horizontal_position;
+
+       switch (dev->physical_location->horizontal_position) {
+       case DEVICE_HORI_POS_LEFT:
+               horizontal_position = "left";
+               break;
+       case DEVICE_HORI_POS_CENTER:
+               horizontal_position = "center";
+               break;
+       case DEVICE_HORI_POS_RIGHT:
+               horizontal_position = "right";
+               break;
+       default:
+               horizontal_position = "unknown";
+       }
+       return sysfs_emit(buf, "%s\n", horizontal_position);
+}
+static DEVICE_ATTR_RO(horizontal_position);
+
+static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
+       char *buf)
+{
+       return sysfs_emit(buf, "%s\n",
+               dev->physical_location->dock ? "yes" : "no");
+}
+static DEVICE_ATTR_RO(dock);
+
+static ssize_t lid_show(struct device *dev, struct device_attribute *attr,
+       char *buf)
+{
+       return sysfs_emit(buf, "%s\n",
+               dev->physical_location->lid ? "yes" : "no");
+}
+static DEVICE_ATTR_RO(lid);
+
+static struct attribute *dev_attr_physical_location[] = {
+       &dev_attr_panel.attr,
+       &dev_attr_vertical_position.attr,
+       &dev_attr_horizontal_position.attr,
+       &dev_attr_dock.attr,
+       &dev_attr_lid.attr,
+       NULL,
+};
+
+const struct attribute_group dev_attr_physical_location_group = {
+       .name = "physical_location",
+       .attrs = dev_attr_physical_location,
+};
+
diff --git a/drivers/base/physical_location.h b/drivers/base/physical_location.h
new file mode 100644 (file)
index 0000000..82cde9f
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device physical location support
+ *
+ * Author: Won Chung <wonchung@google.com>
+ */
+
+#include <linux/device.h>
+
+#ifdef CONFIG_ACPI
+extern bool dev_add_physical_location(struct device *dev);
+extern const struct attribute_group dev_attr_physical_location_group;
+#else
+static inline bool dev_add_physical_location(struct device *dev) { return false; }
+static const struct attribute_group dev_attr_physical_location_group = {};
+#endif
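A sketch of how a caller could, in principle, hook this helper up; the actual integration point elsewhere in this series is not shown in this hunk, and my_expose_physical_location() is purely illustrative.

/* Illustrative only: expose the ACPI _PLD data as a physical_location group. */
#include <linux/device.h>
#include <linux/sysfs.h>

#include "physical_location.h"  /* drivers/base private header added above */

static int my_expose_physical_location(struct device *dev)
{
        /* no ACPI companion or no _PLD: silently expose nothing */
        if (!dev_add_physical_location(dev))
                return 0;

        /* creates <device>/physical_location/{panel,vertical_position,
         * horizontal_position,dock,lid}
         */
        return sysfs_create_group(&dev->kobj, &dev_attr_physical_location_group);
}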
index 8cc272f..51bb228 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/property.h>
 #include <linux/kmemleak.h>
 #include <linux/types.h>
+#include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -231,7 +233,8 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
 out_not_found:
        ret = -ENXIO;
 out:
-       WARN(ret == 0, "0 is an invalid IRQ number\n");
+       if (WARN(!ret, "0 is an invalid IRQ number\n"))
+               return -EINVAL;
        return ret;
 }
 EXPORT_SYMBOL_GPL(platform_get_irq_optional);
@@ -446,7 +449,8 @@ static int __platform_get_irq_byname(struct platform_device *dev,
 
        r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
        if (r) {
-               WARN(r->start == 0, "0 is an invalid IRQ number\n");
+               if (WARN(!r->start, "0 is an invalid IRQ number\n"))
+                       return -EINVAL;
                return r->start;
        }
 
@@ -1275,31 +1279,11 @@ static ssize_t driver_override_store(struct device *dev,
                                     const char *buf, size_t count)
 {
        struct platform_device *pdev = to_platform_device(dev);
-       char *driver_override, *old, *cp;
-
-       /* We need to keep extra room for a newline */
-       if (count >= (PAGE_SIZE - 1))
-               return -EINVAL;
-
-       driver_override = kstrndup(buf, count, GFP_KERNEL);
-       if (!driver_override)
-               return -ENOMEM;
-
-       cp = strchr(driver_override, '\n');
-       if (cp)
-               *cp = '\0';
-
-       device_lock(dev);
-       old = pdev->driver_override;
-       if (strlen(driver_override)) {
-               pdev->driver_override = driver_override;
-       } else {
-               kfree(driver_override);
-               pdev->driver_override = NULL;
-       }
-       device_unlock(dev);
+       int ret;
 
-       kfree(old);
+       ret = driver_set_override(dev, &pdev->driver_override, buf, count);
+       if (ret)
+               return ret;
 
        return count;
 }
@@ -1454,9 +1438,9 @@ static void platform_shutdown(struct device *_dev)
                drv->shutdown(dev);
 }
 
-
-int platform_dma_configure(struct device *dev)
+static int platform_dma_configure(struct device *dev)
 {
+       struct platform_driver *drv = to_platform_driver(dev->driver);
        enum dev_dma_attr attr;
        int ret = 0;
 
@@ -1467,9 +1451,23 @@ int platform_dma_configure(struct device *dev)
                ret = acpi_dma_configure(dev, attr);
        }
 
+       if (!ret && !drv->driver_managed_dma) {
+               ret = iommu_device_use_default_domain(dev);
+               if (ret)
+                       arch_teardown_dma_ops(dev);
+       }
+
        return ret;
 }
 
+static void platform_dma_cleanup(struct device *dev)
+{
+       struct platform_driver *drv = to_platform_driver(dev->driver);
+
+       if (!drv->driver_managed_dma)
+               iommu_device_unuse_default_domain(dev);
+}
+
 static const struct dev_pm_ops platform_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
        USE_PLATFORM_PM_SLEEP_OPS
@@ -1484,6 +1482,7 @@ struct bus_type platform_bus_type = {
        .remove         = platform_remove,
        .shutdown       = platform_shutdown,
        .dma_configure  = platform_dma_configure,
+       .dma_cleanup    = platform_dma_cleanup,
        .pm             = &platform_dev_pm_ops,
 };
 EXPORT_SYMBOL_GPL(platform_bus_type);
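For context, the driver_managed_dma flag consulted in platform_dma_configure()/platform_dma_cleanup() above is set by drivers that claim their own DMA/IOMMU ownership. A minimal, purely illustrative platform driver doing so (all my_* names are placeholders):

/* Illustrative platform driver opting out of the default IOMMU domain. */
#include <linux/module.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
        /* this driver sets up its own IOMMU domain / DMA API usage */
        return 0;
}

static struct platform_driver my_driver = {
        .probe = my_probe,
        .driver = {
                .name = "my-managed-dma-driver",
        },
        .driver_managed_dma = true,
};
module_platform_driver(my_driver);

MODULE_LICENSE("GPL");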
index 3adcac2..ed6f449 100644 (file)
@@ -1206,15 +1206,23 @@ const void *device_get_match_data(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(device_get_match_data);
 
-static void *
-fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
-                         void *data, devcon_match_fn_t match)
+static unsigned int fwnode_graph_devcon_matches(struct fwnode_handle *fwnode,
+                                               const char *con_id, void *data,
+                                               devcon_match_fn_t match,
+                                               void **matches,
+                                               unsigned int matches_len)
 {
        struct fwnode_handle *node;
        struct fwnode_handle *ep;
+       unsigned int count = 0;
        void *ret;
 
        fwnode_graph_for_each_endpoint(fwnode, ep) {
+               if (matches && count >= matches_len) {
+                       fwnode_handle_put(ep);
+                       break;
+               }
+
                node = fwnode_graph_get_remote_port_parent(ep);
                if (!fwnode_device_is_available(node)) {
                        fwnode_handle_put(node);
@@ -1224,33 +1232,43 @@ fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
                ret = match(node, con_id, data);
                fwnode_handle_put(node);
                if (ret) {
-                       fwnode_handle_put(ep);
-                       return ret;
+                       if (matches)
+                               matches[count] = ret;
+                       count++;
                }
        }
-       return NULL;
+       return count;
 }
 
-static void *
-fwnode_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
-                   void *data, devcon_match_fn_t match)
+static unsigned int fwnode_devcon_matches(struct fwnode_handle *fwnode,
+                                         const char *con_id, void *data,
+                                         devcon_match_fn_t match,
+                                         void **matches,
+                                         unsigned int matches_len)
 {
        struct fwnode_handle *node;
+       unsigned int count = 0;
+       unsigned int i;
        void *ret;
-       int i;
 
        for (i = 0; ; i++) {
+               if (matches && count >= matches_len)
+                       break;
+
                node = fwnode_find_reference(fwnode, con_id, i);
                if (IS_ERR(node))
                        break;
 
                ret = match(node, NULL, data);
                fwnode_handle_put(node);
-               if (ret)
-                       return ret;
+               if (ret) {
+                       if (matches)
+                               matches[count] = ret;
+                       count++;
+               }
        }
 
-       return NULL;
+       return count;
 }
 
 /**
@@ -1268,15 +1286,61 @@ void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
                                   const char *con_id, void *data,
                                   devcon_match_fn_t match)
 {
+       unsigned int count;
        void *ret;
 
        if (!fwnode || !match)
                return NULL;
 
-       ret = fwnode_graph_devcon_match(fwnode, con_id, data, match);
-       if (ret)
+       count = fwnode_graph_devcon_matches(fwnode, con_id, data, match, &ret, 1);
+       if (count)
                return ret;
 
-       return fwnode_devcon_match(fwnode, con_id, data, match);
+       count = fwnode_devcon_matches(fwnode, con_id, data, match, &ret, 1);
+       return count ? ret : NULL;
 }
 EXPORT_SYMBOL_GPL(fwnode_connection_find_match);
+
+/**
+ * fwnode_connection_find_matches - Find connections from a device node
+ * @fwnode: Device node with the connection
+ * @con_id: Identifier for the connection
+ * @data: Data for the match function
+ * @match: Function to check and convert the connection description
+ * @matches: (Optional) array of pointers to fill with matches
+ * @matches_len: Length of @matches
+ *
+ * Find up to @matches_len connections with unique identifier @con_id between
+ * @fwnode and other device nodes. @match will be used to convert the
+ * connection description to data the caller is expecting to be returned
+ * through the @matches array.
+ * If @matches is NULL, @matches_len is ignored and the total number of resolved
+ * matches is returned.
+ *
+ * Return: Number of matches resolved, or negative errno.
+ */
+int fwnode_connection_find_matches(struct fwnode_handle *fwnode,
+                                  const char *con_id, void *data,
+                                  devcon_match_fn_t match,
+                                  void **matches, unsigned int matches_len)
+{
+       unsigned int count_graph;
+       unsigned int count_ref;
+
+       if (!fwnode || !match)
+               return -EINVAL;
+
+       count_graph = fwnode_graph_devcon_matches(fwnode, con_id, data, match,
+                                                 matches, matches_len);
+
+       if (matches) {
+               matches += count_graph;
+               matches_len -= count_graph;
+       }
+
+       count_ref = fwnode_devcon_matches(fwnode, con_id, data, match,
+                                         matches, matches_len);
+
+       return count_graph + count_ref;
+}
+EXPORT_SYMBOL_GPL(fwnode_connection_find_matches);
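A small usage sketch for the new export; my_match(), my_count_connections() and the "my-con-id" identifier are invented for illustration, while the devcon_match_fn_t shape follows how the matchers are invoked above.

/* Illustrative caller of fwnode_connection_find_matches(). */
#include <linux/kernel.h>
#include <linux/property.h>

static void *my_match(struct fwnode_handle *fwnode, const char *con_id,
                      void *data)
{
        /* convert a matched node into whatever the caller wants,
         * or return NULL to reject it
         */
        return fwnode_handle_get(fwnode);
}

static int my_count_connections(struct device *dev)
{
        void *matches[4];
        int count, i;

        /* passing matches == NULL (with any matches_len) would only count */
        count = fwnode_connection_find_matches(dev_fwnode(dev), "my-con-id",
                                               NULL, my_match,
                                               matches, ARRAY_SIZE(matches));
        if (count < 0)
                return count;

        /* drop the references my_match() took */
        for (i = 0; i < count; i++)
                fwnode_handle_put(matches[i]);

        return count;
}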
index f1dda4e..084f9b8 100644 (file)
@@ -1102,7 +1102,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
                lo->lo_flags |= LO_FLAGS_PARTSCAN;
        partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
        if (partscan)
-               lo->lo_disk->flags &= ~GENHD_FL_NO_PART;
+               clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
 
        loop_global_unlock(lo, is_loop);
        if (partscan)
@@ -1198,7 +1198,7 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
         */
        lo->lo_flags = 0;
        if (!part_shift)
-               lo->lo_disk->flags |= GENHD_FL_NO_PART;
+               set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
        mutex_lock(&lo->lo_mutex);
        lo->lo_state = Lo_unbound;
        mutex_unlock(&lo->lo_mutex);
@@ -1308,7 +1308,7 @@ out_unfreeze:
 
        if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
             !(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
-               lo->lo_disk->flags &= ~GENHD_FL_NO_PART;
+               clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
                partscan = true;
        }
 out_unlock:
@@ -2011,7 +2011,7 @@ static int loop_add(int i)
         * userspace tools. Parameters like this in general should be avoided.
         */
        if (!part_shift)
-               disk->flags |= GENHD_FL_NO_PART;
+               set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
        mutex_init(&lo->lo_mutex);
        lo->lo_number           = i;
        spin_lock_init(&lo->lo_lock);
index ac8b045..07f3c13 100644 (file)
@@ -403,13 +403,14 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
        if (!mutex_trylock(&cmd->lock))
                return BLK_EH_RESET_TIMER;
 
-       if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+       if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
                mutex_unlock(&cmd->lock);
                return BLK_EH_DONE;
        }
 
        if (!refcount_inc_not_zero(&nbd->config_refs)) {
                cmd->status = BLK_STS_TIMEOUT;
+               __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
                mutex_unlock(&cmd->lock);
                goto done;
        }
@@ -478,6 +479,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
        dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
        set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
        cmd->status = BLK_STS_IOERR;
+       __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
        mutex_unlock(&cmd->lock);
        sock_shutdown(nbd);
        nbd_config_put(nbd);
@@ -745,7 +747,7 @@ static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
        cmd = blk_mq_rq_to_pdu(req);
 
        mutex_lock(&cmd->lock);
-       if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+       if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
                dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
                        tag, cmd->status, cmd->flags);
                ret = -ENOENT;
@@ -854,8 +856,16 @@ static void recv_work(struct work_struct *work)
                }
 
                rq = blk_mq_rq_from_pdu(cmd);
-               if (likely(!blk_should_fake_timeout(rq->q)))
-                       blk_mq_complete_request(rq);
+               if (likely(!blk_should_fake_timeout(rq->q))) {
+                       bool complete;
+
+                       mutex_lock(&cmd->lock);
+                       complete = __test_and_clear_bit(NBD_CMD_INFLIGHT,
+                                                       &cmd->flags);
+                       mutex_unlock(&cmd->lock);
+                       if (complete)
+                               blk_mq_complete_request(rq);
+               }
                percpu_ref_put(&q->q_usage_counter);
        }
 
@@ -1419,7 +1429,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd)
 static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
                                 struct block_device *bdev)
 {
-       sock_shutdown(nbd);
+       nbd_clear_sock(nbd);
        __invalidate_device(bdev, true);
        nbd_bdev_reset(nbd);
        if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
@@ -1518,15 +1528,20 @@ static struct nbd_config *nbd_alloc_config(void)
 {
        struct nbd_config *config;
 
+       if (!try_module_get(THIS_MODULE))
+               return ERR_PTR(-ENODEV);
+
        config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
-       if (!config)
-               return NULL;
+       if (!config) {
+               module_put(THIS_MODULE);
+               return ERR_PTR(-ENOMEM);
+       }
+
        atomic_set(&config->recv_threads, 0);
        init_waitqueue_head(&config->recv_wq);
        init_waitqueue_head(&config->conn_wait);
        config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
        atomic_set(&config->live_connections, 0);
-       try_module_get(THIS_MODULE);
        return config;
 }
 
@@ -1553,12 +1568,13 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
                        mutex_unlock(&nbd->config_lock);
                        goto out;
                }
-               config = nbd->config = nbd_alloc_config();
-               if (!config) {
-                       ret = -ENOMEM;
+               config = nbd_alloc_config();
+               if (IS_ERR(config)) {
+                       ret = PTR_ERR(config);
                        mutex_unlock(&nbd->config_lock);
                        goto out;
                }
+               nbd->config = config;
                refcount_set(&nbd->config_refs, 1);
                refcount_inc(&nbd->refs);
                mutex_unlock(&nbd->config_lock);
@@ -1798,17 +1814,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
        refcount_set(&nbd->refs, 0);
        INIT_LIST_HEAD(&nbd->list);
        disk->major = NBD_MAJOR;
-
-       /* Too big first_minor can cause duplicate creation of
-        * sysfs files/links, since index << part_shift might overflow, or
-        * MKDEV() expect that the max bits of first_minor is 20.
-        */
        disk->first_minor = index << part_shift;
-       if (disk->first_minor < index || disk->first_minor > MINORMASK) {
-               err = -EINVAL;
-               goto out_free_work;
-       }
-
        disk->minors = 1 << part_shift;
        disk->fops = &nbd_fops;
        disk->private_data = nbd;
@@ -1913,14 +1919,25 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
        if (!netlink_capable(skb, CAP_SYS_ADMIN))
                return -EPERM;
 
-       if (info->attrs[NBD_ATTR_INDEX])
+       if (info->attrs[NBD_ATTR_INDEX]) {
                index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
+
+               /*
+                * Too big first_minor can cause duplicate creation of
+                * sysfs files/links, since index << part_shift might overflow, or
+                * MKDEV() expect that the max bits of first_minor is 20.
+                */
+               if (index < 0 || index > MINORMASK >> part_shift) {
+                       pr_err("illegal input index %d\n", index);
+                       return -EINVAL;
+               }
+       }
        if (!info->attrs[NBD_ATTR_SOCKETS]) {
-               printk(KERN_ERR "nbd: must specify at least one socket\n");
+               pr_err("must specify at least one socket\n");
                return -EINVAL;
        }
        if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
-               printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
+               pr_err("must specify a size in bytes for the device\n");
                return -EINVAL;
        }
 again:
@@ -1956,7 +1973,7 @@ again:
                nbd_put(nbd);
                if (index == -1)
                        goto again;
-               printk(KERN_ERR "nbd: nbd%d already in use\n", index);
+               pr_err("nbd%d already in use\n", index);
                return -EBUSY;
        }
        if (WARN_ON(nbd->config)) {
@@ -1964,13 +1981,14 @@ again:
                nbd_put(nbd);
                return -EINVAL;
        }
-       config = nbd->config = nbd_alloc_config();
-       if (!nbd->config) {
+       config = nbd_alloc_config();
+       if (IS_ERR(config)) {
                mutex_unlock(&nbd->config_lock);
                nbd_put(nbd);
-               printk(KERN_ERR "nbd: couldn't allocate config\n");
-               return -ENOMEM;
+               pr_err("couldn't allocate config\n");
+               return PTR_ERR(config);
        }
+       nbd->config = config;
        refcount_set(&nbd->config_refs, 1);
        set_bit(NBD_RT_BOUND, &config->runtime_flags);
 
@@ -2023,7 +2041,7 @@ again:
                        struct nlattr *socks[NBD_SOCK_MAX+1];
 
                        if (nla_type(attr) != NBD_SOCK_ITEM) {
-                               printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
+                               pr_err("socks must be embedded in a SOCK_ITEM attr\n");
                                ret = -EINVAL;
                                goto out;
                        }
@@ -2032,7 +2050,7 @@ again:
                                                          nbd_sock_policy,
                                                          info->extack);
                        if (ret != 0) {
-                               printk(KERN_ERR "nbd: error processing sock list\n");
+                               pr_err("error processing sock list\n");
                                ret = -EINVAL;
                                goto out;
                        }
@@ -2104,7 +2122,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
                return -EPERM;
 
        if (!info->attrs[NBD_ATTR_INDEX]) {
-               printk(KERN_ERR "nbd: must specify an index to disconnect\n");
+               pr_err("must specify an index to disconnect\n");
                return -EINVAL;
        }
        index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
@@ -2112,14 +2130,12 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
        nbd = idr_find(&nbd_index_idr, index);
        if (!nbd) {
                mutex_unlock(&nbd_index_mutex);
-               printk(KERN_ERR "nbd: couldn't find device at index %d\n",
-                      index);
+               pr_err("couldn't find device at index %d\n", index);
                return -EINVAL;
        }
        if (!refcount_inc_not_zero(&nbd->refs)) {
                mutex_unlock(&nbd_index_mutex);
-               printk(KERN_ERR "nbd: device at index %d is going down\n",
-                      index);
+               pr_err("device at index %d is going down\n", index);
                return -EINVAL;
        }
        mutex_unlock(&nbd_index_mutex);
@@ -2144,7 +2160,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
                return -EPERM;
 
        if (!info->attrs[NBD_ATTR_INDEX]) {
-               printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
+               pr_err("must specify a device to reconfigure\n");
                return -EINVAL;
        }
        index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
@@ -2152,8 +2168,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
        nbd = idr_find(&nbd_index_idr, index);
        if (!nbd) {
                mutex_unlock(&nbd_index_mutex);
-               printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
-                      index);
+               pr_err("couldn't find a device at index %d\n", index);
                return -EINVAL;
        }
        if (nbd->backend) {
@@ -2174,8 +2189,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
        }
        if (!refcount_inc_not_zero(&nbd->refs)) {
                mutex_unlock(&nbd_index_mutex);
-               printk(KERN_ERR "nbd: device at index %d is going down\n",
-                      index);
+               pr_err("device at index %d is going down\n", index);
                return -EINVAL;
        }
        mutex_unlock(&nbd_index_mutex);
@@ -2239,7 +2253,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
                        struct nlattr *socks[NBD_SOCK_MAX+1];
 
                        if (nla_type(attr) != NBD_SOCK_ITEM) {
-                               printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
+                               pr_err("socks must be embedded in a SOCK_ITEM attr\n");
                                ret = -EINVAL;
                                goto out;
                        }
@@ -2248,7 +2262,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
                                                          nbd_sock_policy,
                                                          info->extack);
                        if (ret != 0) {
-                               printk(KERN_ERR "nbd: error processing sock list\n");
+                               pr_err("error processing sock list\n");
                                ret = -EINVAL;
                                goto out;
                        }
@@ -2465,7 +2479,7 @@ static int __init nbd_init(void)
        BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
 
        if (max_part < 0) {
-               printk(KERN_ERR "nbd: max_part must be >= 0\n");
+               pr_err("max_part must be >= 0\n");
                return -EINVAL;
        }
 
@@ -2528,6 +2542,12 @@ static void __exit nbd_cleanup(void)
        struct nbd_device *nbd;
        LIST_HEAD(del_list);
 
+       /*
+        * Unregister netlink interface prior to waiting
+        * for the completion of netlink commands.
+        */
+       genl_unregister_family(&nbd_genl_family);
+
        nbd_dbg_close();
 
        mutex_lock(&nbd_index_mutex);
@@ -2537,8 +2557,11 @@ static void __exit nbd_cleanup(void)
        while (!list_empty(&del_list)) {
                nbd = list_first_entry(&del_list, struct nbd_device, list);
                list_del_init(&nbd->list);
+               if (refcount_read(&nbd->config_refs))
+                       pr_err("possibly leaking nbd_config (ref %d)\n",
+                                       refcount_read(&nbd->config_refs));
                if (refcount_read(&nbd->refs) != 1)
-                       printk(KERN_ERR "nbd: possibly leaking a device\n");
+                       pr_err("possibly leaking a device\n");
                nbd_put(nbd);
        }
 
@@ -2546,7 +2569,6 @@ static void __exit nbd_cleanup(void)
        destroy_workqueue(nbd_del_wq);
 
        idr_destroy(&nbd_index_idr);
-       genl_unregister_family(&nbd_genl_family);
        unregister_blkdev(NBD_MAJOR, "nbd");
 }
 
index 539cfea..6b67088 100644 (file)
@@ -77,12 +77,6 @@ enum {
        NULL_IRQ_TIMER          = 2,
 };
 
-enum {
-       NULL_Q_BIO              = 0,
-       NULL_Q_RQ               = 1,
-       NULL_Q_MQ               = 2,
-};
-
 static bool g_virt_boundary = false;
 module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
 MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");
index 4525a65..8359b43 100644 (file)
@@ -60,6 +60,13 @@ struct nullb_zone {
        unsigned int capacity;
 };
 
+/* Queue modes */
+enum {
+       NULL_Q_BIO      = 0,
+       NULL_Q_RQ       = 1,
+       NULL_Q_MQ       = 2,
+};
+
 struct nullb_device {
        struct nullb *nullb;
        struct config_item item;
index ed158ea..2fdd7b2 100644 (file)
@@ -398,10 +398,10 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
         */
        if (append) {
                sector = zone->wp;
-               if (cmd->bio)
-                       cmd->bio->bi_iter.bi_sector = sector;
-               else
+               if (dev->queue_mode == NULL_Q_MQ)
                        cmd->rq->__sector = sector;
+               else
+                       cmd->bio->bi_iter.bi_sector = sector;
        } else if (sector != zone->wp) {
                ret = BLK_STS_IOERR;
                goto unlock;
index 2b21f71..ef9bc62 100644 (file)
@@ -756,24 +756,23 @@ static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
  */
 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
 {
-       struct rbd_client *client_node;
-       bool found = false;
+       struct rbd_client *rbdc = NULL, *iter;
 
        if (ceph_opts->flags & CEPH_OPT_NOSHARE)
                return NULL;
 
        spin_lock(&rbd_client_list_lock);
-       list_for_each_entry(client_node, &rbd_client_list, node) {
-               if (!ceph_compare_options(ceph_opts, client_node->client)) {
-                       __rbd_get_client(client_node);
+       list_for_each_entry(iter, &rbd_client_list, node) {
+               if (!ceph_compare_options(ceph_opts, iter->client)) {
+                       __rbd_get_client(iter);
 
-                       found = true;
+                       rbdc = iter;
                        break;
                }
        }
        spin_unlock(&rbd_client_list_lock);
 
-       return found ? client_node : NULL;
+       return rbdc;
 }
 
 /*
index b361583..63b4f64 100644 (file)
@@ -540,7 +540,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
        spin_unlock_irq(&host->lock);
 
        DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
-       blk_execute_rq_nowait(rq, true, NULL);
+       blk_execute_rq_nowait(rq, true);
 
        return 0;
 
@@ -579,7 +579,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
        crq->msg_bucket = (u32) rc;
 
        DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
-       blk_execute_rq_nowait(rq, true, NULL);
+       blk_execute_rq_nowait(rq, true);
 
        return 0;
 }
index d624cc8..6fc7850 100644 (file)
@@ -37,6 +37,10 @@ MODULE_PARM_DESC(num_request_queues,
                 "0 for no limit. "
                 "Values > nr_cpu_ids truncated to nr_cpu_ids.");
 
+static unsigned int poll_queues;
+module_param(poll_queues, uint, 0644);
+MODULE_PARM_DESC(poll_queues, "The number of dedicated virtqueues for polling I/O");
+
 static int major;
 static DEFINE_IDA(vd_index_ida);
 
@@ -74,6 +78,7 @@ struct virtio_blk {
 
        /* num of vqs */
        int num_vqs;
+       int io_queues[HCTX_MAX_TYPES];
        struct virtio_blk_vq *vqs;
 };
 
@@ -96,8 +101,7 @@ static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
        }
 }
 
-static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
-               struct scatterlist *data_sg, bool have_data)
+static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
 {
        struct scatterlist hdr, status, *sgs[3];
        unsigned int num_out = 0, num_in = 0;
@@ -105,11 +109,11 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;
 
-       if (have_data) {
+       if (vbr->sg_table.nents) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
-                       sgs[num_out++] = data_sg;
+                       sgs[num_out++] = vbr->sg_table.sgl;
                else
-                       sgs[num_out + num_in++] = data_sg;
+                       sgs[num_out + num_in++] = vbr->sg_table.sgl;
        }
 
        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
@@ -299,6 +303,28 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
                virtqueue_notify(vq->vq);
 }
 
+static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
+                                       struct virtio_blk *vblk,
+                                       struct request *req,
+                                       struct virtblk_req *vbr)
+{
+       blk_status_t status;
+
+       status = virtblk_setup_cmd(vblk->vdev, req, vbr);
+       if (unlikely(status))
+               return status;
+
+       blk_mq_start_request(req);
+
+       vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
+       if (unlikely(vbr->sg_table.nents < 0)) {
+               virtblk_cleanup_cmd(req);
+               return BLK_STS_RESOURCE;
+       }
+
+       return BLK_STS_OK;
+}
+
 static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
 {
@@ -306,26 +332,17 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct request *req = bd->rq;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
-       int num;
        int qid = hctx->queue_num;
        bool notify = false;
        blk_status_t status;
        int err;
 
-       status = virtblk_setup_cmd(vblk->vdev, req, vbr);
+       status = virtblk_prep_rq(hctx, vblk, req, vbr);
        if (unlikely(status))
                return status;
 
-       blk_mq_start_request(req);
-
-       num = virtblk_map_data(hctx, req, vbr);
-       if (unlikely(num < 0)) {
-               virtblk_cleanup_cmd(req);
-               return BLK_STS_RESOURCE;
-       }
-
        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
-       err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
+       err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
                /* Don't stop the queue if -ENOMEM: we may have failed to
@@ -355,6 +372,75 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
        return BLK_STS_OK;
 }
 
+static bool virtblk_prep_rq_batch(struct request *req)
+{
+       struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
+       struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+       req->mq_hctx->tags->rqs[req->tag] = req;
+
+       return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
+}
+
+static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
+                                       struct request **rqlist,
+                                       struct request **requeue_list)
+{
+       unsigned long flags;
+       int err;
+       bool kick;
+
+       spin_lock_irqsave(&vq->lock, flags);
+
+       while (!rq_list_empty(*rqlist)) {
+               struct request *req = rq_list_pop(rqlist);
+               struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+               err = virtblk_add_req(vq->vq, vbr);
+               if (err) {
+                       virtblk_unmap_data(req, vbr);
+                       virtblk_cleanup_cmd(req);
+                       rq_list_add(requeue_list, req);
+               }
+       }
+
+       kick = virtqueue_kick_prepare(vq->vq);
+       spin_unlock_irqrestore(&vq->lock, flags);
+
+       return kick;
+}
+
+static void virtio_queue_rqs(struct request **rqlist)
+{
+       struct request *req, *next, *prev = NULL;
+       struct request *requeue_list = NULL;
+
+       rq_list_for_each_safe(rqlist, req, next) {
+               struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
+               bool kick;
+
+               if (!virtblk_prep_rq_batch(req)) {
+                       rq_list_move(rqlist, &requeue_list, req, prev);
+                       req = prev;
+                       if (!req)
+                               continue;
+               }
+
+               if (!next || req->mq_hctx != next->mq_hctx) {
+                       req->rq_next = NULL;
+                       kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
+                       if (kick)
+                               virtqueue_notify(vq->vq);
+
+                       *rqlist = next;
+                       prev = NULL;
+               } else
+                       prev = req;
+       }
+
+       *rqlist = requeue_list;
+}
+
 /* return id (s/n) string for *disk to *id_str
  */
 static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -512,6 +598,7 @@ static int init_vq(struct virtio_blk *vblk)
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
+       unsigned int num_poll_vqs;
        struct virtio_device *vdev = vblk->vdev;
        struct irq_affinity desc = { 0, };
 
@@ -520,6 +607,7 @@ static int init_vq(struct virtio_blk *vblk)
                                   &num_vqs);
        if (err)
                num_vqs = 1;
+
        if (!err && !num_vqs) {
                dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
                return -EINVAL;
@@ -529,6 +617,17 @@ static int init_vq(struct virtio_blk *vblk)
                        min_not_zero(num_request_queues, nr_cpu_ids),
                        num_vqs);
 
+       num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);
+
+       vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
+       vblk->io_queues[HCTX_TYPE_READ] = 0;
+       vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;
+
+       dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
+                               vblk->io_queues[HCTX_TYPE_DEFAULT],
+                               vblk->io_queues[HCTX_TYPE_READ],
+                               vblk->io_queues[HCTX_TYPE_POLL]);
+
        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;
@@ -541,12 +640,18 @@ static int init_vq(struct virtio_blk *vblk)
                goto out;
        }
 
-       for (i = 0; i < num_vqs; i++) {
+       for (i = 0; i < num_vqs - num_poll_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }
 
+       for (; i < num_vqs; i++) {
+               callbacks[i] = NULL;
+               snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i);
+               names[i] = vblk->vqs[i].name;
+       }
+
        /* Discover virtqueues and write information to configuration.  */
        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
        if (err)
@@ -692,16 +797,90 @@ static const struct attribute_group *virtblk_attr_groups[] = {
 static int virtblk_map_queues(struct blk_mq_tag_set *set)
 {
        struct virtio_blk *vblk = set->driver_data;
+       int i, qoff;
+
+       for (i = 0, qoff = 0; i < set->nr_maps; i++) {
+               struct blk_mq_queue_map *map = &set->map[i];
+
+               map->nr_queues = vblk->io_queues[i];
+               map->queue_offset = qoff;
+               qoff += map->nr_queues;
+
+               if (map->nr_queues == 0)
+                       continue;
+
+               /*
+                * Regular queues have interrupts and hence CPU affinity is
+                * defined by the core virtio code, but polling queues have
+                * no interrupts so we let the block layer assign CPU affinity.
+                */
+               if (i == HCTX_TYPE_POLL)
+                       blk_mq_map_queues(&set->map[i]);
+               else
+                       blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
+       }
+
+       return 0;
+}
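
A worked example of the resulting queue maps (the counts are hypothetical):

/*
 * Example: num_vqs = 4 and poll_queues = 1, so num_poll_vqs = 1:
 *
 *   io_queues[HCTX_TYPE_DEFAULT] = 3 -> map[DEFAULT]: nr_queues = 3, queue_offset = 0
 *   io_queues[HCTX_TYPE_READ]    = 0 -> map[READ]:    nr_queues = 0 (skipped)
 *   io_queues[HCTX_TYPE_POLL]    = 1 -> map[POLL]:    nr_queues = 1, queue_offset = 3
 *
 * The DEFAULT map is spread according to the device's IRQ affinity by
 * blk_mq_virtio_map_queues(), while the interrupt-less POLL map is laid out
 * by blk_mq_map_queues().
 */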
+
+static void virtblk_complete_batch(struct io_comp_batch *iob)
+{
+       struct request *req;
+
+       rq_list_for_each(&iob->req_list, req) {
+               virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
+               virtblk_cleanup_cmd(req);
+       }
+       blk_mq_end_request_batch(iob);
+}
+
+static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+{
+       struct virtio_blk *vblk = hctx->queue->queuedata;
+       struct virtio_blk_vq *vq = hctx->driver_data;
+       struct virtblk_req *vbr;
+       unsigned long flags;
+       unsigned int len;
+       int found = 0;
 
-       return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
-                                       vblk->vdev, 0);
+       spin_lock_irqsave(&vq->lock, flags);
+
+       while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
+               struct request *req = blk_mq_rq_from_pdu(vbr);
+
+               found++;
+               if (!blk_mq_add_to_batch(req, iob, vbr->status,
+                                               virtblk_complete_batch))
+                       blk_mq_complete_request(req);
+       }
+
+       if (found)
+               blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
+
+       spin_unlock_irqrestore(&vq->lock, flags);
+
+       return found;
+}
+
+static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+                         unsigned int hctx_idx)
+{
+       struct virtio_blk *vblk = data;
+       struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
+
+       WARN_ON(vblk->tag_set.tags[hctx_idx] != hctx->tags);
+       hctx->driver_data = vq;
+       return 0;
 }
 
 static const struct blk_mq_ops virtio_mq_ops = {
        .queue_rq       = virtio_queue_rq,
+       .queue_rqs      = virtio_queue_rqs,
        .commit_rqs     = virtio_commit_rqs,
+       .init_hctx      = virtblk_init_hctx,
        .complete       = virtblk_request_done,
        .map_queues     = virtblk_map_queues,
+       .poll           = virtblk_poll,
 };
 
 static unsigned int virtblk_queue_depth;
@@ -778,6 +957,9 @@ static int virtblk_probe(struct virtio_device *vdev)
                sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;
+       vblk->tag_set.nr_maps = 1;
+       if (vblk->io_queues[HCTX_TYPE_POLL])
+               vblk->tag_set.nr_maps = 3;
 
        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
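
As a user-space sketch of how the new poll path can be exercised (assumes liburing, that poll_queues was given a non-zero value when loading virtio_blk as its use in init_vq() suggests, and that /dev/vda is a virtio-blk disk; none of this is part of the patch):

/* Build with: gcc -o iopoll iopoll.c -luring */
#define _GNU_SOURCE
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd, ret;

	/* IOPOLL requires O_DIRECT and block-aligned buffers */
	fd = open("/dev/vda", O_RDONLY | O_DIRECT);
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;

	/* Completions are reaped by busy-polling, which ends up in ->poll() */
	ret = io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
	if (ret)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	if (!io_uring_wait_cqe(&ring, &cqe)) {
		printf("read %d bytes via a poll queue\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(fd);
	free(buf);
	return 0;
}

The dev_info() added in init_vq() reports the resulting default/read/poll split in the kernel log.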
index 55e004d..a88ce44 100644 (file)
@@ -1221,7 +1221,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
                        list_del(&persistent_gnt->node);
                        if (persistent_gnt->gref != INVALID_GRANT_REF) {
                                gnttab_end_foreign_access(persistent_gnt->gref,
-                                                         0UL);
+                                                         NULL);
                                rinfo->persistent_gnts_c--;
                        }
                        if (info->feature_persistent)
@@ -1244,7 +1244,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
                       rinfo->shadow[i].req.u.rw.nr_segments;
                for (j = 0; j < segs; j++) {
                        persistent_gnt = rinfo->shadow[i].grants_used[j];
-                       gnttab_end_foreign_access(persistent_gnt->gref, 0UL);
+                       gnttab_end_foreign_access(persistent_gnt->gref, NULL);
                        if (info->feature_persistent)
                                __free_page(persistent_gnt->page);
                        kfree(persistent_gnt);
@@ -1259,7 +1259,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 
                for (j = 0; j < INDIRECT_GREFS(segs); j++) {
                        persistent_gnt = rinfo->shadow[i].indirect_grants[j];
-                       gnttab_end_foreign_access(persistent_gnt->gref, 0UL);
+                       gnttab_end_foreign_access(persistent_gnt->gref, NULL);
                        __free_page(persistent_gnt->page);
                        kfree(persistent_gnt);
                }
index 8fd4a35..e81a970 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/acpi.h>
 #include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
 
 #include "fsl-mc-private.h"
 
@@ -140,15 +141,33 @@ static int fsl_mc_dma_configure(struct device *dev)
 {
        struct device *dma_dev = dev;
        struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+       struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
        u32 input_id = mc_dev->icid;
+       int ret;
 
        while (dev_is_fsl_mc(dma_dev))
                dma_dev = dma_dev->parent;
 
        if (dev_of_node(dma_dev))
-               return of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
+               ret = of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
+       else
+               ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
+
+       if (!ret && !mc_drv->driver_managed_dma) {
+               ret = iommu_device_use_default_domain(dev);
+               if (ret)
+                       arch_teardown_dma_ops(dev);
+       }
+
+       return ret;
+}
+
+static void fsl_mc_dma_cleanup(struct device *dev)
+{
+       struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
 
-       return acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
+       if (!mc_drv->driver_managed_dma)
+               iommu_device_unuse_default_domain(dev);
 }
 
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
@@ -166,31 +185,14 @@ static ssize_t driver_override_store(struct device *dev,
                                     const char *buf, size_t count)
 {
        struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-       char *driver_override, *old = mc_dev->driver_override;
-       char *cp;
+       int ret;
 
        if (WARN_ON(dev->bus != &fsl_mc_bus_type))
                return -EINVAL;
 
-       if (count >= (PAGE_SIZE - 1))
-               return -EINVAL;
-
-       driver_override = kstrndup(buf, count, GFP_KERNEL);
-       if (!driver_override)
-               return -ENOMEM;
-
-       cp = strchr(driver_override, '\n');
-       if (cp)
-               *cp = '\0';
-
-       if (strlen(driver_override)) {
-               mc_dev->driver_override = driver_override;
-       } else {
-               kfree(driver_override);
-               mc_dev->driver_override = NULL;
-       }
-
-       kfree(old);
+       ret = driver_set_override(dev, &mc_dev->driver_override, buf, count);
+       if (ret)
+               return ret;
 
        return count;
 }
@@ -312,6 +314,7 @@ struct bus_type fsl_mc_bus_type = {
        .match = fsl_mc_bus_match,
        .uevent = fsl_mc_bus_uevent,
        .dma_configure  = fsl_mc_dma_configure,
+       .dma_cleanup = fsl_mc_dma_cleanup,
        .dev_groups = fsl_mc_dev_groups,
        .bus_groups = fsl_mc_bus_groups,
 };
index 4748df7..b39a11e 100644 (file)
@@ -6,3 +6,4 @@
 #
 
 source "drivers/bus/mhi/host/Kconfig"
+source "drivers/bus/mhi/ep/Kconfig"
index 5f5708a..4698133 100644 (file)
@@ -1,2 +1,5 @@
 # Host MHI stack
 obj-y += host/
+
+# Endpoint MHI stack
+obj-y += ep/
index b4ef9ac..f794b9c 100644 (file)
 #define MHI_TRE_GET_EV_LINKSPEED(tre)  FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 1)))
 #define MHI_TRE_GET_EV_LINKWIDTH(tre)  FIELD_GET(GENMASK(7, 0), (MHI_TRE_GET_DWORD(tre, 0)))
 
+/* State change event */
+#define MHI_SC_EV_PTR                  0
+#define MHI_SC_EV_DWORD0(state)                cpu_to_le32(FIELD_PREP(GENMASK(31, 24), state))
+#define MHI_SC_EV_DWORD1(type)         cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
+
+/* EE event */
+#define MHI_EE_EV_PTR                  0
+#define MHI_EE_EV_DWORD0(ee)           cpu_to_le32(FIELD_PREP(GENMASK(31, 24), ee))
+#define MHI_EE_EV_DWORD1(type)         cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
+
+/* Command Completion event */
+#define MHI_CC_EV_PTR(ptr)             cpu_to_le64(ptr)
+#define MHI_CC_EV_DWORD0(code)         cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code))
+#define MHI_CC_EV_DWORD1(type)         cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
+
 /* Transfer descriptor macros */
 #define MHI_TRE_DATA_PTR(ptr)          cpu_to_le64(ptr)
 #define MHI_TRE_DATA_DWORD0(len)       cpu_to_le32(FIELD_PREP(GENMASK(15, 0), len))
                                                                FIELD_PREP(BIT(9), ieot) |  \
                                                                FIELD_PREP(BIT(8), ieob) |  \
                                                                FIELD_PREP(BIT(0), chain))
+#define MHI_TRE_DATA_GET_PTR(tre)      le64_to_cpu((tre)->ptr)
+#define MHI_TRE_DATA_GET_LEN(tre)      FIELD_GET(GENMASK(15, 0), MHI_TRE_GET_DWORD(tre, 0))
+#define MHI_TRE_DATA_GET_CHAIN(tre)    (!!(FIELD_GET(BIT(0), MHI_TRE_GET_DWORD(tre, 1))))
+#define MHI_TRE_DATA_GET_IEOB(tre)     (!!(FIELD_GET(BIT(8), MHI_TRE_GET_DWORD(tre, 1))))
+#define MHI_TRE_DATA_GET_IEOT(tre)     (!!(FIELD_GET(BIT(9), MHI_TRE_GET_DWORD(tre, 1))))
+#define MHI_TRE_DATA_GET_BEI(tre)      (!!(FIELD_GET(BIT(10), MHI_TRE_GET_DWORD(tre, 1))))
 
 /* RSC transfer descriptor macros */
 #define MHI_RSCTRE_DATA_PTR(ptr, len)  cpu_to_le64(FIELD_PREP(GENMASK(64, 48), len) | ptr)
diff --git a/drivers/bus/mhi/ep/Kconfig b/drivers/bus/mhi/ep/Kconfig
new file mode 100644 (file)
index 0000000..90ab3b0
--- /dev/null
@@ -0,0 +1,10 @@
+config MHI_BUS_EP
+       tristate "Modem Host Interface (MHI) bus Endpoint implementation"
+       help
+         Bus driver for the MHI protocol. Modem Host Interface (MHI) is a
+         communication protocol used by a host processor to control
+         and communicate with a modem device over a high-speed peripheral
+         bus or shared memory.
+
+         MHI_BUS_EP implements the MHI protocol for endpoint devices,
+         such as an SDX55 modem connected to the host machine over PCIe.
diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile
new file mode 100644 (file)
index 0000000..aad85f1
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o
+mhi_ep-y := main.o mmio.o ring.o sm.o
diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h
new file mode 100644 (file)
index 0000000..a2125fa
--- /dev/null
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ *
+ */
+
+#ifndef _MHI_EP_INTERNAL_
+#define _MHI_EP_INTERNAL_
+
+#include <linux/bitfield.h>
+
+#include "../common.h"
+
+extern struct bus_type mhi_ep_bus_type;
+
+#define MHI_REG_OFFSET                         0x100
+#define BHI_REG_OFFSET                         0x200
+
+/* MHI registers */
+#define EP_MHIREGLEN                           (MHI_REG_OFFSET + MHIREGLEN)
+#define EP_MHIVER                              (MHI_REG_OFFSET + MHIVER)
+#define EP_MHICFG                              (MHI_REG_OFFSET + MHICFG)
+#define EP_CHDBOFF                             (MHI_REG_OFFSET + CHDBOFF)
+#define EP_ERDBOFF                             (MHI_REG_OFFSET + ERDBOFF)
+#define EP_BHIOFF                              (MHI_REG_OFFSET + BHIOFF)
+#define EP_BHIEOFF                             (MHI_REG_OFFSET + BHIEOFF)
+#define EP_DEBUGOFF                            (MHI_REG_OFFSET + DEBUGOFF)
+#define EP_MHICTRL                             (MHI_REG_OFFSET + MHICTRL)
+#define EP_MHISTATUS                           (MHI_REG_OFFSET + MHISTATUS)
+#define EP_CCABAP_LOWER                                (MHI_REG_OFFSET + CCABAP_LOWER)
+#define EP_CCABAP_HIGHER                       (MHI_REG_OFFSET + CCABAP_HIGHER)
+#define EP_ECABAP_LOWER                                (MHI_REG_OFFSET + ECABAP_LOWER)
+#define EP_ECABAP_HIGHER                       (MHI_REG_OFFSET + ECABAP_HIGHER)
+#define EP_CRCBAP_LOWER                                (MHI_REG_OFFSET + CRCBAP_LOWER)
+#define EP_CRCBAP_HIGHER                       (MHI_REG_OFFSET + CRCBAP_HIGHER)
+#define EP_CRDB_LOWER                          (MHI_REG_OFFSET + CRDB_LOWER)
+#define EP_CRDB_HIGHER                         (MHI_REG_OFFSET + CRDB_HIGHER)
+#define EP_MHICTRLBASE_LOWER                   (MHI_REG_OFFSET + MHICTRLBASE_LOWER)
+#define EP_MHICTRLBASE_HIGHER                  (MHI_REG_OFFSET + MHICTRLBASE_HIGHER)
+#define EP_MHICTRLLIMIT_LOWER                  (MHI_REG_OFFSET + MHICTRLLIMIT_LOWER)
+#define EP_MHICTRLLIMIT_HIGHER                 (MHI_REG_OFFSET + MHICTRLLIMIT_HIGHER)
+#define EP_MHIDATABASE_LOWER                   (MHI_REG_OFFSET + MHIDATABASE_LOWER)
+#define EP_MHIDATABASE_HIGHER                  (MHI_REG_OFFSET + MHIDATABASE_HIGHER)
+#define EP_MHIDATALIMIT_LOWER                  (MHI_REG_OFFSET + MHIDATALIMIT_LOWER)
+#define EP_MHIDATALIMIT_HIGHER                 (MHI_REG_OFFSET + MHIDATALIMIT_HIGHER)
+
+/* MHI BHI registers */
+#define EP_BHI_INTVEC                          (BHI_REG_OFFSET + BHI_INTVEC)
+#define EP_BHI_EXECENV                         (BHI_REG_OFFSET + BHI_EXECENV)
+
+/* MHI Doorbell registers */
+#define CHDB_LOWER_n(n)                                (0x400 + 0x8 * (n))
+#define CHDB_HIGHER_n(n)                       (0x404 + 0x8 * (n))
+#define ERDB_LOWER_n(n)                                (0x800 + 0x8 * (n))
+#define ERDB_HIGHER_n(n)                       (0x804 + 0x8 * (n))
+
+#define MHI_CTRL_INT_STATUS                    0x4
+#define MHI_CTRL_INT_STATUS_MSK                        BIT(0)
+#define MHI_CTRL_INT_STATUS_CRDB_MSK           BIT(1)
+#define MHI_CHDB_INT_STATUS_n(n)               (0x28 + 0x4 * (n))
+#define MHI_ERDB_INT_STATUS_n(n)               (0x38 + 0x4 * (n))
+
+#define MHI_CTRL_INT_CLEAR                     0x4c
+#define MHI_CTRL_INT_MMIO_WR_CLEAR             BIT(2)
+#define MHI_CTRL_INT_CRDB_CLEAR                        BIT(1)
+#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR                BIT(0)
+
+#define MHI_CHDB_INT_CLEAR_n(n)                        (0x70 + 0x4 * (n))
+#define MHI_CHDB_INT_CLEAR_n_CLEAR_ALL         GENMASK(31, 0)
+#define MHI_ERDB_INT_CLEAR_n(n)                        (0x80 + 0x4 * (n))
+#define MHI_ERDB_INT_CLEAR_n_CLEAR_ALL         GENMASK(31, 0)
+
+/*
+ * Unlike the usual "masking" convention, writing "1" to a bit in this register
+ * enables the interrupt and writing "0" disables it.
+ */
+#define MHI_CTRL_INT_MASK                      0x94
+#define MHI_CTRL_INT_MASK_MASK                 GENMASK(1, 0)
+#define MHI_CTRL_MHICTRL_MASK                  BIT(0)
+#define MHI_CTRL_CRDB_MASK                     BIT(1)
+
+#define MHI_CHDB_INT_MASK_n(n)                 (0xb8 + 0x4 * (n))
+#define MHI_CHDB_INT_MASK_n_EN_ALL             GENMASK(31, 0)
+#define MHI_ERDB_INT_MASK_n(n)                 (0xc8 + 0x4 * (n))
+#define MHI_ERDB_INT_MASK_n_EN_ALL             GENMASK(31, 0)
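
To make the inverted convention described above concrete (illustrative only):

/*
 * Example: to leave only the MHICTRL interrupt enabled, the MHI_CTRL_INT_MASK
 * register would hold MHI_CTRL_MHICTRL_MASK (bit 0 set, bit 1 clear);
 * clearing bit 0 as well masks (disables) both control interrupts.
 */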
+
+#define NR_OF_CMD_RINGS                                1
+#define MHI_MASK_ROWS_CH_DB                    4
+#define MHI_MASK_ROWS_EV_DB                    4
+#define MHI_MASK_CH_LEN                                32
+#define MHI_MASK_EV_LEN                                32
+
+/* Generic context */
+struct mhi_generic_ctx {
+       __le32 reserved0;
+       __le32 reserved1;
+       __le32 reserved2;
+
+       __le64 rbase __packed __aligned(4);
+       __le64 rlen __packed __aligned(4);
+       __le64 rp __packed __aligned(4);
+       __le64 wp __packed __aligned(4);
+};
+
+enum mhi_ep_ring_type {
+       RING_TYPE_CMD,
+       RING_TYPE_ER,
+       RING_TYPE_CH,
+};
+
+/* Ring element */
+union mhi_ep_ring_ctx {
+       struct mhi_cmd_ctxt cmd;
+       struct mhi_event_ctxt ev;
+       struct mhi_chan_ctxt ch;
+       struct mhi_generic_ctx generic;
+};
+
+struct mhi_ep_ring_item {
+       struct list_head node;
+       struct mhi_ep_ring *ring;
+};
+
+struct mhi_ep_ring {
+       struct mhi_ep_cntrl *mhi_cntrl;
+       union mhi_ep_ring_ctx *ring_ctx;
+       struct mhi_ring_element *ring_cache;
+       enum mhi_ep_ring_type type;
+       u64 rbase;
+       size_t rd_offset;
+       size_t wr_offset;
+       size_t ring_size;
+       u32 db_offset_h;
+       u32 db_offset_l;
+       u32 ch_id;
+       u32 er_index;
+       u32 irq_vector;
+       bool started;
+};
+
+struct mhi_ep_cmd {
+       struct mhi_ep_ring ring;
+};
+
+struct mhi_ep_event {
+       struct mhi_ep_ring ring;
+};
+
+struct mhi_ep_state_transition {
+       struct list_head node;
+       enum mhi_state state;
+};
+
+struct mhi_ep_chan {
+       char *name;
+       struct mhi_ep_device *mhi_dev;
+       struct mhi_ep_ring ring;
+       struct mutex lock;
+       void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result);
+       enum mhi_ch_state state;
+       enum dma_data_direction dir;
+       u64 tre_loc;
+       u32 tre_size;
+       u32 tre_bytes_left;
+       u32 chan;
+       bool skip_td;
+};
+
+/* MHI Ring related functions */
+void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id);
+void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring);
+int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+                     union mhi_ep_ring_ctx *ctx);
+size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr);
+int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *element);
+void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring);
+int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring);
+
+/* MMIO related functions */
+u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset);
+void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val);
+void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val);
+u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask);
+void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl);
+u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring);
+void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value);
+void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state,
+                              bool *mhi_reset);
+void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl);
+
+/* MHI EP core functions */
+int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state);
+int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env);
+bool mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state cur_mhi_state,
+                           enum mhi_state mhi_state);
+int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state);
+int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl);
+
+#endif
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
new file mode 100644 (file)
index 0000000..40109a7
--- /dev/null
@@ -0,0 +1,1591 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MHI Endpoint bus stack
+ *
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mhi_ep.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include "internal.h"
+
+#define M0_WAIT_DELAY_MS       100
+#define M0_WAIT_COUNT          100
+
+static DEFINE_IDA(mhi_ep_cntrl_ida);
+
+static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+static int mhi_ep_destroy_device(struct device *dev, void *data);
+
+static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
+                            struct mhi_ring_element *el, bool bei)
+{
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       union mhi_ep_ring_ctx *ctx;
+       struct mhi_ep_ring *ring;
+       int ret;
+
+       mutex_lock(&mhi_cntrl->event_lock);
+       ring = &mhi_cntrl->mhi_event[ring_idx].ring;
+       ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
+       if (!ring->started) {
+               ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
+               if (ret) {
+                       dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
+                       goto err_unlock;
+               }
+       }
+
+       /* Add element to the event ring */
+       ret = mhi_ep_ring_add_element(ring, el);
+       if (ret) {
+               dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);
+               goto err_unlock;
+       }
+
+       mutex_unlock(&mhi_cntrl->event_lock);
+
+       /*
+        * Raise an IRQ to the host only if the BEI flag is not set in the TRE.
+        * The host might set this flag for interrupt moderation as per the MHI
+        * protocol.
+        */
+       if (!bei)
+               mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
+
+       return 0;
+
+err_unlock:
+       mutex_unlock(&mhi_cntrl->event_lock);
+
+       return ret;
+}
+
+static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+                                       struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
+{
+       struct mhi_ring_element event = {};
+
+       event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
+       event.dword[0] = MHI_TRE_EV_DWORD0(code, len);
+       event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
+
+       return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre));
+}
+
+int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
+{
+       struct mhi_ring_element event = {};
+
+       event.dword[0] = MHI_SC_EV_DWORD0(state);
+       event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
+
+       return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+}
+
+int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
+{
+       struct mhi_ring_element event = {};
+
+       event.dword[0] = MHI_EE_EV_DWORD0(exec_env);
+       event.dword[1] = MHI_EE_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
+
+       return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+}
+
+static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
+{
+       struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
+       struct mhi_ring_element event = {};
+
+       event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
+       event.dword[0] = MHI_CC_EV_DWORD0(code);
+       event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
+
+       return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+}
+
+static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+{
+       struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       struct mhi_result result = {};
+       struct mhi_ep_chan *mhi_chan;
+       struct mhi_ep_ring *ch_ring;
+       u32 tmp, ch_id;
+       int ret;
+
+       ch_id = MHI_TRE_GET_CMD_CHID(el);
+       mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
+       ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;
+
+       switch (MHI_TRE_GET_CMD_TYPE(el)) {
+       case MHI_PKT_TYPE_START_CHAN_CMD:
+               dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);
+
+               mutex_lock(&mhi_chan->lock);
+               /* Initialize and configure the corresponding channel ring */
+               if (!ch_ring->started) {
+                       ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
+                               (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
+                       if (ret) {
+                               dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id);
+                               ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
+                                                       MHI_EV_CC_UNDEFINED_ERR);
+                               if (ret)
+                                       dev_err(dev, "Error sending completion event: %d\n", ret);
+
+                               goto err_unlock;
+                       }
+               }
+
+               /* Set channel state to RUNNING */
+               mhi_chan->state = MHI_CH_STATE_RUNNING;
+               tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
+               tmp &= ~CHAN_CTX_CHSTATE_MASK;
+               tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
+               mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
+
+               ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+               if (ret) {
+                       dev_err(dev, "Error sending command completion event (%u)\n",
+                               MHI_EV_CC_SUCCESS);
+                       goto err_unlock;
+               }
+
+               mutex_unlock(&mhi_chan->lock);
+
+               /*
+                * Create the MHI device only during UL channel start. Since MHI
+                * channels operate in pairs, we'll associate both the UL and DL
+                * channels with the same device.
+                *
+                * We also need to check for mhi_dev != NULL because the host
+                * will issue the START_CHAN command during resume and we don't
+                * destroy the device during suspend.
+                */
+               if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
+                       ret = mhi_ep_create_device(mhi_cntrl, ch_id);
+                       if (ret) {
+                               dev_err(dev, "Error creating device for channel (%u)\n", ch_id);
+                               mhi_ep_handle_syserr(mhi_cntrl);
+                               return ret;
+                       }
+               }
+
+               /* Finally, enable DB for the channel */
+               mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);
+
+               break;
+       case MHI_PKT_TYPE_STOP_CHAN_CMD:
+               dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
+               if (!ch_ring->started) {
+                       dev_err(dev, "Channel (%u) not opened\n", ch_id);
+                       return -ENODEV;
+               }
+
+               mutex_lock(&mhi_chan->lock);
+               /* Disable DB for the channel */
+               mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);
+
+               /* Send channel disconnect status to client drivers */
+               result.transaction_status = -ENOTCONN;
+               result.bytes_xferd = 0;
+               mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+               /* Set channel state to STOP */
+               mhi_chan->state = MHI_CH_STATE_STOP;
+               tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
+               tmp &= ~CHAN_CTX_CHSTATE_MASK;
+               tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
+               mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
+
+               ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+               if (ret) {
+                       dev_err(dev, "Error sending command completion event (%u)\n",
+                               MHI_EV_CC_SUCCESS);
+                       goto err_unlock;
+               }
+
+               mutex_unlock(&mhi_chan->lock);
+               break;
+       case MHI_PKT_TYPE_RESET_CHAN_CMD:
+               dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
+               if (!ch_ring->started) {
+                       dev_err(dev, "Channel (%u) not opened\n", ch_id);
+                       return -ENODEV;
+               }
+
+               mutex_lock(&mhi_chan->lock);
+               /* Stop and reset the transfer ring */
+               mhi_ep_ring_reset(mhi_cntrl, ch_ring);
+
+               /* Send channel disconnect status to client driver */
+               result.transaction_status = -ENOTCONN;
+               result.bytes_xferd = 0;
+               mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+               /* Set channel state to DISABLED */
+               mhi_chan->state = MHI_CH_STATE_DISABLED;
+               tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
+               tmp &= ~CHAN_CTX_CHSTATE_MASK;
+               tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
+               mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
+
+               ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+               if (ret) {
+                       dev_err(dev, "Error sending command completion event (%u)\n",
+                               MHI_EV_CC_SUCCESS);
+                       goto err_unlock;
+               }
+
+               mutex_unlock(&mhi_chan->lock);
+               break;
+       default:
+               dev_err(dev, "Invalid command received: %lu for channel (%u)\n",
+                       MHI_TRE_GET_CMD_TYPE(el), ch_id);
+               return -EINVAL;
+       }
+
+       return 0;
+
+err_unlock:
+       mutex_unlock(&mhi_chan->lock);
+
+       return ret;
+}
+
+bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
+{
+       struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
+                                                               mhi_dev->ul_chan;
+       struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+       struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
+       return !!(ring->rd_offset == ring->wr_offset);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
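
A minimal sketch of how an endpoint client driver might pair this helper with mhi_ep_queue_skb() below (the function and variable names are hypothetical):

/* Hypothetical client TX path, not part of this patch */
static void my_client_tx(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
{
	/* Only queue if the host has posted DL TREs for the endpoint to fill */
	if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
		dev_kfree_skb_any(skb);
		return;
	}

	if (mhi_ep_queue_skb(mhi_dev, skb))
		dev_kfree_skb_any(skb);
}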
+
+static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+                               struct mhi_ep_ring *ring,
+                               struct mhi_result *result,
+                               u32 len)
+{
+       struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       size_t tr_len, read_offset, write_offset;
+       struct mhi_ring_element *el;
+       bool tr_done = false;
+       void *write_addr;
+       u64 read_addr;
+       u32 buf_left;
+       int ret;
+
+       buf_left = len;
+
+       do {
+               /* Don't process the transfer ring if the channel is not in RUNNING state */
+               if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
+                       dev_err(dev, "Channel not available\n");
+                       return -ENODEV;
+               }
+
+               el = &ring->ring_cache[ring->rd_offset];
+
+               /* Check if there is data pending to be read from a previous read operation */
+               if (mhi_chan->tre_bytes_left) {
+                       dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
+                       tr_len = min(buf_left, mhi_chan->tre_bytes_left);
+               } else {
+                       mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
+                       mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
+                       mhi_chan->tre_bytes_left = mhi_chan->tre_size;
+
+                       tr_len = min(buf_left, mhi_chan->tre_size);
+               }
+
+               read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
+               write_offset = len - buf_left;
+               read_addr = mhi_chan->tre_loc + read_offset;
+               write_addr = result->buf_addr + write_offset;
+
+               dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
+               ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
+               if (ret < 0) {
+                       dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
+                       return ret;
+               }
+
+               buf_left -= tr_len;
+               mhi_chan->tre_bytes_left -= tr_len;
+
+               /*
+                * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
+                * read completely:
+                *
+                * 1. Send completion event to the host based on the flags set in TRE.
+                * 2. Increment the local read offset of the transfer ring.
+                */
+               if (!mhi_chan->tre_bytes_left) {
+                       /*
+                        * The host will split the data packet into multiple TREs if it can't fit
+                        * the packet in a single TRE. In that case, the CHAIN flag will be set
+                        * by the host for all TREs except the last one.
+                        */
+                       if (MHI_TRE_DATA_GET_CHAIN(el)) {
+                               /*
+                                * The IEOB (Interrupt on End of Block) flag will be set by the host
+                                * if it expects a completion event for every TRE of a TD.
+                                */
+                               if (MHI_TRE_DATA_GET_IEOB(el)) {
+                                       ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+                                                                    MHI_TRE_DATA_GET_LEN(el),
+                                                                    MHI_EV_CC_EOB);
+                                       if (ret < 0) {
+                                               dev_err(&mhi_chan->mhi_dev->dev,
+                                                       "Error sending transfer compl. event\n");
+                                               return ret;
+                                       }
+                               }
+                       } else {
+                               /*
+                                * The IEOT (Interrupt on End of Transfer) flag will be set by the
+                                * host on the last TRE of the TD, for which it expects a completion
+                                * event.
+                                */
+                               if (MHI_TRE_DATA_GET_IEOT(el)) {
+                                       ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+                                                                    MHI_TRE_DATA_GET_LEN(el),
+                                                                    MHI_EV_CC_EOT);
+                                       if (ret < 0) {
+                                               dev_err(&mhi_chan->mhi_dev->dev,
+                                                       "Error sending transfer compl. event\n");
+                                               return ret;
+                                       }
+                               }
+
+                               tr_done = true;
+                       }
+
+                       mhi_ep_ring_inc_index(ring);
+               }
+
+               result->bytes_xferd += tr_len;
+       } while (buf_left && !tr_done);
+
+       return 0;
+}
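
A worked example of the completion rules implemented above (TRE sizes are illustrative):

/*
 * Example: the host posts a 12 KB packet as a TD of three 4 KB TREs:
 *
 *   TRE0: CHAIN = 1, IEOB = 1 -> an EOB completion event is sent once it is read
 *   TRE1: CHAIN = 1, IEOB = 0 -> no completion event
 *   TRE2: CHAIN = 0, IEOT = 1 -> an EOT completion event is sent, tr_done = true
 *
 * If the result buffer is smaller than a TRE, tre_bytes_left carries the
 * unread remainder into the next mhi_ep_read_channel() call.
 */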
+
+static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+{
+       struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+       struct mhi_result result = {};
+       u32 len = MHI_EP_DEFAULT_MTU;
+       struct mhi_ep_chan *mhi_chan;
+       int ret;
+
+       mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+
+       /*
+        * Bail out if the transfer callback is not registered for the channel.
+        * This is most likely because the client driver is not loaded at this point.
+        */
+       if (!mhi_chan->xfer_cb) {
+               dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
+               return -ENODEV;
+       }
+
+       if (ring->ch_id % 2) {
+               /* DL channel */
+               result.dir = mhi_chan->dir;
+               mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+       } else {
+               /* UL channel */
+               result.buf_addr = kzalloc(len, GFP_KERNEL);
+               if (!result.buf_addr)
+                       return -ENOMEM;
+
+               do {
+                       ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
+                       if (ret < 0) {
+                               dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
+                               kfree(result.buf_addr);
+                               return ret;
+                       }
+
+                       result.dir = mhi_chan->dir;
+                       mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+                       result.bytes_xferd = 0;
+                       memset(result.buf_addr, 0, len);
+
+                       /* Read until the ring becomes empty */
+               } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
+
+               kfree(result.buf_addr);
+       }
+
+       return 0;
+}
+
+/* TODO: Handle partially formed TDs */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
+{
+       struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+       struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
+       struct device *dev = &mhi_chan->mhi_dev->dev;
+       struct mhi_ring_element *el;
+       u32 buf_left, read_offset;
+       struct mhi_ep_ring *ring;
+       enum mhi_ev_ccs code;
+       void *read_addr;
+       u64 write_addr;
+       size_t tr_len;
+       u32 tre_len;
+       int ret;
+
+       buf_left = skb->len;
+       ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
+       mutex_lock(&mhi_chan->lock);
+
+       do {
+               /* Don't process the transfer ring if the channel is not in RUNNING state */
+               if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
+                       dev_err(dev, "Channel not available\n");
+                       ret = -ENODEV;
+                       goto err_exit;
+               }
+
+               if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
+                       dev_err(dev, "TRE not available!\n");
+                       ret = -ENOSPC;
+                       goto err_exit;
+               }
+
+               el = &ring->ring_cache[ring->rd_offset];
+               tre_len = MHI_TRE_DATA_GET_LEN(el);
+
+               tr_len = min(buf_left, tre_len);
+               read_offset = skb->len - buf_left;
+               read_addr = skb->data + read_offset;
+               write_addr = MHI_TRE_DATA_GET_PTR(el);
+
+               dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
+               ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
+               if (ret < 0) {
+                       dev_err(dev, "Error writing to the channel\n");
+                       goto err_exit;
+               }
+
+               buf_left -= tr_len;
+               /*
+                * For all TREs queued by the host for the DL channel, only the EOT flag
+                * will be set. If the packet doesn't fit into a single TRE, send the
+                * OVERFLOW event to the host so that it can adjust the packet boundary
+                * to the next TREs. Otherwise, send the EOT event indicating the packet
+                * boundary.
+                */
+               if (buf_left)
+                       code = MHI_EV_CC_OVERFLOW;
+               else
+                       code = MHI_EV_CC_EOT;
+
+               ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
+               if (ret) {
+                       dev_err(dev, "Error sending transfer completion event\n");
+                       goto err_exit;
+               }
+
+               mhi_ep_ring_inc_index(ring);
+       } while (buf_left);
+
+       mutex_unlock(&mhi_chan->lock);
+
+       return 0;
+
+err_exit:
+       mutex_unlock(&mhi_chan->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);
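
A worked example of the OVERFLOW/EOT handling above (sizes are illustrative):

/*
 * Example: skb->len = 8 KB and the host posted 4 KB DL TREs:
 *
 *   iteration 1: tr_len = 4 KB, buf_left = 4 KB -> completion code OVERFLOW
 *   iteration 2: tr_len = 4 KB, buf_left = 0    -> completion code EOT
 *
 * The host reassembles the packet from the two TRE buffers using the
 * OVERFLOW/EOT boundary.
 */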
+
+static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       int ret;
+
+       /* Update the number of event rings (NER) programmed by the host */
+       mhi_ep_mmio_update_ner(mhi_cntrl);
+
+       dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n",
+                mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);
+
+       ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
+       ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
+       cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
+
+       /* Get the channel context base pointer from host */
+       mhi_ep_mmio_get_chc_base(mhi_cntrl);
+
+       /* Allocate and map memory for caching host channel context */
+       ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
+                                  &mhi_cntrl->ch_ctx_cache_phys,
+                                  (void __iomem **) &mhi_cntrl->ch_ctx_cache,
+                                  ch_ctx_host_size);
+       if (ret) {
+               dev_err(dev, "Failed to allocate and map ch_ctx_cache\n");
+               return ret;
+       }
+
+       /* Get the event context base pointer from host */
+       mhi_ep_mmio_get_erc_base(mhi_cntrl);
+
+       /* Allocate and map memory for caching host event context */
+       ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
+                                  &mhi_cntrl->ev_ctx_cache_phys,
+                                  (void __iomem **) &mhi_cntrl->ev_ctx_cache,
+                                  ev_ctx_host_size);
+       if (ret) {
+               dev_err(dev, "Failed to allocate and map ev_ctx_cache\n");
+               goto err_ch_ctx;
+       }
+
+       /* Get the command context base pointer from host */
+       mhi_ep_mmio_get_crc_base(mhi_cntrl);
+
+       /* Allocate and map memory for caching host command context */
+       ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
+                                  &mhi_cntrl->cmd_ctx_cache_phys,
+                                  (void __iomem **) &mhi_cntrl->cmd_ctx_cache,
+                                  cmd_ctx_host_size);
+       if (ret) {
+               dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n");
+               goto err_ev_ctx;
+       }
+
+       /* Initialize command ring */
+       ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
+                               (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
+       if (ret) {
+               dev_err(dev, "Failed to start the command ring\n");
+               goto err_cmd_ctx;
+       }
+
+       return ret;
+
+err_cmd_ctx:
+       mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
+                             (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
+
+err_ev_ctx:
+       mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
+                             (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
+
+err_ch_ctx:
+       mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
+                             (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
+
+       return ret;
+}
+
+static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
+
+       ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
+       ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
+       cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
+
+       mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
+                             (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
+
+       mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
+                             (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
+
+       mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
+                             (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
+}
+
+static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       /*
+        * Doorbell interrupts are enabled when the corresponding channel gets started.
+        * Enabling all interrupts here triggers spurious irqs as some of the interrupts
+        * associated with hw channels always get triggered.
+        */
+       mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
+       mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
+}
+
+static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       enum mhi_state state;
+       bool mhi_reset;
+       u32 count = 0;
+       int ret;
+
+       /* Wait for Host to set the M0 state */
+       do {
+               msleep(M0_WAIT_DELAY_MS);
+               mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
+               if (mhi_reset) {
+                       /* Clear the MHI reset if host is in reset state */
+                       mhi_ep_mmio_clear_reset(mhi_cntrl);
+                       dev_info(dev, "Detected Host reset while waiting for M0\n");
+               }
+               count++;
+       } while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT);
+
+       if (state != MHI_STATE_M0) {
+               dev_err(dev, "Host failed to enter M0\n");
+               return -ETIMEDOUT;
+       }
+
+       ret = mhi_ep_cache_host_cfg(mhi_cntrl);
+       if (ret) {
+               dev_err(dev, "Failed to cache host config\n");
+               return ret;
+       }
+
+       mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+       /* Enable all interrupts now */
+       mhi_ep_enable_int(mhi_cntrl);
+
+       return 0;
+}
+
+static void mhi_ep_cmd_ring_worker(struct work_struct *work)
+{
+       struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
+       struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       struct mhi_ring_element *el;
+       int ret;
+
+       /* Update the write offset for the ring */
+       ret = mhi_ep_update_wr_offset(ring);
+       if (ret) {
+               dev_err(dev, "Error updating write offset for ring\n");
+               return;
+       }
+
+       /* Sanity check to make sure there are elements in the ring */
+       if (ring->rd_offset == ring->wr_offset)
+               return;
+
+       /*
+        * Process command ring elements up to the write offset. In case of an error,
+        * just try to process the next element.
+        */
+       while (ring->rd_offset != ring->wr_offset) {
+               el = &ring->ring_cache[ring->rd_offset];
+
+               ret = mhi_ep_process_cmd_ring(ring, el);
+               if (ret)
+                       dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);
+
+               mhi_ep_ring_inc_index(ring);
+       }
+}
+
+static void mhi_ep_ch_ring_worker(struct work_struct *work)
+{
+       struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       struct mhi_ep_ring_item *itr, *tmp;
+       struct mhi_ring_element *el;
+       struct mhi_ep_ring *ring;
+       struct mhi_ep_chan *chan;
+       unsigned long flags;
+       LIST_HEAD(head);
+       int ret;
+
+       spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
+       list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
+       spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
+
+       /* Process each queued channel ring. In case of an error, just process the next element. */
+       list_for_each_entry_safe(itr, tmp, &head, node) {
+               list_del(&itr->node);
+               ring = itr->ring;
+
+               /* Update the write offset for the ring */
+               ret = mhi_ep_update_wr_offset(ring);
+               if (ret) {
+                       dev_err(dev, "Error updating write offset for ring\n");
+                       kfree(itr);
+                       continue;
+               }
+
+               /* Sanity check to make sure there are elements in the ring */
+               if (ring->rd_offset == ring->wr_offset) {
+                       kfree(itr);
+                       continue;
+               }
+
+               el = &ring->ring_cache[ring->rd_offset];
+               chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+
+               mutex_lock(&chan->lock);
+               dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
+               ret = mhi_ep_process_ch_ring(ring, el);
+               if (ret) {
+                       dev_err(dev, "Error processing ring for channel (%u): %d\n",
+                               ring->ch_id, ret);
+                       mutex_unlock(&chan->lock);
+                       kfree(itr);
+                       continue;
+               }
+
+               mutex_unlock(&chan->lock);
+               kfree(itr);
+       }
+}
+
+static void mhi_ep_state_worker(struct work_struct *work)
+{
+       struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       struct mhi_ep_state_transition *itr, *tmp;
+       unsigned long flags;
+       LIST_HEAD(head);
+       int ret;
+
+       spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
+       list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
+       spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
+
+       list_for_each_entry_safe(itr, tmp, &head, node) {
+               list_del(&itr->node);
+               dev_dbg(dev, "Handling MHI state transition to %s\n",
+                        mhi_state_str(itr->state));
+
+               switch (itr->state) {
+               case MHI_STATE_M0:
+                       ret = mhi_ep_set_m0_state(mhi_cntrl);
+                       if (ret)
+                               dev_err(dev, "Failed to transition to M0 state\n");
+                       break;
+               case MHI_STATE_M3:
+                       ret = mhi_ep_set_m3_state(mhi_cntrl);
+                       if (ret)
+                               dev_err(dev, "Failed to transition to M3 state\n");
+                       break;
+               default:
+                       dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
+                       break;
+               }
+               kfree(itr);
+       }
+}
+
+static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
+                                   u32 ch_idx)
+{
+       struct mhi_ep_ring_item *item;
+       struct mhi_ep_ring *ring;
+       bool work = !!ch_int;
+       LIST_HEAD(head);
+       u32 i;
+
+       /* First add the ring items to a local list */
+       for_each_set_bit(i, &ch_int, 32) {
+               /* Channel index varies for each register: 0, 32, 64, 96 */
+               u32 ch_id = ch_idx + i;
+
+               ring = &mhi_cntrl->mhi_chan[ch_id].ring;
+               item = kzalloc(sizeof(*item), GFP_ATOMIC);
+               if (!item)
+                       return;
+
+               item->ring = ring;
+               list_add_tail(&item->node, &head);
+       }
+
+       /* Now, splice the local list into ch_db_list and queue the work item */
+       if (work) {
+               spin_lock(&mhi_cntrl->list_lock);
+               list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
+               spin_unlock(&mhi_cntrl->list_lock);
+
+               queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
+       }
+}
+
+/*
+ * Channel interrupt statuses are contained in 4 registers, each 32 bits wide.
+ * To check all interrupts, we need to loop through each register and then
+ * check for the bits that are set.
+ */
+static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       u32 ch_int, ch_idx, i;
+
+       /* Bail out if there is no channel doorbell interrupt */
+       if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
+               return;
+
+       for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+               ch_idx = i * MHI_MASK_CH_LEN;
+
+               /* Only process channel interrupt if the mask is enabled */
+               ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
+               if (ch_int) {
+                       mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
+                       mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
+                                                       mhi_cntrl->chdb[i].status);
+               }
+       }
+}
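
For illustration, how a doorbell for a single channel maps onto these registers (the channel number is hypothetical):

/*
 * Example: the host rings the doorbell for channel 70:
 *
 *   register index i = 70 / 32 = 2 -> chdb[2].status, ch_idx = 2 * 32 = 64
 *   bit within the register = 70 % 32 = 6, so ch_id = ch_idx + 6 = 70
 *
 * mhi_ep_queue_channel_db() then queues mhi_chan[70].ring for the channel
 * ring worker.
 */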
+
+static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
+                                        enum mhi_state state)
+{
+       struct mhi_ep_state_transition *item;
+
+       item = kzalloc(sizeof(*item), GFP_ATOMIC);
+       if (!item)
+               return;
+
+       item->state = state;
+       spin_lock(&mhi_cntrl->list_lock);
+       list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
+       spin_unlock(&mhi_cntrl->list_lock);
+
+       queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
+}
+
+/*
+ * Interrupt handler that services interrupts raised by the host writing to
+ * MHICTRL and Command ring doorbell (CRDB) registers for state change and
+ * channel interrupts.
+ */
+static irqreturn_t mhi_ep_irq(int irq, void *data)
+{
+       struct mhi_ep_cntrl *mhi_cntrl = data;
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       enum mhi_state state;
+       u32 int_value;
+       bool mhi_reset;
+
+       /* Acknowledge the ctrl interrupt */
+       int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
+       mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);
+
+       /* Check for ctrl interrupt */
+       if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
+               dev_dbg(dev, "Processing ctrl interrupt\n");
+               mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
+               if (mhi_reset) {
+                       dev_info(dev, "Host triggered MHI reset!\n");
+                       disable_irq_nosync(mhi_cntrl->irq);
+                       schedule_work(&mhi_cntrl->reset_work);
+                       return IRQ_HANDLED;
+               }
+
+               mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
+       }
+
+       /* Check for command doorbell interrupt */
+       if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
+               dev_dbg(dev, "Processing command doorbell interrupt\n");
+               queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
+       }
+
+       /* Check for channel interrupts */
+       mhi_ep_check_channel_interrupt(mhi_cntrl);
+
+       return IRQ_HANDLED;
+}
+
+static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       struct mhi_ep_ring *ch_ring, *ev_ring;
+       struct mhi_result result = {};
+       struct mhi_ep_chan *mhi_chan;
+       int i;
+
+       /* Stop all the channels */
+       for (i = 0; i < mhi_cntrl->max_chan; i++) {
+               mhi_chan = &mhi_cntrl->mhi_chan[i];
+               if (!mhi_chan->ring.started)
+                       continue;
+
+               mutex_lock(&mhi_chan->lock);
+               /* Send channel disconnect status to client drivers */
+               if (mhi_chan->xfer_cb) {
+                       result.transaction_status = -ENOTCONN;
+                       result.bytes_xferd = 0;
+                       mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+               }
+
+               mhi_chan->state = MHI_CH_STATE_DISABLED;
+               mutex_unlock(&mhi_chan->lock);
+       }
+
+       flush_workqueue(mhi_cntrl->wq);
+
+       /* Destroy devices associated with all channels */
+       device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);
+
+       /* Stop and reset the transfer rings */
+       for (i = 0; i < mhi_cntrl->max_chan; i++) {
+               mhi_chan = &mhi_cntrl->mhi_chan[i];
+               if (!mhi_chan->ring.started)
+                       continue;
+
+               ch_ring = &mhi_cntrl->mhi_chan[i].ring;
+               mutex_lock(&mhi_chan->lock);
+               mhi_ep_ring_reset(mhi_cntrl, ch_ring);
+               mutex_unlock(&mhi_chan->lock);
+       }
+
+       /* Stop and reset the event rings */
+       for (i = 0; i < mhi_cntrl->event_rings; i++) {
+               ev_ring = &mhi_cntrl->mhi_event[i].ring;
+               if (!ev_ring->started)
+                       continue;
+
+               mutex_lock(&mhi_cntrl->event_lock);
+               mhi_ep_ring_reset(mhi_cntrl, ev_ring);
+               mutex_unlock(&mhi_cntrl->event_lock);
+       }
+
+       /* Stop and reset the command ring */
+       mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);
+
+       mhi_ep_free_host_cfg(mhi_cntrl);
+       mhi_ep_mmio_mask_interrupts(mhi_cntrl);
+
+       mhi_cntrl->enabled = false;
+}
+
+static void mhi_ep_reset_worker(struct work_struct *work)
+{
+       struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       enum mhi_state cur_state;
+       int ret;
+
+       mhi_ep_abort_transfer(mhi_cntrl);
+
+       spin_lock_bh(&mhi_cntrl->state_lock);
+       /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
+       mhi_ep_mmio_reset(mhi_cntrl);
+       cur_state = mhi_cntrl->mhi_state;
+       spin_unlock_bh(&mhi_cntrl->state_lock);
+
+       /*
+        * Only proceed further if the reset is due to SYS_ERR. The host will
+        * also issue a reset during shutdown, and we don't need to re-init in
+        * that case.
+        */
+       if (cur_state == MHI_STATE_SYS_ERR) {
+               mhi_ep_mmio_init(mhi_cntrl);
+
+               /* Set AMSS EE before signaling ready state */
+               mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+               /* All set, notify the host that we are ready */
+               ret = mhi_ep_set_ready_state(mhi_cntrl);
+               if (ret)
+                       return;
+
+               dev_dbg(dev, "READY state notification sent to the host\n");
+
+               ret = mhi_ep_enable(mhi_cntrl);
+               if (ret) {
+                       dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret);
+                       return;
+               }
+
+               enable_irq(mhi_cntrl->irq);
+       }
+}
+
+/*
+ * We don't need to do anything special other than setting the MHI SYS_ERR
+ * state. The host will reset all contexts and issue MHI RESET so that we
+ * can also recover from the error state.
+ */
+void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       int ret;
+
+       ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+       if (ret)
+               return;
+
+       /* Signal host that the device went to SYS_ERR state */
+       ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
+       if (ret)
+               dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
+}
+
+int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       int ret, i;
+
+       /*
+        * Mask all interrupts until the state machine is ready. Interrupts will
+        * be enabled later with mhi_ep_enable().
+        */
+       mhi_ep_mmio_mask_interrupts(mhi_cntrl);
+       mhi_ep_mmio_init(mhi_cntrl);
+
+       mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
+                                       GFP_KERNEL);
+       if (!mhi_cntrl->mhi_event)
+               return -ENOMEM;
+
+       /* Initialize command, channel and event rings */
+       mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
+       for (i = 0; i < mhi_cntrl->max_chan; i++)
+               mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
+       for (i = 0; i < mhi_cntrl->event_rings; i++)
+               mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);
+
+       mhi_cntrl->mhi_state = MHI_STATE_RESET;
+
+       /* Set AMSS EE before signaling ready state */
+       mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+       /* All set, notify the host that we are ready */
+       ret = mhi_ep_set_ready_state(mhi_cntrl);
+       if (ret)
+               goto err_free_event;
+
+       dev_dbg(dev, "READY state notification sent to the host\n");
+
+       ret = mhi_ep_enable(mhi_cntrl);
+       if (ret) {
+               dev_err(dev, "Failed to enable MHI endpoint\n");
+               goto err_free_event;
+       }
+
+       enable_irq(mhi_cntrl->irq);
+       mhi_cntrl->enabled = true;
+
+       return 0;
+
+err_free_event:
+       kfree(mhi_cntrl->mhi_event);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_power_up);
+
+void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       if (mhi_cntrl->enabled)
+               mhi_ep_abort_transfer(mhi_cntrl);
+
+       kfree(mhi_cntrl->mhi_event);
+       disable_irq(mhi_cntrl->irq);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_power_down);
+
+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       struct mhi_ep_chan *mhi_chan;
+       u32 tmp;
+       int i;
+
+       for (i = 0; i < mhi_cntrl->max_chan; i++) {
+               mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+               if (!mhi_chan->mhi_dev)
+                       continue;
+
+               mutex_lock(&mhi_chan->lock);
+               /* Skip if the channel is not currently running */
+               tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
+               if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
+                       mutex_unlock(&mhi_chan->lock);
+                       continue;
+               }
+
+               dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
+               /* Set channel state to SUSPENDED */
+               tmp &= ~CHAN_CTX_CHSTATE_MASK;
+               tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
+               mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+               mutex_unlock(&mhi_chan->lock);
+       }
+}
+
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       struct mhi_ep_chan *mhi_chan;
+       u32 tmp;
+       int i;
+
+       for (i = 0; i < mhi_cntrl->max_chan; i++) {
+               mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+               if (!mhi_chan->mhi_dev)
+                       continue;
+
+               mutex_lock(&mhi_chan->lock);
+               /* Skip if the channel is not currently suspended */
+               tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
+               if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
+                       mutex_unlock(&mhi_chan->lock);
+                       continue;
+               }
+
+               dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
+               /* Set channel state to RUNNING */
+               tmp &= ~CHAN_CTX_CHSTATE_MASK;
+               tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
+               mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+               mutex_unlock(&mhi_chan->lock);
+       }
+}
+
+static void mhi_ep_release_device(struct device *dev)
+{
+       struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+
+       if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+               mhi_dev->mhi_cntrl->mhi_dev = NULL;
+
+       /*
+        * We need to set mhi_chan->mhi_dev to NULL here since the MHI
+        * devices for the channels will only get created in mhi_ep_create_device()
+        * if the associated mhi_dev is NULL.
+        */
+       if (mhi_dev->ul_chan)
+               mhi_dev->ul_chan->mhi_dev = NULL;
+
+       if (mhi_dev->dl_chan)
+               mhi_dev->dl_chan->mhi_dev = NULL;
+
+       kfree(mhi_dev);
+}
+
+static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
+                                                enum mhi_device_type dev_type)
+{
+       struct mhi_ep_device *mhi_dev;
+       struct device *dev;
+
+       mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
+       if (!mhi_dev)
+               return ERR_PTR(-ENOMEM);
+
+       dev = &mhi_dev->dev;
+       device_initialize(dev);
+       dev->bus = &mhi_ep_bus_type;
+       dev->release = mhi_ep_release_device;
+
+       /* Controller device is always allocated first */
+       if (dev_type == MHI_DEVICE_CONTROLLER)
+               /* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
+               dev->parent = mhi_cntrl->cntrl_dev;
+       else
+               /* for MHI client devices, parent is the MHI controller device */
+               dev->parent = &mhi_cntrl->mhi_dev->dev;
+
+       mhi_dev->mhi_cntrl = mhi_cntrl;
+       mhi_dev->dev_type = dev_type;
+
+       return mhi_dev;
+}
+
+/*
+ * MHI channels are always defined in pairs, with UL as the even numbered
+ * channel and DL as the odd numbered one. This function takes the UL channel
+ * (primary) as ch_id and always looks at the next entry in the channel list
+ * for the corresponding DL channel (secondary).
+ */
+static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+       struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
+       struct device *dev = mhi_cntrl->cntrl_dev;
+       struct mhi_ep_device *mhi_dev;
+       int ret;
+
+       /* Check if the channel name is the same for both UL and DL */
+       if (strcmp(mhi_chan->name, mhi_chan[1].name)) {
+               dev_err(dev, "UL and DL channel names are not same: (%s) != (%s)\n",
+                       mhi_chan->name, mhi_chan[1].name);
+               return -EINVAL;
+       }
+
+       mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
+       if (IS_ERR(mhi_dev))
+               return PTR_ERR(mhi_dev);
+
+       /* Configure primary channel */
+       mhi_dev->ul_chan = mhi_chan;
+       get_device(&mhi_dev->dev);
+       mhi_chan->mhi_dev = mhi_dev;
+
+       /* Configure secondary channel as well */
+       mhi_chan++;
+       mhi_dev->dl_chan = mhi_chan;
+       get_device(&mhi_dev->dev);
+       mhi_chan->mhi_dev = mhi_dev;
+
+       /* The channel name is the same for both UL and DL */
+       mhi_dev->name = mhi_chan->name;
+       dev_set_name(&mhi_dev->dev, "%s_%s",
+                    dev_name(&mhi_cntrl->mhi_dev->dev),
+                    mhi_dev->name);
+
+       ret = device_add(&mhi_dev->dev);
+       if (ret)
+               put_device(&mhi_dev->dev);
+
+       return ret;
+}
+
+static int mhi_ep_destroy_device(struct device *dev, void *data)
+{
+       struct mhi_ep_device *mhi_dev;
+       struct mhi_ep_cntrl *mhi_cntrl;
+       struct mhi_ep_chan *ul_chan, *dl_chan;
+
+       if (dev->bus != &mhi_ep_bus_type)
+               return 0;
+
+       mhi_dev = to_mhi_ep_device(dev);
+       mhi_cntrl = mhi_dev->mhi_cntrl;
+
+       /* Only destroy devices created for channels */
+       if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+               return 0;
+
+       ul_chan = mhi_dev->ul_chan;
+       dl_chan = mhi_dev->dl_chan;
+
+       if (ul_chan)
+               put_device(&ul_chan->mhi_dev->dev);
+
+       if (dl_chan)
+               put_device(&dl_chan->mhi_dev->dev);
+
+       dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
+                mhi_dev->name);
+
+       /* Notify the client and remove the device from MHI bus */
+       device_del(dev);
+       put_device(dev);
+
+       return 0;
+}
+
+static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
+                           const struct mhi_ep_cntrl_config *config)
+{
+       const struct mhi_ep_channel_config *ch_cfg;
+       struct device *dev = mhi_cntrl->cntrl_dev;
+       u32 chan, i;
+       int ret = -EINVAL;
+
+       mhi_cntrl->max_chan = config->max_channels;
+
+       /*
+        * Allocate max_channels supported by the MHI endpoint and populate
+        * only the defined channels
+        */
+       mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
+                                     GFP_KERNEL);
+       if (!mhi_cntrl->mhi_chan)
+               return -ENOMEM;
+
+       for (i = 0; i < config->num_channels; i++) {
+               struct mhi_ep_chan *mhi_chan;
+
+               ch_cfg = &config->ch_cfg[i];
+
+               chan = ch_cfg->num;
+               if (chan >= mhi_cntrl->max_chan) {
+                       dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n",
+                               chan, mhi_cntrl->max_chan);
+                       goto error_chan_cfg;
+               }
+
+               /* Bi-directional and directionless channels are not supported */
+               if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) {
+                       dev_err(dev, "Invalid direction (%u) for channel (%u)\n",
+                               ch_cfg->dir, chan);
+                       goto error_chan_cfg;
+               }
+
+               mhi_chan = &mhi_cntrl->mhi_chan[chan];
+               mhi_chan->name = ch_cfg->name;
+               mhi_chan->chan = chan;
+               mhi_chan->dir = ch_cfg->dir;
+               mutex_init(&mhi_chan->lock);
+       }
+
+       return 0;
+
+error_chan_cfg:
+       kfree(mhi_cntrl->mhi_chan);
+
+       return ret;
+}
+
+/*
+ * Allocate channel and command rings here. Event rings will be allocated
+ * in mhi_ep_power_up() as the config comes from the host.
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+                               const struct mhi_ep_cntrl_config *config)
+{
+       struct mhi_ep_device *mhi_dev;
+       int ret;
+
+       if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
+               return -EINVAL;
+
+       ret = mhi_ep_chan_init(mhi_cntrl, config);
+       if (ret)
+               return ret;
+
+       mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
+       if (!mhi_cntrl->mhi_cmd) {
+               ret = -ENOMEM;
+               goto err_free_ch;
+       }
+
+       INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
+       INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
+       INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
+       INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);
+
+       mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
+       if (!mhi_cntrl->wq) {
+               ret = -ENOMEM;
+               goto err_free_cmd;
+       }
+
+       INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
+       INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
+       spin_lock_init(&mhi_cntrl->state_lock);
+       spin_lock_init(&mhi_cntrl->list_lock);
+       mutex_init(&mhi_cntrl->event_lock);
+
+       /* Set MHI version and AMSS EE before enumeration */
+       mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
+       mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+       /* Set controller index */
+       ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
+       if (ret < 0)
+               goto err_destroy_wq;
+
+       mhi_cntrl->index = ret;
+
+       irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
+       ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
+                         "doorbell_irq", mhi_cntrl);
+       if (ret) {
+               dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
+               goto err_ida_free;
+       }
+
+       /* Allocate the controller device */
+       mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
+       if (IS_ERR(mhi_dev)) {
+               dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
+               ret = PTR_ERR(mhi_dev);
+               goto err_free_irq;
+       }
+
+       dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
+       mhi_dev->name = dev_name(&mhi_dev->dev);
+       mhi_cntrl->mhi_dev = mhi_dev;
+
+       ret = device_add(&mhi_dev->dev);
+       if (ret)
+               goto err_put_dev;
+
+       dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");
+
+       return 0;
+
+err_put_dev:
+       put_device(&mhi_dev->dev);
+err_free_irq:
+       free_irq(mhi_cntrl->irq, mhi_cntrl);
+err_ida_free:
+       ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
+err_destroy_wq:
+       destroy_workqueue(mhi_cntrl->wq);
+err_free_cmd:
+       kfree(mhi_cntrl->mhi_cmd);
+err_free_ch:
+       kfree(mhi_cntrl->mhi_chan);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_register_controller);
+
+/*
+ * It is expected that the controller drivers will power down the MHI EP stack
+ * using "mhi_ep_power_down()" before calling this function to unregister themselves.
+ */
+void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+       destroy_workqueue(mhi_cntrl->wq);
+
+       free_irq(mhi_cntrl->irq, mhi_cntrl);
+
+       kfree(mhi_cntrl->mhi_cmd);
+       kfree(mhi_cntrl->mhi_chan);
+
+       device_del(&mhi_dev->dev);
+       put_device(&mhi_dev->dev);
+
+       ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);
+
+static int mhi_ep_driver_probe(struct device *dev)
+{
+       struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+       struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
+       struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
+       struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;
+
+       ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+       dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
+
+       return mhi_drv->probe(mhi_dev, mhi_dev->id);
+}
+
+static int mhi_ep_driver_remove(struct device *dev)
+{
+       struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+       struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
+       struct mhi_result result = {};
+       struct mhi_ep_chan *mhi_chan;
+       int dir;
+
+       /* Skip if it is a controller device */
+       if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+               return 0;
+
+       /* Disconnect the channels associated with the driver */
+       for (dir = 0; dir < 2; dir++) {
+               mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+               if (!mhi_chan)
+                       continue;
+
+               mutex_lock(&mhi_chan->lock);
+               /* Send channel disconnect status to the client driver */
+               if (mhi_chan->xfer_cb) {
+                       result.transaction_status = -ENOTCONN;
+                       result.bytes_xferd = 0;
+                       mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+               }
+
+               mhi_chan->state = MHI_CH_STATE_DISABLED;
+               mhi_chan->xfer_cb = NULL;
+               mutex_unlock(&mhi_chan->lock);
+       }
+
+       /* Remove the client driver now */
+       mhi_drv->remove(mhi_dev);
+
+       return 0;
+}
+
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
+{
+       struct device_driver *driver = &mhi_drv->driver;
+
+       if (!mhi_drv->probe || !mhi_drv->remove)
+               return -EINVAL;
+
+       /* Client drivers should have callbacks defined for both channels */
+       if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb)
+               return -EINVAL;
+
+       driver->bus = &mhi_ep_bus_type;
+       driver->owner = owner;
+       driver->probe = mhi_ep_driver_probe;
+       driver->remove = mhi_ep_driver_remove;
+
+       return driver_register(driver);
+}
+EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);
+
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
+{
+       driver_unregister(&mhi_drv->driver);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);
+
+static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+
+       return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
+                                       mhi_dev->name);
+}
+
+static int mhi_ep_match(struct device *dev, struct device_driver *drv)
+{
+       struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+       struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
+       const struct mhi_device_id *id;
+
+       /*
+        * If the device is a controller type, then there is no client driver
+        * associated with it.
+        */
+       if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+               return 0;
+
+       for (id = mhi_drv->id_table; id->chan[0]; id++)
+               if (!strcmp(mhi_dev->name, id->chan)) {
+                       mhi_dev->id = id;
+                       return 1;
+               }
+
+       return 0;
+};
+
+struct bus_type mhi_ep_bus_type = {
+       .name = "mhi_ep",
+       .dev_name = "mhi_ep",
+       .match = mhi_ep_match,
+       .uevent = mhi_ep_uevent,
+};
+
+static int __init mhi_ep_init(void)
+{
+       return bus_register(&mhi_ep_bus_type);
+}
+
+static void __exit mhi_ep_exit(void)
+{
+       bus_unregister(&mhi_ep_bus_type);
+}
+
+postcore_initcall(mhi_ep_init);
+module_exit(mhi_ep_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI Bus Endpoint stack");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
diff --git a/drivers/bus/mhi/ep/mmio.c b/drivers/bus/mhi/ep/mmio.c
new file mode 100644 (file)
index 0000000..b5bfd22
--- /dev/null
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/mhi_ep.h>
+
+#include "internal.h"
+
+u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset)
+{
+       return readl(mhi_cntrl->mmio + offset);
+}
+
+void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val)
+{
+       writel(val, mhi_cntrl->mmio + offset);
+}
+
+void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val)
+{
+       u32 regval;
+
+       regval = mhi_ep_mmio_read(mhi_cntrl, offset);
+       regval &= ~mask;
+       regval |= (val << __ffs(mask)) & mask;
+       mhi_ep_mmio_write(mhi_cntrl, offset, regval);
+}
+
+u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask)
+{
+       u32 regval;
+
+       regval = mhi_ep_mmio_read(dev, offset);
+       regval &= mask;
+       regval >>= __ffs(mask);
+
+       return regval;
+}
+
+void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state,
+                               bool *mhi_reset)
+{
+       u32 regval;
+
+       regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICTRL);
+       *state = FIELD_GET(MHICTRL_MHISTATE_MASK, regval);
+       *mhi_reset = !!FIELD_GET(MHICTRL_RESET_MASK, regval);
+}
+
+static void mhi_ep_mmio_set_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id, bool enable)
+{
+       u32 chid_mask, chid_shift, chdb_idx, val;
+
+       chid_shift = ch_id % 32;
+       chid_mask = BIT(chid_shift);
+       chdb_idx = ch_id / 32;
+
+       val = enable ? 1 : 0;
+
+       mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(chdb_idx), chid_mask, val);
+
+       /* Update the local copy of the channel mask */
+       mhi_cntrl->chdb[chdb_idx].mask &= ~chid_mask;
+       mhi_cntrl->chdb[chdb_idx].mask |= val << chid_shift;
+}
+
+void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+       mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, true);
+}
+
+void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+       mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, false);
+}
+
+static void mhi_ep_mmio_set_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
+{
+       u32 val, i;
+
+       val = enable ? MHI_CHDB_INT_MASK_n_EN_ALL : 0;
+
+       for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+               mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(i), val);
+               mhi_cntrl->chdb[i].mask = val;
+       }
+}
+
+void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, true);
+}
+
+static void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, false);
+}
+
+bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       bool chdb = false;
+       u32 i;
+
+       for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+               mhi_cntrl->chdb[i].status = mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_STATUS_n(i));
+               if (mhi_cntrl->chdb[i].status)
+                       chdb = true;
+       }
+
+       /* Return whether a channel doorbell interrupt occurred or not */
+       return chdb;
+}
+
+static void mhi_ep_mmio_set_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
+{
+       u32 val, i;
+
+       val = enable ? MHI_ERDB_INT_MASK_n_EN_ALL : 0;
+
+       for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++)
+               mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_MASK_n(i), val);
+}
+
+static void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, false);
+}
+
+void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+                                 MHI_CTRL_MHICTRL_MASK, 1);
+}
+
+void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+                                 MHI_CTRL_MHICTRL_MASK, 0);
+}
+
+void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+                                 MHI_CTRL_CRDB_MASK, 1);
+}
+
+void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+                                 MHI_CTRL_CRDB_MASK, 0);
+}
+
+void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       mhi_ep_mmio_disable_ctrl_interrupt(mhi_cntrl);
+       mhi_ep_mmio_disable_cmdb_interrupt(mhi_cntrl);
+       mhi_ep_mmio_mask_chdb_interrupts(mhi_cntrl);
+       mhi_ep_mmio_mask_erdb_interrupts(mhi_cntrl);
+}
+
+static void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       u32 i;
+
+       for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++)
+               mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
+                                  MHI_CHDB_INT_CLEAR_n_CLEAR_ALL);
+
+       for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++)
+               mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_CLEAR_n(i),
+                                  MHI_ERDB_INT_CLEAR_n_CLEAR_ALL);
+
+       mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR,
+                          MHI_CTRL_INT_MMIO_WR_CLEAR |
+                          MHI_CTRL_INT_CRDB_CLEAR |
+                          MHI_CTRL_INT_CRDB_MHICTRL_CLEAR);
+}
+
+void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       u32 regval;
+
+       regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_HIGHER);
+       mhi_cntrl->ch_ctx_host_pa = regval;
+       mhi_cntrl->ch_ctx_host_pa <<= 32;
+
+       regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_LOWER);
+       mhi_cntrl->ch_ctx_host_pa |= regval;
+}
+
+void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       u32 regval;
+
+       regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_HIGHER);
+       mhi_cntrl->ev_ctx_host_pa = regval;
+       mhi_cntrl->ev_ctx_host_pa <<= 32;
+
+       regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_LOWER);
+       mhi_cntrl->ev_ctx_host_pa |= regval;
+}
+
+void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       u32 regval;
+
+       regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_HIGHER);
+       mhi_cntrl->cmd_ctx_host_pa = regval;
+       mhi_cntrl->cmd_ctx_host_pa <<= 32;
+
+       regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_LOWER);
+       mhi_cntrl->cmd_ctx_host_pa |= regval;
+}
+
+u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring)
+{
+       struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+       u64 db_offset;
+       u32 regval;
+
+       regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h);
+       db_offset = regval;
+       db_offset <<= 32;
+
+       regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l);
+       db_offset |= regval;
+
+       return db_offset;
+}
+
+void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value)
+{
+       mhi_ep_mmio_write(mhi_cntrl, EP_BHI_EXECENV, value);
+}
+
+void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHICTRL, MHICTRL_RESET_MASK, 0);
+}
+
+void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       mhi_ep_mmio_write(mhi_cntrl, EP_MHICTRL, 0);
+       mhi_ep_mmio_write(mhi_cntrl, EP_MHISTATUS, 0);
+       mhi_ep_mmio_clear_interrupts(mhi_cntrl);
+}
+
+void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       u32 regval;
+
+       mhi_cntrl->chdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_CHDBOFF);
+       mhi_cntrl->erdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_ERDBOFF);
+
+       regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG);
+       mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval);
+       mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval);
+
+       mhi_ep_mmio_reset(mhi_cntrl);
+}
+
+void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       u32 regval;
+
+       regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG);
+       mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval);
+       mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval);
+}
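
The masked helpers above follow the usual read-modify-write pattern: clear the field, then shift the new value up by the position of the mask's lowest set bit. A standalone userspace sketch of the same arithmetic (ffs() standing in for the kernel's __ffs(), hence the -1 to get a 0-based bit index):

/* Standalone sketch -- not kernel code */
#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

static uint32_t masked_write(uint32_t regval, uint32_t mask, uint32_t val)
{
	regval &= ~mask;
	regval |= (val << (ffs(mask) - 1)) & mask;
	return regval;
}

int main(void)
{
	/* 4-bit field at bits [7:4]: writing 0x5 into 0xabcd12ff gives 0xabcd125f */
	printf("0x%08x\n", masked_write(0xabcd12ff, 0x000000f0, 0x5));
	return 0;
}
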
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
new file mode 100644 (file)
index 0000000..115518e
--- /dev/null
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/mhi_ep.h>
+#include "internal.h"
+
+size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr)
+{
+       return (ptr - ring->rbase) / sizeof(struct mhi_ring_element);
+}
+
+static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring)
+{
+       __le64 rlen;
+
+       memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64));
+
+       return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element);
+}
+
+void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring)
+{
+       ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size;
+}
+
+static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
+{
+       struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       size_t start, copy_size;
+       int ret;
+
+       /* Don't proceed in the case of an event ring. This happens during mhi_ep_ring_start(). */
+       if (ring->type == RING_TYPE_ER)
+               return 0;
+
+       /* No need to cache the ring if write pointer is unmodified */
+       if (ring->wr_offset == end)
+               return 0;
+
+       start = ring->wr_offset;
+       if (start < end) {
+               copy_size = (end - start) * sizeof(struct mhi_ring_element);
+               ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
+                                               (start * sizeof(struct mhi_ring_element)),
+                                               &ring->ring_cache[start], copy_size);
+               if (ret < 0)
+                       return ret;
+       } else {
+               copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
+               ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
+                                               (start * sizeof(struct mhi_ring_element)),
+                                               &ring->ring_cache[start], copy_size);
+               if (ret < 0)
+                       return ret;
+
+               if (end) {
+                       ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase,
+                                                       &ring->ring_cache[0],
+                                                       end * sizeof(struct mhi_ring_element));
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+
+       dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size);
+
+       return 0;
+}
+
+static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr)
+{
+       size_t wr_offset;
+       int ret;
+
+       wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr);
+
+       /* Cache the host ring up to the write offset */
+       ret = __mhi_ep_cache_ring(ring, wr_offset);
+       if (ret)
+               return ret;
+
+       ring->wr_offset = wr_offset;
+
+       return 0;
+}
+
+int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring)
+{
+       u64 wr_ptr;
+
+       wr_ptr = mhi_ep_mmio_get_db(ring);
+
+       return mhi_ep_cache_ring(ring, wr_ptr);
+}
+
+/* TODO: Support for adding multiple ring elements to the ring */
+int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+{
+       struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       size_t old_offset = 0;
+       u32 num_free_elem;
+       __le64 rp;
+       int ret;
+
+       ret = mhi_ep_update_wr_offset(ring);
+       if (ret) {
+               dev_err(dev, "Error updating write pointer\n");
+               return ret;
+       }
+
+       if (ring->rd_offset < ring->wr_offset)
+               num_free_elem = (ring->wr_offset - ring->rd_offset) - 1;
+       else
+               num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1;
+
+       /* Check if there is space in the ring for adding at least one element */
+       if (!num_free_elem) {
+               dev_err(dev, "No space left in the ring\n");
+               return -ENOSPC;
+       }
+
+       old_offset = ring->rd_offset;
+       mhi_ep_ring_inc_index(ring);
+
+       dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
+
+       /* Update rp in ring context */
+       rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
+       memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
+
+       ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)),
+                                      sizeof(*el));
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
+{
+       ring->type = type;
+       if (ring->type == RING_TYPE_CMD) {
+               ring->db_offset_h = EP_CRDB_HIGHER;
+               ring->db_offset_l = EP_CRDB_LOWER;
+       } else if (ring->type == RING_TYPE_CH) {
+               ring->db_offset_h = CHDB_HIGHER_n(id);
+               ring->db_offset_l = CHDB_LOWER_n(id);
+               ring->ch_id = id;
+       } else {
+               ring->db_offset_h = ERDB_HIGHER_n(id);
+               ring->db_offset_l = ERDB_LOWER_n(id);
+       }
+}
+
+int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+                       union mhi_ep_ring_ctx *ctx)
+{
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       __le64 val;
+       int ret;
+
+       ring->mhi_cntrl = mhi_cntrl;
+       ring->ring_ctx = ctx;
+       ring->ring_size = mhi_ep_ring_num_elems(ring);
+       memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rbase, sizeof(u64));
+       ring->rbase = le64_to_cpu(val);
+
+       if (ring->type == RING_TYPE_CH)
+               ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);
+
+       if (ring->type == RING_TYPE_ER)
+               ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);
+
+       /* During ring init, both rp and wp are equal */
+       memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
+       ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
+       ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
+
+       /* Allocate ring cache memory for holding a copy of the host ring */
+       ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL);
+       if (!ring->ring_cache)
+               return -ENOMEM;
+
+       memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64));
+       ret = mhi_ep_cache_ring(ring, le64_to_cpu(val));
+       if (ret) {
+               dev_err(dev, "Failed to cache ring\n");
+               kfree(ring->ring_cache);
+               return ret;
+       }
+
+       ring->started = true;
+
+       return 0;
+}
+
+void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
+{
+       ring->started = false;
+       kfree(ring->ring_cache);
+       ring->ring_cache = NULL;
+}
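
The rings above are circular arrays of struct mhi_ring_element indexed by element offsets that wrap at ring_size, with one slot always kept free so that a full ring can be distinguished from an empty one. A standalone sketch of the free-element computation used by mhi_ep_ring_add_element():

/* Standalone sketch -- not kernel code */
#include <stdint.h>
#include <stdio.h>

static uint32_t num_free_elem(uint32_t rd, uint32_t wr, uint32_t size)
{
	if (rd < wr)
		return (wr - rd) - 1;
	return ((size - rd) + wr) - 1;
}

int main(void)
{
	/* 128-element ring: read offset 120, write offset 4 -> 11 slots free */
	printf("%u free\n", num_free_elem(120, 4, 128));
	return 0;
}
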
diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c
new file mode 100644 (file)
index 0000000..3655c19
--- /dev/null
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/mhi_ep.h>
+#include "internal.h"
+
+bool __must_check mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl,
+                                        enum mhi_state cur_mhi_state,
+                                        enum mhi_state mhi_state)
+{
+       if (mhi_state == MHI_STATE_SYS_ERR)
+               return true;    /* Allowed in any state */
+
+       if (mhi_state == MHI_STATE_READY)
+               return cur_mhi_state == MHI_STATE_RESET;
+
+       if (mhi_state == MHI_STATE_M0)
+               return cur_mhi_state == MHI_STATE_M3 || cur_mhi_state == MHI_STATE_READY;
+
+       if (mhi_state == MHI_STATE_M3)
+               return cur_mhi_state == MHI_STATE_M0;
+
+       return false;
+}
+
+int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state)
+{
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+       if (!mhi_ep_check_mhi_state(mhi_cntrl, mhi_cntrl->mhi_state, mhi_state)) {
+               dev_err(dev, "MHI state change to %s from %s is not allowed!\n",
+                       mhi_state_str(mhi_state),
+                       mhi_state_str(mhi_cntrl->mhi_state));
+               return -EACCES;
+       }
+
+       /* TODO: Add support for M1 and M2 states */
+       if (mhi_state == MHI_STATE_M1 || mhi_state == MHI_STATE_M2) {
+               dev_err(dev, "MHI state (%s) not supported\n", mhi_state_str(mhi_state));
+               return -EOPNOTSUPP;
+       }
+
+       mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK, mhi_state);
+       mhi_cntrl->mhi_state = mhi_state;
+
+       if (mhi_state == MHI_STATE_READY)
+               mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK, 1);
+
+       if (mhi_state == MHI_STATE_SYS_ERR)
+               mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_SYSERR_MASK, 1);
+
+       return 0;
+}
+
+int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       enum mhi_state old_state;
+       int ret;
+
+       /* If MHI is in M3, resume suspended channels */
+       spin_lock_bh(&mhi_cntrl->state_lock);
+       old_state = mhi_cntrl->mhi_state;
+       if (old_state == MHI_STATE_M3)
+               mhi_ep_resume_channels(mhi_cntrl);
+
+       ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+       spin_unlock_bh(&mhi_cntrl->state_lock);
+
+       if (ret) {
+               mhi_ep_handle_syserr(mhi_cntrl);
+               return ret;
+       }
+
+       /* Signal host that the device moved to M0 */
+       ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
+       if (ret) {
+               dev_err(dev, "Failed sending M0 state change event\n");
+               return ret;
+       }
+
+       if (old_state == MHI_STATE_READY) {
+               /* Send AMSS EE event to host */
+               ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS);
+               if (ret) {
+                       dev_err(dev, "Failed sending AMSS EE event\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       int ret;
+
+       spin_lock_bh(&mhi_cntrl->state_lock);
+       ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+       spin_unlock_bh(&mhi_cntrl->state_lock);
+
+       if (ret) {
+               mhi_ep_handle_syserr(mhi_cntrl);
+               return ret;
+       }
+
+       mhi_ep_suspend_channels(mhi_cntrl);
+
+       /* Signal host that the device moved to M3 */
+       ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
+       if (ret) {
+               dev_err(dev, "Failed sending M3 state change event\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       enum mhi_state mhi_state;
+       int ret, is_ready;
+
+       spin_lock_bh(&mhi_cntrl->state_lock);
+       /* Ensure that the MHISTATUS is set to RESET by host */
+       mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK);
+       is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK);
+
+       if (mhi_state != MHI_STATE_RESET || is_ready) {
+               dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
+               spin_unlock_bh(&mhi_cntrl->state_lock);
+               return -EIO;
+       }
+
+       ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
+       spin_unlock_bh(&mhi_cntrl->state_lock);
+
+       if (ret)
+               mhi_ep_handle_syserr(mhi_cntrl);
+
+       return ret;
+}
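
Putting the pieces together, a controller driver (for example a PCIe endpoint function driver) fills in its resources, registers the stack and powers it up once the host side is ready. The sketch below uses only fields and calls visible in this series; the channel layout, names and MHI version value are illustrative assumptions, not part of this patch.

/* Illustrative sketch only -- not part of this patch */
#include <linux/dma-direction.h>
#include <linux/kernel.h>
#include <linux/mhi_ep.h>

static const struct mhi_ep_channel_config sample_ch_cfg[] = {
	{ .num = 0, .name = "LOOPBACK", .dir = DMA_TO_DEVICE },
	{ .num = 1, .name = "LOOPBACK", .dir = DMA_FROM_DEVICE },
};

static const struct mhi_ep_cntrl_config sample_config = {
	.mhi_version = 0x1000000,	/* illustrative value for the advertised MHI version */
	.max_channels = 128,
	.num_channels = ARRAY_SIZE(sample_ch_cfg),
	.ch_cfg = sample_ch_cfg,
};

static int sample_cntrl_start(struct mhi_ep_cntrl *mhi_cntrl)
{
	int ret;

	/*
	 * cntrl_dev, mmio and irq must already be populated (otherwise
	 * mhi_ep_register_controller() returns -EINVAL), along with the
	 * host-memory access callbacks used by ring.c.
	 */
	ret = mhi_ep_register_controller(mhi_cntrl, &sample_config);
	if (ret)
		return ret;

	/* Start MHI once the underlying transport (e.g. PCIe link) is up */
	return mhi_ep_power_up(mhi_cntrl);
}
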
index b0da7ca..26d0edd 100644 (file)
@@ -19,8 +19,8 @@
 #include "internal.h"
 
 /* Setup RDDM vector table for RDDM transfer and program RXVEC */
-void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
-                     struct image_info *img_info)
+int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+                    struct image_info *img_info)
 {
        struct mhi_buf *mhi_buf = img_info->mhi_buf;
        struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
@@ -28,6 +28,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        u32 sequence_id;
        unsigned int i;
+       int ret;
 
        for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
                bhi_vec->dma_addr = mhi_buf->dma_addr;
@@ -45,11 +46,17 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
        mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
        sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);
 
-       mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
-                           BHIE_RXVECDB_SEQNUM_BMSK, sequence_id);
+       ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
+                                 BHIE_RXVECDB_SEQNUM_BMSK, sequence_id);
+       if (ret) {
+               dev_err(dev, "Failed to write sequence ID for BHIE_RXVECDB\n");
+               return ret;
+       }
 
        dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n",
                &mhi_buf->dma_addr, mhi_buf->len, sequence_id);
+
+       return 0;
 }
 
 /* Collect RDDM buffer during kernel panic */
@@ -198,10 +205,13 @@ static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
 
        mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
 
-       mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
-                           BHIE_TXVECDB_SEQNUM_BMSK, sequence_id);
+       ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
+                                 BHIE_TXVECDB_SEQNUM_BMSK, sequence_id);
        read_unlock_bh(pm_lock);
 
+       if (ret)
+               return ret;
+
        /* Wait for the image download to complete */
        ret = wait_event_timeout(mhi_cntrl->state_event,
                                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
index a665b8e..c137d55 100644 (file)
@@ -86,7 +86,7 @@ static ssize_t serial_number_show(struct device *dev,
        struct mhi_device *mhi_dev = to_mhi_device(dev);
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
 
-       return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n",
+       return sysfs_emit(buf, "Serial Number: %u\n",
                        mhi_cntrl->serial_number);
 }
 static DEVICE_ATTR_RO(serial_number);
@@ -100,17 +100,30 @@ static ssize_t oem_pk_hash_show(struct device *dev,
        int i, cnt = 0;
 
        for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
-               cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
-                               "OEMPKHASH[%d]: 0x%x\n", i,
-                               mhi_cntrl->oem_pk_hash[i]);
+               cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
+                               i, mhi_cntrl->oem_pk_hash[i]);
 
        return cnt;
 }
 static DEVICE_ATTR_RO(oem_pk_hash);
 
+static ssize_t soc_reset_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf,
+                              size_t count)
+{
+       struct mhi_device *mhi_dev = to_mhi_device(dev);
+       struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+       mhi_soc_reset(mhi_cntrl);
+       return count;
+}
+static DEVICE_ATTR_WO(soc_reset);
+
 static struct attribute *mhi_dev_attrs[] = {
        &dev_attr_serial_number.attr,
        &dev_attr_oem_pk_hash.attr,
+       &dev_attr_soc_reset.attr,
        NULL,
 };
 ATTRIBUTE_GROUPS(mhi_dev);
@@ -425,74 +438,65 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        struct {
                u32 offset;
-               u32 mask;
                u32 val;
        } reg_info[] = {
                {
-                       CCABAP_HIGHER, U32_MAX,
+                       CCABAP_HIGHER,
                        upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
                },
                {
-                       CCABAP_LOWER, U32_MAX,
+                       CCABAP_LOWER,
                        lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
                },
                {
-                       ECABAP_HIGHER, U32_MAX,
+                       ECABAP_HIGHER,
                        upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
                },
                {
-                       ECABAP_LOWER, U32_MAX,
+                       ECABAP_LOWER,
                        lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
                },
                {
-                       CRCBAP_HIGHER, U32_MAX,
+                       CRCBAP_HIGHER,
                        upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
                },
                {
-                       CRCBAP_LOWER, U32_MAX,
+                       CRCBAP_LOWER,
                        lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
                },
                {
-                       MHICFG, MHICFG_NER_MASK,
-                       mhi_cntrl->total_ev_rings,
-               },
-               {
-                       MHICFG, MHICFG_NHWER_MASK,
-                       mhi_cntrl->hw_ev_rings,
-               },
-               {
-                       MHICTRLBASE_HIGHER, U32_MAX,
+                       MHICTRLBASE_HIGHER,
                        upper_32_bits(mhi_cntrl->iova_start),
                },
                {
-                       MHICTRLBASE_LOWER, U32_MAX,
+                       MHICTRLBASE_LOWER,
                        lower_32_bits(mhi_cntrl->iova_start),
                },
                {
-                       MHIDATABASE_HIGHER, U32_MAX,
+                       MHIDATABASE_HIGHER,
                        upper_32_bits(mhi_cntrl->iova_start),
                },
                {
-                       MHIDATABASE_LOWER, U32_MAX,
+                       MHIDATABASE_LOWER,
                        lower_32_bits(mhi_cntrl->iova_start),
                },
                {
-                       MHICTRLLIMIT_HIGHER, U32_MAX,
+                       MHICTRLLIMIT_HIGHER,
                        upper_32_bits(mhi_cntrl->iova_stop),
                },
                {
-                       MHICTRLLIMIT_LOWER, U32_MAX,
+                       MHICTRLLIMIT_LOWER,
                        lower_32_bits(mhi_cntrl->iova_stop),
                },
                {
-                       MHIDATALIMIT_HIGHER, U32_MAX,
+                       MHIDATALIMIT_HIGHER,
                        upper_32_bits(mhi_cntrl->iova_stop),
                },
                {
-                       MHIDATALIMIT_LOWER, U32_MAX,
+                       MHIDATALIMIT_LOWER,
                        lower_32_bits(mhi_cntrl->iova_stop),
                },
-               { 0, 0, 0 }
+               {0, 0}
        };
 
        dev_dbg(dev, "Initializing MHI registers\n");
@@ -534,8 +538,22 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
 
        /* Write to MMIO registers */
        for (i = 0; reg_info[i].offset; i++)
-               mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
-                                   reg_info[i].mask, reg_info[i].val);
+               mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
+                             reg_info[i].val);
+
+       ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
+                                 mhi_cntrl->total_ev_rings);
+       if (ret) {
+               dev_err(dev, "Unable to write MHICFG register\n");
+               return ret;
+       }
+
+       ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
+                                 mhi_cntrl->hw_ev_rings);
+       if (ret) {
+               dev_err(dev, "Unable to write MHICFG register\n");
+               return ret;
+       }
 
        return 0;
 }
@@ -1103,8 +1121,15 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
                 */
                mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
                                     mhi_cntrl->rddm_size);
-               if (mhi_cntrl->rddm_image)
-                       mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
+               if (mhi_cntrl->rddm_image) {
+                       ret = mhi_rddm_prepare(mhi_cntrl,
+                                              mhi_cntrl->rddm_image);
+                       if (ret) {
+                               mhi_free_bhie_table(mhi_cntrl,
+                                                   mhi_cntrl->rddm_image);
+                               goto error_reg_offset;
+                       }
+               }
        }
 
        mutex_unlock(&mhi_cntrl->pm_mutex);
index b47d8ef..01fd10a 100644 (file)
@@ -324,8 +324,9 @@ int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
                                    u32 val, u32 delayus);
 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
                   u32 offset, u32 val);
-void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
-                        u32 offset, u32 mask, u32 val);
+int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
+                                    void __iomem *base, u32 offset, u32 mask,
+                                    u32 val);
 void mhi_ring_er_db(struct mhi_event *mhi_event);
 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
                  dma_addr_t db_val);
@@ -339,7 +340,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
 void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
 int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
 void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
-void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
                      struct image_info *img_info);
 void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
 
index 9021be7..f3aef77 100644 (file)
@@ -65,19 +65,22 @@ void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
        mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
 }
 
-void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
-                        u32 offset, u32 mask, u32 val)
+int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
+                                    void __iomem *base, u32 offset, u32 mask,
+                                    u32 val)
 {
        int ret;
        u32 tmp;
 
        ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
        if (ret)
-               return;
+               return ret;
 
        tmp &= ~mask;
        tmp |= (val << __ffs(mask));
        mhi_write_reg(mhi_cntrl, base, offset, tmp);
+
+       return 0;
 }
 
 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
@@ -531,18 +534,13 @@ irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
 static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
                                        struct mhi_ring *ring)
 {
-       dma_addr_t ctxt_wp;
-
        /* Update the WP */
        ring->wp += ring->el_size;
-       ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size;
 
-       if (ring->wp >= (ring->base + ring->len)) {
+       if (ring->wp >= (ring->base + ring->len))
                ring->wp = ring->base;
-               ctxt_wp = ring->iommu_base;
-       }
 
-       *ring->ctxt_wp = cpu_to_le64(ctxt_wp);
+       *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base));
 
        /* Update the RP */
        ring->rp += ring->el_size;
index 541ced2..8416267 100644 (file)
@@ -371,7 +371,16 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
        .sideband_wake = false,
 };
 
-static const struct mhi_channel_config mhi_mv31_channels[] = {
+static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
+       .name = "foxconn-sdx65",
+       .config = &modem_foxconn_sdx55_config,
+       .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+       .dma_data_width = 32,
+       .mru_default = 32768,
+       .sideband_wake = false,
+};
+
+static const struct mhi_channel_config mhi_mv3x_channels[] = {
        MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
        MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
        /* MBIM Control Channel */
@@ -382,25 +391,33 @@ static const struct mhi_channel_config mhi_mv31_channels[] = {
        MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
 };
 
-static struct mhi_event_config mhi_mv31_events[] = {
+static struct mhi_event_config mhi_mv3x_events[] = {
        MHI_EVENT_CONFIG_CTRL(0, 256),
        MHI_EVENT_CONFIG_DATA(1, 256),
        MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
        MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
 };
 
-static const struct mhi_controller_config modem_mv31_config = {
+static const struct mhi_controller_config modem_mv3x_config = {
        .max_channels = 128,
        .timeout_ms = 20000,
-       .num_channels = ARRAY_SIZE(mhi_mv31_channels),
-       .ch_cfg = mhi_mv31_channels,
-       .num_events = ARRAY_SIZE(mhi_mv31_events),
-       .event_cfg = mhi_mv31_events,
+       .num_channels = ARRAY_SIZE(mhi_mv3x_channels),
+       .ch_cfg = mhi_mv3x_channels,
+       .num_events = ARRAY_SIZE(mhi_mv3x_events),
+       .event_cfg = mhi_mv3x_events,
 };
 
 static const struct mhi_pci_dev_info mhi_mv31_info = {
        .name = "cinterion-mv31",
-       .config = &modem_mv31_config,
+       .config = &modem_mv3x_config,
+       .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+       .dma_data_width = 32,
+       .mru_default = 32768,
+};
+
+static const struct mhi_pci_dev_info mhi_mv32_info = {
+       .name = "cinterion-mv32",
+       .config = &modem_mv3x_config,
        .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
        .dma_data_width = 32,
        .mru_default = 32768,
@@ -446,20 +463,100 @@ static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
        .sideband_wake = false,
 };
 
+static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
+       MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
+       MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
+       MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
+       MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
+       MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
+       MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
+};
+
+static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
+       MHI_EVENT_CONFIG_CTRL(0, 128),
+       MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
+       MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
+};
+
+static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
+       .max_channels = 128,
+       .timeout_ms = 20000,
+       .num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
+       .ch_cfg = mhi_telit_fn980_hw_v1_channels,
+       .num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
+       .event_cfg = mhi_telit_fn980_hw_v1_events,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
+       .name = "telit-fn980-hwv1",
+       .fw = "qcom/sdx55m/sbl1.mbn",
+       .edl = "qcom/sdx55m/edl.mbn",
+       .config = &modem_telit_fn980_hw_v1_config,
+       .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+       .dma_data_width = 32,
+       .mru_default = 32768,
+       .sideband_wake = false,
+};
+
+static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
+       MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+       MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
+       MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
+       MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
+       MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+       MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+       MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+       MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+       MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+       MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
+static struct mhi_event_config mhi_telit_fn990_events[] = {
+       MHI_EVENT_CONFIG_CTRL(0, 128),
+       MHI_EVENT_CONFIG_DATA(1, 128),
+       MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+       MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
+};
+
+static const struct mhi_controller_config modem_telit_fn990_config = {
+       .max_channels = 128,
+       .timeout_ms = 20000,
+       .num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
+       .ch_cfg = mhi_telit_fn990_channels,
+       .num_events = ARRAY_SIZE(mhi_telit_fn990_events),
+       .event_cfg = mhi_telit_fn990_events,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
+       .name = "telit-fn990",
+       .config = &modem_telit_fn990_config,
+       .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+       .dma_data_width = 32,
+       .sideband_wake = false,
+       .mru_default = 32768,
+};
+
+/* Keep the list sorted based on the PID. New VID should be added as the last entry */
 static const struct pci_device_id mhi_pci_id_table[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
+               .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
        /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
                .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
+       /* Telit FN980 hardware revision v1 */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
+               .driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
        { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
                .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
-       { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
-               .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
+       /* Telit FN990 */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
+               .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
+       { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
+               .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
        { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */
                .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
        { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */
                .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
-       { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
-               .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
        /* T99W175 (sdx55), Both for eSIM and Non-eSIM */
        { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
                .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
@@ -472,9 +569,21 @@ static const struct pci_device_id mhi_pci_id_table[] = {
        /* T99W175 (sdx55), Based on Qualcomm new baseline */
        { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
                .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+       /* T99W368 (sdx65) */
+       { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
+               .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+       /* T99W373 (sdx62) */
+       { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
+               .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
        /* MV31-W (Cinterion) */
        { PCI_DEVICE(0x1269, 0x00b3),
                .driver_data = (kernel_ulong_t) &mhi_mv31_info },
+       /* MV32-WA (Cinterion) */
+       { PCI_DEVICE(0x1269, 0x00ba),
+               .driver_data = (kernel_ulong_t) &mhi_mv32_info },
+       /* MV32-WB (Cinterion) */
+       { PCI_DEVICE(0x1269, 0x00bb),
+               .driver_data = (kernel_ulong_t) &mhi_mv32_info },
        {  }
 };
 MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
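
Ordering matters in this table because the PCI core walks it top to bottom and stops at the first match, so the PCI_DEVICE_SUB() rows that pin a subsystem vendor/device on a shared Qualcomm VID:PID have to sit above the catch-all PCI_DEVICE() row for the same IDs. A small stand-alone model of that first-match rule (simplified fields, not the real struct pci_device_id; 0x17cb is the Qualcomm vendor ID):

    /* Illustrative model of first-match lookup in an ID table (C99). */
    #include <stdio.h>

    #define ANY 0xffffffffu

    struct id { unsigned int vid, did, sub_vid, sub_did; const char *info; };

    static const struct id table[] = {
        { 0x17cb, 0x0306, 0x18d7, 0x0200, "sierra-em919x" },    /* specific */
        { 0x17cb, 0x0306, 0x1c5d, 0x2000, "telit-fn980-hwv1" }, /* specific */
        { 0x17cb, 0x0306, ANY,    ANY,    "qcom-sdx55" },       /* catch-all */
        { 0 }
    };

    static const char *match(unsigned int vid, unsigned int did,
                             unsigned int svid, unsigned int sdid)
    {
        for (const struct id *i = table; i->vid; i++)
            if (i->vid == vid && i->did == did &&
                (i->sub_vid == ANY || i->sub_vid == svid) &&
                (i->sub_did == ANY || i->sub_did == sdid))
                return i->info;
        return "no match";
    }

    int main(void)
    {
        puts(match(0x17cb, 0x0306, 0x18d7, 0x0200));  /* sierra-em919x */
        puts(match(0x17cb, 0x0306, 0x1234, 0x5678));  /* qcom-sdx55 */
        return 0;
    }
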
index 3d90b8e..dc2e8ff 100644 (file)
@@ -129,13 +129,20 @@ enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cn
 
 void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
 {
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       int ret;
+
        if (state == MHI_STATE_RESET) {
-               mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
-                                   MHICTRL_RESET_MASK, 1);
+               ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+                                         MHICTRL_RESET_MASK, 1);
        } else {
-               mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
-                                   MHICTRL_MHISTATE_MASK, state);
+               ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+                                         MHICTRL_MHISTATE_MASK, state);
        }
+
+       if (ret)
+               dev_err(dev, "Failed to set MHI state to: %s\n",
+                       mhi_state_str(state));
 }
 
 /* NOP for backward compatibility, host allowed to ring DB in M2 state */
@@ -476,6 +483,15 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
                 * hence re-program it
                 */
                mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+
+               if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
+                       /* wait for ready to be set */
+                       ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
+                                                MHISTATUS,
+                                                MHISTATUS_READY_MASK, 1, 25000);
+                       if (ret)
+                               dev_err(dev, "Device failed to enter READY state\n");
+               }
        }
 
        dev_dbg(dev,
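
The new block above waits for the device to report READY after the reset register write; mhi_poll_reg_field() simply re-reads the field until it holds the expected value or the retry budget given as the last argument (25000 here) runs out. A generic, stand-alone sketch of such a bounded poll (the fake read_ready_field() is a stand-in for real register I/O):

    /* Illustrative bounded poll: retry until a field matches or we give up. */
    #include <stdio.h>

    /* Hypothetical register read; a real driver would read hardware here. */
    static unsigned int read_ready_field(void)
    {
        static int calls;
        return ++calls >= 3;           /* becomes "ready" on the third poll */
    }

    static int poll_field(unsigned int want, unsigned int max_polls)
    {
        for (unsigned int i = 0; i < max_polls; i++)
            if (read_ready_field() == want)
                return 0;
        return -1;                     /* timed out */
    }

    int main(void)
    {
        printf("poll result: %d\n", poll_field(1, 25));   /* prints 0 */
        return 0;
    }
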
index 18363aa..9a7d123 100644 (file)
@@ -3395,7 +3395,9 @@ static int sysc_remove(struct platform_device *pdev)
        struct sysc *ddata = platform_get_drvdata(pdev);
        int error;
 
-       cancel_delayed_work_sync(&ddata->idle_work);
+       /* Device can still be enabled, see deferred idle quirk in probe */
+       if (cancel_delayed_work_sync(&ddata->idle_work))
+               ti_sysc_idle(&ddata->idle_work.work);
 
        error = pm_runtime_resume_and_get(ddata->dev);
        if (error < 0) {
index 55f4837..69fd31f 100644 (file)
@@ -18,7 +18,8 @@ config TTY_PRINTK
          The feature is useful to inline user messages with kernel
          messages.
          In order to use this feature, you should output user messages
-         to /dev/ttyprintk or redirect console to this TTY.
+         to /dev/ttyprintk or redirect console to this TTY, or boot
+         the kernel with console=ttyprintk.
 
          If unsure, say N.
 
index cc296f0..84ca98e 100644 (file)
@@ -101,7 +101,7 @@ static inline bool should_stop_iteration(void)
 {
        if (need_resched())
                cond_resched();
-       return fatal_signal_pending(current);
+       return signal_pending(current);
 }
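
Switching from fatal_signal_pending() to signal_pending() lets a long copy through this driver bail out on catchable signals such as SIGINT as well, not only on SIGKILL; the surrounding read/write loops call the helper between chunks and return the short count copied so far. A user-space model of the same check-between-chunks shape (illustrative only):

    /* Illustrative chunked copy that stops early when interrupted. */
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>

    static volatile sig_atomic_t interrupted;

    static void on_sigint(int sig) { (void)sig; interrupted = 1; }

    static size_t chunked_copy(char *dst, const char *src, size_t len)
    {
        size_t done = 0;

        while (done < len) {
            size_t chunk = len - done > 4096 ? 4096 : len - done;

            if (interrupted)           /* like signal_pending(current) */
                break;                 /* return the short count so far */
            memcpy(dst + done, src + done, chunk);
            done += chunk;
        }
        return done;
    }

    int main(void)
    {
        static char src[16384], dst[16384];

        signal(SIGINT, on_sigint);
        printf("copied %zu bytes\n", chunked_copy(dst, src, sizeof(src)));
        return 0;
    }
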
 
 /*
index ca5141e..cba19bf 100644 (file)
@@ -100,17 +100,18 @@ static const struct seq_operations misc_seq_ops = {
 static int misc_open(struct inode *inode, struct file *file)
 {
        int minor = iminor(inode);
-       struct miscdevice *c;
+       struct miscdevice *c = NULL, *iter;
        int err = -ENODEV;
        const struct file_operations *new_fops = NULL;
 
        mutex_lock(&misc_mtx);
 
-       list_for_each_entry(c, &misc_list, list) {
-               if (c->minor == minor) {
-                       new_fops = fops_get(c->fops);
-                       break;
-               }
+       list_for_each_entry(iter, &misc_list, list) {
+               if (iter->minor != minor)
+                       continue;
+               c = iter;
+               new_fops = fops_get(iter->fops);
+               break;
        }
 
        if (!new_fops) {
@@ -118,11 +119,12 @@ static int misc_open(struct inode *inode, struct file *file)
                request_module("char-major-%d-%d", MISC_MAJOR, minor);
                mutex_lock(&misc_mtx);
 
-               list_for_each_entry(c, &misc_list, list) {
-                       if (c->minor == minor) {
-                               new_fops = fops_get(c->fops);
-                               break;
-                       }
+               list_for_each_entry(iter, &misc_list, list) {
+                       if (iter->minor != minor)
+                               continue;
+                       c = iter;
+                       new_fops = fops_get(iter->fops);
+                       break;
                }
                if (!new_fops)
                        goto fail;
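
This rewrite follows the broader effort to stop using a list_for_each_entry() cursor after the loop: when the loop falls off the end, the cursor is container_of() applied to the list head rather than a real element, so testing or dereferencing it is unsafe. Keeping the cursor in a separate iter and assigning the result only on a hit makes "not found" an explicit NULL. A small stand-alone illustration of the safe pattern on a plain linked list (not the kernel list API):

    /* Illustrative "find in list" that leaves the result NULL on a miss. */
    #include <stdio.h>

    struct node { int minor; struct node *next; };

    static struct node *find_minor(struct node *head, int minor)
    {
        struct node *found = NULL;

        for (struct node *iter = head; iter; iter = iter->next) {
            if (iter->minor != minor)
                continue;
            found = iter;              /* only set on an actual match */
            break;
        }
        return found;                  /* NULL means "no such minor" */
    }

    int main(void)
    {
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

        printf("%p\n", (void *)find_minor(&a, 2));   /* address of b */
        printf("%p\n", (void *)find_minor(&a, 9));   /* NULL: not found */
        return 0;
    }
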
index 78baba5..8fc49b0 100644 (file)
@@ -922,7 +922,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd)
                // BIT7:parity error
                // BIT6:framing error
 
-               if (status & (BIT7 + BIT6)) {
+               if (status & (BIT7 | BIT6)) {
                        if (status & BIT7)
                                icount->parity++;
                        else
@@ -1418,7 +1418,11 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
                info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
 
        /* byte size and parity */
-
+       if ((cflag & CSIZE) != CS8) {
+               cflag &= ~CSIZE;
+               cflag |= CS7;
+               tty->termios.c_cflag = cflag;
+       }
        info->params.data_bits = tty_get_char_size(cflag);
 
        if (cflag & CSTOPB)
@@ -1432,10 +1436,8 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
                        info->params.parity = ASYNC_PARITY_ODD;
                else
                        info->params.parity = ASYNC_PARITY_EVEN;
-#ifdef CMSPAR
                if (cflag & CMSPAR)
                        info->params.parity = ASYNC_PARITY_SPACE;
-#endif
        }
 
        /* calculate number of jiffies to transmit a full
index adf941c..ed45d04 100644 (file)
@@ -11,6 +11,7 @@
  * of the boot process, for example.
  */
 
+#include <linux/console.h>
 #include <linux/device.h>
 #include <linux/serial.h>
 #include <linux/tty.h>
@@ -163,6 +164,18 @@ static const struct tty_port_operations tpk_port_ops = {
 
 static struct tty_driver *ttyprintk_driver;
 
+static struct tty_driver *ttyprintk_console_device(struct console *c,
+                                                  int *index)
+{
+       *index = 0;
+       return ttyprintk_driver;
+}
+
+static struct console ttyprintk_console = {
+       .name = "ttyprintk",
+       .device = ttyprintk_console_device,
+};
+
 static int __init ttyprintk_init(void)
 {
        int ret;
@@ -195,6 +208,8 @@ static int __init ttyprintk_init(void)
                goto error;
        }
 
+       register_console(&ttyprintk_console);
+
        return 0;
 
 error:
@@ -205,6 +220,7 @@ error:
 
 static void __exit ttyprintk_exit(void)
 {
+       unregister_console(&ttyprintk_console);
        tty_unregister_driver(ttyprintk_driver);
        tty_driver_kref_put(ttyprintk_driver);
        tty_port_destroy(&tpk_port.port);
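
Registering ttyprintk_console is what makes the console=ttyprintk boot argument mentioned in the Kconfig help earlier in this series work: the printk core matches the boot token against each registered console's name and then calls its .device hook to find the TTY driver. A rough user-space model of that name match (simplified; the real parsing also splits off ",options" and an index):

    /* Illustrative model of matching a "console=<name>" token by name. */
    #include <stdio.h>
    #include <string.h>

    struct fake_console {
        const char *name;
        int index;
    };

    static int matches_boot_arg(const struct fake_console *c, const char *arg)
    {
        size_t len = strlen(c->name);

        return strncmp(arg, c->name, len) == 0 &&
               (arg[len] == '\0' || arg[len] == ',');
    }

    int main(void)
    {
        struct fake_console ttyprintk = { .name = "ttyprintk", .index = 0 };

        printf("%d\n", matches_boot_arg(&ttyprintk, "ttyprintk"));    /* 1 */
        printf("%d\n", matches_boot_arg(&ttyprintk, "ttyS0,115200")); /* 0 */
        return 0;
    }
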
index 5046486..0f23864 100644 (file)
@@ -174,18 +174,17 @@ void xillybus_cleanup_chrdev(void *private_data,
                             struct device *dev)
 {
        int minor;
-       struct xilly_unit *unit;
-       bool found = false;
+       struct xilly_unit *unit = NULL, *iter;
 
        mutex_lock(&unit_mutex);
 
-       list_for_each_entry(unit, &unit_list, list_entry)
-               if (unit->private_data == private_data) {
-                       found = true;
+       list_for_each_entry(iter, &unit_list, list_entry)
+               if (iter->private_data == private_data) {
+                       unit = iter;
                        break;
                }
 
-       if (!found) {
+       if (!unit) {
                dev_err(dev, "Weird bug: Failed to find unit\n");
                mutex_unlock(&unit_mutex);
                return;
@@ -216,22 +215,21 @@ int xillybus_find_inode(struct inode *inode,
 {
        int minor = iminor(inode);
        int major = imajor(inode);
-       struct xilly_unit *unit;
-       bool found = false;
+       struct xilly_unit *unit = NULL, *iter;
 
        mutex_lock(&unit_mutex);
 
-       list_for_each_entry(unit, &unit_list, list_entry)
-               if (unit->major == major &&
-                   minor >= unit->lowest_minor &&
-                   minor < (unit->lowest_minor + unit->num_nodes)) {
-                       found = true;
+       list_for_each_entry(iter, &unit_list, list_entry)
+               if (iter->major == major &&
+                   minor >= iter->lowest_minor &&
+                   minor < (iter->lowest_minor + iter->num_nodes)) {
+                       unit = iter;
                        break;
                }
 
        mutex_unlock(&unit_mutex);
 
-       if (!found)
+       if (!unit)
                return -ENODEV;
 
        *private_data = unit->private_data;
index dc35517..39bcbfd 100644 (file)
@@ -549,6 +549,7 @@ static void cleanup_dev(struct kref *kref)
        if (xdev->workq)
                destroy_workqueue(xdev->workq);
 
+       usb_put_dev(xdev->udev);
        kfree(xdev->channels); /* Argument may be NULL, and that's fine */
        kfree(xdev);
 }
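
The added usb_put_dev() balances the reference taken on the underlying struct usb_device when the xillyusb device was set up, so the refcount is dropped exactly once when the last reference to xdev goes away instead of leaking. A minimal stand-alone sketch of that get/put discipline with a toy refcount (not the USB core):

    /* Illustrative get/put pairing around an owning object's lifetime. */
    #include <assert.h>
    #include <stdio.h>

    struct dev { int refs; };

    static struct dev *get_dev(struct dev *d) { d->refs++; return d; }

    static void put_dev(struct dev *d)
    {
        assert(d->refs > 0);
        if (--d->refs == 0)
            printf("device freed\n");
    }

    struct owner { struct dev *udev; };

    static void owner_create(struct owner *o, struct dev *d)
    {
        o->udev = get_dev(d);          /* take a reference at setup */
    }

    static void owner_cleanup(struct owner *o)
    {
        put_dev(o->udev);              /* drop it exactly once at teardown */
        o->udev = NULL;
    }

    int main(void)
    {
        struct dev d = { .refs = 1 };  /* creator's reference */
        struct owner o;

        owner_create(&o, &d);
        owner_cleanup(&o);
        put_dev(&d);                   /* creator drops its own ref: freed */
        return 0;
    }
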
index 5b022ee..c56e406 100644 (file)
@@ -683,7 +683,12 @@ struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
                return ERR_PTR(ret);
        }
 
-       pdev->driver_override = "imx-scu-clk";
+       ret = driver_set_override(&pdev->dev, &pdev->driver_override,
+                                 "imx-scu-clk", strlen("imx-scu-clk"));
+       if (ret) {
+               platform_device_put(pdev);
+               return ERR_PTR(ret);
+       }
 
        ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
        if (ret)
index cfc79f9..03de634 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/clkdev.h>
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/soc/pxa/smemc.h>
 
 #include <dt-bindings/clock/pxa-clock.h>
 #include "clk-pxa.h"
@@ -94,7 +95,8 @@ void __init clkdev_pxa_register(int ckid, const char *con_id,
                clk_register_clkdev(clk, con_id, dev_id);
 }
 
-int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
+int __init clk_pxa_cken_init(const struct desc_clk_cken *clks,
+                            int nb_clks, void __iomem *clk_regs)
 {
        int i;
        struct pxa_clk *pxa_clk;
@@ -106,6 +108,7 @@ int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
                pxa_clk->lp = clks[i].lp;
                pxa_clk->hp = clks[i].hp;
                pxa_clk->gate = clks[i].gate;
+               pxa_clk->gate.reg = clk_regs + clks[i].cken_reg;
                pxa_clk->gate.lock = &pxa_clk_lock;
                clk = clk_register_composite(NULL, clks[i].name,
                                             clks[i].parent_names, 2,
@@ -150,12 +153,13 @@ void pxa2xx_core_turbo_switch(bool on)
 }
 
 void pxa2xx_cpll_change(struct pxa2xx_freq *freq,
-                       u32 (*mdrefr_dri)(unsigned int), void __iomem *mdrefr,
+                       u32 (*mdrefr_dri)(unsigned int),
                        void __iomem *cccr)
 {
        unsigned int clkcfg = freq->clkcfg;
        unsigned int unused, preset_mdrefr, postset_mdrefr;
        unsigned long flags;
+       void __iomem *mdrefr = pxa_smemc_get_mdrefr();
 
        local_irq_save(flags);
 
index 5768e0f..7ec2d28 100644 (file)
 struct desc_clk_cken {
        struct clk_hw hw;
        int ckid;
+       int cken_reg;
        const char *name;
        const char *dev_id;
        const char *con_id;
@@ -119,11 +120,12 @@ struct desc_clk_cken {
 #define PXA_CKEN(_dev_id, _con_id, _name, parents, _mult_lp, _div_lp,  \
                 _mult_hp, _div_hp, is_lp, _cken_reg, _cken_bit, flag)  \
        { .ckid = CLK_ ## _name, .name = #_name,                        \
+         .cken_reg = _cken_reg,                                        \
          .dev_id = _dev_id, .con_id = _con_id, .parent_names = parents,\
          .lp = { .mult = _mult_lp, .div = _div_lp },                   \
          .hp = { .mult = _mult_hp, .div = _div_hp },                   \
          .is_in_low_power = is_lp,                                     \
-         .gate = { .reg = (void __iomem *)_cken_reg, .bit_idx = _cken_bit }, \
+         .gate = { .bit_idx = _cken_bit }, \
          .flags = flag,                                                \
        }
 #define PXA_CKEN_1RATE(dev_id, con_id, name, parents, cken_reg,                \
@@ -146,12 +148,13 @@ static inline int dummy_clk_set_parent(struct clk_hw *hw, u8 index)
 
 extern void clkdev_pxa_register(int ckid, const char *con_id,
                                const char *dev_id, struct clk *clk);
-extern int clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks);
+extern int clk_pxa_cken_init(const struct desc_clk_cken *clks,
+                            int nb_clks, void __iomem *clk_regs);
 void clk_pxa_dt_common_init(struct device_node *np);
 
 void pxa2xx_core_turbo_switch(bool on);
 void pxa2xx_cpll_change(struct pxa2xx_freq *freq,
-                       u32 (*mdrefr_dri)(unsigned int), void __iomem *mdrefr,
+                       u32 (*mdrefr_dri)(unsigned int),
                        void __iomem *cccr);
 int pxa2xx_determine_rate(struct clk_rate_request *req,
                          struct pxa2xx_freq *freqs,  int nb_freqs);
index d0f9579..93d5907 100644 (file)
 #include <linux/clkdev.h>
 #include <linux/io.h>
 #include <linux/of.h>
-#include <mach/pxa2xx-regs.h>
-#include <mach/smemc.h>
+#include <linux/soc/pxa/smemc.h>
 
 #include <dt-bindings/clock/pxa-clock.h>
 #include "clk-pxa.h"
+#include "clk-pxa2xx.h"
 
 #define KHz 1000
 #define MHz (1000 * 1000)
@@ -33,15 +33,13 @@ enum {
         ((T) ? CLKCFG_TURBO : 0))
 #define PXA25x_CCCR(N2, M, L) (N2 << 7 | M << 5 | L)
 
-#define MDCNFG_DRAC2(mdcnfg)   (((mdcnfg) >> 21) & 0x3)
-#define MDCNFG_DRAC0(mdcnfg)   (((mdcnfg) >> 5) & 0x3)
-
 /* Define the refresh period in mSec for the SDRAM and the number of rows */
 #define SDRAM_TREF     64      /* standard 64ms SDRAM */
 
 /*
  * Various clock factors driven by the CCCR register.
  */
+static void __iomem *clk_regs;
 
 /* Crystal Frequency to Memory Frequency Multiplier (L) */
 static unsigned char L_clk_mult[32] = { 0, 27, 32, 36, 40, 45, 0, };
@@ -57,30 +55,9 @@ static const char * const get_freq_khz[] = {
        "core", "run", "cpll", "memory"
 };
 
-static int get_sdram_rows(void)
-{
-       static int sdram_rows;
-       unsigned int drac2 = 0, drac0 = 0;
-       u32 mdcnfg;
-
-       if (sdram_rows)
-               return sdram_rows;
-
-       mdcnfg = readl_relaxed(MDCNFG);
-
-       if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
-               drac2 = MDCNFG_DRAC2(mdcnfg);
-
-       if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
-               drac0 = MDCNFG_DRAC0(mdcnfg);
-
-       sdram_rows = 1 << (11 + max(drac0, drac2));
-       return sdram_rows;
-}
-
 static u32 mdrefr_dri(unsigned int freq_khz)
 {
-       u32 interval = freq_khz * SDRAM_TREF / get_sdram_rows();
+       u32 interval = freq_khz * SDRAM_TREF / pxa2xx_smemc_get_sdram_rows();
 
        return interval / 32;
 }
@@ -121,7 +98,7 @@ unsigned int pxa25x_get_clk_frequency_khz(int info)
 static unsigned long clk_pxa25x_memory_get_rate(struct clk_hw *hw,
                                                unsigned long parent_rate)
 {
-       unsigned long cccr = readl(CCCR);
+       unsigned long cccr = readl(clk_regs + CCCR);
        unsigned int m = M_clk_mult[(cccr >> 5) & 0x03];
 
        return parent_rate / m;
@@ -225,7 +202,7 @@ MUX_OPS(clk_pxa25x_core, "core", CLK_SET_RATE_PARENT);
 static unsigned long clk_pxa25x_run_get_rate(struct clk_hw *hw,
                                             unsigned long parent_rate)
 {
-       unsigned long cccr = readl(CCCR);
+       unsigned long cccr = readl(clk_regs + CCCR);
        unsigned int n2 = N2_clk_mult[(cccr >> 7) & 0x07];
 
        return (parent_rate / n2) * 2;
@@ -236,7 +213,7 @@ RATE_RO_OPS(clk_pxa25x_run, "run");
 static unsigned long clk_pxa25x_cpll_get_rate(struct clk_hw *hw,
        unsigned long parent_rate)
 {
-       unsigned long clkcfg, cccr = readl(CCCR);
+       unsigned long clkcfg, cccr = readl(clk_regs + CCCR);
        unsigned int l, m, n2, t;
 
        asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
@@ -268,7 +245,7 @@ static int clk_pxa25x_cpll_set_rate(struct clk_hw *hw, unsigned long rate,
        if (i >= ARRAY_SIZE(pxa25x_freqs))
                return -EINVAL;
 
-       pxa2xx_cpll_change(&pxa25x_freqs[i], mdrefr_dri, MDREFR, CCCR);
+       pxa2xx_cpll_change(&pxa25x_freqs[i], mdrefr_dri, clk_regs + CCCR);
 
        return 0;
 }
@@ -345,16 +322,17 @@ static void __init pxa25x_dummy_clocks_init(void)
        }
 }
 
-int __init pxa25x_clocks_init(void)
+int __init pxa25x_clocks_init(void __iomem *regs)
 {
+       clk_regs = regs;
        pxa25x_base_clocks_init();
        pxa25x_dummy_clocks_init();
-       return clk_pxa_cken_init(pxa25x_clocks, ARRAY_SIZE(pxa25x_clocks));
+       return clk_pxa_cken_init(pxa25x_clocks, ARRAY_SIZE(pxa25x_clocks), clk_regs);
 }
 
 static void __init pxa25x_dt_clocks_init(struct device_node *np)
 {
-       pxa25x_clocks_init();
+       pxa25x_clocks_init(ioremap(0x41300000ul, 0x10));
        clk_pxa_dt_common_init(np);
 }
 CLK_OF_DECLARE(pxa25x_clks, "marvell,pxa250-core-clocks",
index 7b12310..116c6ac 100644 (file)
@@ -7,16 +7,15 @@
  * Heavily inspired from former arch/arm/mach-pxa/clock.c.
  */
 #include <linux/clk-provider.h>
-#include <mach/pxa2xx-regs.h>
 #include <linux/io.h>
 #include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/of.h>
-
-#include <mach/smemc.h>
+#include <linux/soc/pxa/smemc.h>
 
 #include <dt-bindings/clock/pxa-clock.h>
 #include "clk-pxa.h"
+#include "clk-pxa2xx.h"
 
 #define KHz 1000
 #define MHz (1000 * 1000)
@@ -50,41 +49,19 @@ enum {
         ((T)  ? CLKCFG_TURBO : 0))
 #define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L)
 
-#define MDCNFG_DRAC2(mdcnfg)   (((mdcnfg) >> 21) & 0x3)
-#define MDCNFG_DRAC0(mdcnfg)   (((mdcnfg) >> 5) & 0x3)
-
 /* Define the refresh period in mSec for the SDRAM and the number of rows */
 #define SDRAM_TREF     64      /* standard 64ms SDRAM */
 
+static void __iomem *clk_regs;
+
 static const char * const get_freq_khz[] = {
        "core", "run", "cpll", "memory",
        "system_bus"
 };
 
-static int get_sdram_rows(void)
-{
-       static int sdram_rows;
-       unsigned int drac2 = 0, drac0 = 0;
-       u32 mdcnfg;
-
-       if (sdram_rows)
-               return sdram_rows;
-
-       mdcnfg = readl_relaxed(MDCNFG);
-
-       if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
-               drac2 = MDCNFG_DRAC2(mdcnfg);
-
-       if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
-               drac0 = MDCNFG_DRAC0(mdcnfg);
-
-       sdram_rows = 1 << (11 + max(drac0, drac2));
-       return sdram_rows;
-}
-
 static u32 mdrefr_dri(unsigned int freq_khz)
 {
-       u32 interval = freq_khz * SDRAM_TREF / get_sdram_rows();
+       u32 interval = freq_khz * SDRAM_TREF / pxa2xx_smemc_get_sdram_rows();
 
        return (interval - 31) / 32;
 }
@@ -124,7 +101,7 @@ unsigned int pxa27x_get_clk_frequency_khz(int info)
 
 bool pxa27x_is_ppll_disabled(void)
 {
-       unsigned long ccsr = readl(CCSR);
+       unsigned long ccsr = readl(clk_regs + CCSR);
 
        return ccsr & (1 << CCCR_PPDIS_BIT);
 }
@@ -226,7 +203,7 @@ static unsigned long clk_pxa27x_cpll_get_rate(struct clk_hw *hw,
        unsigned long clkcfg;
        unsigned int t, ht;
        unsigned int l, L, n2, N;
-       unsigned long ccsr = readl(CCSR);
+       unsigned long ccsr = readl(clk_regs + CCSR);
 
        asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
        t  = clkcfg & (1 << 0);
@@ -260,7 +237,7 @@ static int clk_pxa27x_cpll_set_rate(struct clk_hw *hw, unsigned long rate,
        if (i >= ARRAY_SIZE(pxa27x_freqs))
                return -EINVAL;
 
-       pxa2xx_cpll_change(&pxa27x_freqs[i], mdrefr_dri, MDREFR, CCCR);
+       pxa2xx_cpll_change(&pxa27x_freqs[i], mdrefr_dri, clk_regs + CCCR);
        return 0;
 }
 
@@ -271,8 +248,8 @@ static unsigned long clk_pxa27x_lcd_base_get_rate(struct clk_hw *hw,
                                                  unsigned long parent_rate)
 {
        unsigned int l, osc_forced;
-       unsigned long ccsr = readl(CCSR);
-       unsigned long cccr = readl(CCCR);
+       unsigned long ccsr = readl(clk_regs + CCSR);
+       unsigned long cccr = readl(clk_regs + CCCR);
 
        l  = ccsr & CCSR_L_MASK;
        osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
@@ -293,7 +270,7 @@ static unsigned long clk_pxa27x_lcd_base_get_rate(struct clk_hw *hw,
 static u8 clk_pxa27x_lcd_base_get_parent(struct clk_hw *hw)
 {
        unsigned int osc_forced;
-       unsigned long ccsr = readl(CCSR);
+       unsigned long ccsr = readl(clk_regs + CCSR);
 
        osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
        if (osc_forced)
@@ -322,7 +299,7 @@ static u8 clk_pxa27x_core_get_parent(struct clk_hw *hw)
 {
        unsigned long clkcfg;
        unsigned int t, ht, osc_forced;
-       unsigned long ccsr = readl(CCSR);
+       unsigned long ccsr = readl(clk_regs + CCSR);
 
        osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
        if (osc_forced)
@@ -359,7 +336,7 @@ MUX_OPS(clk_pxa27x_core, "core", CLK_SET_RATE_PARENT);
 static unsigned long clk_pxa27x_run_get_rate(struct clk_hw *hw,
                                             unsigned long parent_rate)
 {
-       unsigned long ccsr = readl(CCSR);
+       unsigned long ccsr = readl(clk_regs + CCSR);
        unsigned int n2 = (ccsr & CCSR_N2_MASK) >> CCSR_N2_SHIFT;
 
        return (parent_rate / n2) * 2;
@@ -382,7 +359,7 @@ static unsigned long clk_pxa27x_system_bus_get_rate(struct clk_hw *hw,
 {
        unsigned long clkcfg;
        unsigned int b, osc_forced;
-       unsigned long ccsr = readl(CCSR);
+       unsigned long ccsr = readl(clk_regs + CCSR);
 
        osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
        asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
@@ -399,7 +376,7 @@ static unsigned long clk_pxa27x_system_bus_get_rate(struct clk_hw *hw,
 static u8 clk_pxa27x_system_bus_get_parent(struct clk_hw *hw)
 {
        unsigned int osc_forced;
-       unsigned long ccsr = readl(CCSR);
+       unsigned long ccsr = readl(clk_regs + CCSR);
 
        osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
        if (osc_forced)
@@ -415,8 +392,8 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
                                                unsigned long parent_rate)
 {
        unsigned int a, l, osc_forced;
-       unsigned long cccr = readl(CCCR);
-       unsigned long ccsr = readl(CCSR);
+       unsigned long cccr = readl(clk_regs + CCCR);
+       unsigned long ccsr = readl(clk_regs + CCSR);
 
        osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
        a = cccr & (1 << CCCR_A_BIT);
@@ -434,8 +411,8 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
 static u8 clk_pxa27x_memory_get_parent(struct clk_hw *hw)
 {
        unsigned int osc_forced, a;
-       unsigned long cccr = readl(CCCR);
-       unsigned long ccsr = readl(CCSR);
+       unsigned long cccr = readl(clk_regs + CCCR);
+       unsigned long ccsr = readl(clk_regs + CCSR);
 
        osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
        a = cccr & (1 << CCCR_A_BIT);
@@ -490,16 +467,17 @@ static void __init pxa27x_base_clocks_init(void)
        clk_register_clk_pxa27x_lcd_base();
 }
 
-int __init pxa27x_clocks_init(void)
+int __init pxa27x_clocks_init(void __iomem *regs)
 {
+       clk_regs = regs;
        pxa27x_base_clocks_init();
        pxa27x_dummy_clocks_init();
-       return clk_pxa_cken_init(pxa27x_clocks, ARRAY_SIZE(pxa27x_clocks));
+       return clk_pxa_cken_init(pxa27x_clocks, ARRAY_SIZE(pxa27x_clocks), regs);
 }
 
 static void __init pxa27x_dt_clocks_init(struct device_node *np)
 {
-       pxa27x_clocks_init();
+       pxa27x_clocks_init(ioremap(0x41300000ul, 0x10));
        clk_pxa_dt_common_init(np);
 }
 CLK_OF_DECLARE(pxa_clks, "marvell,pxa270-clocks", pxa27x_dt_clocks_init);
diff --git a/drivers/clk/pxa/clk-pxa2xx.h b/drivers/clk/pxa/clk-pxa2xx.h
new file mode 100644 (file)
index 0000000..94b03d0
--- /dev/null
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __CLK_PXA2XX_H
+#define __CLK_PXA2XX_H
+
+#define CCCR           (0x0000)  /* Core Clock Configuration Register */
+#define CCSR           (0x000C)  /* Core Clock Status Register */
+#define CKEN           (0x0004)  /* Clock Enable Register */
+#define OSCC           (0x0008)  /* Oscillator Configuration Register */
+
+#define CCCR_N_MASK    0x0380  /* Run Mode Frequency to Turbo Mode Frequency Multiplier */
+#define CCCR_M_MASK    0x0060  /* Memory Frequency to Run Mode Frequency Multiplier */
+#define CCCR_L_MASK    0x001f  /* Crystal Frequency to Memory Frequency Multiplier */
+
+#define CCCR_CPDIS_BIT (31)
+#define CCCR_PPDIS_BIT (30)
+#define CCCR_LCD_26_BIT        (27)
+#define CCCR_A_BIT     (25)
+
+#define CCSR_N2_MASK   CCCR_N_MASK
+#define CCSR_M_MASK    CCCR_M_MASK
+#define CCSR_L_MASK    CCCR_L_MASK
+#define CCSR_N2_SHIFT  7
+
+#define CKEN_AC97CONF   (31)    /* AC97 Controller Configuration */
+#define CKEN_CAMERA    (24)    /* Camera Interface Clock Enable */
+#define CKEN_SSP1      (23)    /* SSP1 Unit Clock Enable */
+#define CKEN_MEMC      (22)    /* Memory Controller Clock Enable */
+#define CKEN_MEMSTK    (21)    /* Memory Stick Host Controller */
+#define CKEN_IM                (20)    /* Internal Memory Clock Enable */
+#define CKEN_KEYPAD    (19)    /* Keypad Interface Clock Enable */
+#define CKEN_USIM      (18)    /* USIM Unit Clock Enable */
+#define CKEN_MSL       (17)    /* MSL Unit Clock Enable */
+#define CKEN_LCD       (16)    /* LCD Unit Clock Enable */
+#define CKEN_PWRI2C    (15)    /* PWR I2C Unit Clock Enable */
+#define CKEN_I2C       (14)    /* I2C Unit Clock Enable */
+#define CKEN_FICP      (13)    /* FICP Unit Clock Enable */
+#define CKEN_MMC       (12)    /* MMC Unit Clock Enable */
+#define CKEN_USB       (11)    /* USB Unit Clock Enable */
+#define CKEN_ASSP      (10)    /* ASSP (SSP3) Clock Enable */
+#define CKEN_USBHOST   (10)    /* USB Host Unit Clock Enable */
+#define CKEN_OSTIMER   (9)     /* OS Timer Unit Clock Enable */
+#define CKEN_NSSP      (9)     /* NSSP (SSP2) Clock Enable */
+#define CKEN_I2S       (8)     /* I2S Unit Clock Enable */
+#define CKEN_BTUART    (7)     /* BTUART Unit Clock Enable */
+#define CKEN_FFUART    (6)     /* FFUART Unit Clock Enable */
+#define CKEN_STUART    (5)     /* STUART Unit Clock Enable */
+#define CKEN_HWUART    (4)     /* HWUART Unit Clock Enable */
+#define CKEN_SSP3      (4)     /* SSP3 Unit Clock Enable */
+#define CKEN_SSP       (3)     /* SSP Unit Clock Enable */
+#define CKEN_SSP2      (3)     /* SSP2 Unit Clock Enable */
+#define CKEN_AC97      (2)     /* AC97 Unit Clock Enable */
+#define CKEN_PWM1      (1)     /* PWM1 Clock Enable */
+#define CKEN_PWM0      (0)     /* PWM0 Clock Enable */
+
+#define OSCC_OON       (1 << 1)        /* 32.768kHz OON (write-once only bit) */
+#define OSCC_OOK       (1 << 0)        /* 32.768kHz OOK (read-only bit) */
+
+#endif
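
These masks mirror how the pxa25x/pxa27x rate callbacks above pick fields out of CCCR/CCSR: L in bits 0-4 scales the crystal to the memory clock, M sits in bits 5-6 and N2 in bits 7-9, using the same shifts as the code. A stand-alone decode of a sample register value (the masks are repeated locally so the snippet compiles on its own):

    /* Illustrative CCCR decode using the field layout defined above. */
    #include <stdio.h>

    #define CCCR_L_MASK  0x001f          /* bits 0-4 */
    #define CCCR_M_MASK  0x0060          /* bits 5-6 */
    #define CCCR_N_MASK  0x0380          /* bits 7-9 */

    int main(void)
    {
        unsigned int cccr = 0x0161;      /* sample value */
        unsigned int l  =  cccr & CCCR_L_MASK;
        unsigned int m  = (cccr & CCCR_M_MASK) >> 5;
        unsigned int n2 = (cccr & CCCR_N_MASK) >> 7;

        printf("L=%u M=%u N2=%u\n", l, m, n2);   /* prints L=1 M=3 N2=2 */
        return 0;
    }
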
index 60db927..42958a5 100644 (file)
@@ -14,8 +14,9 @@
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/of.h>
-#include <mach/smemc.h>
-#include <mach/pxa3xx-regs.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/soc/pxa/smemc.h>
+#include <linux/clk/pxa.h>
 
 #include <dt-bindings/clock/pxa-clock.h>
 #include "clk-pxa.h"
 #define KHz 1000
 #define MHz (1000 * 1000)
 
+#define ACCR                   (0x0000)        /* Application Subsystem Clock Configuration Register */
+#define ACSR                   (0x0004)        /* Application Subsystem Clock Status Register */
+#define AICSR                  (0x0008)        /* Application Subsystem Interrupt Control/Status Register */
+#define CKENA                  (0x000C)        /* A Clock Enable Register */
+#define CKENB                  (0x0010)        /* B Clock Enable Register */
+#define CKENC                  (0x0024)        /* C Clock Enable Register */
+#define AC97_DIV               (0x0014)        /* AC97 clock divisor value register */
+
+#define ACCR_XPDIS             (1 << 31)       /* Core PLL Output Disable */
+#define ACCR_SPDIS             (1 << 30)       /* System PLL Output Disable */
+#define ACCR_D0CS              (1 << 26)       /* D0 Mode Clock Select */
+#define ACCR_PCCE              (1 << 11)       /* Power Mode Change Clock Enable */
+#define ACCR_DDR_D0CS          (1 << 7)        /* DDR SDRAM clock frequency in D0CS (PXA31x only) */
+
+#define ACCR_SMCFS_MASK                (0x7 << 23)     /* Static Memory Controller Frequency Select */
+#define ACCR_SFLFS_MASK                (0x3 << 18)     /* Frequency Select for Internal Memory Controller */
+#define ACCR_XSPCLK_MASK       (0x3 << 16)     /* Core Frequency during Frequency Change */
+#define ACCR_HSS_MASK          (0x3 << 14)     /* System Bus-Clock Frequency Select */
+#define ACCR_DMCFS_MASK                (0x3 << 12)     /* Dynamic Memory Controller Clock Frequency Select */
+#define ACCR_XN_MASK           (0x7 << 8)      /* Core PLL Turbo-Mode-to-Run-Mode Ratio */
+#define ACCR_XL_MASK           (0x1f)          /* Core PLL Run-Mode-to-Oscillator Ratio */
+
+#define ACCR_SMCFS(x)          (((x) & 0x7) << 23)
+#define ACCR_SFLFS(x)          (((x) & 0x3) << 18)
+#define ACCR_XSPCLK(x)         (((x) & 0x3) << 16)
+#define ACCR_HSS(x)            (((x) & 0x3) << 14)
+#define ACCR_DMCFS(x)          (((x) & 0x3) << 12)
+#define ACCR_XN(x)             (((x) & 0x7) << 8)
+#define ACCR_XL(x)             ((x) & 0x1f)
+
+/*
+ * Clock Enable Bit
+ */
+#define CKEN_LCD       1       /* < LCD Clock Enable */
+#define CKEN_USBH      2       /* < USB host clock enable */
+#define CKEN_CAMERA    3       /* < Camera interface clock enable */
+#define CKEN_NAND      4       /* < NAND Flash Controller Clock Enable */
+#define CKEN_USB2      6       /* < USB 2.0 client clock enable. */
+#define CKEN_DMC       8       /* < Dynamic Memory Controller clock enable */
+#define CKEN_SMC       9       /* < Static Memory Controller clock enable */
+#define CKEN_ISC       10      /* < Internal SRAM Controller clock enable */
+#define CKEN_BOOT      11      /* < Boot rom clock enable */
+#define CKEN_MMC1      12      /* < MMC1 Clock enable */
+#define CKEN_MMC2      13      /* < MMC2 clock enable */
+#define CKEN_KEYPAD    14      /* < Keypad Controller Clock Enable */
+#define CKEN_CIR       15      /* < Consumer IR Clock Enable */
+#define CKEN_USIM0     17      /* < USIM[0] Clock Enable */
+#define CKEN_USIM1     18      /* < USIM[1] Clock Enable */
+#define CKEN_TPM       19      /* < TPM clock enable */
+#define CKEN_UDC       20      /* < UDC clock enable */
+#define CKEN_BTUART    21      /* < BTUART clock enable */
+#define CKEN_FFUART    22      /* < FFUART clock enable */
+#define CKEN_STUART    23      /* < STUART clock enable */
+#define CKEN_AC97      24      /* < AC97 clock enable */
+#define CKEN_TOUCH     25      /* < Touch screen Interface Clock Enable */
+#define CKEN_SSP1      26      /* < SSP1 clock enable */
+#define CKEN_SSP2      27      /* < SSP2 clock enable */
+#define CKEN_SSP3      28      /* < SSP3 clock enable */
+#define CKEN_SSP4      29      /* < SSP4 clock enable */
+#define CKEN_MSL0      30      /* < MSL0 clock enable */
+#define CKEN_PWM0      32      /* < PWM[0] clock enable */
+#define CKEN_PWM1      33      /* < PWM[1] clock enable */
+#define CKEN_I2C       36      /* < I2C clock enable */
+#define CKEN_INTC      38      /* < Interrupt controller clock enable */
+#define CKEN_GPIO      39      /* < GPIO clock enable */
+#define CKEN_1WIRE     40      /* < 1-wire clock enable */
+#define CKEN_HSIO2     41      /* < HSIO2 clock enable */
+#define CKEN_MINI_IM   48      /* < Mini-IM */
+#define CKEN_MINI_LCD  49      /* < Mini LCD */
+
+#define CKEN_MMC3      5       /* < MMC3 Clock Enable */
+#define CKEN_MVED      43      /* < MVED clock enable */
+
+/* Note: GCU clock enable bit differs on PXA300/PXA310 and PXA320 */
+#define CKEN_PXA300_GCU                42      /* Graphics controller clock enable */
+#define CKEN_PXA320_GCU                7       /* Graphics controller clock enable */
+
+
 enum {
        PXA_CORE_60Mhz = 0,
        PXA_CORE_RUN,
@@ -39,12 +118,12 @@ static unsigned char hss_mult[4] = { 8, 12, 16, 24 };
 
 /* crystal frequency to static memory controller multiplier (SMCFS) */
 static unsigned int smcfs_mult[8] = { 6, 0, 8, 0, 0, 16, };
-static unsigned int df_clkdiv[4] = { 1, 2, 4, 1 };
-
 static const char * const get_freq_khz[] = {
        "core", "ring_osc_60mhz", "run", "cpll", "system_bus"
 };
 
+static void __iomem *clk_regs;
+
 /*
  * Get the clock frequency as reflected by ACSR and the turbo flag.
  * We assume these values have been applied via a fcs.
@@ -78,12 +157,27 @@ unsigned int pxa3xx_get_clk_frequency_khz(int info)
        return (unsigned int)clks[0] / KHz;
 }
 
+void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask)
+{
+       u32 accr = readl(clk_regs + ACCR);
+
+       accr &= ~disable;
+       accr |= enable;
+
+       writel(accr, clk_regs + ACCR);
+       if (xclkcfg)
+               __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg));
+
+       while ((readl(clk_regs + ACSR) & mask) != (accr & mask))
+               cpu_relax();
+}
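
pxa3xx_clk_update_accr() centralises ACCR changes: clear the disable bits, set the enable bits, optionally issue the xclkcfg coprocessor write for a core frequency change, then spin until ACSR mirrors the bits selected by mask. A user-space model of that contract with the register I/O faked (the xclkcfg step is omitted and the HSS field values are only examples):

    /* Illustrative model of the update-then-wait-for-status contract above. */
    #include <stdio.h>

    #define ACCR_HSS_MASK   (0x3 << 14)
    #define ACCR_HSS(x)     (((x) & 0x3) << 14)

    static unsigned int accr;                 /* fake ACCR */
    static unsigned int acsr;                 /* fake ACSR */

    static void clk_update_accr(unsigned int disable, unsigned int enable,
                                unsigned int mask)
    {
        accr &= ~disable;
        accr |= enable;
        while ((acsr & mask) != (accr & mask))
            acsr = accr;                      /* fake hardware settling */
    }

    int main(void)
    {
        accr = acsr = ACCR_HSS(1);            /* system-bus select = 1 */
        clk_update_accr(ACCR_HSS_MASK, ACCR_HSS(2), ACCR_HSS_MASK);
        printf("HSS is now %u\n", (acsr >> 14) & 0x3);   /* prints 2 */
        return 0;
    }
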
+
 static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw,
                                             unsigned long parent_rate)
 {
        unsigned long ac97_div, rate;
 
-       ac97_div = AC97_DIV;
+       ac97_div = readl(clk_regs + AC97_DIV);
 
        /* This may lose precision for some rates but won't for the
         * standard 24.576MHz.
@@ -100,18 +194,18 @@ RATE_RO_OPS(clk_pxa3xx_ac97, "ac97");
 static unsigned long clk_pxa3xx_smemc_get_rate(struct clk_hw *hw,
                                              unsigned long parent_rate)
 {
-       unsigned long acsr = ACSR;
-       unsigned long memclkcfg = __raw_readl(MEMCLKCFG);
+       unsigned long acsr = readl(clk_regs + ACSR);
 
        return (parent_rate / 48)  * smcfs_mult[(acsr >> 23) & 0x7] /
-               df_clkdiv[(memclkcfg >> 16) & 0x3];
+               pxa3xx_smemc_get_memclkdiv();
+
 }
 PARENTS(clk_pxa3xx_smemc) = { "spll_624mhz" };
 RATE_RO_OPS(clk_pxa3xx_smemc, "smemc");
 
 static bool pxa3xx_is_ring_osc_forced(void)
 {
-       unsigned long acsr = ACSR;
+       unsigned long acsr = readl(clk_regs + ACSR);
 
        return acsr & ACCR_D0CS;
 }
@@ -123,7 +217,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
 PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
 PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
 
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? CKENB : CKENA)
 #define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
                    div_hp, bit, is_lp, flags)                          \
        PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp,         \
@@ -191,7 +285,7 @@ static struct desc_clk_cken pxa93x_clocks[] __initdata = {
 static unsigned long clk_pxa3xx_system_bus_get_rate(struct clk_hw *hw,
                                            unsigned long parent_rate)
 {
-       unsigned long acsr = ACSR;
+       unsigned long acsr = readl(clk_regs + ACSR);
        unsigned int hss = (acsr >> 14) & 0x3;
 
        if (pxa3xx_is_ring_osc_forced())
@@ -238,7 +332,7 @@ MUX_RO_RATE_RO_OPS(clk_pxa3xx_core, "core");
 static unsigned long clk_pxa3xx_run_get_rate(struct clk_hw *hw,
                                             unsigned long parent_rate)
 {
-       unsigned long acsr = ACSR;
+       unsigned long acsr = readl(clk_regs + ACSR);
        unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
        unsigned int t, xclkcfg;
 
@@ -254,7 +348,7 @@ RATE_RO_OPS(clk_pxa3xx_run, "run");
 static unsigned long clk_pxa3xx_cpll_get_rate(struct clk_hw *hw,
        unsigned long parent_rate)
 {
-       unsigned long acsr = ACSR;
+       unsigned long acsr = readl(clk_regs + ACSR);
        unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
        unsigned int xl = acsr & ACCR_XL_MASK;
        unsigned int t, xclkcfg;
@@ -325,7 +419,7 @@ static void __init pxa3xx_dummy_clocks_init(void)
        }
 }
 
-static void __init pxa3xx_base_clocks_init(void)
+static void __init pxa3xx_base_clocks_init(void __iomem *oscc_reg)
 {
        struct clk *clk;
 
@@ -335,34 +429,35 @@ static void __init pxa3xx_base_clocks_init(void)
        clk_register_clk_pxa3xx_ac97();
        clk_register_clk_pxa3xx_smemc();
        clk = clk_register_gate(NULL, "CLK_POUT",
-                               "osc_13mhz", 0, OSCC, 11, 0, NULL);
+                               "osc_13mhz", 0, oscc_reg, 11, 0, NULL);
        clk_register_clkdev(clk, "CLK_POUT", NULL);
        clkdev_pxa_register(CLK_OSTIMER, "OSTIMER0", NULL,
                            clk_register_fixed_factor(NULL, "os-timer0",
                                                      "osc_13mhz", 0, 1, 4));
 }
 
-int __init pxa3xx_clocks_init(void)
+int __init pxa3xx_clocks_init(void __iomem *regs, void __iomem *oscc_reg)
 {
        int ret;
 
-       pxa3xx_base_clocks_init();
+       clk_regs = regs;
+       pxa3xx_base_clocks_init(oscc_reg);
        pxa3xx_dummy_clocks_init();
-       ret = clk_pxa_cken_init(pxa3xx_clocks, ARRAY_SIZE(pxa3xx_clocks));
+       ret = clk_pxa_cken_init(pxa3xx_clocks, ARRAY_SIZE(pxa3xx_clocks), regs);
        if (ret)
                return ret;
        if (cpu_is_pxa320())
                return clk_pxa_cken_init(pxa320_clocks,
-                                        ARRAY_SIZE(pxa320_clocks));
+                                        ARRAY_SIZE(pxa320_clocks), regs);
        if (cpu_is_pxa300() || cpu_is_pxa310())
                return clk_pxa_cken_init(pxa300_310_clocks,
-                                        ARRAY_SIZE(pxa300_310_clocks));
-       return clk_pxa_cken_init(pxa93x_clocks, ARRAY_SIZE(pxa93x_clocks));
+                                        ARRAY_SIZE(pxa300_310_clocks), regs);
+       return clk_pxa_cken_init(pxa93x_clocks, ARRAY_SIZE(pxa93x_clocks), regs);
 }
 
 static void __init pxa3xx_dt_clocks_init(struct device_node *np)
 {
-       pxa3xx_clocks_init();
+       pxa3xx_clocks_init(ioremap(0x41340000, 0x10), ioremap(0x41350000, 4));
        clk_pxa_dt_common_init(np);
 }
 CLK_OF_DECLARE(pxa_clks, "marvell,pxa300-clocks", pxa3xx_dt_clocks_init);
index fe3f05d..3c0ee10 100644 (file)
@@ -80,7 +80,7 @@ config IXP4XX_TIMER
        bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST
        depends on HAS_IOMEM
        select CLKSRC_MMIO
-       select TIMER_OF if OF
+       select TIMER_OF
        help
          Enables support for the Intel XScale IXP4xx SoC timer.
 
@@ -597,6 +597,14 @@ config CLKSRC_ST_LPC
          Enable this option to use the Low Power controller timer
          as clocksource.
 
+config GXP_TIMER
+       bool "GXP timer driver" if COMPILE_TEST && !ARCH_HPE
+       default ARCH_HPE
+       select TIMER_OF if OF
+       help
+         Provides a driver for the timer control found on HPE
+         GXP SOCs. This is required for all GXP SOCs.
+
 config RISCV_TIMER
        bool "Timer for the RISC-V platform" if COMPILE_TEST
        depends on GENERIC_SCHED_CLOCK && RISCV && RISCV_SBI
index 833cfb7..6ca6400 100644 (file)
@@ -86,3 +86,4 @@ obj-$(CONFIG_HYPERV_TIMER)            += hyperv_timer.o
 obj-$(CONFIG_MICROCHIP_PIT64B)         += timer-microchip-pit64b.o
 obj-$(CONFIG_MSC313E_TIMER)            += timer-msc313e.o
 obj-$(CONFIG_GOLDFISH_TIMER)           += timer-goldfish.o
+obj-$(CONFIG_GXP_TIMER)                        += timer-gxp.o
index a50ab5c..39f172d 100644 (file)
@@ -1,15 +1,5 @@
-/*
- * Copyright (C) 2012 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012 Broadcom Corporation
 
 #include <linux/init.h>
 #include <linux/irq.h>
index 5d3d88e..a4a9911 100644 (file)
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * J-Core SoC PIT/clocksource driver
  *
  * Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
  */
 
 #include <linux/kernel.h>
index be4175f..b3ae38f 100644 (file)
@@ -1,10 +1,5 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- */
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 
 #define pr_fmt(fmt) "mips-gic-timer: " fmt
 
index e3acc3c..6ec565d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Marvell Armada 370/XP SoC timer handling.
  *
@@ -7,10 +8,6 @@
  * Gregory CLEMENT <gregory.clement@free-electrons.com>
  * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
  *
- * This file is licensed under the terms of the GNU General Public
- * License version 2.  This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
  * Timer 0 is used as free-running clocksource, while timer 1 is
  * used as clock_event_device.
  *
index 1e984a4..559aa96 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Conexant Digicolor timer driver
  *
  * Copyright (C) 2013 Maxime Ripard
  *
  * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2.  This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
 /*
diff --git a/drivers/clocksource/timer-gxp.c b/drivers/clocksource/timer-gxp.c
new file mode 100644 (file)
index 0000000..8b38b32
--- /dev/null
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/sched_clock.h>
+
+#define TIMER0_FREQ    1000000
+#define GXP_TIMER_CNT_OFS 0x00
+#define GXP_TIMESTAMP_OFS 0x08
+#define GXP_TIMER_CTRL_OFS 0x14
+
+/* TCS Stands for Timer Control/Status: these are masks to be used in */
+/* the Timer Count Registers */
+#define MASK_TCS_ENABLE        0x01
+#define MASK_TCS_PERIOD        0x02
+#define MASK_TCS_RELOAD        0x04
+#define MASK_TCS_TC    0x80
+
+struct gxp_timer {
+       void __iomem *counter;
+       void __iomem *control;
+       struct clock_event_device evt;
+};
+
+static struct gxp_timer *gxp_timer;
+
+static void __iomem *system_clock __ro_after_init;
+
+static inline struct gxp_timer *to_gxp_timer(struct clock_event_device *evt_dev)
+{
+       return container_of(evt_dev, struct gxp_timer, evt);
+}
+
+static u64 notrace gxp_sched_read(void)
+{
+       return readl_relaxed(system_clock);
+}
+
+static int gxp_time_set_next_event(unsigned long event, struct clock_event_device *evt_dev)
+{
+       struct gxp_timer *timer = to_gxp_timer(evt_dev);
+
+       /* Stop counting and disable interrupt before updating */
+       writeb_relaxed(MASK_TCS_TC, timer->control);
+       writel_relaxed(event, timer->counter);
+       writeb_relaxed(MASK_TCS_TC | MASK_TCS_ENABLE, timer->control);
+
+       return 0;
+}
+
+static irqreturn_t gxp_timer_interrupt(int irq, void *dev_id)
+{
+       struct gxp_timer *timer = (struct gxp_timer *)dev_id;
+
+       if (!(readb_relaxed(timer->control) & MASK_TCS_TC))
+               return IRQ_NONE;
+
+       writeb_relaxed(MASK_TCS_TC, timer->control);
+
+       timer->evt.event_handler(&timer->evt);
+
+       return IRQ_HANDLED;
+}
+
+static int __init gxp_timer_init(struct device_node *node)
+{
+       void __iomem *base;
+       struct clk *clk;
+       u32 freq;
+       int ret, irq;
+
+       gxp_timer = kzalloc(sizeof(*gxp_timer), GFP_KERNEL);
+       if (!gxp_timer) {
+               ret = -ENOMEM;
+               pr_err("Can't allocate gxp_timer");
+               return ret;
+       }
+
+       clk = of_clk_get(node, 0);
+       if (IS_ERR(clk)) {
+               ret = (int)PTR_ERR(clk);
+               pr_err("%pOFn clock not found: %d\n", node, ret);
+               goto err_free;
+       }
+
+       ret = clk_prepare_enable(clk);
+       if (ret) {
+               pr_err("%pOFn clock enable failed: %d\n", node, ret);
+               goto err_clk_enable;
+       }
+
+       base = of_iomap(node, 0);
+       if (!base) {
+               ret = -ENXIO;
+               pr_err("Can't map timer base registers");
+               goto err_iomap;
+       }
+
+       /* Set the offsets to the clock register and timer registers */
+       gxp_timer->counter = base + GXP_TIMER_CNT_OFS;
+       gxp_timer->control = base + GXP_TIMER_CTRL_OFS;
+       system_clock = base + GXP_TIMESTAMP_OFS;
+
+       gxp_timer->evt.name = node->name;
+       gxp_timer->evt.rating = 300;
+       gxp_timer->evt.features = CLOCK_EVT_FEAT_ONESHOT;
+       gxp_timer->evt.set_next_event = gxp_time_set_next_event;
+       gxp_timer->evt.cpumask = cpumask_of(0);
+
+       irq = irq_of_parse_and_map(node, 0);
+       if (irq <= 0) {
+               ret = -EINVAL;
+               pr_err("GXP Timer Can't parse IRQ %d", irq);
+               goto err_exit;
+       }
+
+       freq = clk_get_rate(clk);
+
+       ret = clocksource_mmio_init(system_clock, node->name, freq,
+                                   300, 32, clocksource_mmio_readl_up);
+       if (ret) {
+               pr_err("%pOFn init clocksource failed: %d", node, ret);
+               goto err_exit;
+       }
+
+       sched_clock_register(gxp_sched_read, 32, freq);
+
+       irq = irq_of_parse_and_map(node, 0);
+       if (irq <= 0) {
+               ret = -EINVAL;
+               pr_err("%pOFn Can't parse IRQ %d", node, irq);
+               goto err_exit;
+       }
+
+       clockevents_config_and_register(&gxp_timer->evt, TIMER0_FREQ,
+                                       0xf, 0xffffffff);
+
+       ret = request_irq(irq, gxp_timer_interrupt, IRQF_TIMER | IRQF_SHARED,
+                         node->name, gxp_timer);
+       if (ret) {
+               pr_err("%pOFn request_irq() failed: %d", node, ret);
+               goto err_exit;
+       }
+
+       pr_debug("gxp: system timer (irq = %d)\n", irq);
+       return 0;
+
+err_exit:
+       iounmap(base);
+err_iomap:
+       clk_disable_unprepare(clk);
+err_clk_enable:
+       clk_put(clk);
+err_free:
+       kfree(gxp_timer);
+       return ret;
+}
+
+/*
+ * This probe gets called after the timer is already up and running. This will create
+ * the watchdog device as a child since the registers are shared.
+ */
+
+static int gxp_timer_probe(struct platform_device *pdev)
+{
+       struct platform_device *gxp_watchdog_device;
+       struct device *dev = &pdev->dev;
+
+       if (!gxp_timer) {
+               pr_err("Gxp Timer not initialized, cannot create watchdog");
+               return -ENOMEM;
+       }
+
+       gxp_watchdog_device = platform_device_alloc("gxp-wdt", -1);
+       if (!gxp_watchdog_device) {
+               pr_err("Timer failed to allocate gxp-wdt");
+               return -ENOMEM;
+       }
+
+       /* Pass the base address (counter) as platform data and nothing else */
+       gxp_watchdog_device->dev.platform_data = gxp_timer->counter;
+       gxp_watchdog_device->dev.parent = dev;
+
+       return platform_device_add(gxp_watchdog_device);
+}
+
+static const struct of_device_id gxp_timer_of_match[] = {
+       { .compatible = "hpe,gxp-timer", },
+       {},
+};
+
+static struct platform_driver gxp_timer_driver = {
+       .probe  = gxp_timer_probe,
+       .driver = {
+               .name = "gxp-timer",
+               .of_match_table = gxp_timer_of_match,
+               .suppress_bind_attrs = true,
+       },
+};
+
+builtin_platform_driver(gxp_timer_driver);
+
+TIMER_OF_DECLARE(gxp, "hpe,gxp-timer", gxp_timer_init);
index cbb1849..720ed70 100644 (file)
@@ -19,8 +19,6 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
-/* Goes away with OF conversion */
-#include <linux/platform_data/timer-ixp4xx.h>
 
 /*
  * Constants to make it easy to access Timer Control/Status registers
@@ -263,28 +261,6 @@ static struct platform_driver ixp4xx_timer_driver = {
 };
 builtin_platform_driver(ixp4xx_timer_driver);
 
-/**
- * ixp4xx_timer_setup() - Timer setup function to be called from boardfiles
- * @timerbase: physical base of timer block
- * @timer_irq: Linux IRQ number for the timer
- * @timer_freq: Fixed frequency of the timer
- */
-void __init ixp4xx_timer_setup(resource_size_t timerbase,
-                              int timer_irq,
-                              unsigned int timer_freq)
-{
-       void __iomem *base;
-
-       base = ioremap(timerbase, 0x100);
-       if (!base) {
-               pr_crit("IXP4xx: can't remap timer\n");
-               return;
-       }
-       ixp4xx_timer_register(base, timer_irq, timer_freq);
-}
-EXPORT_SYMBOL_GPL(ixp4xx_timer_setup);
-
-#ifdef CONFIG_OF
 static __init int ixp4xx_of_timer_init(struct device_node *np)
 {
        void __iomem *base;
@@ -315,4 +291,3 @@ out_unmap:
        return ret;
 }
 TIMER_OF_DECLARE(ixp4xx, "intel,ixp4xx-timer", ixp4xx_of_timer_init);
-#endif
index d51a62a..68eae63 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Clocksource driver for NXP LPC32xx/18xx/43xx timer
  *
@@ -6,11 +7,6 @@
  * Based on:
  * time-efm32 Copyright (C) 2013 Pengutronix
  * mach-lpc32xx/timer.c Copyright (C) 2009 - 2010 NXP Semiconductors
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
  */
 
 #define pr_fmt(fmt) "%s: " fmt, __func__
index 5101e83..49e86cb 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Marvell Orion SoC timer handling.
  *
  * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
  *
- * This file is licensed under the terms of the GNU General Public
- * License version 2.  This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
  * Timer 0 is used as free-running clocksource, while timer 1 is
  * used as clock_event_device.
  */
index 56c0cc3..d514b44 100644 (file)
@@ -236,7 +236,7 @@ static int __init oxnas_rps_timer_init(struct device_node *np)
        }
 
        rps->irq = irq_of_parse_and_map(np, 0);
-       if (rps->irq < 0) {
+       if (!rps->irq) {
                ret = -EINVAL;
                goto err_iomap;
        }
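The oxnas change above reflects the return convention of irq_of_parse_and_map(): it returns an unsigned virq, with 0 (never a negative value) signalling failure, so a "< 0" test can never trigger. A minimal sketch of the intended pattern, with a caller-supplied node pointer np:

#include <linux/of.h>
#include <linux/of_irq.h>

/* Sketch: irq_of_parse_and_map() returns 0 on failure, so test for 0,
 * not for a negative value.
 */
static int example_map_irq(struct device_node *np)
{
        unsigned int irq = irq_of_parse_and_map(np, 0);

        if (!irq)
                return -EINVAL; /* mapping failed */

        return irq;
}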
index 69c069e..57b2197 100644 (file)
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Pistachio clocksource based on general-purpose timers
  *
  * Copyright (C) 2015 Imagination Technologies
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
  */
 
 #define pr_fmt(fmt) "%s: " fmt, __func__
index 1767f8b..593d5a9 100644 (file)
@@ -34,7 +34,7 @@ static int riscv_clock_next_event(unsigned long delta,
 static unsigned int riscv_clock_event_irq;
 static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
        .name                   = "riscv_timer_clockevent",
-       .features               = CLOCK_EVT_FEAT_ONESHOT,
+       .features               = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
        .rating                 = 100,
        .set_next_event         = riscv_clock_next_event,
 };
index 401d592..e6a87f4 100644 (file)
@@ -259,6 +259,11 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time
        struct clk *clk1, *clk2;
        const char *name = of_get_property(np, "compatible", NULL);
 
+       if (initialized) {
+               pr_debug("%pOF: skipping further SP804 timer device\n", np);
+               return 0;
+       }
+
        base = of_iomap(np, 0);
        if (!base)
                return -ENXIO;
@@ -270,11 +275,6 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time
        writel(0, timer1_base + timer->ctrl);
        writel(0, timer2_base + timer->ctrl);
 
-       if (initialized || !of_device_is_available(np)) {
-               ret = -EINVAL;
-               goto err;
-       }
-
        clk1 = of_clk_get(np, 0);
        if (IS_ERR(clk1))
                clk1 = NULL;
index 0ba8155..bb6ea6c 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Allwinner A1X SoCs timer handling.
  *
@@ -8,10 +9,6 @@
  * Based on code from
  * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
  * Benn Huang <benn@allwinnertech.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2.  This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
 #include <linux/clk.h>
index 552c525..85900f7 100644 (file)
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Allwinner SoCs hstimer driver.
  *
  * Copyright (C) 2013 Maxime Ripard
  *
  * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2.  This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
 #include <linux/clk.h>
index df4a73e..c194e8f 100644 (file)
@@ -828,8 +828,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
                cpu_pm_register_notifier(&timer->nb);
        }
 
-       if (pdata)
-               timer->errata = pdata->timer_errata;
+       timer->errata = pdata->timer_errata;
 
        timer->pdev = pdev;
 
index 8eb1f69..d4e2ed7 100644 (file)
@@ -854,7 +854,7 @@ int comedi_load_firmware(struct comedi_device *dev,
                release_firmware(fw);
        }
 
-       return ret < 0 ? ret : 0;
+       return min(ret, 0);
 }
 EXPORT_SYMBOL_GPL(comedi_load_firmware);
 
index d092c9b..24eaf0e 100644 (file)
@@ -61,6 +61,8 @@ static struct cppc_workaround_oem_info wa_info[] = {
        }
 };
 
+static struct cpufreq_driver cppc_cpufreq_driver;
+
 #ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
 
 /* Frequency invariance support */
@@ -75,7 +77,6 @@ struct cppc_freq_invariance {
 static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
 static struct kthread_worker *kworker_fie;
 
-static struct cpufreq_driver cppc_cpufreq_driver;
 static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
 static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
                                 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
@@ -440,6 +441,14 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
        }
        return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
 }
+#else
+static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+{
+       return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+}
+#endif
+
+#if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL)
 
 static DEFINE_PER_CPU(unsigned int, efficiency_class);
 static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);
@@ -620,21 +629,12 @@ static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
 }
 
 #else
-
-static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
-{
-       return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
-}
 static int populate_efficiency_class(void)
 {
        return 0;
 }
-static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
-{
-}
 #endif
 
-
 static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
 {
        struct cppc_cpudata *cpu_data;
index 8661638..37a1eb2 100644 (file)
@@ -8,18 +8,22 @@
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
+#include <linux/minmax.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/pm_opp.h>
 #include <linux/regulator/consumer.h>
-#include <linux/slab.h>
-#include <linux/thermal.h>
 
-#define MIN_VOLT_SHIFT         (100000)
-#define MAX_VOLT_SHIFT         (200000)
-#define MAX_VOLT_LIMIT         (1150000)
-#define VOLT_TOL               (10000)
+struct mtk_cpufreq_platform_data {
+       int min_volt_shift;
+       int max_volt_shift;
+       int proc_max_volt;
+       int sram_min_volt;
+       int sram_max_volt;
+       bool ccifreq_supported;
+};
 
 /*
  * The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS
@@ -35,6 +39,7 @@
 struct mtk_cpu_dvfs_info {
        struct cpumask cpus;
        struct device *cpu_dev;
+       struct device *cci_dev;
        struct regulator *proc_reg;
        struct regulator *sram_reg;
        struct clk *cpu_clk;
@@ -42,8 +47,20 @@ struct mtk_cpu_dvfs_info {
        struct list_head list_head;
        int intermediate_voltage;
        bool need_voltage_tracking;
+       int vproc_on_boot;
+       int pre_vproc;
+       /* Avoid races on the regulators between the OPP notifier and policy paths */
+       struct mutex reg_lock;
+       struct notifier_block opp_nb;
+       unsigned int opp_cpu;
+       unsigned long current_freq;
+       const struct mtk_cpufreq_platform_data *soc_data;
+       int vtrack_max;
+       bool ccifreq_bound;
 };
 
+static struct platform_device *cpufreq_pdev;
+
 static LIST_HEAD(dvfs_info_list);
 
 static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
@@ -61,142 +78,123 @@ static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
 static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
                                        int new_vproc)
 {
+       const struct mtk_cpufreq_platform_data *soc_data = info->soc_data;
        struct regulator *proc_reg = info->proc_reg;
        struct regulator *sram_reg = info->sram_reg;
-       int old_vproc, old_vsram, new_vsram, vsram, vproc, ret;
-
-       old_vproc = regulator_get_voltage(proc_reg);
-       if (old_vproc < 0) {
-               pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
-               return old_vproc;
-       }
-       /* Vsram should not exceed the maximum allowed voltage of SoC. */
-       new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT);
-
-       if (old_vproc < new_vproc) {
-               /*
-                * When scaling up voltages, Vsram and Vproc scale up step
-                * by step. At each step, set Vsram to (Vproc + 200mV) first,
-                * then set Vproc to (Vsram - 100mV).
-                * Keep doing it until Vsram and Vproc hit target voltages.
-                */
-               do {
-                       old_vsram = regulator_get_voltage(sram_reg);
-                       if (old_vsram < 0) {
-                               pr_err("%s: invalid Vsram value: %d\n",
-                                      __func__, old_vsram);
-                               return old_vsram;
-                       }
-                       old_vproc = regulator_get_voltage(proc_reg);
-                       if (old_vproc < 0) {
-                               pr_err("%s: invalid Vproc value: %d\n",
-                                      __func__, old_vproc);
-                               return old_vproc;
-                       }
-
-                       vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT);
+       int pre_vproc, pre_vsram, new_vsram, vsram, vproc, ret;
+       int retry = info->vtrack_max;
+
+       pre_vproc = regulator_get_voltage(proc_reg);
+       if (pre_vproc < 0) {
+               dev_err(info->cpu_dev,
+                       "invalid Vproc value: %d\n", pre_vproc);
+               return pre_vproc;
+       }
 
-                       if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
-                               vsram = MAX_VOLT_LIMIT;
+       pre_vsram = regulator_get_voltage(sram_reg);
+       if (pre_vsram < 0) {
+               dev_err(info->cpu_dev, "invalid Vsram value: %d\n", pre_vsram);
+               return pre_vsram;
+       }
 
-                               /*
-                                * If the target Vsram hits the maximum voltage,
-                                * try to set the exact voltage value first.
-                                */
-                               ret = regulator_set_voltage(sram_reg, vsram,
-                                                           vsram);
-                               if (ret)
-                                       ret = regulator_set_voltage(sram_reg,
-                                                       vsram - VOLT_TOL,
-                                                       vsram);
+       new_vsram = clamp(new_vproc + soc_data->min_volt_shift,
+                         soc_data->sram_min_volt, soc_data->sram_max_volt);
 
-                               vproc = new_vproc;
-                       } else {
-                               ret = regulator_set_voltage(sram_reg, vsram,
-                                                           vsram + VOLT_TOL);
+       do {
+               if (pre_vproc <= new_vproc) {
+                       vsram = clamp(pre_vproc + soc_data->max_volt_shift,
+                                     soc_data->sram_min_volt, new_vsram);
+                       ret = regulator_set_voltage(sram_reg, vsram,
+                                                   soc_data->sram_max_volt);
 
-                               vproc = vsram - MIN_VOLT_SHIFT;
-                       }
                        if (ret)
                                return ret;
 
+                       if (vsram == soc_data->sram_max_volt ||
+                           new_vsram == soc_data->sram_min_volt)
+                               vproc = new_vproc;
+                       else
+                               vproc = vsram - soc_data->min_volt_shift;
+
                        ret = regulator_set_voltage(proc_reg, vproc,
-                                                   vproc + VOLT_TOL);
+                                                   soc_data->proc_max_volt);
                        if (ret) {
-                               regulator_set_voltage(sram_reg, old_vsram,
-                                                     old_vsram);
+                               regulator_set_voltage(sram_reg, pre_vsram,
+                                                     soc_data->sram_max_volt);
                                return ret;
                        }
-               } while (vproc < new_vproc || vsram < new_vsram);
-       } else if (old_vproc > new_vproc) {
-               /*
-                * When scaling down voltages, Vsram and Vproc scale down step
-                * by step. At each step, set Vproc to (Vsram - 200mV) first,
-                * then set Vproc to (Vproc + 100mV).
-                * Keep doing it until Vsram and Vproc hit target voltages.
-                */
-               do {
-                       old_vproc = regulator_get_voltage(proc_reg);
-                       if (old_vproc < 0) {
-                               pr_err("%s: invalid Vproc value: %d\n",
-                                      __func__, old_vproc);
-                               return old_vproc;
-                       }
-                       old_vsram = regulator_get_voltage(sram_reg);
-                       if (old_vsram < 0) {
-                               pr_err("%s: invalid Vsram value: %d\n",
-                                      __func__, old_vsram);
-                               return old_vsram;
-                       }
-
-                       vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT);
+               } else if (pre_vproc > new_vproc) {
+                       vproc = max(new_vproc,
+                                   pre_vsram - soc_data->max_volt_shift);
                        ret = regulator_set_voltage(proc_reg, vproc,
-                                                   vproc + VOLT_TOL);
+                                                   soc_data->proc_max_volt);
                        if (ret)
                                return ret;
 
                        if (vproc == new_vproc)
                                vsram = new_vsram;
                        else
-                               vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT);
-
-                       if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
-                               vsram = MAX_VOLT_LIMIT;
-
-                               /*
-                                * If the target Vsram hits the maximum voltage,
-                                * try to set the exact voltage value first.
-                                */
-                               ret = regulator_set_voltage(sram_reg, vsram,
-                                                           vsram);
-                               if (ret)
-                                       ret = regulator_set_voltage(sram_reg,
-                                                       vsram - VOLT_TOL,
-                                                       vsram);
-                       } else {
-                               ret = regulator_set_voltage(sram_reg, vsram,
-                                                           vsram + VOLT_TOL);
-                       }
+                               vsram = max(new_vsram,
+                                           vproc + soc_data->min_volt_shift);
 
+                       ret = regulator_set_voltage(sram_reg, vsram,
+                                                   soc_data->sram_max_volt);
                        if (ret) {
-                               regulator_set_voltage(proc_reg, old_vproc,
-                                                     old_vproc);
+                               regulator_set_voltage(proc_reg, pre_vproc,
+                                                     soc_data->proc_max_volt);
                                return ret;
                        }
-               } while (vproc > new_vproc + VOLT_TOL ||
-                        vsram > new_vsram + VOLT_TOL);
-       }
+               }
+
+               pre_vproc = vproc;
+               pre_vsram = vsram;
+
+               if (--retry < 0) {
+                       dev_err(info->cpu_dev,
+                               "over loop count, failed to set voltage\n");
+                       return -EINVAL;
+               }
+       } while (vproc != new_vproc || vsram != new_vsram);
 
        return 0;
 }
 
 static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc)
 {
+       const struct mtk_cpufreq_platform_data *soc_data = info->soc_data;
+       int ret;
+
        if (info->need_voltage_tracking)
-               return mtk_cpufreq_voltage_tracking(info, vproc);
+               ret = mtk_cpufreq_voltage_tracking(info, vproc);
        else
-               return regulator_set_voltage(info->proc_reg, vproc,
-                                            vproc + VOLT_TOL);
+               ret = regulator_set_voltage(info->proc_reg, vproc,
+                                           soc_data->proc_max_volt);
+       if (!ret)
+               info->pre_vproc = vproc;
+
+       return ret;
+}
+
+static bool is_ccifreq_ready(struct mtk_cpu_dvfs_info *info)
+{
+       struct device_link *sup_link;
+
+       if (info->ccifreq_bound)
+               return true;
+
+       sup_link = device_link_add(info->cpu_dev, info->cci_dev,
+                                  DL_FLAG_AUTOREMOVE_CONSUMER);
+       if (!sup_link) {
+               dev_err(info->cpu_dev, "cpu%d: sup_link is NULL\n", info->opp_cpu);
+               return false;
+       }
+
+       if (sup_link->supplier->links.status != DL_DEV_DRIVER_BOUND)
+               return false;
+
+       info->ccifreq_bound = true;
+
+       return true;
 }
 
 static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
@@ -208,219 +206,367 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
        struct mtk_cpu_dvfs_info *info = policy->driver_data;
        struct device *cpu_dev = info->cpu_dev;
        struct dev_pm_opp *opp;
-       long freq_hz, old_freq_hz;
-       int vproc, old_vproc, inter_vproc, target_vproc, ret;
+       long freq_hz, pre_freq_hz;
+       int vproc, pre_vproc, inter_vproc, target_vproc, ret;
 
        inter_vproc = info->intermediate_voltage;
 
-       old_freq_hz = clk_get_rate(cpu_clk);
-       old_vproc = regulator_get_voltage(info->proc_reg);
-       if (old_vproc < 0) {
-               pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
-               return old_vproc;
+       pre_freq_hz = clk_get_rate(cpu_clk);
+
+       mutex_lock(&info->reg_lock);
+
+       if (unlikely(info->pre_vproc <= 0))
+               pre_vproc = regulator_get_voltage(info->proc_reg);
+       else
+               pre_vproc = info->pre_vproc;
+
+       if (pre_vproc < 0) {
+               dev_err(cpu_dev, "invalid Vproc value: %d\n", pre_vproc);
+               ret = pre_vproc;
+               goto out;
        }
 
        freq_hz = freq_table[index].frequency * 1000;
 
        opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
        if (IS_ERR(opp)) {
-               pr_err("cpu%d: failed to find OPP for %ld\n",
-                      policy->cpu, freq_hz);
-               return PTR_ERR(opp);
+               dev_err(cpu_dev, "cpu%d: failed to find OPP for %ld\n",
+                       policy->cpu, freq_hz);
+               ret = PTR_ERR(opp);
+               goto out;
        }
        vproc = dev_pm_opp_get_voltage(opp);
        dev_pm_opp_put(opp);
 
        /*
+        * If the MediaTek cci is supported but not yet ready, use
+        * max(target cpu voltage, boot voltage) to prevent a high-frequency,
+        * low-voltage crash.
+        */
+       if (info->soc_data->ccifreq_supported && !is_ccifreq_ready(info))
+               vproc = max(vproc, info->vproc_on_boot);
+
+       /*
         * If the new voltage or the intermediate voltage is higher than the
         * current voltage, scale up voltage first.
         */
-       target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc;
-       if (old_vproc < target_vproc) {
+       target_vproc = max(inter_vproc, vproc);
+       if (pre_vproc <= target_vproc) {
                ret = mtk_cpufreq_set_voltage(info, target_vproc);
                if (ret) {
-                       pr_err("cpu%d: failed to scale up voltage!\n",
-                              policy->cpu);
-                       mtk_cpufreq_set_voltage(info, old_vproc);
-                       return ret;
+                       dev_err(cpu_dev,
+                               "cpu%d: failed to scale up voltage!\n", policy->cpu);
+                       mtk_cpufreq_set_voltage(info, pre_vproc);
+                       goto out;
                }
        }
 
        /* Reparent the CPU clock to intermediate clock. */
        ret = clk_set_parent(cpu_clk, info->inter_clk);
        if (ret) {
-               pr_err("cpu%d: failed to re-parent cpu clock!\n",
-                      policy->cpu);
-               mtk_cpufreq_set_voltage(info, old_vproc);
-               WARN_ON(1);
-               return ret;
+               dev_err(cpu_dev,
+                       "cpu%d: failed to re-parent cpu clock!\n", policy->cpu);
+               mtk_cpufreq_set_voltage(info, pre_vproc);
+               goto out;
        }
 
        /* Set the original PLL to target rate. */
        ret = clk_set_rate(armpll, freq_hz);
        if (ret) {
-               pr_err("cpu%d: failed to scale cpu clock rate!\n",
-                      policy->cpu);
+               dev_err(cpu_dev,
+                       "cpu%d: failed to scale cpu clock rate!\n", policy->cpu);
                clk_set_parent(cpu_clk, armpll);
-               mtk_cpufreq_set_voltage(info, old_vproc);
-               return ret;
+               mtk_cpufreq_set_voltage(info, pre_vproc);
+               goto out;
        }
 
        /* Set parent of CPU clock back to the original PLL. */
        ret = clk_set_parent(cpu_clk, armpll);
        if (ret) {
-               pr_err("cpu%d: failed to re-parent cpu clock!\n",
-                      policy->cpu);
+               dev_err(cpu_dev,
+                       "cpu%d: failed to re-parent cpu clock!\n", policy->cpu);
                mtk_cpufreq_set_voltage(info, inter_vproc);
-               WARN_ON(1);
-               return ret;
+               goto out;
        }
 
        /*
         * If the new voltage is lower than the intermediate voltage or the
         * original voltage, scale down to the new voltage.
         */
-       if (vproc < inter_vproc || vproc < old_vproc) {
+       if (vproc < inter_vproc || vproc < pre_vproc) {
                ret = mtk_cpufreq_set_voltage(info, vproc);
                if (ret) {
-                       pr_err("cpu%d: failed to scale down voltage!\n",
-                              policy->cpu);
+                       dev_err(cpu_dev,
+                               "cpu%d: failed to scale down voltage!\n", policy->cpu);
                        clk_set_parent(cpu_clk, info->inter_clk);
-                       clk_set_rate(armpll, old_freq_hz);
+                       clk_set_rate(armpll, pre_freq_hz);
                        clk_set_parent(cpu_clk, armpll);
-                       return ret;
+                       goto out;
                }
        }
 
-       return 0;
+       info->current_freq = freq_hz;
+
+out:
+       mutex_unlock(&info->reg_lock);
+
+       return ret;
 }
 
 #define DYNAMIC_POWER "dynamic-power-coefficient"
 
+static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
+                                   unsigned long event, void *data)
+{
+       struct dev_pm_opp *opp = data;
+       struct dev_pm_opp *new_opp;
+       struct mtk_cpu_dvfs_info *info;
+       unsigned long freq, volt;
+       struct cpufreq_policy *policy;
+       int ret = 0;
+
+       info = container_of(nb, struct mtk_cpu_dvfs_info, opp_nb);
+
+       if (event == OPP_EVENT_ADJUST_VOLTAGE) {
+               freq = dev_pm_opp_get_freq(opp);
+
+               mutex_lock(&info->reg_lock);
+               if (info->current_freq == freq) {
+                       volt = dev_pm_opp_get_voltage(opp);
+                       ret = mtk_cpufreq_set_voltage(info, volt);
+                       if (ret)
+                               dev_err(info->cpu_dev,
+                                       "failed to scale voltage: %d\n", ret);
+               }
+               mutex_unlock(&info->reg_lock);
+       } else if (event == OPP_EVENT_DISABLE) {
+               freq = dev_pm_opp_get_freq(opp);
+
+               /* the currently used OPP entry has been disabled */
+               if (info->current_freq == freq) {
+                       freq = 1;
+                       new_opp = dev_pm_opp_find_freq_ceil(info->cpu_dev,
+                                                           &freq);
+                       if (IS_ERR(new_opp)) {
+                               dev_err(info->cpu_dev,
+                                       "all opp items are disabled\n");
+                               ret = PTR_ERR(new_opp);
+                               return notifier_from_errno(ret);
+                       }
+
+                       dev_pm_opp_put(new_opp);
+                       policy = cpufreq_cpu_get(info->opp_cpu);
+                       if (policy) {
+                               cpufreq_driver_target(policy, freq / 1000,
+                                                     CPUFREQ_RELATION_L);
+                               cpufreq_cpu_put(policy);
+                       }
+               }
+       }
+
+       return notifier_from_errno(ret);
+}
+
+static struct device *of_get_cci(struct device *cpu_dev)
+{
+       struct device_node *np;
+       struct platform_device *pdev;
+
+       np = of_parse_phandle(cpu_dev->of_node, "mediatek,cci", 0);
+       if (IS_ERR_OR_NULL(np))
+               return NULL;
+
+       pdev = of_find_device_by_node(np);
+       of_node_put(np);
+       if (IS_ERR_OR_NULL(pdev))
+               return NULL;
+
+       return &pdev->dev;
+}
+
 static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
 {
        struct device *cpu_dev;
-       struct regulator *proc_reg = ERR_PTR(-ENODEV);
-       struct regulator *sram_reg = ERR_PTR(-ENODEV);
-       struct clk *cpu_clk = ERR_PTR(-ENODEV);
-       struct clk *inter_clk = ERR_PTR(-ENODEV);
        struct dev_pm_opp *opp;
        unsigned long rate;
        int ret;
 
        cpu_dev = get_cpu_device(cpu);
        if (!cpu_dev) {
-               pr_err("failed to get cpu%d device\n", cpu);
+               dev_err(cpu_dev, "failed to get cpu%d device\n", cpu);
                return -ENODEV;
        }
+       info->cpu_dev = cpu_dev;
 
-       cpu_clk = clk_get(cpu_dev, "cpu");
-       if (IS_ERR(cpu_clk)) {
-               if (PTR_ERR(cpu_clk) == -EPROBE_DEFER)
-                       pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu);
-               else
-                       pr_err("failed to get cpu clk for cpu%d\n", cpu);
-
-               ret = PTR_ERR(cpu_clk);
-               return ret;
+       info->ccifreq_bound = false;
+       if (info->soc_data->ccifreq_supported) {
+               info->cci_dev = of_get_cci(info->cpu_dev);
+               if (IS_ERR_OR_NULL(info->cci_dev)) {
+                       ret = PTR_ERR(info->cci_dev);
+                       dev_err(cpu_dev, "cpu%d: failed to get cci device\n", cpu);
+                       return -ENODEV;
+               }
        }
 
-       inter_clk = clk_get(cpu_dev, "intermediate");
-       if (IS_ERR(inter_clk)) {
-               if (PTR_ERR(inter_clk) == -EPROBE_DEFER)
-                       pr_warn("intermediate clk for cpu%d not ready, retry.\n",
-                               cpu);
-               else
-                       pr_err("failed to get intermediate clk for cpu%d\n",
-                              cpu);
+       info->cpu_clk = clk_get(cpu_dev, "cpu");
+       if (IS_ERR(info->cpu_clk)) {
+               ret = PTR_ERR(info->cpu_clk);
+               return dev_err_probe(cpu_dev, ret,
+                                    "cpu%d: failed to get cpu clk\n", cpu);
+       }
 
-               ret = PTR_ERR(inter_clk);
+       info->inter_clk = clk_get(cpu_dev, "intermediate");
+       if (IS_ERR(info->inter_clk)) {
+               ret = PTR_ERR(info->inter_clk);
+               dev_err_probe(cpu_dev, ret,
+                             "cpu%d: failed to get intermediate clk\n", cpu);
                goto out_free_resources;
        }
 
-       proc_reg = regulator_get_optional(cpu_dev, "proc");
-       if (IS_ERR(proc_reg)) {
-               if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
-                       pr_warn("proc regulator for cpu%d not ready, retry.\n",
-                               cpu);
-               else
-                       pr_err("failed to get proc regulator for cpu%d\n",
-                              cpu);
+       info->proc_reg = regulator_get_optional(cpu_dev, "proc");
+       if (IS_ERR(info->proc_reg)) {
+               ret = PTR_ERR(info->proc_reg);
+               dev_err_probe(cpu_dev, ret,
+                             "cpu%d: failed to get proc regulator\n", cpu);
+               goto out_free_resources;
+       }
 
-               ret = PTR_ERR(proc_reg);
+       ret = regulator_enable(info->proc_reg);
+       if (ret) {
+               dev_warn(cpu_dev, "cpu%d: failed to enable vproc\n", cpu);
                goto out_free_resources;
        }
 
        /* Both presence and absence of sram regulator are valid cases. */
-       sram_reg = regulator_get_exclusive(cpu_dev, "sram");
+       info->sram_reg = regulator_get_optional(cpu_dev, "sram");
+       if (IS_ERR(info->sram_reg))
+               info->sram_reg = NULL;
+       else {
+               ret = regulator_enable(info->sram_reg);
+               if (ret) {
+                       dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu);
+                       goto out_free_resources;
+               }
+       }
 
        /* Get OPP-sharing information from "operating-points-v2" bindings */
        ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus);
        if (ret) {
-               pr_err("failed to get OPP-sharing information for cpu%d\n",
-                      cpu);
+               dev_err(cpu_dev,
+                       "cpu%d: failed to get OPP-sharing information\n", cpu);
                goto out_free_resources;
        }
 
        ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);
        if (ret) {
-               pr_warn("no OPP table for cpu%d\n", cpu);
+               dev_warn(cpu_dev, "cpu%d: no OPP table\n", cpu);
                goto out_free_resources;
        }
 
+       ret = clk_prepare_enable(info->cpu_clk);
+       if (ret)
+               goto out_free_opp_table;
+
+       ret = clk_prepare_enable(info->inter_clk);
+       if (ret)
+               goto out_disable_mux_clock;
+
+       if (info->soc_data->ccifreq_supported) {
+               info->vproc_on_boot = regulator_get_voltage(info->proc_reg);
+               if (info->vproc_on_boot < 0) {
+                       /* propagate the regulator error rather than returning 0 */
+                       ret = info->vproc_on_boot;
+                       dev_err(info->cpu_dev,
+                               "invalid Vproc value: %d\n", info->vproc_on_boot);
+                       goto out_disable_inter_clock;
+               }
+       }
+
        /* Search a safe voltage for intermediate frequency. */
-       rate = clk_get_rate(inter_clk);
+       rate = clk_get_rate(info->inter_clk);
        opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
        if (IS_ERR(opp)) {
-               pr_err("failed to get intermediate opp for cpu%d\n", cpu);
+               dev_err(cpu_dev, "cpu%d: failed to get intermediate opp\n", cpu);
                ret = PTR_ERR(opp);
-               goto out_free_opp_table;
+               goto out_disable_inter_clock;
        }
        info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
        dev_pm_opp_put(opp);
 
-       info->cpu_dev = cpu_dev;
-       info->proc_reg = proc_reg;
-       info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg;
-       info->cpu_clk = cpu_clk;
-       info->inter_clk = inter_clk;
+       mutex_init(&info->reg_lock);
+       info->current_freq = clk_get_rate(info->cpu_clk);
+
+       info->opp_cpu = cpu;
+       info->opp_nb.notifier_call = mtk_cpufreq_opp_notifier;
+       ret = dev_pm_opp_register_notifier(cpu_dev, &info->opp_nb);
+       if (ret) {
+               dev_err(cpu_dev, "cpu%d: failed to register opp notifier\n", cpu);
+               goto out_disable_inter_clock;
+       }
 
        /*
         * If SRAM regulator is present, software "voltage tracking" is needed
         * for this CPU power domain.
         */
-       info->need_voltage_tracking = !IS_ERR(sram_reg);
+       info->need_voltage_tracking = (info->sram_reg != NULL);
+
+       /*
+        * Assume the minimum voltage is 0 and that each iteration moves the
+        * tracked voltage toward the target by min_volt_shift.
+        * vtrack_max is three times the expected iteration count.
+        */
+       info->vtrack_max = 3 * DIV_ROUND_UP(max(info->soc_data->sram_max_volt,
+                                               info->soc_data->proc_max_volt),
+                                           info->soc_data->min_volt_shift);
 
        return 0;
 
+out_disable_inter_clock:
+       clk_disable_unprepare(info->inter_clk);
+
+out_disable_mux_clock:
+       clk_disable_unprepare(info->cpu_clk);
+
 out_free_opp_table:
        dev_pm_opp_of_cpumask_remove_table(&info->cpus);
 
 out_free_resources:
-       if (!IS_ERR(proc_reg))
-               regulator_put(proc_reg);
-       if (!IS_ERR(sram_reg))
-               regulator_put(sram_reg);
-       if (!IS_ERR(cpu_clk))
-               clk_put(cpu_clk);
-       if (!IS_ERR(inter_clk))
-               clk_put(inter_clk);
+       if (regulator_is_enabled(info->proc_reg))
+               regulator_disable(info->proc_reg);
+       if (info->sram_reg && regulator_is_enabled(info->sram_reg))
+               regulator_disable(info->sram_reg);
+
+       if (!IS_ERR(info->proc_reg))
+               regulator_put(info->proc_reg);
+       if (!IS_ERR(info->sram_reg))
+               regulator_put(info->sram_reg);
+       if (!IS_ERR(info->cpu_clk))
+               clk_put(info->cpu_clk);
+       if (!IS_ERR(info->inter_clk))
+               clk_put(info->inter_clk);
 
        return ret;
 }
 
 static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
 {
-       if (!IS_ERR(info->proc_reg))
+       if (!IS_ERR(info->proc_reg)) {
+               regulator_disable(info->proc_reg);
                regulator_put(info->proc_reg);
-       if (!IS_ERR(info->sram_reg))
+       }
+       if (!IS_ERR(info->sram_reg)) {
+               regulator_disable(info->sram_reg);
                regulator_put(info->sram_reg);
-       if (!IS_ERR(info->cpu_clk))
+       }
+       if (!IS_ERR(info->cpu_clk)) {
+               clk_disable_unprepare(info->cpu_clk);
                clk_put(info->cpu_clk);
-       if (!IS_ERR(info->inter_clk))
+       }
+       if (!IS_ERR(info->inter_clk)) {
+               clk_disable_unprepare(info->inter_clk);
                clk_put(info->inter_clk);
+       }
 
        dev_pm_opp_of_cpumask_remove_table(&info->cpus);
+       dev_pm_opp_unregister_notifier(info->cpu_dev, &info->opp_nb);
 }
 
 static int mtk_cpufreq_init(struct cpufreq_policy *policy)
@@ -432,14 +578,15 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
        info = mtk_cpu_dvfs_info_lookup(policy->cpu);
        if (!info) {
                pr_err("dvfs info for cpu%d is not initialized.\n",
-                      policy->cpu);
+                       policy->cpu);
                return -EINVAL;
        }
 
        ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
        if (ret) {
-               pr_err("failed to init cpufreq table for cpu%d: %d\n",
-                      policy->cpu, ret);
+               dev_err(info->cpu_dev,
+                       "failed to init cpufreq table for cpu%d: %d\n",
+                       policy->cpu, ret);
                return ret;
        }
 
@@ -476,9 +623,17 @@ static struct cpufreq_driver mtk_cpufreq_driver = {
 
 static int mtk_cpufreq_probe(struct platform_device *pdev)
 {
+       const struct mtk_cpufreq_platform_data *data;
        struct mtk_cpu_dvfs_info *info, *tmp;
        int cpu, ret;
 
+       data = dev_get_platdata(&pdev->dev);
+       if (!data) {
+               dev_err(&pdev->dev,
+                       "failed to get mtk cpufreq platform data\n");
+               return -ENODEV;
+       }
+
        for_each_possible_cpu(cpu) {
                info = mtk_cpu_dvfs_info_lookup(cpu);
                if (info)
@@ -490,6 +645,7 @@ static int mtk_cpufreq_probe(struct platform_device *pdev)
                        goto release_dvfs_info_list;
                }
 
+               info->soc_data = data;
                ret = mtk_cpu_dvfs_info_init(info, cpu);
                if (ret) {
                        dev_err(&pdev->dev,
@@ -525,20 +681,47 @@ static struct platform_driver mtk_cpufreq_platdrv = {
        .probe          = mtk_cpufreq_probe,
 };
 
+static const struct mtk_cpufreq_platform_data mt2701_platform_data = {
+       .min_volt_shift = 100000,
+       .max_volt_shift = 200000,
+       .proc_max_volt = 1150000,
+       .sram_min_volt = 0,
+       .sram_max_volt = 1150000,
+       .ccifreq_supported = false,
+};
+
+static const struct mtk_cpufreq_platform_data mt8183_platform_data = {
+       .min_volt_shift = 100000,
+       .max_volt_shift = 200000,
+       .proc_max_volt = 1150000,
+       .sram_min_volt = 0,
+       .sram_max_volt = 1150000,
+       .ccifreq_supported = true,
+};
+
+static const struct mtk_cpufreq_platform_data mt8186_platform_data = {
+       .min_volt_shift = 100000,
+       .max_volt_shift = 250000,
+       .proc_max_volt = 1118750,
+       .sram_min_volt = 850000,
+       .sram_max_volt = 1118750,
+       .ccifreq_supported = true,
+};
+
 /* List of machines supported by this driver */
 static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
-       { .compatible = "mediatek,mt2701", },
-       { .compatible = "mediatek,mt2712", },
-       { .compatible = "mediatek,mt7622", },
-       { .compatible = "mediatek,mt7623", },
-       { .compatible = "mediatek,mt8167", },
-       { .compatible = "mediatek,mt817x", },
-       { .compatible = "mediatek,mt8173", },
-       { .compatible = "mediatek,mt8176", },
-       { .compatible = "mediatek,mt8183", },
-       { .compatible = "mediatek,mt8365", },
-       { .compatible = "mediatek,mt8516", },
-
+       { .compatible = "mediatek,mt2701", .data = &mt2701_platform_data },
+       { .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
+       { .compatible = "mediatek,mt7622", .data = &mt2701_platform_data },
+       { .compatible = "mediatek,mt7623", .data = &mt2701_platform_data },
+       { .compatible = "mediatek,mt8167", .data = &mt2701_platform_data },
+       { .compatible = "mediatek,mt817x", .data = &mt2701_platform_data },
+       { .compatible = "mediatek,mt8173", .data = &mt2701_platform_data },
+       { .compatible = "mediatek,mt8176", .data = &mt2701_platform_data },
+       { .compatible = "mediatek,mt8183", .data = &mt8183_platform_data },
+       { .compatible = "mediatek,mt8186", .data = &mt8186_platform_data },
+       { .compatible = "mediatek,mt8365", .data = &mt2701_platform_data },
+       { .compatible = "mediatek,mt8516", .data = &mt2701_platform_data },
        { }
 };
 MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines);
@@ -547,7 +730,7 @@ static int __init mtk_cpufreq_driver_init(void)
 {
        struct device_node *np;
        const struct of_device_id *match;
-       struct platform_device *pdev;
+       const struct mtk_cpufreq_platform_data *data;
        int err;
 
        np = of_find_node_by_path("/");
@@ -560,6 +743,7 @@ static int __init mtk_cpufreq_driver_init(void)
                pr_debug("Machine is not compatible with mtk-cpufreq\n");
                return -ENODEV;
        }
+       data = match->data;
 
        err = platform_driver_register(&mtk_cpufreq_platdrv);
        if (err)
@@ -571,16 +755,24 @@ static int __init mtk_cpufreq_driver_init(void)
         * and the device registration codes are put here to handle defer
         * probing.
         */
-       pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0);
-       if (IS_ERR(pdev)) {
+       cpufreq_pdev = platform_device_register_data(NULL, "mtk-cpufreq", -1,
+                                                    data, sizeof(*data));
+       if (IS_ERR(cpufreq_pdev)) {
                pr_err("failed to register mtk-cpufreq platform device\n");
                platform_driver_unregister(&mtk_cpufreq_platdrv);
-               return PTR_ERR(pdev);
+               return PTR_ERR(cpufreq_pdev);
        }
 
        return 0;
 }
-device_initcall(mtk_cpufreq_driver_init);
+module_init(mtk_cpufreq_driver_init)
+
+static void __exit mtk_cpufreq_driver_exit(void)
+{
+       platform_device_unregister(cpufreq_pdev);
+       platform_driver_unregister(&mtk_cpufreq_platdrv);
+}
+module_exit(mtk_cpufreq_driver_exit)
 
 MODULE_DESCRIPTION("MediaTek CPUFreq driver");
 MODULE_AUTHOR("Pi-Cheng Chen <pi-cheng.chen@linaro.org>");
index f0b6f52..ed1ae06 100644 (file)
 #include <linux/cpufreq.h>
 #include <linux/err.h>
 #include <linux/regulator/consumer.h>
+#include <linux/soc/pxa/cpu.h>
 #include <linux/io.h>
 
-#include <mach/pxa2xx-regs.h>
-#include <mach/smemc.h>
-
 #ifdef DEBUG
 static unsigned int freq_debug;
 module_param(freq_debug, uint, 0);
@@ -106,8 +104,6 @@ static struct pxa_freqs pxa27x_freqs[] = {
 static struct cpufreq_frequency_table
        pxa27x_freq_table[NUM_PXA27x_FREQS+1];
 
-extern unsigned get_clk_frequency_khz(int info);
-
 #ifdef CONFIG_REGULATOR
 
 static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
index 32f993c..4afa48d 100644 (file)
@@ -8,12 +8,11 @@
 #include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/cpufreq.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/clk/pxa.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 
-#include <mach/generic.h>
-#include <mach/pxa3xx-regs.h>
-
 #define HSS_104M       (0)
 #define HSS_156M       (1)
 #define HSS_208M       (2)
 #define DMCFS_26M      (0)
 #define DMCFS_260M     (3)
 
+#define ACCR_XPDIS             (1 << 31)       /* Core PLL Output Disable */
+#define ACCR_SPDIS             (1 << 30)       /* System PLL Output Disable */
+#define ACCR_D0CS              (1 << 26)       /* D0 Mode Clock Select */
+#define ACCR_PCCE              (1 << 11)       /* Power Mode Change Clock Enable */
+#define ACCR_DDR_D0CS          (1 << 7)        /* DDR SDRAM clock frequency in D0CS (PXA31x only) */
+
+#define ACCR_SMCFS_MASK                (0x7 << 23)     /* Static Memory Controller Frequency Select */
+#define ACCR_SFLFS_MASK                (0x3 << 18)     /* Frequency Select for Internal Memory Controller */
+#define ACCR_XSPCLK_MASK       (0x3 << 16)     /* Core Frequency during Frequency Change */
+#define ACCR_HSS_MASK          (0x3 << 14)     /* System Bus-Clock Frequency Select */
+#define ACCR_DMCFS_MASK                (0x3 << 12)     /* Dynamic Memory Controller Clock Frequency Select */
+#define ACCR_XN_MASK           (0x7 << 8)      /* Core PLL Turbo-Mode-to-Run-Mode Ratio */
+#define ACCR_XL_MASK           (0x1f)          /* Core PLL Run-Mode-to-Oscillator Ratio */
+
+#define ACCR_SMCFS(x)          (((x) & 0x7) << 23)
+#define ACCR_SFLFS(x)          (((x) & 0x3) << 18)
+#define ACCR_XSPCLK(x)         (((x) & 0x3) << 16)
+#define ACCR_HSS(x)            (((x) & 0x3) << 14)
+#define ACCR_DMCFS(x)          (((x) & 0x3) << 12)
+#define ACCR_XN(x)             (((x) & 0x7) << 8)
+#define ACCR_XL(x)             ((x) & 0x1f)
+
 struct pxa3xx_freq_info {
        unsigned int cpufreq_mhz;
        unsigned int core_xl : 5;
@@ -111,41 +132,29 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
 
 static void __update_core_freq(struct pxa3xx_freq_info *info)
 {
-       uint32_t mask = ACCR_XN_MASK | ACCR_XL_MASK;
-       uint32_t accr = ACCR;
-       uint32_t xclkcfg;
-
-       accr &= ~(ACCR_XN_MASK | ACCR_XL_MASK | ACCR_XSPCLK_MASK);
-       accr |= ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl);
+       u32 mask, disable, enable, xclkcfg;
 
+       mask    = ACCR_XN_MASK | ACCR_XL_MASK;
+       disable = mask | ACCR_XSPCLK_MASK;
+       enable  = ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl);
        /* No clock until core PLL is re-locked */
-       accr |= ACCR_XSPCLK(XSPCLK_NONE);
-
+       enable |= ACCR_XSPCLK(XSPCLK_NONE);
        xclkcfg = (info->core_xn == 2) ? 0x3 : 0x2;     /* turbo bit */
 
-       ACCR = accr;
-       __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg));
-
-       while ((ACSR & mask) != (accr & mask))
-               cpu_relax();
+       pxa3xx_clk_update_accr(disable, enable, xclkcfg, mask);
 }
 
 static void __update_bus_freq(struct pxa3xx_freq_info *info)
 {
-       uint32_t mask;
-       uint32_t accr = ACCR;
-
-       mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK |
-               ACCR_DMCFS_MASK;
-
-       accr &= ~mask;
-       accr |= ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) |
-               ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs);
+       u32 mask, disable, enable;
 
-       ACCR = accr;
+       mask    = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK |
+                 ACCR_DMCFS_MASK;
+       disable = mask;
+       enable  = ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) |
+                 ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs);
 
-       while ((ACSR & mask) != (accr & mask))
-               cpu_relax();
+       pxa3xx_clk_update_accr(disable, enable, 0, mask);
 }
 
 static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
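The ACCR helpers above follow the usual read-modify-write pattern for packed register fields; the driver now delegates the actual register access to pxa3xx_clk_update_accr() in the PXA clock driver, which presumably performs the equivalent clear-and-set (plus the XCLKCFG write and ACSR polling previously done here). A standalone sketch of the field packing itself, with illustrative field values:

/* Standalone sketch of the disable/enable field update passed to
 * pxa3xx_clk_update_accr(): clear the field masks, then OR in new values.
 */
#include <stdio.h>

#define ACCR_HSS_MASK           (0x3 << 14)
#define ACCR_HSS(x)             (((x) & 0x3) << 14)
#define ACCR_SMCFS_MASK         (0x7 << 23)
#define ACCR_SMCFS(x)           (((x) & 0x7) << 23)

int main(void)
{
        unsigned int accr = 0xffffffff;                 /* pretend current ACCR */
        unsigned int disable = ACCR_SMCFS_MASK | ACCR_HSS_MASK;
        unsigned int enable = ACCR_SMCFS(5) | ACCR_HSS(2);      /* illustrative */

        accr = (accr & ~disable) | enable;              /* read-modify-write */
        printf("ACCR = 0x%08x\n", accr);
        return 0;
}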
index ac381db..2a6a987 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved
+ * Copyright (c) 2020 - 2022, NVIDIA CORPORATION. All rights reserved
  */
 
 #include <linux/cpu.h>
 #define CPUFREQ_TBL_STEP_HZ     (50 * KHZ * KHZ)
 #define MAX_CNT                 ~0U
 
+#define NDIV_MASK              0x1FF
+
+#define CORE_OFFSET(cpu)                       (cpu * 8)
+#define CMU_CLKS_BASE                          0x2000
+#define SCRATCH_FREQ_CORE_REG(data, cpu)       (data->regs + CMU_CLKS_BASE + CORE_OFFSET(cpu))
+
+#define MMCRAB_CLUSTER_BASE(cl)                        (0x30000 + (cl * 0x10000))
+#define CLUSTER_ACTMON_BASE(data, cl) \
+                       (data->regs + (MMCRAB_CLUSTER_BASE(cl) + data->soc->actmon_cntr_base))
+#define CORE_ACTMON_CNTR_REG(data, cl, cpu)    (CLUSTER_ACTMON_BASE(data, cl) + CORE_OFFSET(cpu))
+
 /* cpufreq transition latency */
 #define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
 
@@ -35,12 +46,6 @@ enum cluster {
        MAX_CLUSTERS,
 };
 
-struct tegra194_cpufreq_data {
-       void __iomem *regs;
-       size_t num_clusters;
-       struct cpufreq_frequency_table **tables;
-};
-
 struct tegra_cpu_ctr {
        u32 cpu;
        u32 coreclk_cnt, last_coreclk_cnt;
@@ -52,13 +57,127 @@ struct read_counters_work {
        struct tegra_cpu_ctr c;
 };
 
+struct tegra_cpufreq_ops {
+       void (*read_counters)(struct tegra_cpu_ctr *c);
+       void (*set_cpu_ndiv)(struct cpufreq_policy *policy, u64 ndiv);
+       void (*get_cpu_cluster_id)(u32 cpu, u32 *cpuid, u32 *clusterid);
+       int (*get_cpu_ndiv)(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv);
+};
+
+struct tegra_cpufreq_soc {
+       struct tegra_cpufreq_ops *ops;
+       int maxcpus_per_cluster;
+       phys_addr_t actmon_cntr_base;
+};
+
+struct tegra194_cpufreq_data {
+       void __iomem *regs;
+       size_t num_clusters;
+       struct cpufreq_frequency_table **tables;
+       const struct tegra_cpufreq_soc *soc;
+};
+
 static struct workqueue_struct *read_counters_wq;
 
-static void get_cpu_cluster(void *cluster)
+static void tegra_get_cpu_mpidr(void *mpidr)
+{
+       *((u64 *)mpidr) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+}
+
+static void tegra234_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
+{
+       u64 mpidr;
+
+       smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true);
+
+       if (cpuid)
+               *cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+       if (clusterid)
+               *clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+}
+
+static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
 {
-       u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+       struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+       void __iomem *freq_core_reg;
+       u64 mpidr_id;
+
+       /* use physical id to get address of per core frequency register */
+       mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
+       freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
+
+       *ndiv = readl(freq_core_reg) & NDIV_MASK;
+
+       return 0;
+}
 
-       *((uint32_t *)cluster) = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+static void tegra234_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
+{
+       struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+       void __iomem *freq_core_reg;
+       u32 cpu, cpuid, clusterid;
+       u64 mpidr_id;
+
+       for_each_cpu_and(cpu, policy->cpus, cpu_online_mask) {
+               data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
+
+               /* use physical id to get address of per core frequency register */
+               mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
+               freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
+
+               writel(ndiv, freq_core_reg);
+       }
+}
+
+/*
+ * This register provides access to two counter values with a single
+ * 64-bit read. The counter values are used to determine the average
+ * actual frequency a core has run at over a period of time.
+ *     [63:32] PLLP counter: Counts at fixed frequency (408 MHz)
+ *     [31:0] Core clock counter: Counts on every core clock cycle
+ */
+static void tegra234_read_counters(struct tegra_cpu_ctr *c)
+{
+       struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+       void __iomem *actmon_reg;
+       u32 cpuid, clusterid;
+       u64 val;
+
+       data->soc->ops->get_cpu_cluster_id(c->cpu, &cpuid, &clusterid);
+       actmon_reg = CORE_ACTMON_CNTR_REG(data, clusterid, cpuid);
+
+       val = readq(actmon_reg);
+       c->last_refclk_cnt = upper_32_bits(val);
+       c->last_coreclk_cnt = lower_32_bits(val);
+       udelay(US_DELAY);
+       val = readq(actmon_reg);
+       c->refclk_cnt = upper_32_bits(val);
+       c->coreclk_cnt = lower_32_bits(val);
+}
+
+static struct tegra_cpufreq_ops tegra234_cpufreq_ops = {
+       .read_counters = tegra234_read_counters,
+       .get_cpu_cluster_id = tegra234_get_cpu_cluster_id,
+       .get_cpu_ndiv = tegra234_get_cpu_ndiv,
+       .set_cpu_ndiv = tegra234_set_cpu_ndiv,
+};
+
+const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
+       .ops = &tegra234_cpufreq_ops,
+       .actmon_cntr_base = 0x9000,
+       .maxcpus_per_cluster = 4,
+};
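The counter-pair comment above implies the average core frequency over the sampling window: divide the core-clock delta by the reference-clock delta and scale by the fixed 408 MHz reference, which is essentially how the driver reconstructs the rate from these counters. A standalone arithmetic sketch with made-up samples:

/* Standalone sketch: average core clock from the two counter deltas. */
#include <stdint.h>
#include <stdio.h>

#define REF_CLK_MHZ     408U    /* PLLP reference from the comment above */

int main(void)
{
        uint32_t last_refclk = 1000000, refclk = 1040800;       /* made-up samples */
        uint32_t last_coreclk = 5000000, coreclk = 5204000;
        uint32_t delta_ref = refclk - last_refclk;      /* u32 wraparound is fine */
        uint32_t delta_core = coreclk - last_coreclk;
        unsigned int rate_mhz = (uint64_t)delta_core * REF_CLK_MHZ / delta_ref;

        printf("estimated core clock: %u MHz\n", rate_mhz);     /* ~2040 MHz */
        return 0;
}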
+
+static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
+{
+       u64 mpidr;
+
+       smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true);
+
+       if (cpuid)
+               *cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+       if (clusterid)
+               *clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 }
 
 /*
@@ -85,11 +204,24 @@ static inline u32 map_ndiv_to_freq(struct mrq_cpu_ndiv_limits_response
        return nltbl->ref_clk_hz / KHZ * ndiv / (nltbl->pdiv * nltbl->mdiv);
 }
 
+static void tegra194_read_counters(struct tegra_cpu_ctr *c)
+{
+       u64 val;
+
+       val = read_freq_feedback();
+       c->last_refclk_cnt = lower_32_bits(val);
+       c->last_coreclk_cnt = upper_32_bits(val);
+       udelay(US_DELAY);
+       val = read_freq_feedback();
+       c->refclk_cnt = lower_32_bits(val);
+       c->coreclk_cnt = upper_32_bits(val);
+}
+
 static void tegra_read_counters(struct work_struct *work)
 {
+       struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
        struct read_counters_work *read_counters_work;
        struct tegra_cpu_ctr *c;
-       u64 val;
 
        /*
         * ref_clk_counter(32 bit counter) runs on constant clk,
@@ -107,13 +239,7 @@ static void tegra_read_counters(struct work_struct *work)
                                          work);
        c = &read_counters_work->c;
 
-       val = read_freq_feedback();
-       c->last_refclk_cnt = lower_32_bits(val);
-       c->last_coreclk_cnt = upper_32_bits(val);
-       udelay(US_DELAY);
-       val = read_freq_feedback();
-       c->refclk_cnt = lower_32_bits(val);
-       c->coreclk_cnt = upper_32_bits(val);
+       data->soc->ops->read_counters(c);
 }
 
 /*
@@ -177,7 +303,7 @@ static unsigned int tegra194_calculate_speed(u32 cpu)
        return (rate_mhz * KHZ); /* in KHz */
 }
 
-static void get_cpu_ndiv(void *ndiv)
+static void tegra194_get_cpu_ndiv_sysreg(void *ndiv)
 {
        u64 ndiv_val;
 
@@ -186,30 +312,43 @@ static void get_cpu_ndiv(void *ndiv)
        *(u64 *)ndiv = ndiv_val;
 }
 
-static void set_cpu_ndiv(void *data)
+static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
+{
+       int ret;
+
+       ret = smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true);
+
+       return ret;
+}
+
+static void tegra194_set_cpu_ndiv_sysreg(void *data)
 {
-       struct cpufreq_frequency_table *tbl = data;
-       u64 ndiv_val = (u64)tbl->driver_data;
+       u64 ndiv_val = *(u64 *)data;
 
        asm volatile("msr s3_0_c15_c0_4, %0" : : "r" (ndiv_val));
 }
 
+static void tegra194_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
+{
+       on_each_cpu_mask(policy->cpus, tegra194_set_cpu_ndiv_sysreg, &ndiv, true);
+}
+
 static unsigned int tegra194_get_speed(u32 cpu)
 {
        struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
        struct cpufreq_frequency_table *pos;
+       u32 cpuid, clusterid;
        unsigned int rate;
        u64 ndiv;
        int ret;
-       u32 cl;
 
-       smp_call_function_single(cpu, get_cpu_cluster, &cl, true);
+       data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
 
        /* reconstruct actual cpu freq using counters */
        rate = tegra194_calculate_speed(cpu);
 
        /* get last written ndiv value */
-       ret = smp_call_function_single(cpu, get_cpu_ndiv, &ndiv, true);
+       ret = data->soc->ops->get_cpu_ndiv(cpu, cpuid, clusterid, &ndiv);
        if (WARN_ON_ONCE(ret))
                return rate;
 
@@ -219,7 +358,7 @@ static unsigned int tegra194_get_speed(u32 cpu)
         * to the last written ndiv value from freq_table. This is
         * done to return consistent value.
         */
-       cpufreq_for_each_valid_entry(pos, data->tables[cl]) {
+       cpufreq_for_each_valid_entry(pos, data->tables[clusterid]) {
                if (pos->driver_data != ndiv)
                        continue;
 
@@ -237,19 +376,22 @@ static unsigned int tegra194_get_speed(u32 cpu)
 static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
 {
        struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
-       u32 cpu;
-       u32 cl;
+       int maxcpus_per_cluster = data->soc->maxcpus_per_cluster;
+       u32 start_cpu, cpu;
+       u32 clusterid;
 
-       smp_call_function_single(policy->cpu, get_cpu_cluster, &cl, true);
+       data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
 
-       if (cl >= data->num_clusters || !data->tables[cl])
+       if (clusterid >= data->num_clusters || !data->tables[clusterid])
                return -EINVAL;
 
+       start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
        /* set same policy for all cpus in a cluster */
-       for (cpu = (cl * 2); cpu < ((cl + 1) * 2); cpu++)
-               cpumask_set_cpu(cpu, policy->cpus);
-
-       policy->freq_table = data->tables[cl];
+       for (cpu = start_cpu; cpu < (start_cpu + maxcpus_per_cluster); cpu++) {
+               if (cpu_possible(cpu))
+                       cpumask_set_cpu(cpu, policy->cpus);
+       }
+       policy->freq_table = data->tables[clusterid];
        policy->cpuinfo.transition_latency = TEGRA_CPUFREQ_TRANSITION_LATENCY;
 
        return 0;
@@ -259,13 +401,14 @@ static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
                                       unsigned int index)
 {
        struct cpufreq_frequency_table *tbl = policy->freq_table + index;
+       struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
 
        /*
         * Each core writes frequency in per core register. Then both cores
         * in a cluster run at same frequency which is the maximum frequency
         * request out of the values requested by both cores in that cluster.
         */
-       on_each_cpu_mask(policy->cpus, set_cpu_ndiv, tbl, true);
+       data->soc->ops->set_cpu_ndiv(policy, (u64)tbl->driver_data);
 
        return 0;
 }
@@ -280,6 +423,18 @@ static struct cpufreq_driver tegra194_cpufreq_driver = {
        .attr = cpufreq_generic_attr,
 };
 
+static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
+       .read_counters = tegra194_read_counters,
+       .get_cpu_cluster_id = tegra194_get_cpu_cluster_id,
+       .get_cpu_ndiv = tegra194_get_cpu_ndiv,
+       .set_cpu_ndiv = tegra194_set_cpu_ndiv,
+};
+
+const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
+       .ops = &tegra194_cpufreq_ops,
+       .maxcpus_per_cluster = 2,
+};
+
 static void tegra194_cpufreq_free_resources(void)
 {
        destroy_workqueue(read_counters_wq);
@@ -359,6 +514,7 @@ init_freq_table(struct platform_device *pdev, struct tegra_bpmp *bpmp,
 
 static int tegra194_cpufreq_probe(struct platform_device *pdev)
 {
+       const struct tegra_cpufreq_soc *soc;
        struct tegra194_cpufreq_data *data;
        struct tegra_bpmp *bpmp;
        int err, i;
@@ -367,12 +523,28 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
        if (!data)
                return -ENOMEM;
 
+       soc = of_device_get_match_data(&pdev->dev);
+
+       if (soc->ops && soc->maxcpus_per_cluster) {
+               data->soc = soc;
+       } else {
+               dev_err(&pdev->dev, "soc data missing\n");
+               return -EINVAL;
+       }
+
        data->num_clusters = MAX_CLUSTERS;
        data->tables = devm_kcalloc(&pdev->dev, data->num_clusters,
                                    sizeof(*data->tables), GFP_KERNEL);
        if (!data->tables)
                return -ENOMEM;
 
+       if (soc->actmon_cntr_base) {
+               /* MMIO registers are used for frequency request and reconstruction */
+               data->regs = devm_platform_ioremap_resource(pdev, 0);
+               if (IS_ERR(data->regs))
+                       return PTR_ERR(data->regs);
+       }
+
        platform_set_drvdata(pdev, data);
 
        bpmp = tegra_bpmp_get(&pdev->dev);
@@ -416,10 +588,10 @@ static int tegra194_cpufreq_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id tegra194_cpufreq_of_match[] = {
-       { .compatible = "nvidia,tegra194-ccplex", },
+       { .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc },
+       { .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc },
        { /* sentinel */ }
 };
-MODULE_DEVICE_TABLE(of, tegra194_cpufreq_of_match);
 
 static struct platform_driver tegra194_ccplex_driver = {
        .driver = {
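
The hunks above convert the Tegra194 cpufreq driver from hard-coded per-cluster logic to a per-SoC ops table (tegra_cpufreq_ops / tegra_cpufreq_soc) selected through the OF match data, so the Tegra234 entry can plug in its own counter, cluster-id and ndiv accessors. A minimal sketch of that match-data pattern, with purely illustrative names (example_soc_ops, example_soc_data and example_probe are not part of the patch):

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_soc_ops {
	int (*read_rate)(unsigned int cpu, unsigned long *rate);
};

struct example_soc_data {
	const struct example_soc_ops *ops;
	unsigned int maxcpus_per_cluster;
};

static int example_probe(struct platform_device *pdev)
{
	/* .data from the matching of_device_id entry, as in the patch above */
	const struct example_soc_data *soc = of_device_get_match_data(&pdev->dev);

	if (!soc || !soc->ops || !soc->maxcpus_per_cluster)
		return -EINVAL;		/* refuse to bind without valid SoC data */

	/* generic code only ever calls soc->ops->..., never SoC-specific helpers */
	return 0;
}

The same shape is what lets the probe path above gate the MMIO mapping on soc->actmon_cntr_base: only SoC data that declares a counter base gets the ioremap.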
index f3ec942..2a60d05 100644 (file)
@@ -90,9 +90,12 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
        }
 
        akcipher_req = vc_akcipher_req->akcipher_req;
-       if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY)
+       if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY) {
+               /* actual length may be less than the dst buffer */
+               akcipher_req->dst_len = len - sizeof(vc_req->status);
                sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
                                    vc_akcipher_req->dst_buf, akcipher_req->dst_len);
+       }
        virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
 }
 
@@ -103,54 +106,56 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
        struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        uint8_t *pkey;
-       unsigned int inlen;
        int err;
        unsigned int num_out = 0, num_in = 0;
+       struct virtio_crypto_op_ctrl_req *ctrl;
+       struct virtio_crypto_session_input *input;
+       struct virtio_crypto_ctrl_request *vc_ctrl_req;
 
        pkey = kmemdup(key, keylen, GFP_ATOMIC);
        if (!pkey)
                return -ENOMEM;
 
-       spin_lock(&vcrypto->ctrl_lock);
-       memcpy(&vcrypto->ctrl.header, header, sizeof(vcrypto->ctrl.header));
-       memcpy(&vcrypto->ctrl.u, para, sizeof(vcrypto->ctrl.u));
-       vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+       vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+       if (!vc_ctrl_req) {
+               err = -ENOMEM;
+               goto out;
+       }
 
-       sg_init_one(&outhdr_sg, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+       ctrl = &vc_ctrl_req->ctrl;
+       memcpy(&ctrl->header, header, sizeof(ctrl->header));
+       memcpy(&ctrl->u, para, sizeof(ctrl->u));
+       input = &vc_ctrl_req->input;
+       input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+
+       sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
        sgs[num_out++] = &outhdr_sg;
 
        sg_init_one(&key_sg, pkey, keylen);
        sgs[num_out++] = &key_sg;
 
-       sg_init_one(&inhdr_sg, &vcrypto->input, sizeof(vcrypto->input));
+       sg_init_one(&inhdr_sg, input, sizeof(*input));
        sgs[num_out + num_in++] = &inhdr_sg;
 
-       err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in, vcrypto, GFP_ATOMIC);
+       err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
        if (err < 0)
                goto out;
 
-       virtqueue_kick(vcrypto->ctrl_vq);
-       while (!virtqueue_get_buf(vcrypto->ctrl_vq, &inlen) &&
-              !virtqueue_is_broken(vcrypto->ctrl_vq))
-               cpu_relax();
-
-       if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
+       if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
+               pr_err("virtio_crypto: Create session failed status: %u\n",
+                       le32_to_cpu(input->status));
                err = -EINVAL;
                goto out;
        }
 
-       ctx->session_id = le64_to_cpu(vcrypto->input.session_id);
+       ctx->session_id = le64_to_cpu(input->session_id);
        ctx->session_valid = true;
        err = 0;
 
 out:
-       spin_unlock(&vcrypto->ctrl_lock);
+       kfree(vc_ctrl_req);
        kfree_sensitive(pkey);
 
-       if (err < 0)
-               pr_err("virtio_crypto: Create session failed status: %u\n",
-                       le32_to_cpu(vcrypto->input.status));
-
        return err;
 }
 
@@ -159,37 +164,41 @@ static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akciphe
        struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
        struct virtio_crypto_destroy_session_req *destroy_session;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
-       unsigned int num_out = 0, num_in = 0, inlen;
+       unsigned int num_out = 0, num_in = 0;
        int err;
+       struct virtio_crypto_op_ctrl_req *ctrl;
+       struct virtio_crypto_inhdr *ctrl_status;
+       struct virtio_crypto_ctrl_request *vc_ctrl_req;
 
-       spin_lock(&vcrypto->ctrl_lock);
-       if (!ctx->session_valid) {
-               err = 0;
-               goto out;
-       }
-       vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
-       vcrypto->ctrl.header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
-       vcrypto->ctrl.header.queue_id = 0;
+       if (!ctx->session_valid)
+               return 0;
+
+       vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+       if (!vc_ctrl_req)
+               return -ENOMEM;
+
+       ctrl_status = &vc_ctrl_req->ctrl_status;
+       ctrl_status->status = VIRTIO_CRYPTO_ERR;
+       ctrl = &vc_ctrl_req->ctrl;
+       ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
+       ctrl->header.queue_id = 0;
 
-       destroy_session = &vcrypto->ctrl.u.destroy_session;
+       destroy_session = &ctrl->u.destroy_session;
        destroy_session->session_id = cpu_to_le64(ctx->session_id);
 
-       sg_init_one(&outhdr_sg, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+       sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
        sgs[num_out++] = &outhdr_sg;
 
-       sg_init_one(&inhdr_sg, &vcrypto->ctrl_status.status, sizeof(vcrypto->ctrl_status.status));
+       sg_init_one(&inhdr_sg, &ctrl_status->status, sizeof(ctrl_status->status));
        sgs[num_out + num_in++] = &inhdr_sg;
 
-       err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in, vcrypto, GFP_ATOMIC);
+       err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
        if (err < 0)
                goto out;
 
-       virtqueue_kick(vcrypto->ctrl_vq);
-       while (!virtqueue_get_buf(vcrypto->ctrl_vq, &inlen) &&
-              !virtqueue_is_broken(vcrypto->ctrl_vq))
-               cpu_relax();
-
-       if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
+       if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
+               pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
+                       ctrl_status->status, destroy_session->session_id);
                err = -EINVAL;
                goto out;
        }
@@ -198,11 +207,7 @@ static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akciphe
        ctx->session_valid = false;
 
 out:
-       spin_unlock(&vcrypto->ctrl_lock);
-       if (err < 0) {
-               pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
-                       vcrypto->ctrl_status.status, destroy_session->session_id);
-       }
+       kfree(vc_ctrl_req);
 
        return err;
 }
index e693d4e..59a4c02 100644 (file)
@@ -13,6 +13,7 @@
 #include <crypto/aead.h>
 #include <crypto/aes.h>
 #include <crypto/engine.h>
+#include <uapi/linux/virtio_crypto.h>
 
 
 /* Internal representation of a data virtqueue */
@@ -65,11 +66,6 @@ struct virtio_crypto {
        /* Maximum size of per request */
        u64 max_size;
 
-       /* Control VQ buffers: protected by the ctrl_lock */
-       struct virtio_crypto_op_ctrl_req ctrl;
-       struct virtio_crypto_session_input input;
-       struct virtio_crypto_inhdr ctrl_status;
-
        unsigned long status;
        atomic_t ref_count;
        struct list_head list;
@@ -85,6 +81,18 @@ struct virtio_crypto_sym_session_info {
        __u64 session_id;
 };
 
+/*
+ * Note: there are padding fields in the request; clear them to zero before
+ *       sending to the host to avoid divulging any information.
+ * E.g., virtio_crypto_ctrl_request::ctrl::u::destroy_session::padding[48]
+ */
+struct virtio_crypto_ctrl_request {
+       struct virtio_crypto_op_ctrl_req ctrl;
+       struct virtio_crypto_session_input input;
+       struct virtio_crypto_inhdr ctrl_status;
+       struct completion compl;
+};
+
 struct virtio_crypto_request;
 typedef void (*virtio_crypto_data_callback)
                (struct virtio_crypto_request *vc_req, int len);
@@ -134,5 +142,8 @@ int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto);
 void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto);
 int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
 void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);
+int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
+                                 unsigned int out_sgs, unsigned int in_sgs,
+                                 struct virtio_crypto_ctrl_request *vc_ctrl_req);
 
 #endif /* _VIRTIO_CRYPTO_COMMON_H */
index c6f482d..1198bd3 100644 (file)
@@ -22,6 +22,56 @@ virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
        }
 }
 
+static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
+{
+       complete(&vc_ctrl_req->compl);
+}
+
+static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
+{
+       struct virtio_crypto *vcrypto = vq->vdev->priv;
+       struct virtio_crypto_ctrl_request *vc_ctrl_req;
+       unsigned long flags;
+       unsigned int len;
+
+       spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
+       do {
+               virtqueue_disable_cb(vq);
+               while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
+                       spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+                       virtio_crypto_ctrlq_callback(vc_ctrl_req);
+                       spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
+               }
+               if (unlikely(virtqueue_is_broken(vq)))
+                       break;
+       } while (!virtqueue_enable_cb(vq));
+       spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+}
+
+int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
+               unsigned int out_sgs, unsigned int in_sgs,
+               struct virtio_crypto_ctrl_request *vc_ctrl_req)
+{
+       int err;
+       unsigned long flags;
+
+       init_completion(&vc_ctrl_req->compl);
+
+       spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
+       err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
+       if (err < 0) {
+               spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+               return err;
+       }
+
+       virtqueue_kick(vcrypto->ctrl_vq);
+       spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+
+       wait_for_completion(&vc_ctrl_req->compl);
+
+       return 0;
+}
+
 static void virtcrypto_dataq_callback(struct virtqueue *vq)
 {
        struct virtio_crypto *vcrypto = vq->vdev->priv;
@@ -73,7 +123,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
                goto err_names;
 
        /* Parameters for control virtqueue */
-       callbacks[total_vqs - 1] = NULL;
+       callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
        names[total_vqs - 1] = "controlq";
 
        /* Allocate/initialize parameters for data virtqueues */
@@ -94,7 +144,8 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
                spin_lock_init(&vi->data_vq[i].lock);
                vi->data_vq[i].vq = vqs[i];
                /* Initialize crypto engine */
-               vi->data_vq[i].engine = crypto_engine_alloc_init(dev, 1);
+               vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
+                                               virtqueue_get_vring_size(vqs[i]));
                if (!vi->data_vq[i].engine) {
                        ret = -ENOMEM;
                        goto err_engine;
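
With the per-request virtio_crypto_ctrl_request shown above, control-queue submissions stop busy-waiting on virtqueue_get_buf() under ctrl_lock: the spinlock now only covers virtqueue_add_sgs() and the kick, and the submitter sleeps on a completion that virtcrypto_ctrlq_callback() signals when the device returns the buffer. A condensed sketch of that submit-and-wait shape, assuming a request struct that embeds a struct completion (example_req, example_submit and enqueue are illustrative, not part of the driver):

#include <linux/completion.h>
#include <linux/spinlock.h>

struct example_req {
	struct completion compl;	/* signalled from the virtqueue callback */
};

/* Submitter: enqueue under the lock, then sleep until the callback fires. */
static int example_submit(spinlock_t *lock, struct example_req *req,
			  int (*enqueue)(struct example_req *req))
{
	unsigned long flags;
	int err;

	init_completion(&req->compl);

	spin_lock_irqsave(lock, flags);
	err = enqueue(req);		/* stands in for virtqueue_add_sgs() + kick */
	spin_unlock_irqrestore(lock, flags);
	if (err < 0)
		return err;

	wait_for_completion(&req->compl);	/* sleeps, no spinlock held */
	return 0;
}

/* Callback side: whoever dequeues the finished buffer completes the waiter. */
static void example_done(struct example_req *req)
{
	complete(&req->compl);
}

Because each request now lives in its own heap allocation instead of the shared vcrypto->ctrl/input/ctrl_status fields, concurrent sessions no longer serialize on a single control buffer.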
index a618c46..e553cca 100644 (file)
@@ -118,11 +118,14 @@ static int virtio_crypto_alg_skcipher_init_session(
                int encrypt)
 {
        struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
-       unsigned int tmp;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
        int err;
        unsigned int num_out = 0, num_in = 0;
+       struct virtio_crypto_op_ctrl_req *ctrl;
+       struct virtio_crypto_session_input *input;
+       struct virtio_crypto_sym_create_session_req *sym_create_session;
+       struct virtio_crypto_ctrl_request *vc_ctrl_req;
 
        /*
         * Avoid to do DMA from the stack, switch to using
@@ -133,26 +136,29 @@ static int virtio_crypto_alg_skcipher_init_session(
        if (!cipher_key)
                return -ENOMEM;
 
-       spin_lock(&vcrypto->ctrl_lock);
+       vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+       if (!vc_ctrl_req) {
+               err = -ENOMEM;
+               goto out;
+       }
+
        /* Pad ctrl header */
-       vcrypto->ctrl.header.opcode =
-               cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
-       vcrypto->ctrl.header.algo = cpu_to_le32(alg);
+       ctrl = &vc_ctrl_req->ctrl;
+       ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
+       ctrl->header.algo = cpu_to_le32(alg);
        /* Set the default dataqueue id to 0 */
-       vcrypto->ctrl.header.queue_id = 0;
+       ctrl->header.queue_id = 0;
 
-       vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+       input = &vc_ctrl_req->input;
+       input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
        /* Pad cipher's parameters */
-       vcrypto->ctrl.u.sym_create_session.op_type =
-               cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
-       vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
-               vcrypto->ctrl.header.algo;
-       vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
-               cpu_to_le32(keylen);
-       vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
-               cpu_to_le32(op);
-
-       sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+       sym_create_session = &ctrl->u.sym_create_session;
+       sym_create_session->op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+       sym_create_session->u.cipher.para.algo = ctrl->header.algo;
+       sym_create_session->u.cipher.para.keylen = cpu_to_le32(keylen);
+       sym_create_session->u.cipher.para.op = cpu_to_le32(op);
+
+       sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
        sgs[num_out++] = &outhdr;
 
        /* Set key */
@@ -160,45 +166,30 @@ static int virtio_crypto_alg_skcipher_init_session(
        sgs[num_out++] = &key_sg;
 
        /* Return status and session id back */
-       sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
+       sg_init_one(&inhdr, input, sizeof(*input));
        sgs[num_out + num_in++] = &inhdr;
 
-       err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
-                               num_in, vcrypto, GFP_ATOMIC);
-       if (err < 0) {
-               spin_unlock(&vcrypto->ctrl_lock);
-               kfree_sensitive(cipher_key);
-               return err;
-       }
-       virtqueue_kick(vcrypto->ctrl_vq);
+       err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
+       if (err < 0)
+               goto out;
 
-       /*
-        * Trapping into the hypervisor, so the request should be
-        * handled immediately.
-        */
-       while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
-              !virtqueue_is_broken(vcrypto->ctrl_vq))
-               cpu_relax();
-
-       if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
-               spin_unlock(&vcrypto->ctrl_lock);
+       if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
                pr_err("virtio_crypto: Create session failed status: %u\n",
-                       le32_to_cpu(vcrypto->input.status));
-               kfree_sensitive(cipher_key);
-               return -EINVAL;
+                       le32_to_cpu(input->status));
+               err = -EINVAL;
+               goto out;
        }
 
        if (encrypt)
-               ctx->enc_sess_info.session_id =
-                       le64_to_cpu(vcrypto->input.session_id);
+               ctx->enc_sess_info.session_id = le64_to_cpu(input->session_id);
        else
-               ctx->dec_sess_info.session_id =
-                       le64_to_cpu(vcrypto->input.session_id);
-
-       spin_unlock(&vcrypto->ctrl_lock);
+               ctx->dec_sess_info.session_id = le64_to_cpu(input->session_id);
 
+       err = 0;
+out:
+       kfree(vc_ctrl_req);
        kfree_sensitive(cipher_key);
-       return 0;
+       return err;
 }
 
 static int virtio_crypto_alg_skcipher_close_session(
@@ -206,60 +197,55 @@ static int virtio_crypto_alg_skcipher_close_session(
                int encrypt)
 {
        struct scatterlist outhdr, status_sg, *sgs[2];
-       unsigned int tmp;
        struct virtio_crypto_destroy_session_req *destroy_session;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int err;
        unsigned int num_out = 0, num_in = 0;
+       struct virtio_crypto_op_ctrl_req *ctrl;
+       struct virtio_crypto_inhdr *ctrl_status;
+       struct virtio_crypto_ctrl_request *vc_ctrl_req;
 
-       spin_lock(&vcrypto->ctrl_lock);
-       vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
+       vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+       if (!vc_ctrl_req)
+               return -ENOMEM;
+
+       ctrl_status = &vc_ctrl_req->ctrl_status;
+       ctrl_status->status = VIRTIO_CRYPTO_ERR;
        /* Pad ctrl header */
-       vcrypto->ctrl.header.opcode =
-               cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
+       ctrl = &vc_ctrl_req->ctrl;
+       ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
        /* Set the default virtqueue id to 0 */
-       vcrypto->ctrl.header.queue_id = 0;
+       ctrl->header.queue_id = 0;
 
-       destroy_session = &vcrypto->ctrl.u.destroy_session;
+       destroy_session = &ctrl->u.destroy_session;
 
        if (encrypt)
-               destroy_session->session_id =
-                       cpu_to_le64(ctx->enc_sess_info.session_id);
+               destroy_session->session_id = cpu_to_le64(ctx->enc_sess_info.session_id);
        else
-               destroy_session->session_id =
-                       cpu_to_le64(ctx->dec_sess_info.session_id);
+               destroy_session->session_id = cpu_to_le64(ctx->dec_sess_info.session_id);
 
-       sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+       sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
        sgs[num_out++] = &outhdr;
 
        /* Return status and session id back */
-       sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
-               sizeof(vcrypto->ctrl_status.status));
+       sg_init_one(&status_sg, &ctrl_status->status, sizeof(ctrl_status->status));
        sgs[num_out + num_in++] = &status_sg;
 
-       err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
-                       num_in, vcrypto, GFP_ATOMIC);
-       if (err < 0) {
-               spin_unlock(&vcrypto->ctrl_lock);
-               return err;
-       }
-       virtqueue_kick(vcrypto->ctrl_vq);
-
-       while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
-              !virtqueue_is_broken(vcrypto->ctrl_vq))
-               cpu_relax();
+       err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
+       if (err < 0)
+               goto out;
 
-       if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
-               spin_unlock(&vcrypto->ctrl_lock);
+       if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
                pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
-                       vcrypto->ctrl_status.status,
-                       destroy_session->session_id);
+                       ctrl_status->status, destroy_session->session_id);
 
                return -EINVAL;
        }
-       spin_unlock(&vcrypto->ctrl_lock);
 
-       return 0;
+       err = 0;
+out:
+       kfree(vc_ctrl_req);
+       return err;
 }
 
 static int virtio_crypto_alg_skcipher_init_sessions(
index 005a82f..0e5a566 100644 (file)
@@ -216,8 +216,11 @@ static int __init dio_init(void)
 
                /* Found a board, allocate it an entry in the list */
                dev = kzalloc(sizeof(struct dio_dev), GFP_KERNEL);
-               if (!dev)
+               if (!dev) {
+                       if (scode >= DIOII_SCBASE)
+                               iounmap(va);
                        return -ENOMEM;
+               }
 
                dev->bus = &dio_bus;
                dev->dev.parent = &dio_bus.dev;
index 0d42e49..dca7cec 100644 (file)
@@ -131,6 +131,7 @@ config EXTCON_PALMAS
 config EXTCON_PTN5150
        tristate "NXP PTN5150 CC LOGIC USB EXTCON support"
        depends on I2C && (GPIOLIB || COMPILE_TEST)
+       depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
        select REGMAP_I2C
        help
          Say Y here to enable support for USB peripheral and USB host
@@ -156,7 +157,7 @@ config EXTCON_RT8973A
          from abnormal high input voltage (up to 28V).
 
 config EXTCON_SM5502
-       tristate "Silicon Mitus SM5502/SM5504 EXTCON support"
+       tristate "Silicon Mitus SM5502/SM5504/SM5703 EXTCON support"
        depends on I2C
        select IRQ_DOMAIN
        select REGMAP_I2C
index 7c6d585..180be76 100644 (file)
@@ -394,8 +394,8 @@ static int axp288_extcon_probe(struct platform_device *pdev)
                if (adev) {
                        info->id_extcon = extcon_get_extcon_dev(acpi_dev_name(adev));
                        put_device(&adev->dev);
-                       if (!info->id_extcon)
-                               return -EPROBE_DEFER;
+                       if (IS_ERR(info->id_extcon))
+                               return PTR_ERR(info->id_extcon);
 
                        dev_info(dev, "controlling USB role\n");
                } else {
index fb527c2..ded1a85 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
 
 #define INT3496_GPIO_USB_ID    0
 #define INT3496_GPIO_VBUS_EN   1
@@ -30,7 +31,9 @@ struct int3496_data {
        struct gpio_desc *gpio_usb_id;
        struct gpio_desc *gpio_vbus_en;
        struct gpio_desc *gpio_usb_mux;
+       struct regulator *vbus_boost;
        int usb_id_irq;
+       bool vbus_boost_enabled;
 };
 
 static const unsigned int int3496_cable[] = {
@@ -53,6 +56,27 @@ static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = {
        { },
 };
 
+static void int3496_set_vbus_boost(struct int3496_data *data, bool enable)
+{
+       int ret;
+
+       if (IS_ERR_OR_NULL(data->vbus_boost))
+               return;
+
+       if (data->vbus_boost_enabled == enable)
+               return;
+
+       if (enable)
+               ret = regulator_enable(data->vbus_boost);
+       else
+               ret = regulator_disable(data->vbus_boost);
+
+       if (ret == 0)
+               data->vbus_boost_enabled = enable;
+       else
+               dev_err(data->dev, "Error updating Vbus boost regulator: %d\n", ret);
+}
+
 static void int3496_do_usb_id(struct work_struct *work)
 {
        struct int3496_data *data =
@@ -71,6 +95,8 @@ static void int3496_do_usb_id(struct work_struct *work)
 
        if (!IS_ERR(data->gpio_vbus_en))
                gpiod_direction_output(data->gpio_vbus_en, !id);
+       else
+               int3496_set_vbus_boost(data, !id);
 
        extcon_set_state_sync(data->edev, EXTCON_USB_HOST, !id);
 }
@@ -91,10 +117,12 @@ static int int3496_probe(struct platform_device *pdev)
        struct int3496_data *data;
        int ret;
 
-       ret = devm_acpi_dev_add_driver_gpios(dev, acpi_int3496_default_gpios);
-       if (ret) {
-               dev_err(dev, "can't add GPIO ACPI mapping\n");
-               return ret;
+       if (has_acpi_companion(dev)) {
+               ret = devm_acpi_dev_add_driver_gpios(dev, acpi_int3496_default_gpios);
+               if (ret) {
+                       dev_err(dev, "can't add GPIO ACPI mapping\n");
+                       return ret;
+               }
        }
 
        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
@@ -106,7 +134,8 @@ static int int3496_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN);
+       data->gpio_usb_id =
+               devm_gpiod_get(dev, "id", GPIOD_IN | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
        if (IS_ERR(data->gpio_usb_id)) {
                ret = PTR_ERR(data->gpio_usb_id);
                dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
@@ -120,12 +149,14 @@ static int int3496_probe(struct platform_device *pdev)
        }
 
        data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS);
-       if (IS_ERR(data->gpio_vbus_en))
-               dev_info(dev, "can't request VBUS EN GPIO\n");
+       if (IS_ERR(data->gpio_vbus_en)) {
+               dev_dbg(dev, "can't request VBUS EN GPIO\n");
+               data->vbus_boost = devm_regulator_get_optional(dev, "vbus");
+       }
 
        data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS);
        if (IS_ERR(data->gpio_usb_mux))
-               dev_info(dev, "can't request USB MUX GPIO\n");
+               dev_dbg(dev, "can't request USB MUX GPIO\n");
 
        /* register extcon device */
        data->edev = devm_extcon_dev_allocate(dev, int3496_cable);
@@ -164,12 +195,19 @@ static const struct acpi_device_id int3496_acpi_match[] = {
 };
 MODULE_DEVICE_TABLE(acpi, int3496_acpi_match);
 
+static const struct platform_device_id int3496_ids[] = {
+       { .name = "intel-int3496" },
+       {},
+};
+MODULE_DEVICE_TABLE(platform, int3496_ids);
+
 static struct platform_driver int3496_driver = {
        .driver = {
                .name = "intel-int3496",
                .acpi_match_table = int3496_acpi_match,
        },
        .probe = int3496_probe,
+       .id_table = int3496_ids,
 };
 
 module_platform_driver(int3496_driver);
index 5b9a3cf..017a071 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/extcon-provider.h>
 #include <linux/gpio/consumer.h>
+#include <linux/usb/role.h>
 
 /* PTN5150 registers */
 #define PTN5150_REG_DEVICE_ID                  0x01
@@ -52,6 +53,7 @@ struct ptn5150_info {
        int irq;
        struct work_struct irq_work;
        struct mutex mutex;
+       struct usb_role_switch *role_sw;
 };
 
 /* List of detectable cables */
@@ -70,6 +72,7 @@ static const struct regmap_config ptn5150_regmap_config = {
 static void ptn5150_check_state(struct ptn5150_info *info)
 {
        unsigned int port_status, reg_data, vbus;
+       enum usb_role usb_role = USB_ROLE_NONE;
        int ret;
 
        ret = regmap_read(info->regmap, PTN5150_REG_CC_STATUS, &reg_data);
@@ -85,6 +88,7 @@ static void ptn5150_check_state(struct ptn5150_info *info)
                extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false);
                gpiod_set_value_cansleep(info->vbus_gpiod, 0);
                extcon_set_state_sync(info->edev, EXTCON_USB, true);
+               usb_role = USB_ROLE_DEVICE;
                break;
        case PTN5150_UFP_ATTACHED:
                extcon_set_state_sync(info->edev, EXTCON_USB, false);
@@ -95,10 +99,18 @@ static void ptn5150_check_state(struct ptn5150_info *info)
                        gpiod_set_value_cansleep(info->vbus_gpiod, 1);
 
                extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true);
+               usb_role = USB_ROLE_HOST;
                break;
        default:
                break;
        }
+
+       if (usb_role) {
+               ret = usb_role_switch_set_role(info->role_sw, usb_role);
+               if (ret)
+                       dev_err(info->dev, "failed to set %s role: %d\n",
+                               usb_role_string(usb_role), ret);
+       }
 }
 
 static void ptn5150_irq_work(struct work_struct *work)
@@ -133,6 +145,13 @@ static void ptn5150_irq_work(struct work_struct *work)
                        extcon_set_state_sync(info->edev,
                                        EXTCON_USB, false);
                        gpiod_set_value_cansleep(info->vbus_gpiod, 0);
+
+                       ret = usb_role_switch_set_role(info->role_sw,
+                                                      USB_ROLE_NONE);
+                       if (ret)
+                               dev_err(info->dev,
+                                       "failed to set none role: %d\n",
+                                       ret);
                }
        }
 
@@ -194,6 +213,14 @@ static int ptn5150_init_dev_type(struct ptn5150_info *info)
        return 0;
 }
 
+static void ptn5150_work_sync_and_put(void *data)
+{
+       struct ptn5150_info *info = data;
+
+       cancel_work_sync(&info->irq_work);
+       usb_role_switch_put(info->role_sw);
+}
+
 static int ptn5150_i2c_probe(struct i2c_client *i2c)
 {
        struct device *dev = &i2c->dev;
@@ -284,6 +311,15 @@ static int ptn5150_i2c_probe(struct i2c_client *i2c)
        if (ret)
                return -EINVAL;
 
+       info->role_sw = usb_role_switch_get(info->dev);
+       if (IS_ERR(info->role_sw))
+               return dev_err_probe(info->dev, PTR_ERR(info->role_sw),
+                                    "failed to get role switch\n");
+
+       ret = devm_add_action_or_reset(dev, ptn5150_work_sync_and_put, info);
+       if (ret)
+               return ret;
+
        /*
         * Update current extcon state if for example OTG connection was there
         * before the probe
index 93da2d8..f706f52 100644 (file)
@@ -798,6 +798,7 @@ static const struct sm5502_type sm5504_data = {
 static const struct of_device_id sm5502_dt_match[] = {
        { .compatible = "siliconmitus,sm5502-muic", .data = &sm5502_data },
        { .compatible = "siliconmitus,sm5504-muic", .data = &sm5504_data },
+       { .compatible = "siliconmitus,sm5703-muic", .data = &sm5502_data },
        { },
 };
 MODULE_DEVICE_TABLE(of, sm5502_dt_match);
@@ -830,6 +831,7 @@ static SIMPLE_DEV_PM_OPS(sm5502_muic_pm_ops,
 static const struct i2c_device_id sm5502_i2c_id[] = {
        { "sm5502", (kernel_ulong_t)&sm5502_data },
        { "sm5504", (kernel_ulong_t)&sm5504_data },
+       { "sm5703-muic", (kernel_ulong_t)&sm5502_data },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, sm5502_i2c_id);
index f2b65d9..40d967a 100644 (file)
@@ -226,16 +226,6 @@ static int usb_extcon_suspend(struct device *dev)
                }
        }
 
-       /*
-        * We don't want to process any IRQs after this point
-        * as GPIOs used behind I2C subsystem might not be
-        * accessible until resume completes. So disable IRQ.
-        */
-       if (info->id_gpiod)
-               disable_irq(info->id_irq);
-       if (info->vbus_gpiod)
-               disable_irq(info->vbus_irq);
-
        if (!device_may_wakeup(dev))
                pinctrl_pm_select_sleep_state(dev);
 
@@ -267,11 +257,6 @@ static int usb_extcon_resume(struct device *dev)
                }
        }
 
-       if (info->id_gpiod)
-               enable_irq(info->id_irq);
-       if (info->vbus_gpiod)
-               enable_irq(info->vbus_irq);
-
        queue_delayed_work(system_power_efficient_wq,
                           &info->wq_detcable, 0);
 
index 5290cc2..fde1db6 100644 (file)
@@ -68,7 +68,7 @@ static int cros_ec_pd_command(struct cros_ec_extcon_info *info,
        struct cros_ec_command *msg;
        int ret;
 
-       msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+       msg = kzalloc(struct_size(msg, data, max(outsize, insize)), GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
 
index a09e704..d3a32b8 100644 (file)
@@ -399,6 +399,7 @@ static ssize_t cable_state_show(struct device *dev,
 /**
  * extcon_sync() - Synchronize the state for an external connector.
  * @edev:      the extcon device
+ * @id:                the unique id indicating an external connector
  *
  * Note that this function send a notification in order to synchronize
  * the state and property of an external connector.
@@ -736,6 +737,9 @@ EXPORT_SYMBOL_GPL(extcon_set_property);
 
 /**
  * extcon_set_property_sync() - Set property of an external connector with sync.
+ * @edev:      the extcon device
+ * @id:                the unique id indicating an external connector
+ * @prop:      the property id indicating an extcon property
  * @prop_val:  the pointer including the new value of extcon property
  *
  * Note that when setting the property value of external connector,
@@ -851,6 +855,8 @@ EXPORT_SYMBOL_GPL(extcon_set_property_capability);
  * @extcon_name:       the extcon name provided with extcon_dev_register()
  *
  * Return the pointer of extcon device if success or ERR_PTR(err) if fail.
+ * NOTE: This function returns -EPROBE_DEFER so it may only be called from
+ * probe() functions.
  */
 struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
 {
@@ -864,7 +870,7 @@ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
                if (!strcmp(sd->name, extcon_name))
                        goto out;
        }
-       sd = NULL;
+       sd = ERR_PTR(-EPROBE_DEFER);
 out:
        mutex_unlock(&extcon_dev_list_lock);
        return sd;
@@ -1218,19 +1224,14 @@ int extcon_dev_register(struct extcon_dev *edev)
                edev->dev.type = &edev->extcon_dev_type;
        }
 
-       ret = device_register(&edev->dev);
-       if (ret) {
-               put_device(&edev->dev);
-               goto err_dev;
-       }
-
        spin_lock_init(&edev->lock);
-       edev->nh = devm_kcalloc(&edev->dev, edev->max_supported,
-                               sizeof(*edev->nh), GFP_KERNEL);
-       if (!edev->nh) {
-               ret = -ENOMEM;
-               device_unregister(&edev->dev);
-               goto err_dev;
+       if (edev->max_supported) {
+               edev->nh = kcalloc(edev->max_supported, sizeof(*edev->nh),
+                               GFP_KERNEL);
+               if (!edev->nh) {
+                       ret = -ENOMEM;
+                       goto err_alloc_nh;
+               }
        }
 
        for (index = 0; index < edev->max_supported; index++)
@@ -1241,6 +1242,12 @@ int extcon_dev_register(struct extcon_dev *edev)
        dev_set_drvdata(&edev->dev, edev);
        edev->state = 0;
 
+       ret = device_register(&edev->dev);
+       if (ret) {
+               put_device(&edev->dev);
+               goto err_dev;
+       }
+
        mutex_lock(&extcon_dev_list_lock);
        list_add(&edev->entry, &extcon_dev_list);
        mutex_unlock(&extcon_dev_list_lock);
@@ -1249,6 +1256,9 @@ int extcon_dev_register(struct extcon_dev *edev)
 
 err_dev:
        if (edev->max_supported)
+               kfree(edev->nh);
+err_alloc_nh:
+       if (edev->max_supported)
                kfree(edev->extcon_dev_type.groups);
 err_alloc_groups:
        if (edev->max_supported && edev->mutually_exclusive) {
@@ -1308,6 +1318,7 @@ void extcon_dev_unregister(struct extcon_dev *edev)
        if (edev->max_supported) {
                kfree(edev->extcon_dev_type.groups);
                kfree(edev->cables);
+               kfree(edev->nh);
        }
 
        put_device(&edev->dev);
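
The change to extcon_get_extcon_dev() above means a missing extcon device is now reported as ERR_PTR(-EPROBE_DEFER) rather than NULL, so callers test with IS_ERR() and propagate the error from their probe path, as the axp288 hunk earlier in this series does. A hedged caller sketch (the extcon name and helper are placeholders, not taken from any driver):

#include <linux/err.h>
#include <linux/extcon.h>

static int example_get_edev(void)
{
	struct extcon_dev *edev;

	edev = extcon_get_extcon_dev("example-extcon");	/* placeholder name */
	if (IS_ERR(edev))
		return PTR_ERR(edev);	/* usually -EPROBE_DEFER; probe retries later */

	/* ... register extcon notifiers against edev ... */
	return 0;
}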
index 1be0e82..28fcddc 100644 (file)
@@ -32,8 +32,7 @@ obj-y                         += broadcom/
 obj-y                          += cirrus/
 obj-y                          += meson/
 obj-$(CONFIG_GOOGLE_FIRMWARE)  += google/
-obj-$(CONFIG_EFI)              += efi/
-obj-$(CONFIG_UEFI_CPER)                += efi/
+obj-y                          += efi/
 obj-y                          += imx/
 obj-y                          += psci/
 obj-y                          += smccc/
index 3a35377..66727ad 100644 (file)
@@ -604,7 +604,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
                                    "%d-%d", dh->type, entry->instance);
 
        if (*ret) {
-               kfree(entry);
+               kobject_put(&entry->kobj);
                return;
        }
 
index 69353dd..5cc2389 100644 (file)
@@ -685,8 +685,7 @@ static void edd_populate_dir(struct edd_device * edev)
        int i;
 
        for (i = 0; (attr = edd_attrs[i]) && !error; i++) {
-               if (!attr->test ||
-                   (attr->test && attr->test(edev)))
+               if (!attr->test || attr->test(edev))
                        error = sysfs_create_file(&edev->kobj,&attr->attr);
        }
 
index 4720ba9..7aa4717 100644 (file)
@@ -193,6 +193,9 @@ config EFI_TEST
          Say Y here to enable the runtime services support via /dev/efi_test.
          If unsure, say N.
 
+config EFI_DEV_PATH_PARSER
+       bool
+
 config APPLE_PROPERTIES
        bool "Apple Device Properties"
        depends on EFI_STUB && X86
@@ -255,40 +258,15 @@ config EFI_DISABLE_PCI_DMA
          options "efi=disable_early_pci_dma" or "efi=no_disable_early_pci_dma"
          may be used to override this option.
 
-endmenu
-
-config EFI_EMBEDDED_FIRMWARE
-       bool
-       depends on EFI
-       select CRYPTO_LIB_SHA256
-
-config UEFI_CPER
-       bool
-
-config UEFI_CPER_ARM
-       bool
-       depends on UEFI_CPER && ( ARM || ARM64 )
-       default y
-
-config UEFI_CPER_X86
-       bool
-       depends on UEFI_CPER && X86
-       default y
-
-config EFI_DEV_PATH_PARSER
-       bool
-       depends on ACPI
-       default n
-
 config EFI_EARLYCON
        def_bool y
-       depends on EFI && SERIAL_EARLYCON && !ARM && !IA64
+       depends on SERIAL_EARLYCON && !ARM && !IA64
        select FONT_SUPPORT
        select ARCH_USE_MEMREMAP_PROT
 
 config EFI_CUSTOM_SSDT_OVERLAYS
        bool "Load custom ACPI SSDT overlay from an EFI variable"
-       depends on EFI && ACPI
+       depends on ACPI
        default ACPI_TABLE_UPGRADE
        help
          Allow loading of an ACPI SSDT overlay from an EFI variable specified
@@ -314,7 +292,6 @@ config EFI_DISABLE_RUNTIME
 
 config EFI_COCO_SECRET
        bool "EFI Confidential Computing Secret Area Support"
-       depends on EFI
        help
          Confidential Computing platforms (such as AMD SEV) allow the
          Guest Owner to securely inject secrets during guest VM launch.
@@ -327,3 +304,22 @@ config EFI_COCO_SECRET
          for usage inside the kernel.  This will allow the
          virt/coco/efi_secret module to access the secrets, which in turn
          allows userspace programs to access the injected secrets.
+
+config EFI_EMBEDDED_FIRMWARE
+       bool
+       select CRYPTO_LIB_SHA256
+
+endmenu
+
+config UEFI_CPER
+       bool
+
+config UEFI_CPER_ARM
+       bool
+       depends on UEFI_CPER && ( ARM || ARM64 )
+       default y
+
+config UEFI_CPER_X86
+       bool
+       depends on UEFI_CPER && X86
+       default y
index b14e88c..05ae8bc 100644 (file)
@@ -260,10 +260,10 @@ adjust_memory_range_protection(unsigned long start, unsigned long size)
                                      EFI_MEMORY_WB);
 
                if (status != EFI_SUCCESS) {
-                       efi_warn("Unable to unprotect memory range [%08lx,%08lx]: %d\n",
+                       efi_warn("Unable to unprotect memory range [%08lx,%08lx]: %lx\n",
                                 unprotect_start,
                                 unprotect_start + unprotect_size,
-                                (int)status);
+                                status);
                }
        }
 }
index 8177a0f..14663f6 100644 (file)
@@ -948,17 +948,17 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory);
 void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
 {
        struct stratix10_svc_data_mem *pmem;
-       size_t size = 0;
 
        list_for_each_entry(pmem, &svc_data_mem, node)
                if (pmem->vaddr == kaddr) {
-                       size = pmem->size;
-                       break;
+                       gen_pool_free(chan->ctrl->genpool,
+                                      (unsigned long)kaddr, pmem->size);
+                       pmem->vaddr = NULL;
+                       list_del(&pmem->node);
+                       return;
                }
 
-       gen_pool_free(chan->ctrl->genpool, (unsigned long)kaddr, size);
-       pmem->vaddr = NULL;
-       list_del(&pmem->node);
+       list_del(&svc_data_mem);
 }
 EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);
 
index f21ece5..7977a49 100644 (file)
 /* BOOT_PIN_CTRL_MASK- out_val[11:8], out_en[3:0] */
 #define CRL_APB_BOOTPIN_CTRL_MASK      0xF0FU
 
+/* IOCTL/QUERY feature payload size */
+#define FEATURE_PAYLOAD_SIZE           2
+
+/* Firmware feature check version mask */
+#define FIRMWARE_VERSION_MASK          GENMASK(15, 0)
+
 static bool feature_check_enabled;
 static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
+static u32 ioctl_features[FEATURE_PAYLOAD_SIZE];
+static u32 query_features[FEATURE_PAYLOAD_SIZE];
 
 static struct platform_device *em_dev;
 
@@ -167,21 +175,28 @@ static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
        return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
 }
 
-/**
- * zynqmp_pm_feature() - Check weather given feature is supported or not
- * @api_id:            API ID to check
- *
- * Return: Returns status, either success or error+reason
- */
-int zynqmp_pm_feature(const u32 api_id)
+static int __do_feature_check_call(const u32 api_id, u32 *ret_payload)
 {
        int ret;
-       u32 ret_payload[PAYLOAD_ARG_CNT];
        u64 smc_arg[2];
-       struct pm_api_feature_data *feature_data;
 
-       if (!feature_check_enabled)
-               return 0;
+       smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
+       smc_arg[1] = api_id;
+
+       ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
+       if (ret)
+               ret = -EOPNOTSUPP;
+       else
+               ret = ret_payload[1];
+
+       return ret;
+}
+
+static int do_feature_check_call(const u32 api_id)
+{
+       int ret;
+       u32 ret_payload[PAYLOAD_ARG_CNT];
+       struct pm_api_feature_data *feature_data;
 
        /* Check for existing entry in hash table for given api */
        hash_for_each_possible(pm_api_features_map, feature_data, hentry,
@@ -196,23 +211,86 @@ int zynqmp_pm_feature(const u32 api_id)
                return -ENOMEM;
 
        feature_data->pm_api_id = api_id;
-       smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
-       smc_arg[1] = api_id;
-
-       ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
-       if (ret)
-               ret = -EOPNOTSUPP;
-       else
-               ret = ret_payload[1];
+       ret = __do_feature_check_call(api_id, ret_payload);
 
        feature_data->feature_status = ret;
        hash_add(pm_api_features_map, &feature_data->hentry, api_id);
 
+       if (api_id == PM_IOCTL)
+               /* Store supported IOCTL IDs mask */
+               memcpy(ioctl_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4);
+       else if (api_id == PM_QUERY_DATA)
+               /* Store supported QUERY IDs mask */
+               memcpy(query_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_feature);
 
 /**
+ * zynqmp_pm_feature() - Check whether given feature is supported or not and
+ *                      store supported IOCTL/QUERY ID mask
+ * @api_id:            API ID to check
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_feature(const u32 api_id)
+{
+       int ret;
+
+       if (!feature_check_enabled)
+               return 0;
+
+       ret = do_feature_check_call(api_id);
+
+       return ret;
+}
+
+/**
+ * zynqmp_pm_is_function_supported() - Check whether given IOCTL/QUERY function
+ *                                    is supported or not
+ * @api_id:            PM_IOCTL or PM_QUERY_DATA
+ * @id:                        IOCTL or QUERY function IDs
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
+{
+       int ret;
+       u32 *bit_mask;
+
+       /* Input arguments validation */
+       if (id >= 64 || (api_id != PM_IOCTL && api_id != PM_QUERY_DATA))
+               return -EINVAL;
+
+       /* Check feature check API version */
+       ret = do_feature_check_call(PM_FEATURE_CHECK);
+       if (ret < 0)
+               return ret;
+
+       /* Check if feature check version 2 is supported or not */
+       if ((ret & FIRMWARE_VERSION_MASK) == PM_API_VERSION_2) {
+               /*
+                * Call feature check for IOCTL/QUERY API to get IOCTL ID or
+                * QUERY ID feature status.
+                */
+               ret = do_feature_check_call(api_id);
+               if (ret < 0)
+                       return ret;
+
+               bit_mask = (api_id == PM_IOCTL) ? ioctl_features : query_features;
+
+               if ((bit_mask[(id / 32)] & BIT((id % 32))) == 0U)
+                       return -EOPNOTSUPP;
+       } else {
+               return -ENODATA;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported);
+
+/**
  * zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
  *                        caller function depending on the configuration
  * @pm_api_id:         Requested PM-API call
@@ -1584,6 +1662,10 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
        struct zynqmp_devinfo *devinfo;
        int ret;
 
+       ret = get_set_conduit_method(dev->of_node);
+       if (ret)
+               return ret;
+
        np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp");
        if (!np) {
                np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
@@ -1592,11 +1674,14 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
 
                feature_check_enabled = true;
        }
-       of_node_put(np);
 
-       ret = get_set_conduit_method(dev->of_node);
-       if (ret)
-               return ret;
+       if (!feature_check_enabled) {
+               ret = do_feature_check_call(PM_FEATURE_CHECK);
+               if (ret >= 0)
+                       feature_check_enabled = true;
+       }
+
+       of_node_put(np);
 
        devinfo = devm_kzalloc(dev, sizeof(*devinfo), GFP_KERNEL);
        if (!devinfo)
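
zynqmp_pm_is_function_supported() above treats the two u32 words cached from the version-2 feature check as a 64-bit bitmap of supported IOCTL/QUERY IDs, which is why the ID is capped at 64 and tested with bit_mask[id / 32] & BIT(id % 32). A small worked example of that indexing (the helper and mask values are illustrative, not firmware output):

#include <linux/bits.h>
#include <linux/types.h>

static bool example_id_supported(const u32 bit_mask[2], u32 id)
{
	if (id >= 64)
		return false;	/* only two 32-bit words are reported */

	/* e.g. id 35 lands in word 1, bit 3: bit_mask[1] & BIT(3) */
	return bit_mask[id / 32] & BIT(id % 32);
}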
index 0bff783..5935b3d 100644 (file)
@@ -18,9 +18,9 @@ obj-$(CONFIG_FPGA_MGR_TS73XX)         += ts73xx-fpga.o
 obj-$(CONFIG_FPGA_MGR_XILINX_SPI)      += xilinx-spi.o
 obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA)       += zynq-fpga.o
 obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA)     += zynqmp-fpga.o
-obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA)      += versal-fpga.o
-obj-$(CONFIG_ALTERA_PR_IP_CORE)         += altera-pr-ip-core.o
-obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT)    += altera-pr-ip-core-plat.o
+obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA)     += versal-fpga.o
+obj-$(CONFIG_ALTERA_PR_IP_CORE)                += altera-pr-ip-core.o
+obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT)   += altera-pr-ip-core-plat.o
 
 # FPGA Bridge Drivers
 obj-$(CONFIG_FPGA_BRIDGE)              += fpga-bridge.o
index 717ac97..fd1fa55 100644 (file)
@@ -259,6 +259,15 @@ static int find_dfls_by_default(struct pci_dev *pcidev,
                         */
                        bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
                        offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
+                       if (bar == FME_PORT_OFST_BAR_SKIP) {
+                               continue;
+                       } else if (bar >= PCI_STD_NUM_BARS) {
+                               dev_err(&pcidev->dev, "bad BAR %d for port %d\n",
+                                       bar, i);
+                               ret = -EINVAL;
+                               break;
+                       }
+
                        start = pci_resource_start(pcidev, bar) + offset;
                        len = pci_resource_len(pcidev, bar) - offset;
 
index 599bb21..6bff39f 100644 (file)
@@ -940,9 +940,12 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
 {
        void __iomem *base = binfo->ioaddr + ofst;
        unsigned int i, ibase, inr = 0;
+       enum dfl_id_type type;
        int virq;
        u64 v;
 
+       type = feature_dev_id_type(binfo->feature_dev);
+
        /*
         * Ideally DFL framework should only read info from DFL header, but
         * current version DFL only provides mmio resources information for
@@ -957,22 +960,25 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
         * code will be added. But in order to be compatible to old version
         * DFL, the driver may still fall back to these quirks.
         */
-       switch (fid) {
-       case PORT_FEATURE_ID_UINT:
-               v = readq(base + PORT_UINT_CAP);
-               ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
-               inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
-               break;
-       case PORT_FEATURE_ID_ERROR:
-               v = readq(base + PORT_ERROR_CAP);
-               ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
-               inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
-               break;
-       case FME_FEATURE_ID_GLOBAL_ERR:
-               v = readq(base + FME_ERROR_CAP);
-               ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
-               inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
-               break;
+       if (type == PORT_ID) {
+               switch (fid) {
+               case PORT_FEATURE_ID_UINT:
+                       v = readq(base + PORT_UINT_CAP);
+                       ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
+                       inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
+                       break;
+               case PORT_FEATURE_ID_ERROR:
+                       v = readq(base + PORT_ERROR_CAP);
+                       ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
+                       inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
+                       break;
+               }
+       } else if (type == FME_ID) {
+               if (fid == FME_FEATURE_ID_GLOBAL_ERR) {
+                       v = readq(base + FME_ERROR_CAP);
+                       ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
+                       inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
+               }
        }
 
        if (!inr) {
index 53572c7..06cfcd5 100644 (file)
@@ -89,6 +89,7 @@
 #define FME_HDR_NEXT_AFU       NEXT_AFU
 #define FME_HDR_CAP            0x30
 #define FME_HDR_PORT_OFST(n)   (0x38 + ((n) * 0x8))
+#define FME_PORT_OFST_BAR_SKIP 7
 #define FME_HDR_BITSTREAM_ID   0x60
 #define FME_HDR_BITSTREAM_MD   0x68
 
index d49a9ce..a3595ec 100644 (file)
@@ -148,11 +148,12 @@ static int fpga_mgr_write_init_buf(struct fpga_manager *mgr,
        int ret;
 
        mgr->state = FPGA_MGR_STATE_WRITE_INIT;
-       if (!mgr->mops->initial_header_size)
+       if (!mgr->mops->initial_header_size) {
                ret = fpga_mgr_write_init(mgr, info, NULL, 0);
-       else
-               ret = fpga_mgr_write_init(
-                   mgr, info, buf, min(mgr->mops->initial_header_size, count));
+       } else {
+               count = min(mgr->mops->initial_header_size, count);
+               ret = fpga_mgr_write_init(mgr, info, buf, count);
+       }
 
        if (ret) {
                dev_err(&mgr->dev, "Error preparing FPGA for writing\n");
@@ -730,6 +731,8 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res)
  * @parent:    fpga manager device from pdev
  * @info:      parameters for fpga manager
  *
+ * Return:  fpga manager pointer on success, negative error code otherwise.
+ *
  * This is the devres variant of fpga_mgr_register_full() for which the unregister
  * function will be called automatically when the managing device is detached.
  */
@@ -763,6 +766,8 @@ EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full);
  * @mops:      pointer to structure of fpga manager ops
  * @priv:      fpga manager private data
  *
+ * Return:  fpga manager pointer on success, negative error code otherwise.
+ *
  * This is the devres variant of fpga_mgr_register() for which the
  * unregister function will be called automatically when the managing
  * device is detached.
index b0ac18d..485948e 100644 (file)
@@ -18,9 +18,9 @@
 static DEFINE_IDA(fpga_region_ida);
 static struct class *fpga_region_class;
 
-struct fpga_region *fpga_region_class_find(
-       struct device *start, const void *data,
-       int (*match)(struct device *, const void *))
+struct fpga_region *
+fpga_region_class_find(struct device *start, const void *data,
+                      int (*match)(struct device *, const void *))
 {
        struct device *dev;
 
index 50b8305..ae82532 100644 (file)
@@ -28,7 +28,7 @@ MODULE_DEVICE_TABLE(of, fpga_region_of_match);
  *
  * Caller will need to put_device(&region->dev) when done.
  *
- * Returns FPGA Region struct or NULL
+ * Return: FPGA Region struct or NULL
  */
 static struct fpga_region *of_fpga_region_find(struct device_node *np)
 {
@@ -80,7 +80,7 @@ static struct fpga_manager *of_fpga_region_get_mgr(struct device_node *np)
  * Caller should call fpga_bridges_put(&region->bridge_list) when
  * done with the bridges.
  *
- * Return 0 for success (even if there are no bridges specified)
+ * Return: 0 for success (even if there are no bridges specified)
  * or -EBUSY if any of the bridges are in use.
  */
 static int of_fpga_region_get_bridges(struct fpga_region *region)
@@ -139,13 +139,13 @@ static int of_fpga_region_get_bridges(struct fpga_region *region)
 }
 
 /**
- * child_regions_with_firmware
+ * child_regions_with_firmware - Used to check the child region info.
  * @overlay: device node of the overlay
  *
  * If the overlay adds child FPGA regions, they are not allowed to have
  * firmware-name property.
  *
- * Return 0 for OK or -EINVAL if child FPGA region adds firmware-name.
+ * Return: 0 for OK or -EINVAL if child FPGA region adds firmware-name.
  */
 static int child_regions_with_firmware(struct device_node *overlay)
 {
@@ -184,14 +184,14 @@ static int child_regions_with_firmware(struct device_node *overlay)
  * Given an overlay applied to an FPGA region, parse the FPGA image specific
  * info in the overlay and do some checking.
  *
- * Returns:
+ * Return:
  *   NULL if overlay doesn't direct us to program the FPGA.
  *   fpga_image_info struct if there is an image to program.
  *   error code for invalid overlay.
  */
-static struct fpga_image_info *of_fpga_region_parse_ov(
-                                               struct fpga_region *region,
-                                               struct device_node *overlay)
+static struct fpga_image_info *
+of_fpga_region_parse_ov(struct fpga_region *region,
+                       struct device_node *overlay)
 {
        struct device *dev = &region->dev;
        struct fpga_image_info *info;
@@ -279,7 +279,7 @@ ret_no_info:
  * If the checks fail, overlay is rejected and does not get added to the
  * live tree.
  *
- * Returns 0 for success or negative error code for failure.
+ * Return: 0 for success or negative error code for failure.
  */
 static int of_fpga_region_notify_pre_apply(struct fpga_region *region,
                                           struct of_overlay_notify_data *nd)
@@ -339,7 +339,7 @@ static void of_fpga_region_notify_post_remove(struct fpga_region *region,
  * This notifier handles programming an FPGA when a "firmware-name" property is
  * added to an fpga-region.
  *
- * Returns NOTIFY_OK or error if FPGA programming fails.
+ * Return: NOTIFY_OK or error if FPGA programming fails.
  */
 static int of_fpga_region_notify(struct notifier_block *nb,
                                 unsigned long action, void *arg)
@@ -446,6 +446,8 @@ static struct platform_driver of_fpga_region_driver = {
 /**
  * of_fpga_region_init - init function for fpga_region class
  * Creates the fpga_region class and registers a reconfig notifier.
+ *
+ * Return: 0 on success, negative error code otherwise.
  */
 static int __init of_fpga_region_init(void)
 {
index f1e4ac9..e388e75 100644 (file)
@@ -406,12 +406,6 @@ static int adp5588_gpio_probe(struct i2c_client *client)
        if (ret)
                return ret;
 
-       if (pdata && pdata->setup) {
-               ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context);
-               if (ret < 0)
-                       dev_warn(&client->dev, "setup failed, %d\n", ret);
-       }
-
        i2c_set_clientdata(client, dev);
 
        return 0;
@@ -419,20 +413,7 @@ static int adp5588_gpio_probe(struct i2c_client *client)
 
 static int adp5588_gpio_remove(struct i2c_client *client)
 {
-       struct adp5588_gpio_platform_data *pdata =
-                       dev_get_platdata(&client->dev);
        struct adp5588_gpio *dev = i2c_get_clientdata(client);
-       int ret;
-
-       if (pdata && pdata->teardown) {
-               ret = pdata->teardown(client,
-                                     dev->gpio_chip.base, dev->gpio_chip.ngpio,
-                                     pdata->context);
-               if (ret < 0) {
-                       dev_err(&client->dev, "teardown failed %d\n", ret);
-                       return ret;
-               }
-       }
 
        if (dev->client->irq)
                free_irq(dev->client->irq, dev);
index b444c6a..08bc52c 100644 (file)
@@ -1120,20 +1120,21 @@ static int pca953x_regcache_sync(struct device *dev)
 {
        struct pca953x_chip *chip = dev_get_drvdata(dev);
        int ret;
+       u8 regaddr;
 
        /*
         * The ordering between direction and output is important,
         * sync these registers first and only then sync the rest.
         */
-       ret = regcache_sync_region(chip->regmap, chip->regs->direction,
-                                  chip->regs->direction + NBANK(chip));
+       regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+       ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
        if (ret) {
                dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
                return ret;
        }
 
-       ret = regcache_sync_region(chip->regmap, chip->regs->output,
-                                  chip->regs->output + NBANK(chip));
+       regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+       ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
        if (ret) {
                dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
                return ret;
@@ -1141,16 +1142,18 @@ static int pca953x_regcache_sync(struct device *dev)
 
 #ifdef CONFIG_GPIO_PCA953X_IRQ
        if (chip->driver_data & PCA_PCAL) {
-               ret = regcache_sync_region(chip->regmap, PCAL953X_IN_LATCH,
-                                          PCAL953X_IN_LATCH + NBANK(chip));
+               regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
+               ret = regcache_sync_region(chip->regmap, regaddr,
+                                          regaddr + NBANK(chip));
                if (ret) {
                        dev_err(dev, "Failed to sync INT latch registers: %d\n",
                                ret);
                        return ret;
                }
 
-               ret = regcache_sync_region(chip->regmap, PCAL953X_INT_MASK,
-                                          PCAL953X_INT_MASK + NBANK(chip));
+               regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
+               ret = regcache_sync_region(chip->regmap, regaddr,
+                                          regaddr + NBANK(chip));
                if (ret) {
                        dev_err(dev, "Failed to sync INT mask registers: %d\n",
                                ret);
index 84c4f1e..de28a68 100644 (file)
@@ -1,8 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2017 NVIDIA Corporation
+ * Copyright (c) 2016-2022 NVIDIA Corporation
  *
  * Author: Thierry Reding <treding@nvidia.com>
+ *        Dipen Patel <dpatel@nvidia.com>
  */
 
 #include <linux/gpio/driver.h>
@@ -11,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/hte.h>
 
 #include <dt-bindings/gpio/tegra186-gpio.h>
 #include <dt-bindings/gpio/tegra194-gpio.h>
@@ -36,6 +38,7 @@
 #define  TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL BIT(4)
 #define  TEGRA186_GPIO_ENABLE_CONFIG_DEBOUNCE BIT(5)
 #define  TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT BIT(6)
+#define  TEGRA186_GPIO_ENABLE_CONFIG_TIMESTAMP_FUNC BIT(7)
 
 #define TEGRA186_GPIO_DEBOUNCE_CONTROL 0x04
 #define  TEGRA186_GPIO_DEBOUNCE_CONTROL_THRESHOLD(x) ((x) & 0xff)
@@ -76,6 +79,7 @@ struct tegra_gpio_soc {
        const struct tegra186_pin_range *pin_ranges;
        unsigned int num_pin_ranges;
        const char *pinmux;
+       bool has_gte;
 };
 
 struct tegra_gpio {
@@ -193,6 +197,76 @@ static int tegra186_gpio_direction_output(struct gpio_chip *chip,
        return 0;
 }
 
+#define HTE_BOTH_EDGES (HTE_RISING_EDGE_TS | HTE_FALLING_EDGE_TS)
+
+static int tegra186_gpio_en_hw_ts(struct gpio_chip *gc, u32 offset,
+                                 unsigned long flags)
+{
+       struct tegra_gpio *gpio;
+       void __iomem *base;
+       int value;
+
+       if (!gc)
+               return -EINVAL;
+
+       gpio = gpiochip_get_data(gc);
+       if (!gpio)
+               return -ENODEV;
+
+       base = tegra186_gpio_get_base(gpio, offset);
+       if (WARN_ON(base == NULL))
+               return -EINVAL;
+
+       value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+       value |= TEGRA186_GPIO_ENABLE_CONFIG_TIMESTAMP_FUNC;
+
+       if (flags == HTE_BOTH_EDGES) {
+               value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE;
+       } else if (flags == HTE_RISING_EDGE_TS) {
+               value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+               value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL;
+       } else if (flags == HTE_FALLING_EDGE_TS) {
+               value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+       }
+
+       writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+
+       return 0;
+}
+
+static int tegra186_gpio_dis_hw_ts(struct gpio_chip *gc, u32 offset,
+                                  unsigned long flags)
+{
+       struct tegra_gpio *gpio;
+       void __iomem *base;
+       int value;
+
+       if (!gc)
+               return -EINVAL;
+
+       gpio = gpiochip_get_data(gc);
+       if (!gpio)
+               return -ENODEV;
+
+       base = tegra186_gpio_get_base(gpio, offset);
+       if (WARN_ON(base == NULL))
+               return -EINVAL;
+
+       value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+       value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TIMESTAMP_FUNC;
+       if (flags == HTE_BOTH_EDGES) {
+               value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE;
+       } else if (flags == HTE_RISING_EDGE_TS) {
+               value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+               value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL;
+       } else if (flags == HTE_FALLING_EDGE_TS) {
+               value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+       }
+       writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+
+       return 0;
+}
+
 static int tegra186_gpio_get(struct gpio_chip *chip, unsigned int offset)
 {
        struct tegra_gpio *gpio = gpiochip_get_data(chip);
@@ -747,6 +821,10 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
        gpio->gpio.set = tegra186_gpio_set;
        gpio->gpio.set_config = tegra186_gpio_set_config;
        gpio->gpio.add_pin_ranges = tegra186_gpio_add_pin_ranges;
+       if (gpio->soc->has_gte) {
+               gpio->gpio.en_hw_timestamp = tegra186_gpio_en_hw_ts;
+               gpio->gpio.dis_hw_timestamp = tegra186_gpio_dis_hw_ts;
+       }
 
        gpio->gpio.base = -1;
 
@@ -991,6 +1069,7 @@ static const struct tegra_gpio_soc tegra194_aon_soc = {
        .name = "tegra194-gpio-aon",
        .instance = 1,
        .num_irqs_per_bank = 8,
+       .has_gte = true,
 };
 
 #define TEGRA234_MAIN_GPIO_PORT(_name, _bank, _port, _pins)    \
index c2900b1..f5aa5f9 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/timekeeping.h>
 #include <linux/uaccess.h>
 #include <linux/workqueue.h>
+#include <linux/hte.h>
 #include <uapi/linux/gpio.h>
 
 #include "gpiolib.h"
@@ -464,6 +465,25 @@ struct line {
         * stale value.
         */
        unsigned int level;
+       /*
+        * -- hte specific fields --
+        */
+       struct hte_ts_desc hdesc;
+       /*
+        * The HTE provider sets the line level at the time of the event.
+        * Valid values are 0 or 1; a negative value indicates an error.
+        */
+       int raw_level;
+       /*
+        * When sw_debounce is set on an HTE-enabled line, this is a running
+        * counter of the discarded events.
+        */
+       u32 total_discard_seq;
+       /*
+        * When sw_debounce is set on an HTE-enabled line, this variable records
+        * the last sequence number seen before the debounce period expires.
+        */
+       u32 last_seqno;
 };
 
 /**
@@ -518,6 +538,7 @@ struct linereq {
         GPIO_V2_LINE_DRIVE_FLAGS | \
         GPIO_V2_LINE_EDGE_FLAGS | \
         GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
+        GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
         GPIO_V2_LINE_BIAS_FLAGS)
 
 static void linereq_put_event(struct linereq *lr,
@@ -542,10 +563,98 @@ static u64 line_event_timestamp(struct line *line)
 {
        if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
                return ktime_get_real_ns();
+       else if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
+               return line->timestamp_ns;
 
        return ktime_get_ns();
 }
 
+static enum hte_return process_hw_ts_thread(void *p)
+{
+       struct line *line;
+       struct linereq *lr;
+       struct gpio_v2_line_event le;
+       int level;
+       u64 eflags;
+
+       if (!p)
+               return HTE_CB_HANDLED;
+
+       line = p;
+       lr = line->req;
+
+       memset(&le, 0, sizeof(le));
+
+       le.timestamp_ns = line->timestamp_ns;
+       eflags = READ_ONCE(line->eflags);
+
+       if (eflags == GPIO_V2_LINE_FLAG_EDGE_BOTH) {
+               if (line->raw_level >= 0) {
+                       if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
+                               level = !line->raw_level;
+                       else
+                               level = line->raw_level;
+               } else {
+                       level = gpiod_get_value_cansleep(line->desc);
+               }
+
+               if (level)
+                       le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
+               else
+                       le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
+       } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
+               /* Emit low-to-high event */
+               le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
+       } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
+               /* Emit high-to-low event */
+               le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
+       } else {
+               return HTE_CB_HANDLED;
+       }
+       le.line_seqno = line->line_seqno;
+       le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
+       le.offset = gpio_chip_hwgpio(line->desc);
+
+       linereq_put_event(lr, &le);
+
+       return HTE_CB_HANDLED;
+}
+
+static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
+{
+       struct line *line;
+       struct linereq *lr;
+       int diff_seqno = 0;
+
+       if (!ts || !p)
+               return HTE_CB_HANDLED;
+
+       line = p;
+       line->timestamp_ns = ts->tsc;
+       line->raw_level = ts->raw_level;
+       lr = line->req;
+
+       if (READ_ONCE(line->sw_debounced)) {
+               line->total_discard_seq++;
+               line->last_seqno = ts->seq;
+               mod_delayed_work(system_wq, &line->work,
+                 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
+       } else {
+               if (unlikely(ts->seq < line->line_seqno))
+                       return HTE_CB_HANDLED;
+
+               diff_seqno = ts->seq - line->line_seqno;
+               line->line_seqno = ts->seq;
+               if (lr->num_lines != 1)
+                       line->req_seqno = atomic_add_return(diff_seqno,
+                                                           &lr->seqno);
+
+               return HTE_RUN_SECOND_CB;
+       }
+
+       return HTE_CB_HANDLED;
+}
+
 static irqreturn_t edge_irq_thread(int irq, void *p)
 {
        struct line *line = p;
@@ -651,10 +760,16 @@ static void debounce_work_func(struct work_struct *work)
        struct gpio_v2_line_event le;
        struct line *line = container_of(work, struct line, work.work);
        struct linereq *lr;
-       int level;
+       int level, diff_seqno;
        u64 eflags;
 
-       level = gpiod_get_raw_value_cansleep(line->desc);
+       if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) {
+               level = line->raw_level;
+               if (level < 0)
+                       level = gpiod_get_raw_value_cansleep(line->desc);
+       } else {
+               level = gpiod_get_raw_value_cansleep(line->desc);
+       }
        if (level < 0) {
                pr_debug_ratelimited("debouncer failed to read line value\n");
                return;
@@ -685,10 +800,21 @@ static void debounce_work_func(struct work_struct *work)
        lr = line->req;
        le.timestamp_ns = line_event_timestamp(line);
        le.offset = gpio_chip_hwgpio(line->desc);
-       line->line_seqno++;
-       le.line_seqno = line->line_seqno;
-       le.seqno = (lr->num_lines == 1) ?
-               le.line_seqno : atomic_inc_return(&lr->seqno);
+       if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) {
+               /* discard events except the last one */
+               line->total_discard_seq -= 1;
+               diff_seqno = line->last_seqno - line->total_discard_seq -
+                               line->line_seqno;
+               line->line_seqno = line->last_seqno - line->total_discard_seq;
+               le.line_seqno = line->line_seqno;
+               le.seqno = (lr->num_lines == 1) ?
+                       le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
+       } else {
+               line->line_seqno++;
+               le.line_seqno = line->line_seqno;
+               le.seqno = (lr->num_lines == 1) ?
+                       le.line_seqno : atomic_inc_return(&lr->seqno);
+       }
 
        if (level)
                /* Emit low-to-high event */
@@ -700,8 +826,34 @@ static void debounce_work_func(struct work_struct *work)
        linereq_put_event(lr, &le);
 }
 
+static int hte_edge_setup(struct line *line, u64 eflags)
+{
+       int ret;
+       unsigned long flags = 0;
+       struct hte_ts_desc *hdesc = &line->hdesc;
+
+       if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
+               flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+                                 HTE_FALLING_EDGE_TS : HTE_RISING_EDGE_TS;
+       if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
+               flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+                                 HTE_RISING_EDGE_TS : HTE_FALLING_EDGE_TS;
+
+       line->total_discard_seq = 0;
+
+       hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags,
+                          NULL, line->desc);
+
+       ret = hte_ts_get(NULL, hdesc, 0);
+       if (ret)
+               return ret;
+
+       return hte_request_ts_ns(hdesc, process_hw_ts,
+                                process_hw_ts_thread, line);
+}
+
 static int debounce_setup(struct line *line,
-                         unsigned int debounce_period_us)
+                         unsigned int debounce_period_us, bool hte_req)
 {
        unsigned long irqflags;
        int ret, level, irq;
@@ -721,19 +873,27 @@ static int debounce_setup(struct line *line,
                if (level < 0)
                        return level;
 
-               irq = gpiod_to_irq(line->desc);
-               if (irq < 0)
-                       return -ENXIO;
+               if (!hte_req) {
+                       irq = gpiod_to_irq(line->desc);
+                       if (irq < 0)
+                               return -ENXIO;
 
-               WRITE_ONCE(line->level, level);
-               irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
-               ret = request_irq(irq, debounce_irq_handler, irqflags,
-                                 line->req->label, line);
-               if (ret)
-                       return ret;
+                       irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
+                       ret = request_irq(irq, debounce_irq_handler, irqflags,
+                                         line->req->label, line);
+                       if (ret)
+                               return ret;
+                       line->irq = irq;
+               } else {
+                       ret = hte_edge_setup(line,
+                                            GPIO_V2_LINE_FLAG_EDGE_RISING |
+                                            GPIO_V2_LINE_FLAG_EDGE_FALLING);
+                       if (ret)
+                               return ret;
+               }
 
+               WRITE_ONCE(line->level, level);
                WRITE_ONCE(line->sw_debounced, 1);
-               line->irq = irq;
        }
        return 0;
 }
@@ -766,13 +926,16 @@ static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
        return 0;
 }
 
-static void edge_detector_stop(struct line *line)
+static void edge_detector_stop(struct line *line, bool hte_en)
 {
-       if (line->irq) {
+       if (line->irq && !hte_en) {
                free_irq(line->irq, line);
                line->irq = 0;
        }
 
+       if (hte_en)
+               hte_ts_put(&line->hdesc);
+
        cancel_delayed_work_sync(&line->work);
        WRITE_ONCE(line->sw_debounced, 0);
        WRITE_ONCE(line->eflags, 0);
@@ -784,7 +947,7 @@ static void edge_detector_stop(struct line *line)
 static int edge_detector_setup(struct line *line,
                               struct gpio_v2_line_config *lc,
                               unsigned int line_idx,
-                              u64 eflags)
+                              u64 eflags, bool hte_req)
 {
        u32 debounce_period_us;
        unsigned long irqflags = 0;
@@ -799,7 +962,7 @@ static int edge_detector_setup(struct line *line,
        WRITE_ONCE(line->eflags, eflags);
        if (gpio_v2_line_config_debounced(lc, line_idx)) {
                debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
-               ret = debounce_setup(line, debounce_period_us);
+               ret = debounce_setup(line, debounce_period_us, hte_req);
                if (ret)
                        return ret;
                WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
@@ -809,6 +972,9 @@ static int edge_detector_setup(struct line *line,
        if (!eflags || READ_ONCE(line->sw_debounced))
                return 0;
 
+       if (hte_req)
+               return hte_edge_setup(line, eflags);
+
        irq = gpiod_to_irq(line->desc);
        if (irq < 0)
                return -ENXIO;
@@ -834,13 +1000,18 @@ static int edge_detector_setup(struct line *line,
 static int edge_detector_update(struct line *line,
                                struct gpio_v2_line_config *lc,
                                unsigned int line_idx,
-                               u64 eflags, bool polarity_change)
+                               u64 flags, bool polarity_change,
+                               bool prev_hte_flag)
 {
+       u64 eflags = flags & GPIO_V2_LINE_EDGE_FLAGS;
        unsigned int debounce_period_us =
-               gpio_v2_line_config_debounce_period(lc, line_idx);
+                       gpio_v2_line_config_debounce_period(lc, line_idx);
+       bool hte_change = (prev_hte_flag !=
+                     ((flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) != 0));
 
        if ((READ_ONCE(line->eflags) == eflags) && !polarity_change &&
-           (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
+           (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us)
+           && !hte_change)
                return 0;
 
        /* sw debounced and still will be...*/
@@ -851,11 +1022,12 @@ static int edge_detector_update(struct line *line,
        }
 
        /* reconfiguring edge detection or sw debounce being disabled */
-       if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
+       if ((line->irq && !READ_ONCE(line->sw_debounced)) || prev_hte_flag ||
            (!debounce_period_us && READ_ONCE(line->sw_debounced)))
-               edge_detector_stop(line);
+               edge_detector_stop(line, prev_hte_flag);
 
-       return edge_detector_setup(line, lc, line_idx, eflags);
+       return edge_detector_setup(line, lc, line_idx, eflags,
+                                  flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
 }
 
 static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
@@ -891,7 +1063,6 @@ static int gpio_v2_line_flags_validate(u64 flags)
        /* Return an error if an unknown flag is set */
        if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
                return -EINVAL;
-
        /*
         * Do not allow both INPUT and OUTPUT flags to be set as they are
         * contradictory.
@@ -900,6 +1071,11 @@ static int gpio_v2_line_flags_validate(u64 flags)
            (flags & GPIO_V2_LINE_FLAG_OUTPUT))
                return -EINVAL;
 
+       /* Only allow one event clock source */
+       if ((flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
+           (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
+               return -EINVAL;
+
        /* Edge detection requires explicit input. */
        if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
            !(flags & GPIO_V2_LINE_FLAG_INPUT))
@@ -992,6 +1168,8 @@ static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
 
        assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
                   flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
+       assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
+                  flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
 }
 
 static long linereq_get_values(struct linereq *lr, void __user *ip)
@@ -1121,6 +1299,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
        unsigned int i;
        u64 flags;
        bool polarity_change;
+       bool prev_hte_flag;
        int ret;
 
        for (i = 0; i < lr->num_lines; i++) {
@@ -1130,6 +1309,8 @@ static long linereq_set_config_unlocked(struct linereq *lr,
                        (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) !=
                         ((flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW) != 0));
 
+               prev_hte_flag = !!test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags);
+
                gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
                /*
                 * Lines have to be requested explicitly for input
@@ -1138,7 +1319,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
                if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
                        int val = gpio_v2_line_config_output_value(lc, i);
 
-                       edge_detector_stop(&lr->lines[i]);
+                       edge_detector_stop(&lr->lines[i], prev_hte_flag);
                        ret = gpiod_direction_output(desc, val);
                        if (ret)
                                return ret;
@@ -1148,8 +1329,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
                                return ret;
 
                        ret = edge_detector_update(&lr->lines[i], lc, i,
-                                       flags & GPIO_V2_LINE_EDGE_FLAGS,
-                                       polarity_change);
+                                       flags, polarity_change, prev_hte_flag);
                        if (ret)
                                return ret;
                }
@@ -1280,9 +1460,12 @@ static ssize_t linereq_read(struct file *file,
 static void linereq_free(struct linereq *lr)
 {
        unsigned int i;
+       bool hte;
 
        for (i = 0; i < lr->num_lines; i++) {
-               edge_detector_stop(&lr->lines[i]);
+               hte = !!test_bit(FLAG_EVENT_CLOCK_HTE,
+                                &lr->lines[i].desc->flags);
+               edge_detector_stop(&lr->lines[i], hte);
                if (lr->lines[i].desc)
                        gpiod_free(lr->lines[i].desc);
        }
@@ -1408,7 +1591,8 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
                                goto out_free_linereq;
 
                        ret = edge_detector_setup(&lr->lines[i], lc, i,
-                                       flags & GPIO_V2_LINE_EDGE_FLAGS);
+                               flags & GPIO_V2_LINE_EDGE_FLAGS,
+                               flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
                        if (ret)
                                goto out_free_linereq;
                }
@@ -1961,6 +2145,8 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
 
        if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags))
                info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
+       else if (test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags))
+               info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
 
        debounce_period_us = READ_ONCE(desc->debounce_period_us);
        if (debounce_period_us) {
index 9fff4f4..9535f48 100644 (file)
@@ -2454,6 +2454,64 @@ set_output_flag:
 EXPORT_SYMBOL_GPL(gpiod_direction_output);
 
 /**
+ * gpiod_enable_hw_timestamp_ns - Enable hardware timestamp in nanoseconds.
+ *
+ * @desc: GPIO to enable.
+ * @flags: Flags related to GPIO edge.
+ *
+ * Return: 0 in case of success, else a negative error code.
+ */
+int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
+{
+       int ret = 0;
+       struct gpio_chip *gc;
+
+       VALIDATE_DESC(desc);
+
+       gc = desc->gdev->chip;
+       if (!gc->en_hw_timestamp) {
+               gpiod_warn(desc, "%s: hw ts not supported\n", __func__);
+               return -ENOTSUPP;
+       }
+
+       ret = gc->en_hw_timestamp(gc, gpio_chip_hwgpio(desc), flags);
+       if (ret)
+               gpiod_warn(desc, "%s: hw ts request failed\n", __func__);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gpiod_enable_hw_timestamp_ns);
+
+/**
+ * gpiod_disable_hw_timestamp_ns - Disable hardware timestamp.
+ *
+ * @desc: GPIO to disable.
+ * @flags: Flags related to GPIO edge, same value as used during enable call.
+ *
+ * Return: 0 in case of success, else a negative error code.
+ */
+int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
+{
+       int ret = 0;
+       struct gpio_chip *gc;
+
+       VALIDATE_DESC(desc);
+
+       gc = desc->gdev->chip;
+       if (!gc->dis_hw_timestamp) {
+               gpiod_warn(desc, "%s: hw ts not supported\n", __func__);
+               return -ENOTSUPP;
+       }
+
+       ret = gc->dis_hw_timestamp(gc, gpio_chip_hwgpio(desc), flags);
+       if (ret)
+               gpiod_warn(desc, "%s: hw ts release failed\n", __func__);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gpiod_disable_hw_timestamp_ns);
+
+/**
  * gpiod_set_config - sets @config for a GPIO
  * @desc: descriptor of the GPIO for which to set the configuration
  * @config: Same packed config format as generic pinconf
index eef3ec0..d900ecd 100644 (file)
@@ -161,6 +161,7 @@ struct gpio_desc {
 #define FLAG_EDGE_RISING     16        /* GPIO CDEV detects rising edge events */
 #define FLAG_EDGE_FALLING    17        /* GPIO CDEV detects falling edge events */
 #define FLAG_EVENT_CLOCK_REALTIME      18 /* GPIO CDEV reports REALTIME timestamps in events */
+#define FLAG_EVENT_CLOCK_HTE           19 /* GPIO CDEV reports hardware timestamps in events */
 
        /* Connection label */
        const char              *label;
index 835c883..8997f00 100644 (file)
@@ -2,7 +2,6 @@
 # drm/tegra depends on host1x, so if both drivers are built-in care must be
 # taken to initialize them in the correct order. Link order is the only way
 # to ensure this currently.
-obj-$(CONFIG_TEGRA_HOST1X)     += host1x/
-obj-y                  += drm/ vga/
+obj-y                  += host1x/ drm/ vga/
 obj-$(CONFIG_IMX_IPUV3_CORE)   += ipu-v3/
 obj-$(CONFIG_TRACE_GPU_MEM)            += trace/
index 8b5452a..67abf8d 100644 (file)
@@ -1621,7 +1621,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 
        mutex_lock(&mem->lock);
 
-       /* Unpin MMIO/DOORBELL BO's that were pinnned during allocation */
+       /* Unpin MMIO/DOORBELL BO's that were pinned during allocation */
        if (mem->alloc_flags &
            (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
             KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
index 63e0293..fd8f373 100644 (file)
@@ -188,13 +188,17 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
                        vram_type = AMDGPU_VRAM_TYPE_DDR3;
                        break;
                case Ddr4MemType:
-               case LpDdr4MemType:
                        vram_type = AMDGPU_VRAM_TYPE_DDR4;
                        break;
+               case LpDdr4MemType:
+                       vram_type = AMDGPU_VRAM_TYPE_LPDDR4;
+                       break;
                case Ddr5MemType:
-               case LpDdr5MemType:
                        vram_type = AMDGPU_VRAM_TYPE_DDR5;
                        break;
+               case LpDdr5MemType:
+                       vram_type = AMDGPU_VRAM_TYPE_LPDDR5;
+                       break;
                default:
                        vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
                        break;
index e552a20..b28af04 100644 (file)
@@ -116,7 +116,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
        int ret;
 
        if (cs->in.num_chunks == 0)
-               return 0;
+               return -EINVAL;
 
        chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (!chunk_array)
@@ -1252,7 +1252,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
        p->fence = dma_fence_get(&job->base.s_fence->finished);
 
-       amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
+       seq = amdgpu_ctx_add_fence(p->ctx, entity, p->fence);
        amdgpu_cs_post_dependencies(p);
 
        if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
index c317078..7dc92ef 100644 (file)
@@ -135,9 +135,9 @@ static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_
 
 static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
 {
-       struct amdgpu_device *adev = ctx->adev;
-       int32_t ctx_prio;
+       struct amdgpu_device *adev = ctx->mgr->adev;
        unsigned int hw_prio;
+       int32_t ctx_prio;
 
        ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
                        ctx->init_priority : ctx->override_priority;
@@ -162,17 +162,50 @@ static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
        return hw_prio;
 }
 
+/* Calculate the time spent on the hw */
+static ktime_t amdgpu_ctx_fence_time(struct dma_fence *fence)
+{
+       struct drm_sched_fence *s_fence;
+
+       if (!fence)
+               return ns_to_ktime(0);
+
+       /* When the fence is not even scheduled it can't have spent time */
+       s_fence = to_drm_sched_fence(fence);
+       if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
+               return ns_to_ktime(0);
+
+       /* When it is still running, account how much time was already spent */
+       if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
+               return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);
+
+       return ktime_sub(s_fence->finished.timestamp,
+                        s_fence->scheduled.timestamp);
+}
+
+static ktime_t amdgpu_ctx_entity_time(struct amdgpu_ctx *ctx,
+                                     struct amdgpu_ctx_entity *centity)
+{
+       ktime_t res = ns_to_ktime(0);
+       uint32_t i;
+
+       spin_lock(&ctx->ring_lock);
+       for (i = 0; i < amdgpu_sched_jobs; i++) {
+               res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));
+       }
+       spin_unlock(&ctx->ring_lock);
+       return res;
+}
 
 static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
                                  const u32 ring)
 {
-       struct amdgpu_device *adev = ctx->adev;
-       struct amdgpu_ctx_entity *entity;
        struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
-       unsigned num_scheds = 0;
-       int32_t ctx_prio;
-       unsigned int hw_prio;
+       struct amdgpu_device *adev = ctx->mgr->adev;
+       struct amdgpu_ctx_entity *entity;
        enum drm_sched_priority drm_prio;
+       unsigned int hw_prio, num_scheds;
+       int32_t ctx_prio;
        int r;
 
        entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
@@ -182,6 +215,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
 
        ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
                        ctx->init_priority : ctx->override_priority;
+       entity->hw_ip = hw_ip;
        entity->sequence = 1;
        hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
        drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);
@@ -220,10 +254,25 @@ error_free_entity:
        return r;
 }
 
-static int amdgpu_ctx_init(struct amdgpu_device *adev,
-                          int32_t priority,
-                          struct drm_file *filp,
-                          struct amdgpu_ctx *ctx)
+static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
+{
+       ktime_t res = ns_to_ktime(0);
+       int i;
+
+       if (!entity)
+               return res;
+
+       for (i = 0; i < amdgpu_sched_jobs; ++i) {
+               res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i]));
+               dma_fence_put(entity->fences[i]);
+       }
+
+       kfree(entity);
+       return res;
+}
+
+static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
+                          struct drm_file *filp, struct amdgpu_ctx *ctx)
 {
        int r;
 
@@ -233,15 +282,14 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
        memset(ctx, 0, sizeof(*ctx));
 
-       ctx->adev = adev;
-
        kref_init(&ctx->refcount);
+       ctx->mgr = mgr;
        spin_lock_init(&ctx->ring_lock);
        mutex_init(&ctx->lock);
 
-       ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+       ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
        ctx->reset_counter_query = ctx->reset_counter;
-       ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+       ctx->vram_lost_counter = atomic_read(&mgr->adev->vram_lost_counter);
        ctx->init_priority = priority;
        ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
        ctx->stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
@@ -249,24 +297,10 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
        return 0;
 }
 
-static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
-{
-
-       int i;
-
-       if (!entity)
-               return;
-
-       for (i = 0; i < amdgpu_sched_jobs; ++i)
-               dma_fence_put(entity->fences[i]);
-
-       kfree(entity);
-}
-
 static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
                                        u32 *stable_pstate)
 {
-       struct amdgpu_device *adev = ctx->adev;
+       struct amdgpu_device *adev = ctx->mgr->adev;
        enum amd_dpm_forced_level current_level;
 
        current_level = amdgpu_dpm_get_performance_level(adev);
@@ -294,7 +328,7 @@ static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
 static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
                                        u32 stable_pstate)
 {
-       struct amdgpu_device *adev = ctx->adev;
+       struct amdgpu_device *adev = ctx->mgr->adev;
        enum amd_dpm_forced_level level;
        u32 current_stable_pstate;
        int r;
@@ -345,7 +379,8 @@ done:
 static void amdgpu_ctx_fini(struct kref *ref)
 {
        struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
-       struct amdgpu_device *adev = ctx->adev;
+       struct amdgpu_ctx_mgr *mgr = ctx->mgr;
+       struct amdgpu_device *adev = mgr->adev;
        unsigned i, j, idx;
 
        if (!adev)
@@ -353,8 +388,10 @@ static void amdgpu_ctx_fini(struct kref *ref)
 
        for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
                for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
-                       amdgpu_ctx_fini_entity(ctx->entities[i][j]);
-                       ctx->entities[i][j] = NULL;
+                       ktime_t spend;
+
+                       spend = amdgpu_ctx_fini_entity(ctx->entities[i][j]);
+                       atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]);
                }
        }
 
@@ -421,7 +458,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
        }
 
        *id = (uint32_t)r;
-       r = amdgpu_ctx_init(adev, priority, filp, ctx);
+       r = amdgpu_ctx_init(mgr, priority, filp, ctx);
        if (r) {
                idr_remove(&mgr->ctx_handles, *id);
                *id = 0;
@@ -671,9 +708,9 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
        return 0;
 }
 
-void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
-                         struct drm_sched_entity *entity,
-                         struct dma_fence *fence, uint64_t *handle)
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
+                             struct drm_sched_entity *entity,
+                             struct dma_fence *fence)
 {
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        uint64_t seq = centity->sequence;
@@ -682,8 +719,7 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
 
        idx = seq & (amdgpu_sched_jobs - 1);
        other = centity->fences[idx];
-       if (other)
-               BUG_ON(!dma_fence_is_signaled(other));
+       WARN_ON(other && !dma_fence_is_signaled(other));
 
        dma_fence_get(fence);
 
@@ -692,9 +728,11 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
        centity->sequence++;
        spin_unlock(&ctx->ring_lock);
 
+       atomic64_add(ktime_to_ns(amdgpu_ctx_fence_time(other)),
+                    &ctx->mgr->time_spend[centity->hw_ip]);
+
        dma_fence_put(other);
-       if (handle)
-               *handle = seq;
+       return seq;
 }
 
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
@@ -731,7 +769,7 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
                                           int hw_ip,
                                           int32_t priority)
 {
-       struct amdgpu_device *adev = ctx->adev;
+       struct amdgpu_device *adev = ctx->mgr->adev;
        unsigned int hw_prio;
        struct drm_gpu_scheduler **scheds = NULL;
        unsigned num_scheds;
@@ -796,10 +834,17 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
        return r;
 }
 
-void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
+                        struct amdgpu_device *adev)
 {
+       unsigned int i;
+
+       mgr->adev = adev;
        mutex_init(&mgr->lock);
        idr_init(&mgr->ctx_handles);
+
+       for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
+               atomic64_set(&mgr->time_spend[i], 0);
 }
 
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
@@ -875,80 +920,38 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
        mutex_destroy(&mgr->lock);
 }
 
-static void amdgpu_ctx_fence_time(struct amdgpu_ctx *ctx,
-               struct amdgpu_ctx_entity *centity, ktime_t *total, ktime_t *max)
-{
-       ktime_t now, t1;
-       uint32_t i;
-
-       *total = *max = 0;
-
-       now = ktime_get();
-       for (i = 0; i < amdgpu_sched_jobs; i++) {
-               struct dma_fence *fence;
-               struct drm_sched_fence *s_fence;
-
-               spin_lock(&ctx->ring_lock);
-               fence = dma_fence_get(centity->fences[i]);
-               spin_unlock(&ctx->ring_lock);
-               if (!fence)
-                       continue;
-               s_fence = to_drm_sched_fence(fence);
-               if (!dma_fence_is_signaled(&s_fence->scheduled)) {
-                       dma_fence_put(fence);
-                       continue;
-               }
-               t1 = s_fence->scheduled.timestamp;
-               if (!ktime_before(t1, now)) {
-                       dma_fence_put(fence);
-                       continue;
-               }
-               if (dma_fence_is_signaled(&s_fence->finished) &&
-                       s_fence->finished.timestamp < now)
-                       *total += ktime_sub(s_fence->finished.timestamp, t1);
-               else
-                       *total += ktime_sub(now, t1);
-               t1 = ktime_sub(now, t1);
-               dma_fence_put(fence);
-               *max = max(t1, *max);
-       }
-}
-
-ktime_t amdgpu_ctx_mgr_fence_usage(struct amdgpu_ctx_mgr *mgr, uint32_t hwip,
-               uint32_t idx, uint64_t *elapsed)
+void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
+                         ktime_t usage[AMDGPU_HW_IP_NUM])
 {
-       struct idr *idp;
        struct amdgpu_ctx *ctx;
+       unsigned int hw_ip, i;
        uint32_t id;
-       struct amdgpu_ctx_entity *centity;
-       ktime_t total = 0, max = 0;
 
-       if (idx >= AMDGPU_MAX_ENTITY_NUM)
-               return 0;
-       idp = &mgr->ctx_handles;
+       /*
+        * This is a little bit racy because a ctx or a fence can be destroyed
+        * just as we try to account them. But that is ok since exactly that
+        * case is explicitly allowed by the interface.
+        */
        mutex_lock(&mgr->lock);
-       idr_for_each_entry(idp, ctx, id) {
-               ktime_t ttotal, tmax;
+       for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
+               uint64_t ns = atomic64_read(&mgr->time_spend[hw_ip]);
 
-               if (!ctx->entities[hwip][idx])
-                       continue;
-
-               centity = ctx->entities[hwip][idx];
-               amdgpu_ctx_fence_time(ctx, centity, &ttotal, &tmax);
+               usage[hw_ip] = ns_to_ktime(ns);
+       }
 
-               /* Harmonic mean approximation diverges for very small
-                * values. If ratio < 0.01% ignore
-                */
-               if (AMDGPU_CTX_FENCE_USAGE_MIN_RATIO(tmax, ttotal))
-                       continue;
+       idr_for_each_entry(&mgr->ctx_handles, ctx, id) {
+               for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
+                       for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i) {
+                               struct amdgpu_ctx_entity *centity;
+                               ktime_t spend;
 
-               total = ktime_add(total, ttotal);
-               max = ktime_after(tmax, max) ? tmax : max;
+                               centity = ctx->entities[hw_ip][i];
+                               if (!centity)
+                                       continue;
+                               spend = amdgpu_ctx_entity_time(ctx, centity);
+                               usage[hw_ip] = ktime_add(usage[hw_ip], spend);
+                       }
+               }
        }
-
        mutex_unlock(&mgr->lock);
-       if (elapsed)
-               *elapsed = max;
-
-       return total;
 }
index 142f2f8..cc7c8af 100644 (file)
 #ifndef __AMDGPU_CTX_H__
 #define __AMDGPU_CTX_H__
 
+#include <linux/ktime.h>
+#include <linux/types.h>
+
 #include "amdgpu_ring.h"
 
 struct drm_device;
 struct drm_file;
 struct amdgpu_fpriv;
+struct amdgpu_ctx_mgr;
 
 #define AMDGPU_MAX_ENTITY_NUM 4
-#define AMDGPU_CTX_FENCE_USAGE_MIN_RATIO(max, total) ((max) > 16384ULL*(total))
 
 struct amdgpu_ctx_entity {
+       uint32_t                hw_ip;
        uint64_t                sequence;
        struct drm_sched_entity entity;
        struct dma_fence        *fences[];
@@ -40,7 +44,7 @@ struct amdgpu_ctx_entity {
 
 struct amdgpu_ctx {
        struct kref                     refcount;
-       struct amdgpu_device            *adev;
+       struct amdgpu_ctx_mgr           *mgr;
        unsigned                        reset_counter;
        unsigned                        reset_counter_query;
        uint32_t                        vram_lost_counter;
@@ -61,6 +65,7 @@ struct amdgpu_ctx_mgr {
        struct mutex            lock;
        /* protected by lock */
        struct idr              ctx_handles;
+       atomic64_t              time_spend[AMDGPU_HW_IP_NUM];
 };
 
 extern const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM];
@@ -70,9 +75,9 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
 
 int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
                          u32 ring, struct drm_sched_entity **entity);
-void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
-                         struct drm_sched_entity *entity,
-                         struct dma_fence *fence, uint64_t *seq);
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
+                             struct drm_sched_entity *entity,
+                             struct dma_fence *fence);
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                       struct drm_sched_entity *entity,
                                       uint64_t seq);
@@ -85,10 +90,12 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
                               struct drm_sched_entity *entity);
 
-void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
+                        struct amdgpu_device *adev);
 void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
-ktime_t amdgpu_ctx_mgr_fence_usage(struct amdgpu_ctx_mgr *mgr, uint32_t hwip,
-               uint32_t idx, uint64_t *elapsed);
+void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
+                         ktime_t usage[AMDGPU_HW_IP_NUM]);
+
 #endif
index 9af8d7a..625424f 100644 (file)
@@ -1556,9 +1556,6 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 
        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
 
-       amdgpu_gmc_tmz_set(adev);
-
-
        return 0;
 }
 
@@ -3701,6 +3698,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if (r)
                return r;
 
+       /* Enable TMZ based on IP_VERSION */
+       amdgpu_gmc_tmz_set(adev);
+
        amdgpu_gmc_noretry_set(adev);
        /* Need to get xgmi info early to decide the reset behavior*/
        if (adev->gmc.xgmi.supported) {
@@ -5219,6 +5219,10 @@ retry:   /* Rest of adevs pre asic reset from XGMI hive. */
                r = amdgpu_device_reset_sriov(adev, job ? false : true);
                if (r)
                        adev->asic_reset_res = r;
+
+               /* Aldebaran supports RAS in SRIOV, so we need to resume RAS during reset */
+               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+                       amdgpu_ras_resume(adev);
        } else {
                r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
                if (r && r == -EAGAIN)
index 881570d..47f0344 100644 (file)
@@ -1130,13 +1130,24 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
                                adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
                                        ip->revision & 0xc0;
                                ip->revision &= ~0xc0;
-                               adev->vcn.num_vcn_inst++;
+                               if (adev->vcn.num_vcn_inst < AMDGPU_MAX_VCN_INSTANCES)
+                                       adev->vcn.num_vcn_inst++;
+                               else
+                                       dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
+                                               adev->vcn.num_vcn_inst + 1,
+                                               AMDGPU_MAX_VCN_INSTANCES);
                        }
                        if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
                            le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
                            le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
-                           le16_to_cpu(ip->hw_id) == SDMA3_HWID)
-                               adev->sdma.num_instances++;
+                           le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
+                               if (adev->sdma.num_instances < AMDGPU_MAX_SDMA_INSTANCES)
+                                       adev->sdma.num_instances++;
+                               else
+                                       dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
+                                               adev->sdma.num_instances + 1,
+                                               AMDGPU_MAX_SDMA_INSTANCES);
+                       }
 
                        if (le16_to_cpu(ip->hw_id) == UMC_HWID)
                                adev->gmc.num_umc++;
@@ -1361,7 +1372,7 @@ union mall_info {
        struct mall_info_v1_0 v1;
 };
 
-int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
+static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
 {
        struct binary_header *bhdr;
        union mall_info *mall_info;
index 8592d43..8890300 100644 (file)
  * - 3.43.0 - Add device hot plug/unplug support
  * - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B
  * - 3.45.0 - Add context ioctl stable pstate interface
- * * 3.46.0 - To enable hot plug amdgpu tests in libdrm
+ * - 3.46.0 - To enable hot plug amdgpu tests in libdrm
+ * * 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       46
+#define KMS_DRIVER_MINOR       47
 #define KMS_DRIVER_PATCHLEVEL  0
 
 int amdgpu_vram_limit;
@@ -1940,6 +1941,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
        {0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
        {0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x7424, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
        {0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
 
        { PCI_DEVICE(0x1002, PCI_ANY_ID),
index 5a6857c..99a7855 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
 
 #include "amdgpu.h"
 #include "amdgpu_vm.h"
@@ -54,58 +55,49 @@ static const char *amdgpu_ip_name[AMDGPU_HW_IP_NUM] = {
 
 void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
 {
-       struct amdgpu_fpriv *fpriv;
-       uint32_t bus, dev, fn, i, domain;
-       uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
        struct drm_file *file = f->private_data;
        struct amdgpu_device *adev = drm_to_adev(file->minor->dev);
-       struct amdgpu_bo *root;
+       struct amdgpu_fpriv *fpriv = file->driver_priv;
+       struct amdgpu_vm *vm = &fpriv->vm;
+
+       uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
+       ktime_t usage[AMDGPU_HW_IP_NUM];
+       uint32_t bus, dev, fn, domain;
+       unsigned int hw_ip;
        int ret;
 
-       ret = amdgpu_file_to_fpriv(f, &fpriv);
-       if (ret)
-               return;
        bus = adev->pdev->bus->number;
        domain = pci_domain_nr(adev->pdev->bus);
        dev = PCI_SLOT(adev->pdev->devfn);
        fn = PCI_FUNC(adev->pdev->devfn);
 
-       root = amdgpu_bo_ref(fpriv->vm.root.bo);
-       if (!root)
+       ret = amdgpu_bo_reserve(vm->root.bo, false);
+       if (ret)
                return;
 
-       ret = amdgpu_bo_reserve(root, false);
-       if (ret) {
-               DRM_ERROR("Fail to reserve bo\n");
-               return;
-       }
-       amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem);
-       amdgpu_bo_unreserve(root);
-       amdgpu_bo_unref(&root);
+       amdgpu_vm_get_memory(vm, &vram_mem, &gtt_mem, &cpu_mem);
+       amdgpu_bo_unreserve(vm->root.bo);
 
-       seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus,
-                       dev, fn, fpriv->vm.pasid);
-       seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
-       seq_printf(m, "gtt mem:\t%llu kB\n", gtt_mem/1024UL);
-       seq_printf(m, "cpu mem:\t%llu kB\n", cpu_mem/1024UL);
-       for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
-               uint32_t count = amdgpu_ctx_num_entities[i];
-               int idx = 0;
-               uint64_t total = 0, min = 0;
-               uint32_t perc, frac;
+       amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage);
 
-               for (idx = 0; idx < count; idx++) {
-                       total = amdgpu_ctx_mgr_fence_usage(&fpriv->ctx_mgr,
-                               i, idx, &min);
-                       if ((total == 0) || (min == 0))
-                               continue;
+       /*
+        * ******************************************************************
+        * For text output format description please see drm-usage-stats.rst!
+        * ******************************************************************
+        */
 
-                       perc = div64_u64(10000 * total, min);
-                       frac = perc % 100;
+       seq_printf(m, "pasid:\t%u\n", fpriv->vm.pasid);
+       seq_printf(m, "drm-driver:\t%s\n", file->minor->dev->driver->name);
+       seq_printf(m, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn);
+       seq_printf(m, "drm-client-id:\t%Lu\n", vm->immediate.fence_context);
+       seq_printf(m, "drm-memory-vram:\t%llu KiB\n", vram_mem/1024UL);
+       seq_printf(m, "drm-memory-gtt: \t%llu KiB\n", gtt_mem/1024UL);
+       seq_printf(m, "drm-memory-cpu: \t%llu KiB\n", cpu_mem/1024UL);
+       for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
+               if (!usage[hw_ip])
+                       continue;
 
-                       seq_printf(m, "%s%d:\t%d.%d%%\n",
-                                       amdgpu_ip_name[i],
-                                       idx, perc/100, frac);
-               }
+               seq_printf(m, "drm-engine-%s:\t%Ld ns\n", amdgpu_ip_name[hw_ip],
+                          ktime_to_ns(usage[hw_ip]));
        }
 }
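
With the rewrite above, amdgpu's fdinfo switches to the common drm-usage-stats key/value format, and engines with no accumulated time are skipped entirely. Reading /proc/<pid>/fdinfo/<fd> for an amdgpu DRM fd would therefore yield output along these lines (keys follow directly from the seq_printf() calls above; the values and the "gfx" engine shown are purely illustrative):

    pasid:          32771
    drm-driver:     amdgpu
    drm-pdev:       0000:0b:00.0
    drm-client-id:  1234
    drm-memory-vram:        524288 KiB
    drm-memory-gtt:         131072 KiB
    drm-memory-cpu:         8192 KiB
    drm-engine-gfx: 25662044173 ns
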
index 6525712..8ef31d6 100644 (file)
@@ -296,8 +296,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                      AMDGPU_GEM_CREATE_VRAM_CLEARED |
                      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
                      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
-                     AMDGPU_GEM_CREATE_ENCRYPTED))
-
+                     AMDGPU_GEM_CREATE_ENCRYPTED |
+                     AMDGPU_GEM_CREATE_DISCARDABLE))
                return -EINVAL;
 
        /* reject invalid gem domains */
@@ -645,6 +645,8 @@ uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
                pte_flag |= AMDGPU_PTE_WRITEABLE;
        if (flags & AMDGPU_VM_PAGE_PRT)
                pte_flag |= AMDGPU_PTE_PRT;
+       if (flags & AMDGPU_VM_PAGE_NOALLOC)
+               pte_flag |= AMDGPU_PTE_NOALLOC;
 
        if (adev->gmc.gmc_funcs->map_mtype)
                pte_flag |= amdgpu_gmc_map_mtype(adev,
@@ -658,7 +660,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 {
        const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
                AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
-               AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
+               AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK |
+               AMDGPU_VM_PAGE_NOALLOC;
        const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
                AMDGPU_VM_PAGE_PRT;
 
index 88b852b..798c562 100644 (file)
@@ -512,9 +512,12 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
  */
 void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
 {
-       switch (adev->asic_type) {
-       case CHIP_RAVEN:
-       case CHIP_RENOIR:
+       switch (adev->ip_versions[GC_HWIP][0]) {
+       /* RAVEN */
+       case IP_VERSION(9, 2, 2):
+       case IP_VERSION(9, 1, 0):
+       /* RENOIR looks like RAVEN */
+       case IP_VERSION(9, 3, 0):
                if (amdgpu_tmz == 0) {
                        adev->gmc.tmz_enabled = false;
                        dev_info(adev->dev,
@@ -525,12 +528,20 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
                                 "Trusted Memory Zone (TMZ) feature enabled\n");
                }
                break;
-       case CHIP_NAVI10:
-       case CHIP_NAVI14:
-       case CHIP_NAVI12:
-       case CHIP_VANGOGH:
-       case CHIP_YELLOW_CARP:
-       case CHIP_IP_DISCOVERY:
+       case IP_VERSION(10, 1, 10):
+       case IP_VERSION(10, 1, 1):
+       case IP_VERSION(10, 1, 2):
+       case IP_VERSION(10, 1, 3):
+       case IP_VERSION(10, 3, 0):
+       case IP_VERSION(10, 3, 2):
+       case IP_VERSION(10, 3, 4):
+       case IP_VERSION(10, 3, 5):
+       /* VANGOGH */
+       case IP_VERSION(10, 3, 1):
+       /* YELLOW_CARP */
+       case IP_VERSION(10, 3, 3):
+       /* GC 10.3.7 */
+       case IP_VERSION(10, 3, 7):
                /* Don't enable it by default yet.
                 */
                if (amdgpu_tmz < 1) {
index 497478f..801f6fa 100644 (file)
@@ -1152,7 +1152,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
        mutex_init(&fpriv->bo_list_lock);
        idr_init(&fpriv->bo_list_handles);
 
-       amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
+       amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
 
        file_priv->driver_priv = fpriv;
        goto out_suspend;
index 5444515..2c82b1d 100644 (file)
@@ -567,6 +567,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
                bp->domain;
        bo->allowed_domains = bo->preferred_domains;
        if (bp->type != ttm_bo_type_kernel &&
+           !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
            bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
@@ -1018,7 +1019,9 @@ static const char *amdgpu_vram_names[] = {
        "DDR3",
        "DDR4",
        "GDDR6",
-       "DDR5"
+       "DDR5",
+       "LPDDR4",
+       "LPDDR5"
 };
 
 /**
index 4c9cbdc..147b79c 100644 (file)
@@ -41,7 +41,6 @@
 
 /* BO flag to indicate a KFD userptr BO */
 #define AMDGPU_AMDKFD_CREATE_USERPTR_BO        (1ULL << 63)
-#define AMDGPU_AMDKFD_CREATE_SVM_BO    (1ULL << 62)
 
 #define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
 #define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)
index 214e4e8..e9411c2 100644 (file)
@@ -1177,7 +1177,7 @@ int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool lo
        psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
        psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
 
-       if (!psp->xgmi_context.context.initialized) {
+       if (!psp->xgmi_context.context.mem_context.shared_buf) {
                ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
                if (ret)
                        return ret;
index 035891e..2de9309 100644 (file)
@@ -726,7 +726,9 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
        /* Do not enable if it is not allowed. */
        WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
 
-       if (!amdgpu_ras_intr_triggered()) {
+       /* Only enable RAS feature operations on the host side */
+       if (!amdgpu_sriov_vf(adev) &&
+               !amdgpu_ras_intr_triggered()) {
                ret = psp_ras_enable_features(&adev->psp, info, enable);
                if (ret) {
                        dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
@@ -1523,7 +1525,9 @@ static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
  */
 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
 {
-       if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF))
+       /* Fatal error events are handled on host side */
+       if (amdgpu_sriov_vf(adev) ||
+               !amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF))
                return;
 
        if (adev->nbio.ras &&
@@ -2270,10 +2274,14 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
 {
        adev->ras_hw_enabled = adev->ras_enabled = 0;
 
-       if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
+       if (!adev->is_atom_fw ||
            !amdgpu_ras_asic_supported(adev))
                return;
 
+       if (!(amdgpu_sriov_vf(adev) &&
+               (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2))))
+               return;
+
        if (!adev->gmc.xgmi.connected_to_cpu) {
                if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
                        dev_info(adev->dev, "MEM ECC is active.\n");
@@ -2285,15 +2293,21 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
 
                if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
                        dev_info(adev->dev, "SRAM ECC is active.\n");
-                       adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
-                                                   1 << AMDGPU_RAS_BLOCK__DF);
-
-                       if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0))
-                               adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
-                                               1 << AMDGPU_RAS_BLOCK__JPEG);
-                       else
-                               adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
-                                               1 << AMDGPU_RAS_BLOCK__JPEG);
+                       if (!amdgpu_sriov_vf(adev)) {
+                               adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+                                                           1 << AMDGPU_RAS_BLOCK__DF);
+
+                               if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0))
+                                       adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
+                                                       1 << AMDGPU_RAS_BLOCK__JPEG);
+                               else
+                                       adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
+                                                       1 << AMDGPU_RAS_BLOCK__JPEG);
+                       } else {
+                               adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
+                                                               1 << AMDGPU_RAS_BLOCK__SDMA |
+                                                               1 << AMDGPU_RAS_BLOCK__GFX);
+                       }
                } else {
                        dev_info(adev->dev, "SRAM ECC is not presented.\n");
                }
@@ -2637,6 +2651,10 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
        struct amdgpu_ras_block_object *obj;
        int r;
 
+       /* The guest side does not need to initialize RAS features */
+       if (amdgpu_sriov_vf(adev))
+               return 0;
+
        list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
                if (!node->ras_obj) {
                        dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
index 8e221a1..42c1f05 100644 (file)
@@ -124,6 +124,10 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
                struct amdgpu_iv_entry *entry)
 {
        kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+
+       if (amdgpu_sriov_vf(adev))
+               return AMDGPU_RAS_SUCCESS;
+
        amdgpu_ras_reset_gpu(adev);
 
        return AMDGPU_RAS_SUCCESS;
index ec26edd..be6f76a 100644 (file)
@@ -117,7 +117,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
        }
 
        abo = ttm_to_amdgpu_bo(bo);
-       if (abo->flags & AMDGPU_AMDKFD_CREATE_SVM_BO) {
+       if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
                placement->num_placement = 0;
                placement->num_busy_placement = 0;
                return;
index 65a4126..c5f46d2 100644 (file)
@@ -5111,7 +5111,7 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
        mutex_unlock(&adev->srbm_mutex);
 
        /* Initialize all compute VMIDs to have no GDS, GWS, or OA
-          acccess. These should be enabled by FW for target VMIDs. */
+          access. These should be enabled by FW for target VMIDs. */
        for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
@@ -6898,7 +6898,7 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
                            (order_base_2(prop->queue_size / 4) - 1));
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
-                           ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+                           (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
 #ifdef __BIG_ENDIAN
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
 #endif
@@ -6919,23 +6919,6 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
        mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
        mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
 
-       tmp = 0;
-       /* enable the doorbell if requested */
-       if (prop->use_doorbell) {
-               tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
-               tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
-                               DOORBELL_OFFSET, prop->doorbell_index);
-
-               tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
-                                   DOORBELL_EN, 1);
-               tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
-                                   DOORBELL_SOURCE, 0);
-               tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
-                                   DOORBELL_HIT, 0);
-       }
-
-       mqd->cp_hqd_pq_doorbell_control = tmp;
-
        /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
        mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
 
@@ -6973,20 +6956,6 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
        /* disable wptr polling */
        WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
 
-       /* write the EOP addr */
-       WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
-              mqd->cp_hqd_eop_base_addr_lo);
-       WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
-              mqd->cp_hqd_eop_base_addr_hi);
-
-       /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
-       WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
-              mqd->cp_hqd_eop_control);
-
-       /* enable doorbell? */
-       WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
-              mqd->cp_hqd_pq_doorbell_control);
-
        /* disable the queue if it's active */
        if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
                WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
@@ -7005,6 +6974,19 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
                       mqd->cp_hqd_pq_wptr_hi);
        }
 
+       /* disable doorbells */
+       WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+       /* write the EOP addr */
+       WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
+              mqd->cp_hqd_eop_base_addr_lo);
+       WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
+              mqd->cp_hqd_eop_base_addr_hi);
+
+       /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+       WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
+              mqd->cp_hqd_eop_control);
+
        /* set the pointer to the MQD */
        WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
               mqd->cp_mqd_base_addr_lo);
index 8773cbd..8c0a3fc 100644 (file)
@@ -4082,7 +4082,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
                            (order_base_2(prop->queue_size / 4) - 1));
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
-                           ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+                           (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
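
The RPTR_BLOCK_SIZE hunks in the gfx v8/v9/v10/v11 files all drop an explicit "<< 8". The reason is visible from the general shape of the field helper, reproduced here as a sketch (macro bodies assumed to match the amdgpu register helpers; see the driver's amdgpu.h for the authoritative definition):

    /* Approximate shape of the helpers; the field shift and mask come from
     * the generated register headers:
     *
     *   #define REG_SET_FIELD(orig_val, reg, field, field_val)            \
     *           (((orig_val) & ~REG_FIELD_MASK(reg, field)) |             \
     *            (REG_FIELD_MASK(reg, field) &                            \
     *             ((field_val) << REG_FIELD_SHIFT(reg, field))))
     *
     * REG_SET_FIELD() already shifts field_val into position, so passing a
     * value that was pre-shifted by 8 applied the shift twice before the
     * mask truncated it; hence the hunks above now pass the raw field value.
     */
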
index 90f6421..7f0b18b 100644 (file)
@@ -3714,7 +3714,7 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
        mutex_unlock(&adev->srbm_mutex);
 
        /* Initialize all compute VMIDs to have no GDS, GWS, or OA
-          acccess. These should be enabled by FW for target VMIDs. */
+          access. These should be enabled by FW for target VMIDs. */
        for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
                WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
                WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
@@ -4490,7 +4490,7 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
                            (order_base_2(ring->ring_size / 4) - 1));
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
-                       ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+                       (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
 #ifdef __BIG_ENDIAN
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
 #endif
@@ -5815,7 +5815,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
                /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
                gfx_v8_0_wait_for_rlc_serdes(adev);
 
-               /* write cmd to Set CGCG Overrride */
+               /* write cmd to Set CGCG Override */
                gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
 
                /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
index 83639b5..5349ca4 100644 (file)
@@ -2535,7 +2535,7 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
        mutex_unlock(&adev->srbm_mutex);
 
        /* Initialize all compute VMIDs to have no GDS, GWS, or OA
-          acccess. These should be enabled by FW for target VMIDs. */
+          access. These should be enabled by FW for target VMIDs. */
        for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
@@ -3514,7 +3514,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
                            (order_base_2(ring->ring_size / 4) - 1));
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
-                       ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+                       (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
 #ifdef __BIG_ENDIAN
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
 #endif
@@ -3535,23 +3535,6 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
        mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
        mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
 
-       tmp = 0;
-       /* enable the doorbell if requested */
-       if (ring->use_doorbell) {
-               tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
-               tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
-                               DOORBELL_OFFSET, ring->doorbell_index);
-
-               tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
-                                        DOORBELL_EN, 1);
-               tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
-                                        DOORBELL_SOURCE, 0);
-               tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
-                                        DOORBELL_HIT, 0);
-       }
-
-       mqd->cp_hqd_pq_doorbell_control = tmp;
-
        /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
        ring->wptr = 0;
        mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
index b8c7978..9077dfc 100644 (file)
@@ -613,6 +613,9 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
        *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
        *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
 
+       *flags &= ~AMDGPU_PTE_NOALLOC;
+       *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+
        if (mapping->flags & AMDGPU_PTE_PRT) {
                *flags |= AMDGPU_PTE_PRT;
                *flags |= AMDGPU_PTE_SNOOPED;
index 477f67d..a0c0b7d 100644 (file)
@@ -500,6 +500,9 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
        *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
        *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
 
+       *flags &= ~AMDGPU_PTE_NOALLOC;
+       *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+
        if (mapping->flags & AMDGPU_PTE_PRT) {
                *flags |= AMDGPU_PTE_PRT;
                *flags |= AMDGPU_PTE_SNOOPED;
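
Together with the amdgpu_gem.c change earlier, these gmc v10/v11 hunks complete the path for the new flag: userspace sets AMDGPU_VM_PAGE_NOALLOC on a VA mapping, amdgpu_gem_va_map_flags() translates it to AMDGPU_PTE_NOALLOC, and get_vm_pte() copies it into the PTE. A hedged sketch of the userspace side, assuming the standard struct drm_amdgpu_gem_va uapi; everything except the uapi symbols is illustrative:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <amdgpu_drm.h>

    /* Sketch: map a BO into the GPU VA space with the new NOALLOC hint. */
    static int gem_va_map_noalloc(int drm_fd, __u32 handle,
                                  __u64 va, __u64 size)
    {
            struct drm_amdgpu_gem_va args;

            memset(&args, 0, sizeof(args));
            args.handle = handle;
            args.operation = AMDGPU_VA_OP_MAP;
            args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
                         AMDGPU_VM_PAGE_NOALLOC;
            args.va_address = va;
            args.offset_in_bo = 0;
            args.map_size = size;

            return ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_VA, &args) ? -1 : 0;
    }
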
index d6d79e9..9e1ef81 100644 (file)
 MODULE_FIRMWARE("amdgpu/aldebaran_sos.bin");
 MODULE_FIRMWARE("amdgpu/aldebaran_ta.bin");
 MODULE_FIRMWARE("amdgpu/aldebaran_cap.bin");
-MODULE_FIRMWARE("amdgpu/yellow_carp_asd.bin");
 MODULE_FIRMWARE("amdgpu/yellow_carp_toc.bin");
 MODULE_FIRMWARE("amdgpu/yellow_carp_ta.bin");
-MODULE_FIRMWARE("amdgpu/psp_13_0_5_asd.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_5_toc.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_5_ta.bin");
-MODULE_FIRMWARE("amdgpu/psp_13_0_8_asd.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_8_toc.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_8_ta.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin");
@@ -85,17 +82,17 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
                err = psp_init_sos_microcode(psp, chip_name);
                if (err)
                        return err;
-               err = psp_init_ta_microcode(&adev->psp, chip_name);
-               if (err)
-                       return err;
+               /* It's not necessary to load the RAS TA on the guest side */
+               if (!amdgpu_sriov_vf(adev)) {
+                       err = psp_init_ta_microcode(&adev->psp, chip_name);
+                       if (err)
+                               return err;
+               }
                break;
        case IP_VERSION(13, 0, 1):
        case IP_VERSION(13, 0, 3):
        case IP_VERSION(13, 0, 5):
        case IP_VERSION(13, 0, 8):
-               err = psp_init_asd_microcode(psp, chip_name);
-               if (err)
-                       return err;
                err = psp_init_toc_microcode(psp, chip_name);
                if (err)
                        return err;
index c6a8520..9e18a2b 100644 (file)
@@ -42,6 +42,7 @@
 
 #include "soc15.h"
 #include "soc15_common.h"
+#include "soc21.h"
 
 static const struct amd_ip_funcs soc21_common_ip_funcs;
 
index 475f897..60a8164 100644 (file)
@@ -166,7 +166,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
        0x807c847c, 0x806eff6e,
        0x00000400, 0xbf0a757c,
        0xbf85ffef, 0xbf9c0000,
-       0xbf8200cd, 0xbef8007e,
+       0xbf8200ce, 0xbef8007e,
        0x8679ff7f, 0x0000ffff,
        0x8779ff79, 0x00040000,
        0xbefa0080, 0xbefb00ff,
@@ -212,304 +212,310 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
        0x761e0000, 0xe0524100,
        0x761e0100, 0xe0524200,
        0x761e0200, 0xe0524300,
-       0x761e0300, 0xb8f22a05,
-       0x80728172, 0x8e728a72,
-       0xb8f61605, 0x80768176,
-       0x8e768676, 0x80727672,
-       0x80f2c072, 0xb8f31605,
-       0x80738173, 0x8e738473,
-       0x8e7a8273, 0xbefa00ff,
-       0x01000000, 0xbefc0073,
-       0xc031003c, 0x00000072,
-       0x80f2c072, 0xbf8c007f,
-       0x80fc907c, 0xbe802d00,
-       0xbe822d02, 0xbe842d04,
-       0xbe862d06, 0xbe882d08,
-       0xbe8a2d0a, 0xbe8c2d0c,
-       0xbe8e2d0e, 0xbf06807c,
-       0xbf84fff1, 0xb8f22a05,
-       0x80728172, 0x8e728a72,
-       0xb8f61605, 0x80768176,
-       0x8e768676, 0x80727672,
-       0xbefa0084, 0xbefa00ff,
-       0x01000000, 0xc0211cfc,
+       0x761e0300, 0xbf8c0f70,
+       0xb8f22a05, 0x80728172,
+       0x8e728a72, 0xb8f61605,
+       0x80768176, 0x8e768676,
+       0x80727672, 0x80f2c072,
+       0xb8f31605, 0x80738173,
+       0x8e738473, 0x8e7a8273,
+       0xbefa00ff, 0x01000000,
+       0xbefc0073, 0xc031003c,
+       0x00000072, 0x80f2c072,
+       0xbf8c007f, 0x80fc907c,
+       0xbe802d00, 0xbe822d02,
+       0xbe842d04, 0xbe862d06,
+       0xbe882d08, 0xbe8a2d0a,
+       0xbe8c2d0c, 0xbe8e2d0e,
+       0xbf06807c, 0xbf84fff1,
+       0xb8f22a05, 0x80728172,
+       0x8e728a72, 0xb8f61605,
+       0x80768176, 0x8e768676,
+       0x80727672, 0xbefa0084,
+       0xbefa00ff, 0x01000000,
+       0xc0211cfc, 0x00000072,
+       0x80728472, 0xc0211c3c,
        0x00000072, 0x80728472,
-       0xc0211c3c, 0x00000072,
-       0x80728472, 0xc0211c7c,
+       0xc0211c7c, 0x00000072,
+       0x80728472, 0xc0211bbc,
        0x00000072, 0x80728472,
-       0xc0211bbc, 0x00000072,
-       0x80728472, 0xc0211bfc,
+       0xc0211bfc, 0x00000072,
+       0x80728472, 0xc0211d3c,
        0x00000072, 0x80728472,
-       0xc0211d3c, 0x00000072,
-       0x80728472, 0xc0211d7c,
+       0xc0211d7c, 0x00000072,
+       0x80728472, 0xc0211a3c,
        0x00000072, 0x80728472,
-       0xc0211a3c, 0x00000072,
-       0x80728472, 0xc0211a7c,
+       0xc0211a7c, 0x00000072,
+       0x80728472, 0xc0211dfc,
        0x00000072, 0x80728472,
-       0xc0211dfc, 0x00000072,
-       0x80728472, 0xc0211b3c,
+       0xc0211b3c, 0x00000072,
+       0x80728472, 0xc0211b7c,
        0x00000072, 0x80728472,
-       0xc0211b7c, 0x00000072,
-       0x80728472, 0xbf8c007f,
-       0xbefc0073, 0xbefe006e,
-       0xbeff006f, 0x867375ff,
-       0x000003ff, 0xb9734803,
-       0x867375ff, 0xfffff800,
-       0x8f738b73, 0xb973a2c3,
-       0xb977f801, 0x8673ff71,
-       0xf0000000, 0x8f739c73,
-       0x8e739073, 0xbef60080,
-       0x87767376, 0x8673ff71,
-       0x08000000, 0x8f739b73,
-       0x8e738f73, 0x87767376,
-       0x8673ff74, 0x00800000,
-       0x8f739773, 0xb976f807,
-       0x8671ff71, 0x0000ffff,
-       0x86fe7e7e, 0x86ea6a6a,
-       0x8f768374, 0xb976e0c2,
-       0xbf800002, 0xb9740002,
-       0xbf8a0000, 0x95807370,
-       0xbf810000, 0x00000000,
+       0xbf8c007f, 0xbefc0073,
+       0xbefe006e, 0xbeff006f,
+       0x867375ff, 0x000003ff,
+       0xb9734803, 0x867375ff,
+       0xfffff800, 0x8f738b73,
+       0xb973a2c3, 0xb977f801,
+       0x8673ff71, 0xf0000000,
+       0x8f739c73, 0x8e739073,
+       0xbef60080, 0x87767376,
+       0x8673ff71, 0x08000000,
+       0x8f739b73, 0x8e738f73,
+       0x87767376, 0x8673ff74,
+       0x00800000, 0x8f739773,
+       0xb976f807, 0x8671ff71,
+       0x0000ffff, 0x86fe7e7e,
+       0x86ea6a6a, 0x8f768374,
+       0xb976e0c2, 0xbf800002,
+       0xb9740002, 0xbf8a0000,
+       0x95807370, 0xbf810000,
 };
 
 
 static const uint32_t cwsr_trap_gfx9_hex[] = {
-       0xbf820001, 0xbf820248,
-       0xb8f8f802, 0x89788678,
-       0xb8eef801, 0x866eff6e,
-       0x00000800, 0xbf840003,
+       0xbf820001, 0xbf820254,
+       0xb8f8f802, 0x8978ff78,
+       0x00020006, 0xb8fbf803,
        0x866eff78, 0x00002000,
-       0xbf840016, 0xb8fbf803,
+       0xbf840009, 0x866eff6d,
+       0x00ff0000, 0xbf85001e,
        0x866eff7b, 0x00000400,
-       0xbf85003b, 0x866eff7b,
-       0x00000800, 0xbf850003,
-       0x866eff7b, 0x00000100,
-       0xbf84000c, 0x866eff78,
-       0x00002000, 0xbf840005,
-       0xbf8e0010, 0xb8eef803,
-       0x866eff6e, 0x00000400,
-       0xbf84fffb, 0x8778ff78,
-       0x00002000, 0x80ec886c,
-       0x82ed806d, 0xb8eef807,
-       0x866fff6e, 0x001f8000,
-       0x8e6f8b6f, 0x8977ff77,
-       0xfc000000, 0x87776f77,
-       0x896eff6e, 0x001f8000,
-       0xb96ef807, 0xb8faf812,
+       0xbf850051, 0xbf8e0010,
+       0xb8fbf803, 0xbf82fffa,
+       0x866eff7b, 0x00000900,
+       0xbf850015, 0x866eff7b,
+       0x000071ff, 0xbf840008,
+       0x866fff7b, 0x00007080,
+       0xbf840001, 0xbeee1a87,
+       0xb8eff801, 0x8e6e8c6e,
+       0x866e6f6e, 0xbf85000a,
+       0x866eff6d, 0x00ff0000,
+       0xbf850007, 0xb8eef801,
+       0x866eff6e, 0x00000800,
+       0xbf850003, 0x866eff7b,
+       0x00000400, 0xbf850036,
+       0xb8faf807, 0x867aff7a,
+       0x001f8000, 0x8e7a8b7a,
+       0x8977ff77, 0xfc000000,
+       0x87777a77, 0xba7ff807,
+       0x00000000, 0xb8faf812,
        0xb8fbf813, 0x8efa887a,
-       0xc0071bbd, 0x00000000,
-       0xbf8cc07f, 0xc0071ebd,
-       0x00000008, 0xbf8cc07f,
-       0x86ee6e6e, 0xbf840001,
-       0xbe801d6e, 0xb8fbf803,
-       0x867bff7b, 0x000001ff,
+       0xc0031bbd, 0x00000010,
+       0xbf8cc07f, 0x8e6e976e,
+       0x8977ff77, 0x00800000,
+       0x87776e77, 0xc0071bbd,
+       0x00000000, 0xbf8cc07f,
+       0xc0071ebd, 0x00000008,
+       0xbf8cc07f, 0x86ee6e6e,
+       0xbf840001, 0xbe801d6e,
+       0x866eff6d, 0x01ff0000,
+       0xbf850005, 0x8778ff78,
+       0x00002000, 0x80ec886c,
+       0x82ed806d, 0xbf820005,
+       0x866eff6d, 0x01000000,
        0xbf850002, 0x806c846c,
        0x826d806d, 0x866dff6d,
-       0x0000ffff, 0x8f6e8b77,
-       0x866eff6e, 0x001f8000,
-       0xb96ef807, 0x86fe7e7e,
+       0x0000ffff, 0x8f7a8b77,
+       0x867aff7a, 0x001f8000,
+       0xb97af807, 0x86fe7e7e,
        0x86ea6a6a, 0x8f6e8378,
        0xb96ee0c2, 0xbf800002,
        0xb9780002, 0xbe801f6c,
        0x866dff6d, 0x0000ffff,
        0xbefa0080, 0xb97a0283,
-       0xb8fa2407, 0x8e7a9b7a,
-       0x876d7a6d, 0xb8fa03c7,
-       0x8e7a9a7a, 0x876d7a6d,
        0xb8faf807, 0x867aff7a,
-       0x00007fff, 0xb97af807,
-       0xbeee007e, 0xbeef007f,
-       0xbefe0180, 0xbf900004,
-       0x877a8478, 0xb97af802,
-       0xbf8e0002, 0xbf88fffe,
-       0xb8fa2a05, 0x807a817a,
-       0x8e7a8a7a, 0xb8fb1605,
-       0x807b817b, 0x8e7b867b,
-       0x807a7b7a, 0x807a7e7a,
-       0x827b807f, 0x867bff7b,
-       0x0000ffff, 0xc04b1c3d,
-       0x00000050, 0xbf8cc07f,
-       0xc04b1d3d, 0x00000060,
-       0xbf8cc07f, 0xc0431e7d,
-       0x00000074, 0xbf8cc07f,
-       0xbef4007e, 0x8675ff7f,
-       0x0000ffff, 0x8775ff75,
-       0x00040000, 0xbef60080,
-       0xbef700ff, 0x00807fac,
-       0x867aff7f, 0x08000000,
-       0x8f7a837a, 0x87777a77,
-       0x867aff7f, 0x70000000,
-       0x8f7a817a, 0x87777a77,
-       0xbef1007c, 0xbef00080,
-       0xb8f02a05, 0x80708170,
-       0x8e708a70, 0xb8fa1605,
-       0x807a817a, 0x8e7a867a,
-       0x80707a70, 0xbef60084,
-       0xbef600ff, 0x01000000,
-       0xbefe007c, 0xbefc0070,
-       0xc0611c7a, 0x0000007c,
-       0xbf8cc07f, 0x80708470,
-       0xbefc007e, 0xbefe007c,
-       0xbefc0070, 0xc0611b3a,
+       0x001f8000, 0x8e7a8b7a,
+       0x8977ff77, 0xfc000000,
+       0x87777a77, 0xba7ff807,
+       0x00000000, 0xbeee007e,
+       0xbeef007f, 0xbefe0180,
+       0xbf900004, 0x877a8478,
+       0xb97af802, 0xbf8e0002,
+       0xbf88fffe, 0xb8fa2a05,
+       0x807a817a, 0x8e7a8a7a,
+       0xb8fb1605, 0x807b817b,
+       0x8e7b867b, 0x807a7b7a,
+       0x807a7e7a, 0x827b807f,
+       0x867bff7b, 0x0000ffff,
+       0xc04b1c3d, 0x00000050,
+       0xbf8cc07f, 0xc04b1d3d,
+       0x00000060, 0xbf8cc07f,
+       0xc0431e7d, 0x00000074,
+       0xbf8cc07f, 0xbef4007e,
+       0x8675ff7f, 0x0000ffff,
+       0x8775ff75, 0x00040000,
+       0xbef60080, 0xbef700ff,
+       0x00807fac, 0xbef1007c,
+       0xbef00080, 0xb8f02a05,
+       0x80708170, 0x8e708a70,
+       0xb8fa1605, 0x807a817a,
+       0x8e7a867a, 0x80707a70,
+       0xbef60084, 0xbef600ff,
+       0x01000000, 0xbefe007c,
+       0xbefc0070, 0xc0611c7a,
        0x0000007c, 0xbf8cc07f,
        0x80708470, 0xbefc007e,
        0xbefe007c, 0xbefc0070,
-       0xc0611b7a, 0x0000007c,
+       0xc0611b3a, 0x0000007c,
        0xbf8cc07f, 0x80708470,
        0xbefc007e, 0xbefe007c,
-       0xbefc0070, 0xc0611bba,
+       0xbefc0070, 0xc0611b7a,
        0x0000007c, 0xbf8cc07f,
        0x80708470, 0xbefc007e,
        0xbefe007c, 0xbefc0070,
-       0xc0611bfa, 0x0000007c,
+       0xc0611bba, 0x0000007c,
        0xbf8cc07f, 0x80708470,
        0xbefc007e, 0xbefe007c,
-       0xbefc0070, 0xc0611e3a,
-       0x0000007c, 0xbf8cc07f,
-       0x80708470, 0xbefc007e,
-       0xb8fbf803, 0xbefe007c,
-       0xbefc0070, 0xc0611efa,
+       0xbefc0070, 0xc0611bfa,
        0x0000007c, 0xbf8cc07f,
        0x80708470, 0xbefc007e,
        0xbefe007c, 0xbefc0070,
-       0xc0611a3a, 0x0000007c,
+       0xc0611e3a, 0x0000007c,
+       0xbf8cc07f, 0x80708470,
+       0xbefc007e, 0xb8fbf803,
+       0xbefe007c, 0xbefc0070,
+       0xc0611efa, 0x0000007c,
        0xbf8cc07f, 0x80708470,
        0xbefc007e, 0xbefe007c,
-       0xbefc0070, 0xc0611a7a,
+       0xbefc0070, 0xc0611a3a,
        0x0000007c, 0xbf8cc07f,
        0x80708470, 0xbefc007e,
-       0xb8f1f801, 0xbefe007c,
-       0xbefc0070, 0xc0611c7a,
-       0x0000007c, 0xbf8cc07f,
-       0x80708470, 0xbefc007e,
-       0x867aff7f, 0x04000000,
-       0xbeef0080, 0x876f6f7a,
-       0xb8f02a05, 0x80708170,
-       0x8e708a70, 0xb8fb1605,
-       0x807b817b, 0x8e7b847b,
-       0x8e76827b, 0xbef600ff,
-       0x01000000, 0xbef20174,
-       0x80747074, 0x82758075,
-       0xbefc0080, 0xbf800000,
-       0xbe802b00, 0xbe822b02,
-       0xbe842b04, 0xbe862b06,
-       0xbe882b08, 0xbe8a2b0a,
-       0xbe8c2b0c, 0xbe8e2b0e,
-       0xc06b003a, 0x00000000,
-       0xbf8cc07f, 0xc06b013a,
-       0x00000010, 0xbf8cc07f,
-       0xc06b023a, 0x00000020,
-       0xbf8cc07f, 0xc06b033a,
-       0x00000030, 0xbf8cc07f,
-       0x8074c074, 0x82758075,
-       0x807c907c, 0xbf0a7b7c,
-       0xbf85ffe7, 0xbef40172,
-       0xbef00080, 0xbefe00c1,
-       0xbeff00c1, 0xbee80080,
-       0xbee90080, 0xbef600ff,
-       0x01000000, 0x867aff78,
-       0x00400000, 0xbf850003,
-       0xb8faf803, 0x897a7aff,
-       0x10000000, 0xbf85004d,
-       0xbe840080, 0xd2890000,
-       0x00000900, 0x80048104,
-       0xd2890001, 0x00000900,
-       0x80048104, 0xd2890002,
-       0x00000900, 0x80048104,
-       0xd2890003, 0x00000900,
-       0x80048104, 0xc069003a,
-       0x00000070, 0xbf8cc07f,
-       0x80709070, 0xbf06c004,
-       0xbf84ffee, 0xbe840080,
-       0xd2890000, 0x00000901,
+       0xbefe007c, 0xbefc0070,
+       0xc0611a7a, 0x0000007c,
+       0xbf8cc07f, 0x80708470,
+       0xbefc007e, 0xb8f1f801,
+       0xbefe007c, 0xbefc0070,
+       0xc0611c7a, 0x0000007c,
+       0xbf8cc07f, 0x80708470,
+       0xbefc007e, 0x867aff7f,
+       0x04000000, 0xbeef0080,
+       0x876f6f7a, 0xb8f02a05,
+       0x80708170, 0x8e708a70,
+       0xb8fb1605, 0x807b817b,
+       0x8e7b847b, 0x8e76827b,
+       0xbef600ff, 0x01000000,
+       0xbef20174, 0x80747074,
+       0x82758075, 0xbefc0080,
+       0xbf800000, 0xbe802b00,
+       0xbe822b02, 0xbe842b04,
+       0xbe862b06, 0xbe882b08,
+       0xbe8a2b0a, 0xbe8c2b0c,
+       0xbe8e2b0e, 0xc06b003a,
+       0x00000000, 0xbf8cc07f,
+       0xc06b013a, 0x00000010,
+       0xbf8cc07f, 0xc06b023a,
+       0x00000020, 0xbf8cc07f,
+       0xc06b033a, 0x00000030,
+       0xbf8cc07f, 0x8074c074,
+       0x82758075, 0x807c907c,
+       0xbf0a7b7c, 0xbf85ffe7,
+       0xbef40172, 0xbef00080,
+       0xbefe00c1, 0xbeff00c1,
+       0xbee80080, 0xbee90080,
+       0xbef600ff, 0x01000000,
+       0x867aff78, 0x00400000,
+       0xbf850003, 0xb8faf803,
+       0x897a7aff, 0x10000000,
+       0xbf85004d, 0xbe840080,
+       0xd2890000, 0x00000900,
        0x80048104, 0xd2890001,
-       0x00000901, 0x80048104,
-       0xd2890002, 0x00000901,
+       0x00000900, 0x80048104,
+       0xd2890002, 0x00000900,
        0x80048104, 0xd2890003,
-       0x00000901, 0x80048104,
+       0x00000900, 0x80048104,
        0xc069003a, 0x00000070,
        0xbf8cc07f, 0x80709070,
        0xbf06c004, 0xbf84ffee,
        0xbe840080, 0xd2890000,
-       0x00000902, 0x80048104,
-       0xd2890001, 0x00000902,
+       0x00000901, 0x80048104,
+       0xd2890001, 0x00000901,
        0x80048104, 0xd2890002,
-       0x00000902, 0x80048104,
-       0xd2890003, 0x00000902,
+       0x00000901, 0x80048104,
+       0xd2890003, 0x00000901,
        0x80048104, 0xc069003a,
        0x00000070, 0xbf8cc07f,
        0x80709070, 0xbf06c004,
        0xbf84ffee, 0xbe840080,
-       0xd2890000, 0x00000903,
+       0xd2890000, 0x00000902,
        0x80048104, 0xd2890001,
-       0x00000903, 0x80048104,
-       0xd2890002, 0x00000903,
+       0x00000902, 0x80048104,
+       0xd2890002, 0x00000902,
        0x80048104, 0xd2890003,
-       0x00000903, 0x80048104,
+       0x00000902, 0x80048104,
        0xc069003a, 0x00000070,
        0xbf8cc07f, 0x80709070,
        0xbf06c004, 0xbf84ffee,
-       0xbf820008, 0xe0724000,
-       0x701d0000, 0xe0724100,
-       0x701d0100, 0xe0724200,
-       0x701d0200, 0xe0724300,
-       0x701d0300, 0xbefe00c1,
-       0xbeff00c1, 0xb8fb4306,
-       0x867bc17b, 0xbf840063,
-       0xbf8a0000, 0x867aff6f,
-       0x04000000, 0xbf84005f,
-       0x8e7b867b, 0x8e7b827b,
-       0xbef6007b, 0xb8f02a05,
-       0x80708170, 0x8e708a70,
-       0xb8fa1605, 0x807a817a,
-       0x8e7a867a, 0x80707a70,
-       0x8070ff70, 0x00000080,
-       0xbef600ff, 0x01000000,
-       0xbefc0080, 0xd28c0002,
-       0x000100c1, 0xd28d0003,
-       0x000204c1, 0x867aff78,
-       0x00400000, 0xbf850003,
-       0xb8faf803, 0x897a7aff,
-       0x10000000, 0xbf850030,
-       0x24040682, 0xd86e4000,
-       0x00000002, 0xbf8cc07f,
        0xbe840080, 0xd2890000,
-       0x00000900, 0x80048104,
-       0xd2890001, 0x00000900,
+       0x00000903, 0x80048104,
+       0xd2890001, 0x00000903,
        0x80048104, 0xd2890002,
-       0x00000900, 0x80048104,
-       0xd2890003, 0x00000900,
+       0x00000903, 0x80048104,
+       0xd2890003, 0x00000903,
        0x80048104, 0xc069003a,
        0x00000070, 0xbf8cc07f,
        0x80709070, 0xbf06c004,
-       0xbf84ffee, 0xbe840080,
-       0xd2890000, 0x00000901,
+       0xbf84ffee, 0xbf820008,
+       0xe0724000, 0x701d0000,
+       0xe0724100, 0x701d0100,
+       0xe0724200, 0x701d0200,
+       0xe0724300, 0x701d0300,
+       0xbefe00c1, 0xbeff00c1,
+       0xb8fb4306, 0x867bc17b,
+       0xbf840063, 0xbf8a0000,
+       0x867aff6f, 0x04000000,
+       0xbf84005f, 0x8e7b867b,
+       0x8e7b827b, 0xbef6007b,
+       0xb8f02a05, 0x80708170,
+       0x8e708a70, 0xb8fa1605,
+       0x807a817a, 0x8e7a867a,
+       0x80707a70, 0x8070ff70,
+       0x00000080, 0xbef600ff,
+       0x01000000, 0xbefc0080,
+       0xd28c0002, 0x000100c1,
+       0xd28d0003, 0x000204c1,
+       0x867aff78, 0x00400000,
+       0xbf850003, 0xb8faf803,
+       0x897a7aff, 0x10000000,
+       0xbf850030, 0x24040682,
+       0xd86e4000, 0x00000002,
+       0xbf8cc07f, 0xbe840080,
+       0xd2890000, 0x00000900,
        0x80048104, 0xd2890001,
-       0x00000901, 0x80048104,
-       0xd2890002, 0x00000901,
+       0x00000900, 0x80048104,
+       0xd2890002, 0x00000900,
        0x80048104, 0xd2890003,
-       0x00000901, 0x80048104,
+       0x00000900, 0x80048104,
        0xc069003a, 0x00000070,
        0xbf8cc07f, 0x80709070,
        0xbf06c004, 0xbf84ffee,
-       0x680404ff, 0x00000200,
+       0xbe840080, 0xd2890000,
+       0x00000901, 0x80048104,
+       0xd2890001, 0x00000901,
+       0x80048104, 0xd2890002,
+       0x00000901, 0x80048104,
+       0xd2890003, 0x00000901,
+       0x80048104, 0xc069003a,
+       0x00000070, 0xbf8cc07f,
+       0x80709070, 0xbf06c004,
+       0xbf84ffee, 0x680404ff,
+       0x00000200, 0xd0c9006a,
+       0x0000f702, 0xbf87ffd2,
+       0xbf820015, 0xd1060002,
+       0x00011103, 0x7e0602ff,
+       0x00000200, 0xbefc00ff,
+       0x00010000, 0xbe800077,
+       0x8677ff77, 0xff7fffff,
+       0x8777ff77, 0x00058000,
+       0xd8ec0000, 0x00000002,
+       0xbf8cc07f, 0xe0765000,
+       0x701d0002, 0x68040702,
        0xd0c9006a, 0x0000f702,
-       0xbf87ffd2, 0xbf820015,
-       0xd1060002, 0x00011103,
-       0x7e0602ff, 0x00000200,
-       0xbefc00ff, 0x00010000,
-       0xbe800077, 0x8677ff77,
-       0xff7fffff, 0x8777ff77,
-       0x00058000, 0xd8ec0000,
-       0x00000002, 0xbf8cc07f,
-       0xe0765000, 0x701d0002,
-       0x68040702, 0xd0c9006a,
-       0x0000f702, 0xbf87fff7,
-       0xbef70000, 0xbef000ff,
-       0x00000400, 0xbefe00c1,
-       0xbeff00c1, 0xb8fb2a05,
-       0x807b817b, 0x8e7b827b,
-       0x8e76887b, 0xbef600ff,
+       0xbf87fff7, 0xbef70000,
+       0xbef000ff, 0x00000400,
+       0xbefe00c1, 0xbeff00c1,
+       0xb8fb2a05, 0x807b817b,
+       0x8e7b827b, 0xbef600ff,
        0x01000000, 0xbefc0084,
        0xbf0a7b7c, 0xbf84006d,
        0xbf11017c, 0x807bff7b,
@@ -566,15 +572,11 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
        0x701d0300, 0x807c847c,
        0x8070ff70, 0x00000400,
        0xbf0a7b7c, 0xbf85ffef,
-       0xbf9c0000, 0xbf8200da,
+       0xbf9c0000, 0xbf8200c7,
        0xbef4007e, 0x8675ff7f,
        0x0000ffff, 0x8775ff75,
        0x00040000, 0xbef60080,
        0xbef700ff, 0x00807fac,
-       0x866eff7f, 0x08000000,
-       0x8f6e836e, 0x87776e77,
-       0x866eff7f, 0x70000000,
-       0x8f6e816e, 0x87776e77,
        0x866eff7f, 0x04000000,
        0xbf84001e, 0xbefe00c1,
        0xbeff00c1, 0xb8ef4306,
@@ -591,33 +593,33 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
        0x781d0000, 0x807cff7c,
        0x00000200, 0x8078ff78,
        0x00000200, 0xbf0a6f7c,
-       0xbf85fff6, 0xbef80080,
-       0xbefe00c1, 0xbeff00c1,
-       0xb8ef2a05, 0x806f816f,
-       0x8e6f826f, 0x8e76886f,
-       0xbef600ff, 0x01000000,
-       0xbeee0078, 0x8078ff78,
-       0x00000400, 0xbefc0084,
-       0xbf11087c, 0x806fff6f,
-       0x00008000, 0xe0524000,
-       0x781d0000, 0xe0524100,
-       0x781d0100, 0xe0524200,
-       0x781d0200, 0xe0524300,
-       0x781d0300, 0xbf8c0f70,
-       0x7e000300, 0x7e020301,
-       0x7e040302, 0x7e060303,
-       0x807c847c, 0x8078ff78,
-       0x00000400, 0xbf0a6f7c,
-       0xbf85ffee, 0xbf9c0000,
-       0xe0524000, 0x6e1d0000,
-       0xe0524100, 0x6e1d0100,
-       0xe0524200, 0x6e1d0200,
-       0xe0524300, 0x6e1d0300,
-       0xb8f82a05, 0x80788178,
-       0x8e788a78, 0xb8ee1605,
-       0x806e816e, 0x8e6e866e,
-       0x80786e78, 0x80f8c078,
-       0xb8ef1605, 0x806f816f,
+       0xbf85fff6, 0xbefe00c1,
+       0xbeff00c1, 0xbef600ff,
+       0x01000000, 0xb8ef2a05,
+       0x806f816f, 0x8e6f826f,
+       0x806fff6f, 0x00008000,
+       0xbef80080, 0xbeee0078,
+       0x8078ff78, 0x00000400,
+       0xbefc0084, 0xbf11087c,
+       0xe0524000, 0x781d0000,
+       0xe0524100, 0x781d0100,
+       0xe0524200, 0x781d0200,
+       0xe0524300, 0x781d0300,
+       0xbf8c0f70, 0x7e000300,
+       0x7e020301, 0x7e040302,
+       0x7e060303, 0x807c847c,
+       0x8078ff78, 0x00000400,
+       0xbf0a6f7c, 0xbf85ffee,
+       0xbf9c0000, 0xe0524000,
+       0x6e1d0000, 0xe0524100,
+       0x6e1d0100, 0xe0524200,
+       0x6e1d0200, 0xe0524300,
+       0x6e1d0300, 0xbf8c0f70,
+       0xb8f82a05, 0x80788178,
+       0x8e788a78, 0xb8ee1605,
+       0x806e816e, 0x8e6e866e,
+       0x80786e78, 0x80f8c078,
+       0xb8ef1605, 0x806f816f,
        0x8e6f846f, 0x8e76826f,
        0xbef600ff, 0x01000000,
        0xbefc006f, 0xc031003a,
@@ -663,90 +665,101 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
        0xc00b1c37, 0x00000050,
        0xc00b1d37, 0x00000060,
        0xc0031e77, 0x00000074,
-       0xbf8cc07f, 0x866fff6d,
-       0xf8000000, 0x8f6f9b6f,
-       0x8e6f906f, 0xbeee0080,
-       0x876e6f6e, 0x866fff6d,
-       0x04000000, 0x8f6f9a6f,
-       0x8e6f8f6f, 0x876e6f6e,
-       0x866fff7a, 0x00800000,
-       0x8f6f976f, 0xb96ef807,
-       0x866dff6d, 0x0000ffff,
-       0x86fe7e7e, 0x86ea6a6a,
-       0x8f6e837a, 0xb96ee0c2,
-       0xbf800002, 0xb97a0002,
-       0xbf8a0000, 0x95806f6c,
-       0xbf810000, 0x00000000,
+       0xbf8cc07f, 0x8f6e8b77,
+       0x866eff6e, 0x001f8000,
+       0xb96ef807, 0x866dff6d,
+       0x0000ffff, 0x86fe7e7e,
+       0x86ea6a6a, 0x8f6e837a,
+       0xb96ee0c2, 0xbf800002,
+       0xb97a0002, 0xbf8a0000,
+       0xbe801f6c, 0xbf810000,
 };
 
 static const uint32_t cwsr_trap_nv1x_hex[] = {
-       0xbf820001, 0xbf8201cd,
+       0xbf820001, 0xbf8201f1,
        0xb0804004, 0xb978f802,
-       0x8a788678, 0xb96ef801,
-       0x876eff6e, 0x00000800,
-       0xbf840003, 0x876eff78,
+       0x8a78ff78, 0x00020006,
+       0xb97bf803, 0x876eff78,
        0x00002000, 0xbf840009,
-       0xb97bf803, 0x876eff7b,
-       0x00000400, 0xbf850033,
-       0x876eff7b, 0x00000100,
-       0xbf840002, 0x8878ff78,
-       0x00002000, 0x8a77ff77,
-       0xff000000, 0xb96ef807,
-       0x876fff6e, 0x02000000,
-       0x8f6f866f, 0x88776f77,
-       0x876fff6e, 0x003f8000,
-       0x8f6f896f, 0x88776f77,
-       0x8a6eff6e, 0x023f8000,
-       0xb9eef807, 0xb97af812,
+       0x876eff6d, 0x00ff0000,
+       0xbf85001e, 0x876eff7b,
+       0x00000400, 0xbf850057,
+       0xbf8e0010, 0xb97bf803,
+       0xbf82fffa, 0x876eff7b,
+       0x00000900, 0xbf850015,
+       0x876eff7b, 0x000071ff,
+       0xbf840008, 0x876fff7b,
+       0x00007080, 0xbf840001,
+       0xbeee1d87, 0xb96ff801,
+       0x8f6e8c6e, 0x876e6f6e,
+       0xbf85000a, 0x876eff6d,
+       0x00ff0000, 0xbf850007,
+       0xb96ef801, 0x876eff6e,
+       0x00000800, 0xbf850003,
+       0x876eff7b, 0x00000400,
+       0xbf85003c, 0x8a77ff77,
+       0xff000000, 0xb97af807,
+       0x877bff7a, 0x02000000,
+       0x8f7b867b, 0x88777b77,
+       0x877bff7a, 0x003f8000,
+       0x8f7b897b, 0x88777b77,
+       0x8a7aff7a, 0x023f8000,
+       0xb9faf807, 0xb97af812,
        0xb97bf813, 0x8ffa887a,
-       0xf4051bbd, 0xfa000000,
-       0xbf8cc07f, 0xf4051ebd,
-       0xfa000008, 0xbf8cc07f,
-       0x87ee6e6e, 0xbf840001,
-       0xbe80206e, 0xb97bf803,
-       0x877bff7b, 0x000001ff,
+       0xf4011bbd, 0xfa000010,
+       0xbf8cc07f, 0x8f6e976e,
+       0x8a77ff77, 0x00800000,
+       0x88776e77, 0xf4051bbd,
+       0xfa000000, 0xbf8cc07f,
+       0xf4051ebd, 0xfa000008,
+       0xbf8cc07f, 0x87ee6e6e,
+       0xbf840001, 0xbe80206e,
+       0x876eff6d, 0x01ff0000,
+       0xbf850005, 0x8878ff78,
+       0x00002000, 0x80ec886c,
+       0x82ed806d, 0xbf820005,
+       0x876eff6d, 0x01000000,
        0xbf850002, 0x806c846c,
        0x826d806d, 0x876dff6d,
-       0x0000ffff, 0x906e8977,
-       0x876fff6e, 0x003f8000,
-       0x906e8677, 0x876eff6e,
-       0x02000000, 0x886e6f6e,
-       0xb9eef807, 0x87fe7e7e,
+       0x0000ffff, 0x907a8977,
+       0x877bff7a, 0x003f8000,
+       0x907a8677, 0x877aff7a,
+       0x02000000, 0x887a7b7a,
+       0xb9faf807, 0x87fe7e7e,
        0x87ea6a6a, 0xb9f8f802,
        0xbe80226c, 0x876dff6d,
        0x0000ffff, 0xbefa0380,
-       0xb9fa0283, 0xb97a2c07,
-       0x8f7a9a7a, 0x886d7a6d,
-       0xb97a03c7, 0x8f7a997a,
-       0x886d7a6d, 0xb97a0647,
-       0x8f7a987a, 0x886d7a6d,
-       0xb97af807, 0x877aff7a,
-       0x00007fff, 0xb9faf807,
-       0xbeee037e, 0xbeef037f,
-       0xbefe0480, 0xbf900004,
-       0xbf8e0002, 0xbf88fffe,
-       0xb97b02dc, 0x8f7b997b,
-       0x887b7b7f, 0xb97a2a05,
+       0xb9fa0283, 0x8a77ff77,
+       0xff000000, 0xb97af807,
+       0x877bff7a, 0x02000000,
+       0x8f7b867b, 0x88777b77,
+       0x877bff7a, 0x003f8000,
+       0x8f7b897b, 0x88777b77,
+       0x8a7aff7a, 0x023f8000,
+       0xb9faf807, 0xbeee037e,
+       0xbeef037f, 0xbefe0480,
+       0xbf900004, 0xbf8e0002,
+       0xbf88fffe, 0x877aff7f,
+       0x04000000, 0x8f7a857a,
+       0x886d7a6d, 0xb97b02dc,
+       0x8f7b997b, 0xb97a2a05,
        0x807a817a, 0xbf0d997b,
        0xbf850002, 0x8f7a897a,
        0xbf820001, 0x8f7a8a7a,
-       0x877bff7f, 0x0000ffff,
-       0x807aff7a, 0x00000200,
-       0x807a7e7a, 0x827b807b,
-       0xf4491c3d, 0xfa000050,
-       0xf4491d3d, 0xfa000060,
-       0xf4411e7d, 0xfa000074,
-       0xbef4037e, 0x8775ff7f,
-       0x0000ffff, 0x8875ff75,
-       0x00040000, 0xbef60380,
-       0xbef703ff, 0x10807fac,
-       0x877aff7f, 0x08000000,
-       0x907a837a, 0x88777a77,
-       0x877aff7f, 0x70000000,
-       0x907a817a, 0x88777a77,
-       0xbef1037c, 0xbef00380,
-       0xb97302dc, 0x8f739973,
-       0x8873737f, 0xb97bf816,
+       0xb97b1e06, 0x8f7b8a7b,
+       0x807a7b7a, 0x877bff7f,
+       0x0000ffff, 0x807aff7a,
+       0x00000200, 0x807a7e7a,
+       0x827b807b, 0xf4491c3d,
+       0xfa000050, 0xf4491d3d,
+       0xfa000060, 0xf4411e7d,
+       0xfa000074, 0xbef4037e,
+       0x8775ff7f, 0x0000ffff,
+       0x8875ff75, 0x00040000,
+       0xbef60380, 0xbef703ff,
+       0x10807fac, 0xbef1037c,
+       0xbef00380, 0xb97302dc,
+       0x8f739973, 0xb97bf816,
        0xba80f816, 0x00000000,
        0xbefe03c1, 0x907c9973,
        0x877c817c, 0xbf06817c,
@@ -763,7 +776,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
        0xe0704100, 0x705d0100,
        0xe0704200, 0x705d0200,
        0xe0704300, 0x705d0300,
-       0xb9702a05, 0x80708170,
+       0xb9703a05, 0x80708170,
        0xbf0d9973, 0xbf850002,
        0x8f708970, 0xbf820001,
        0x8f708a70, 0xb97a1e06,
@@ -776,8 +789,9 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
        0xbefe037c, 0xbefc0370,
        0xf4611b3a, 0xf8000000,
        0x80708470, 0xbefc037e,
+       0x8a7aff6d, 0x80000000,
        0xbefe037c, 0xbefc0370,
-       0xf4611b7a, 0xf8000000,
+       0xf4611eba, 0xf8000000,
        0x80708470, 0xbefc037e,
        0xbefe037c, 0xbefc0370,
        0xf4611bba, 0xf8000000,
@@ -838,10 +852,10 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
        0xbf820001, 0xbeff03c1,
        0xb97b4306, 0x877bc17b,
        0xbf840044, 0xbf8a0000,
-       0x877aff73, 0x04000000,
+       0x877aff6d, 0x80000000,
        0xbf840040, 0x8f7b867b,
        0x8f7b827b, 0xbef6037b,
-       0xb9702a05, 0x80708170,
+       0xb9703a05, 0x80708170,
        0xbf0d9973, 0xbf850002,
        0x8f708970, 0xbf820001,
        0x8f708a70, 0xb97a1e06,
@@ -877,7 +891,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
        0xbef003ff, 0x00000200,
        0xbeff0380, 0xbf820003,
        0xbef003ff, 0x00000400,
-       0xbeff03c1, 0xb97b2a05,
+       0xbeff03c1, 0xb97b3a05,
        0x807b817b, 0x8f7b827b,
        0x907c9973, 0x877c817c,
        0xbf06817c, 0xbf850017,
@@ -894,7 +908,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
        0xbf0a7b7c, 0xbf85ffef,
        0xbf820025, 0xbef603ff,
        0x01000000, 0xbefc0384,
-       0xbf0a7b7c, 0xbf840020,
+       0xbf0a7b7c, 0xbf840011,
        0x7e008700, 0x7e028701,
        0x7e048702, 0x7e068703,
        0xe0704000, 0x705d0000,
@@ -911,71 +925,69 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
        0x705d0000, 0x807c817c,
        0x8070ff70, 0x00000080,
        0xbf0a7b7c, 0xbf85fff8,
-       0xbf820151, 0xbef4037e,
+       0xbf820144, 0xbef4037e,
        0x8775ff7f, 0x0000ffff,
        0x8875ff75, 0x00040000,
        0xbef60380, 0xbef703ff,
-       0x10807fac, 0x876eff7f,
-       0x08000000, 0x906e836e,
-       0x88776e77, 0x876eff7f,
-       0x70000000, 0x906e816e,
-       0x88776e77, 0xb97202dc,
-       0x8f729972, 0x8872727f,
-       0x876eff7f, 0x04000000,
-       0xbf840034, 0xbefe03c1,
-       0x907c9972, 0x877c817c,
-       0xbf06817c, 0xbf850002,
-       0xbeff0380, 0xbf820001,
-       0xbeff03c1, 0xb96f4306,
-       0x876fc16f, 0xbf840029,
-       0x8f6f866f, 0x8f6f826f,
-       0xbef6036f, 0xb9782a05,
-       0x80788178, 0xbf0d9972,
-       0xbf850002, 0x8f788978,
-       0xbf820001, 0x8f788a78,
-       0xb96e1e06, 0x8f6e8a6e,
-       0x80786e78, 0x8078ff78,
-       0x00000200, 0x8078ff78,
-       0x00000080, 0xbef603ff,
-       0x01000000, 0x907c9972,
-       0x877c817c, 0xbf06817c,
-       0xbefc0380, 0xbf850009,
-       0xe0310000, 0x781d0000,
-       0x807cff7c, 0x00000080,
-       0x8078ff78, 0x00000080,
-       0xbf0a6f7c, 0xbf85fff8,
-       0xbf820008, 0xe0310000,
-       0x781d0000, 0x807cff7c,
-       0x00000100, 0x8078ff78,
-       0x00000100, 0xbf0a6f7c,
-       0xbf85fff8, 0xbef80380,
+       0x10807fac, 0xb97202dc,
+       0x8f729972, 0x876eff7f,
+       0x04000000, 0xbf840034,
        0xbefe03c1, 0x907c9972,
        0x877c817c, 0xbf06817c,
        0xbf850002, 0xbeff0380,
        0xbf820001, 0xbeff03c1,
-       0xb96f2a05, 0x806f816f,
-       0x8f6f826f, 0x907c9972,
-       0x877c817c, 0xbf06817c,
-       0xbf850021, 0xbef603ff,
-       0x01000000, 0xbeee0378,
+       0xb96f4306, 0x876fc16f,
+       0xbf840029, 0x8f6f866f,
+       0x8f6f826f, 0xbef6036f,
+       0xb9783a05, 0x80788178,
+       0xbf0d9972, 0xbf850002,
+       0x8f788978, 0xbf820001,
+       0x8f788a78, 0xb96e1e06,
+       0x8f6e8a6e, 0x80786e78,
        0x8078ff78, 0x00000200,
-       0xbefc0384, 0xe0304000,
-       0x785d0000, 0xe0304080,
-       0x785d0100, 0xe0304100,
-       0x785d0200, 0xe0304180,
-       0x785d0300, 0xbf8c3f70,
-       0x7e008500, 0x7e028501,
-       0x7e048502, 0x7e068503,
-       0x807c847c, 0x8078ff78,
-       0x00000200, 0xbf0a6f7c,
-       0xbf85ffee, 0xe0304000,
-       0x6e5d0000, 0xe0304080,
-       0x6e5d0100, 0xe0304100,
-       0x6e5d0200, 0xe0304180,
-       0x6e5d0300, 0xbf820032,
+       0x8078ff78, 0x00000080,
+       0xbef603ff, 0x01000000,
+       0x907c9972, 0x877c817c,
+       0xbf06817c, 0xbefc0380,
+       0xbf850009, 0xe0310000,
+       0x781d0000, 0x807cff7c,
+       0x00000080, 0x8078ff78,
+       0x00000080, 0xbf0a6f7c,
+       0xbf85fff8, 0xbf820008,
+       0xe0310000, 0x781d0000,
+       0x807cff7c, 0x00000100,
+       0x8078ff78, 0x00000100,
+       0xbf0a6f7c, 0xbf85fff8,
+       0xbef80380, 0xbefe03c1,
+       0x907c9972, 0x877c817c,
+       0xbf06817c, 0xbf850002,
+       0xbeff0380, 0xbf820001,
+       0xbeff03c1, 0xb96f3a05,
+       0x806f816f, 0x8f6f826f,
+       0x907c9972, 0x877c817c,
+       0xbf06817c, 0xbf850024,
+       0xbef603ff, 0x01000000,
+       0xbeee0378, 0x8078ff78,
+       0x00000200, 0xbefc0384,
+       0xbf0a6f7c, 0xbf840050,
+       0xe0304000, 0x785d0000,
+       0xe0304080, 0x785d0100,
+       0xe0304100, 0x785d0200,
+       0xe0304180, 0x785d0300,
+       0xbf8c3f70, 0x7e008500,
+       0x7e028501, 0x7e048502,
+       0x7e068503, 0x807c847c,
+       0x8078ff78, 0x00000200,
+       0xbf0a6f7c, 0xbf85ffee,
+       0xe0304000, 0x6e5d0000,
+       0xe0304080, 0x6e5d0100,
+       0xe0304100, 0x6e5d0200,
+       0xe0304180, 0x6e5d0300,
+       0xbf8c3f70, 0xbf820034,
        0xbef603ff, 0x01000000,
        0xbeee0378, 0x8078ff78,
        0x00000400, 0xbefc0384,
+       0xbf0a6f7c, 0xbf840012,
        0xe0304000, 0x785d0000,
        0xe0304100, 0x785d0100,
        0xe0304200, 0x785d0200,
@@ -998,7 +1010,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
        0x6e5d0100, 0xe0304200,
        0x6e5d0200, 0xe0304300,
        0x6e5d0300, 0xbf8c3f70,
-       0xb9782a05, 0x80788178,
+       0xb9783a05, 0x80788178,
        0xbf0d9972, 0xbf850002,
        0x8f788978, 0xbf820001,
        0x8f788a78, 0xb96e1e06,
@@ -1025,7 +1037,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
        0xbe8c310c, 0xbe8e310e,
        0xbf06807c, 0xbf84fff0,
        0xba80f801, 0x00000000,
-       0xbf8a0000, 0xb9782a05,
+       0xbf8a0000, 0xb9783a05,
        0x80788178, 0xbf0d9972,
        0xbf850002, 0x8f788978,
        0xbf820001, 0x8f788a78,
@@ -1060,270 +1072,272 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
        0xb96e2a05, 0x806e816e,
        0xbf0d9972, 0xbf850002,
        0x8f6e896e, 0xbf820001,
-       0x8f6e8a6e, 0x806eff6e,
-       0x00000200, 0x806e746e,
-       0x826f8075, 0x876fff6f,
-       0x0000ffff, 0xf4091c37,
-       0xfa000050, 0xf4091d37,
-       0xfa000060, 0xf4011e77,
-       0xfa000074, 0xbf8cc07f,
-       0x876fff6d, 0xfc000000,
-       0x906f9a6f, 0x8f6f906f,
-       0xbeee0380, 0x886e6f6e,
-       0x876fff6d, 0x02000000,
-       0x906f996f, 0x8f6f8f6f,
-       0x886e6f6e, 0x876fff6d,
-       0x01000000, 0x906f986f,
-       0x8f6f996f, 0x886e6f6e,
-       0x876fff7a, 0x00800000,
-       0x906f976f, 0xb9eef807,
-       0x876dff6d, 0x0000ffff,
-       0x87fe7e7e, 0x87ea6a6a,
-       0xb9faf802, 0xbe80226c,
-       0xbf810000, 0xbf9f0000,
+       0x8f6e8a6e, 0xb96f1e06,
+       0x8f6f8a6f, 0x806e6f6e,
+       0x806eff6e, 0x00000200,
+       0x806e746e, 0x826f8075,
+       0x876fff6f, 0x0000ffff,
+       0xf4091c37, 0xfa000050,
+       0xf4091d37, 0xfa000060,
+       0xf4011e77, 0xfa000074,
+       0xbf8cc07f, 0x906e8977,
+       0x876fff6e, 0x003f8000,
+       0x906e8677, 0x876eff6e,
+       0x02000000, 0x886e6f6e,
+       0xb9eef807, 0x876dff6d,
+       0x0000ffff, 0x87fe7e7e,
+       0x87ea6a6a, 0xb9faf802,
+       0xbe80226c, 0xbf810000,
        0xbf9f0000, 0xbf9f0000,
        0xbf9f0000, 0xbf9f0000,
+       0xbf9f0000, 0x00000000,
 };
 
 static const uint32_t cwsr_trap_arcturus_hex[] = {
-       0xbf820001, 0xbf8202c4,
-       0xb8f8f802, 0x89788678,
-       0xb8eef801, 0x866eff6e,
-       0x00000800, 0xbf840003,
+       0xbf820001, 0xbf8202d0,
+       0xb8f8f802, 0x8978ff78,
+       0x00020006, 0xb8fbf803,
        0x866eff78, 0x00002000,
-       0xbf840016, 0xb8fbf803,
+       0xbf840009, 0x866eff6d,
+       0x00ff0000, 0xbf85001e,
        0x866eff7b, 0x00000400,
-       0xbf85003b, 0x866eff7b,
-       0x00000800, 0xbf850003,
-       0x866eff7b, 0x00000100,
-       0xbf84000c, 0x866eff78,
-       0x00002000, 0xbf840005,
-       0xbf8e0010, 0xb8eef803,
-       0x866eff6e, 0x00000400,
-       0xbf84fffb, 0x8778ff78,
-       0x00002000, 0x80ec886c,
-       0x82ed806d, 0xb8eef807,
-       0x866fff6e, 0x001f8000,
-       0x8e6f8b6f, 0x8977ff77,
-       0xfc000000, 0x87776f77,
-       0x896eff6e, 0x001f8000,
-       0xb96ef807, 0xb8faf812,
+       0xbf850051, 0xbf8e0010,
+       0xb8fbf803, 0xbf82fffa,
+       0x866eff7b, 0x00000900,
+       0xbf850015, 0x866eff7b,
+       0x000071ff, 0xbf840008,
+       0x866fff7b, 0x00007080,
+       0xbf840001, 0xbeee1a87,
+       0xb8eff801, 0x8e6e8c6e,
+       0x866e6f6e, 0xbf85000a,
+       0x866eff6d, 0x00ff0000,
+       0xbf850007, 0xb8eef801,
+       0x866eff6e, 0x00000800,
+       0xbf850003, 0x866eff7b,
+       0x00000400, 0xbf850036,
+       0xb8faf807, 0x867aff7a,
+       0x001f8000, 0x8e7a8b7a,
+       0x8977ff77, 0xfc000000,
+       0x87777a77, 0xba7ff807,
+       0x00000000, 0xb8faf812,
        0xb8fbf813, 0x8efa887a,
-       0xc0071bbd, 0x00000000,
-       0xbf8cc07f, 0xc0071ebd,
-       0x00000008, 0xbf8cc07f,
-       0x86ee6e6e, 0xbf840001,
-       0xbe801d6e, 0xb8fbf803,
-       0x867bff7b, 0x000001ff,
+       0xc0031bbd, 0x00000010,
+       0xbf8cc07f, 0x8e6e976e,
+       0x8977ff77, 0x00800000,
+       0x87776e77, 0xc0071bbd,
+       0x00000000, 0xbf8cc07f,
+       0xc0071ebd, 0x00000008,
+       0xbf8cc07f, 0x86ee6e6e,
+       0xbf840001, 0xbe801d6e,
+       0x866eff6d, 0x01ff0000,
+       0xbf850005, 0x8778ff78,
+       0x00002000, 0x80ec886c,
+       0x82ed806d, 0xbf820005,
+       0x866eff6d, 0x01000000,
        0xbf850002, 0x806c846c,
        0x826d806d, 0x866dff6d,
-       0x0000ffff, 0x8f6e8b77,
-       0x866eff6e, 0x001f8000,
-       0xb96ef807, 0x86fe7e7e,
+       0x0000ffff, 0x8f7a8b77,
+       0x867aff7a, 0x001f8000,
+       0xb97af807, 0x86fe7e7e,
        0x86ea6a6a, 0x8f6e8378,
        0xb96ee0c2, 0xbf800002,
        0xb9780002, 0xbe801f6c,
        0x866dff6d, 0x0000ffff,
        0xbefa0080, 0xb97a0283,
-       0xb8fa2407, 0x8e7a9b7a,
-       0x876d7a6d, 0xb8fa03c7,
-       0x8e7a9a7a, 0x876d7a6d,
        0xb8faf807, 0x867aff7a,
-       0x00007fff, 0xb97af807,
-       0xbeee007e, 0xbeef007f,
-       0xbefe0180, 0xbf900004,
-       0x877a8478, 0xb97af802,
-       0xbf8e0002, 0xbf88fffe,
-       0xb8fa2a05, 0x807a817a,
-       0x8e7a8a7a, 0x8e7a817a,
-       0xb8fb1605, 0x807b817b,
-       0x8e7b867b, 0x807a7b7a,
-       0x807a7e7a, 0x827b807f,
-       0x867bff7b, 0x0000ffff,
-       0xc04b1c3d, 0x00000050,
-       0xbf8cc07f, 0xc04b1d3d,
-       0x00000060, 0xbf8cc07f,
-       0xc0431e7d, 0x00000074,
-       0xbf8cc07f, 0xbef4007e,
-       0x8675ff7f, 0x0000ffff,
-       0x8775ff75, 0x00040000,
-       0xbef60080, 0xbef700ff,
-       0x00807fac, 0x867aff7f,
-       0x08000000, 0x8f7a837a,
-       0x87777a77, 0x867aff7f,
-       0x70000000, 0x8f7a817a,
-       0x87777a77, 0xbef1007c,
-       0xbef00080, 0xb8f02a05,
-       0x80708170, 0x8e708a70,
-       0x8e708170, 0xb8fa1605,
-       0x807a817a, 0x8e7a867a,
-       0x80707a70, 0xbef60084,
-       0xbef600ff, 0x01000000,
-       0xbefe007c, 0xbefc0070,
-       0xc0611c7a, 0x0000007c,
-       0xbf8cc07f, 0x80708470,
-       0xbefc007e, 0xbefe007c,
-       0xbefc0070, 0xc0611b3a,
+       0x001f8000, 0x8e7a8b7a,
+       0x8977ff77, 0xfc000000,
+       0x87777a77, 0xba7ff807,
+       0x00000000, 0xbeee007e,
+       0xbeef007f, 0xbefe0180,
+       0xbf900004, 0x877a8478,
+       0xb97af802, 0xbf8e0002,
+       0xbf88fffe, 0xb8fa2a05,
+       0x807a817a, 0x8e7a8a7a,
+       0x8e7a817a, 0xb8fb1605,
+       0x807b817b, 0x8e7b867b,
+       0x807a7b7a, 0x807a7e7a,
+       0x827b807f, 0x867bff7b,
+       0x0000ffff, 0xc04b1c3d,
+       0x00000050, 0xbf8cc07f,
+       0xc04b1d3d, 0x00000060,
+       0xbf8cc07f, 0xc0431e7d,
+       0x00000074, 0xbf8cc07f,
+       0xbef4007e, 0x8675ff7f,
+       0x0000ffff, 0x8775ff75,
+       0x00040000, 0xbef60080,
+       0xbef700ff, 0x00807fac,
+       0xbef1007c, 0xbef00080,
+       0xb8f02a05, 0x80708170,
+       0x8e708a70, 0x8e708170,
+       0xb8fa1605, 0x807a817a,
+       0x8e7a867a, 0x80707a70,
+       0xbef60084, 0xbef600ff,
+       0x01000000, 0xbefe007c,
+       0xbefc0070, 0xc0611c7a,
        0x0000007c, 0xbf8cc07f,
        0x80708470, 0xbefc007e,
        0xbefe007c, 0xbefc0070,
-       0xc0611b7a, 0x0000007c,
+       0xc0611b3a, 0x0000007c,
        0xbf8cc07f, 0x80708470,
        0xbefc007e, 0xbefe007c,
-       0xbefc0070, 0xc0611bba,
+       0xbefc0070, 0xc0611b7a,
        0x0000007c, 0xbf8cc07f,
        0x80708470, 0xbefc007e,
        0xbefe007c, 0xbefc0070,
-       0xc0611bfa, 0x0000007c,
+       0xc0611bba, 0x0000007c,
        0xbf8cc07f, 0x80708470,
        0xbefc007e, 0xbefe007c,
-       0xbefc0070, 0xc0611e3a,
-       0x0000007c, 0xbf8cc07f,
-       0x80708470, 0xbefc007e,
-       0xb8fbf803, 0xbefe007c,
-       0xbefc0070, 0xc0611efa,
+       0xbefc0070, 0xc0611bfa,
        0x0000007c, 0xbf8cc07f,
        0x80708470, 0xbefc007e,
        0xbefe007c, 0xbefc0070,
-       0xc0611a3a, 0x0000007c,
+       0xc0611e3a, 0x0000007c,
+       0xbf8cc07f, 0x80708470,
+       0xbefc007e, 0xb8fbf803,
+       0xbefe007c, 0xbefc0070,
+       0xc0611efa, 0x0000007c,
        0xbf8cc07f, 0x80708470,
        0xbefc007e, 0xbefe007c,
-       0xbefc0070, 0xc0611a7a,
+       0xbefc0070, 0xc0611a3a,
        0x0000007c, 0xbf8cc07f,
        0x80708470, 0xbefc007e,
-       0xb8f1f801, 0xbefe007c,
-       0xbefc0070, 0xc0611c7a,
-       0x0000007c, 0xbf8cc07f,
-       0x80708470, 0xbefc007e,
-       0x867aff7f, 0x04000000,
-       0xbeef0080, 0x876f6f7a,
-       0xb8f02a05, 0x80708170,
-       0x8e708a70, 0x8e708170,
-       0xb8fb1605, 0x807b817b,
-       0x8e7b847b, 0x8e76827b,
-       0xbef600ff, 0x01000000,
-       0xbef20174, 0x80747074,
-       0x82758075, 0xbefc0080,
-       0xbf800000, 0xbe802b00,
-       0xbe822b02, 0xbe842b04,
-       0xbe862b06, 0xbe882b08,
-       0xbe8a2b0a, 0xbe8c2b0c,
-       0xbe8e2b0e, 0xc06b003a,
-       0x00000000, 0xbf8cc07f,
-       0xc06b013a, 0x00000010,
-       0xbf8cc07f, 0xc06b023a,
-       0x00000020, 0xbf8cc07f,
-       0xc06b033a, 0x00000030,
-       0xbf8cc07f, 0x8074c074,
-       0x82758075, 0x807c907c,
-       0xbf0a7b7c, 0xbf85ffe7,
-       0xbef40172, 0xbef00080,
-       0xbefe00c1, 0xbeff00c1,
-       0xbee80080, 0xbee90080,
-       0xbef600ff, 0x01000000,
-       0x867aff78, 0x00400000,
-       0xbf850003, 0xb8faf803,
-       0x897a7aff, 0x10000000,
-       0xbf85004d, 0xbe840080,
-       0xd2890000, 0x00000900,
-       0x80048104, 0xd2890001,
+       0xbefe007c, 0xbefc0070,
+       0xc0611a7a, 0x0000007c,
+       0xbf8cc07f, 0x80708470,
+       0xbefc007e, 0xb8f1f801,
+       0xbefe007c, 0xbefc0070,
+       0xc0611c7a, 0x0000007c,
+       0xbf8cc07f, 0x80708470,
+       0xbefc007e, 0x867aff7f,
+       0x04000000, 0xbeef0080,
+       0x876f6f7a, 0xb8f02a05,
+       0x80708170, 0x8e708a70,
+       0x8e708170, 0xb8fb1605,
+       0x807b817b, 0x8e7b847b,
+       0x8e76827b, 0xbef600ff,
+       0x01000000, 0xbef20174,
+       0x80747074, 0x82758075,
+       0xbefc0080, 0xbf800000,
+       0xbe802b00, 0xbe822b02,
+       0xbe842b04, 0xbe862b06,
+       0xbe882b08, 0xbe8a2b0a,
+       0xbe8c2b0c, 0xbe8e2b0e,
+       0xc06b003a, 0x00000000,
+       0xbf8cc07f, 0xc06b013a,
+       0x00000010, 0xbf8cc07f,
+       0xc06b023a, 0x00000020,
+       0xbf8cc07f, 0xc06b033a,
+       0x00000030, 0xbf8cc07f,
+       0x8074c074, 0x82758075,
+       0x807c907c, 0xbf0a7b7c,
+       0xbf85ffe7, 0xbef40172,
+       0xbef00080, 0xbefe00c1,
+       0xbeff00c1, 0xbee80080,
+       0xbee90080, 0xbef600ff,
+       0x01000000, 0x867aff78,
+       0x00400000, 0xbf850003,
+       0xb8faf803, 0x897a7aff,
+       0x10000000, 0xbf85004d,
+       0xbe840080, 0xd2890000,
        0x00000900, 0x80048104,
-       0xd2890002, 0x00000900,
-       0x80048104, 0xd2890003,
+       0xd2890001, 0x00000900,
+       0x80048104, 0xd2890002,
        0x00000900, 0x80048104,
-       0xc069003a, 0x00000070,
-       0xbf8cc07f, 0x80709070,
-       0xbf06c004, 0xbf84ffee,
-       0xbe840080, 0xd2890000,
-       0x00000901, 0x80048104,
-       0xd2890001, 0x00000901,
-       0x80048104, 0xd2890002,
-       0x00000901, 0x80048104,
-       0xd2890003, 0x00000901,
+       0xd2890003, 0x00000900,
        0x80048104, 0xc069003a,
        0x00000070, 0xbf8cc07f,
        0x80709070, 0xbf06c004,
        0xbf84ffee, 0xbe840080,
-       0xd2890000, 0x00000902,
+       0xd2890000, 0x00000901,
        0x80048104, 0xd2890001,
-       0x00000902, 0x80048104,
-       0xd2890002, 0x00000902,
+       0x00000901, 0x80048104,
+       0xd2890002, 0x00000901,
        0x80048104, 0xd2890003,
-       0x00000902, 0x80048104,
+       0x00000901, 0x80048104,
        0xc069003a, 0x00000070,
        0xbf8cc07f, 0x80709070,
        0xbf06c004, 0xbf84ffee,
        0xbe840080, 0xd2890000,
-       0x00000903, 0x80048104,
-       0xd2890001, 0x00000903,
-       0x80048104, 0xd2890002,
-       0x00000903, 0x80048104,
-       0xd2890003, 0x00000903,
-       0x80048104, 0xc069003a,
-       0x00000070, 0xbf8cc07f,
-       0x80709070, 0xbf06c004,
-       0xbf84ffee, 0xbf820008,
-       0xe0724000, 0x701d0000,
-       0xe0724100, 0x701d0100,
-       0xe0724200, 0x701d0200,
-       0xe0724300, 0x701d0300,
-       0xbefe00c1, 0xbeff00c1,
-       0xb8fb4306, 0x867bc17b,
-       0xbf840064, 0xbf8a0000,
-       0x867aff6f, 0x04000000,
-       0xbf840060, 0x8e7b867b,
-       0x8e7b827b, 0xbef6007b,
-       0xb8f02a05, 0x80708170,
-       0x8e708a70, 0x8e708170,
-       0xb8fa1605, 0x807a817a,
-       0x8e7a867a, 0x80707a70,
-       0x8070ff70, 0x00000080,
-       0xbef600ff, 0x01000000,
-       0xbefc0080, 0xd28c0002,
-       0x000100c1, 0xd28d0003,
-       0x000204c1, 0x867aff78,
-       0x00400000, 0xbf850003,
-       0xb8faf803, 0x897a7aff,
-       0x10000000, 0xbf850030,
-       0x24040682, 0xd86e4000,
-       0x00000002, 0xbf8cc07f,
-       0xbe840080, 0xd2890000,
-       0x00000900, 0x80048104,
-       0xd2890001, 0x00000900,
+       0x00000902, 0x80048104,
+       0xd2890001, 0x00000902,
        0x80048104, 0xd2890002,
-       0x00000900, 0x80048104,
-       0xd2890003, 0x00000900,
+       0x00000902, 0x80048104,
+       0xd2890003, 0x00000902,
        0x80048104, 0xc069003a,
        0x00000070, 0xbf8cc07f,
        0x80709070, 0xbf06c004,
        0xbf84ffee, 0xbe840080,
-       0xd2890000, 0x00000901,
+       0xd2890000, 0x00000903,
        0x80048104, 0xd2890001,
-       0x00000901, 0x80048104,
-       0xd2890002, 0x00000901,
+       0x00000903, 0x80048104,
+       0xd2890002, 0x00000903,
        0x80048104, 0xd2890003,
-       0x00000901, 0x80048104,
+       0x00000903, 0x80048104,
+       0xc069003a, 0x00000070,
+       0xbf8cc07f, 0x80709070,
+       0xbf06c004, 0xbf84ffee,
+       0xbf820008, 0xe0724000,
+       0x701d0000, 0xe0724100,
+       0x701d0100, 0xe0724200,
+       0x701d0200, 0xe0724300,
+       0x701d0300, 0xbefe00c1,
+       0xbeff00c1, 0xb8fb4306,
+       0x867bc17b, 0xbf840064,
+       0xbf8a0000, 0x867aff6f,
+       0x04000000, 0xbf840060,
+       0x8e7b867b, 0x8e7b827b,
+       0xbef6007b, 0xb8f02a05,
+       0x80708170, 0x8e708a70,
+       0x8e708170, 0xb8fa1605,
+       0x807a817a, 0x8e7a867a,
+       0x80707a70, 0x8070ff70,
+       0x00000080, 0xbef600ff,
+       0x01000000, 0xbefc0080,
+       0xd28c0002, 0x000100c1,
+       0xd28d0003, 0x000204c1,
+       0x867aff78, 0x00400000,
+       0xbf850003, 0xb8faf803,
+       0x897a7aff, 0x10000000,
+       0xbf850030, 0x24040682,
+       0xd86e4000, 0x00000002,
+       0xbf8cc07f, 0xbe840080,
+       0xd2890000, 0x00000900,
+       0x80048104, 0xd2890001,
+       0x00000900, 0x80048104,
+       0xd2890002, 0x00000900,
+       0x80048104, 0xd2890003,
+       0x00000900, 0x80048104,
        0xc069003a, 0x00000070,
        0xbf8cc07f, 0x80709070,
        0xbf06c004, 0xbf84ffee,
-       0x680404ff, 0x00000200,
+       0xbe840080, 0xd2890000,
+       0x00000901, 0x80048104,
+       0xd2890001, 0x00000901,
+       0x80048104, 0xd2890002,
+       0x00000901, 0x80048104,
+       0xd2890003, 0x00000901,
+       0x80048104, 0xc069003a,
+       0x00000070, 0xbf8cc07f,
+       0x80709070, 0xbf06c004,
+       0xbf84ffee, 0x680404ff,
+       0x00000200, 0xd0c9006a,
+       0x0000f702, 0xbf87ffd2,
+       0xbf820015, 0xd1060002,
+       0x00011103, 0x7e0602ff,
+       0x00000200, 0xbefc00ff,
+       0x00010000, 0xbe800077,
+       0x8677ff77, 0xff7fffff,
+       0x8777ff77, 0x00058000,
+       0xd8ec0000, 0x00000002,
+       0xbf8cc07f, 0xe0765000,
+       0x701d0002, 0x68040702,
        0xd0c9006a, 0x0000f702,
-       0xbf87ffd2, 0xbf820015,
-       0xd1060002, 0x00011103,
-       0x7e0602ff, 0x00000200,
-       0xbefc00ff, 0x00010000,
-       0xbe800077, 0x8677ff77,
-       0xff7fffff, 0x8777ff77,
-       0x00058000, 0xd8ec0000,
-       0x00000002, 0xbf8cc07f,
-       0xe0765000, 0x701d0002,
-       0x68040702, 0xd0c9006a,
-       0x0000f702, 0xbf87fff7,
-       0xbef70000, 0xbef000ff,
-       0x00000400, 0xbefe00c1,
-       0xbeff00c1, 0xb8fb2a05,
-       0x807b817b, 0x8e7b827b,
-       0x8e76887b, 0xbef600ff,
+       0xbf87fff7, 0xbef70000,
+       0xbef000ff, 0x00000400,
+       0xbefe00c1, 0xbeff00c1,
+       0xb8fb2a05, 0x807b817b,
+       0x8e7b827b, 0xbef600ff,
        0x01000000, 0xbefc0084,
        0xbf0a7b7c, 0xbf84006d,
        0xbf11017c, 0x807bff7b,
@@ -1440,15 +1454,11 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
        0x701d0300, 0x807c847c,
        0x8070ff70, 0x00000400,
        0xbf0a7b7c, 0xbf85ffeb,
-       0xbf9c0000, 0xbf820106,
+       0xbf9c0000, 0xbf8200e3,
        0xbef4007e, 0x8675ff7f,
        0x0000ffff, 0x8775ff75,
        0x00040000, 0xbef60080,
        0xbef700ff, 0x00807fac,
-       0x866eff7f, 0x08000000,
-       0x8f6e836e, 0x87776e77,
-       0x866eff7f, 0x70000000,
-       0x8f6e816e, 0x87776e77,
        0x866eff7f, 0x04000000,
        0xbf84001f, 0xbefe00c1,
        0xbeff00c1, 0xb8ef4306,
@@ -1466,26 +1476,14 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
        0x807cff7c, 0x00000200,
        0x8078ff78, 0x00000200,
        0xbf0a6f7c, 0xbf85fff6,
-       0xbef80080, 0xbefe00c1,
-       0xbeff00c1, 0xb8ef2a05,
-       0x806f816f, 0x8e6f826f,
-       0x8e76886f, 0xbef90076,
+       0xbefe00c1, 0xbeff00c1,
        0xbef600ff, 0x01000000,
+       0xb8ef2a05, 0x806f816f,
+       0x8e6f826f, 0x806fff6f,
+       0x00008000, 0xbef80080,
        0xbeee0078, 0x8078ff78,
-       0x00000400, 0xbef30079,
-       0x8079ff79, 0x00000400,
-       0xbefc0084, 0xbf11087c,
-       0x806fff6f, 0x00008000,
-       0xe0524000, 0x791d0000,
-       0xe0524100, 0x791d0100,
-       0xe0524200, 0x791d0200,
-       0xe0524300, 0x791d0300,
-       0x8079ff79, 0x00000400,
-       0xbf8c0f70, 0xd3d94000,
-       0x18000100, 0xd3d94001,
-       0x18000101, 0xd3d94002,
-       0x18000102, 0xd3d94003,
-       0x18000103, 0xe0524000,
+       0x00000400, 0xbefc0084,
+       0xbf11087c, 0xe0524000,
        0x781d0000, 0xe0524100,
        0x781d0100, 0xe0524200,
        0x781d0200, 0xe0524300,
@@ -1494,20 +1492,24 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
        0x7e040302, 0x7e060303,
        0x807c847c, 0x8078ff78,
        0x00000400, 0xbf0a6f7c,
-       0xbf85ffdb, 0xbf9c0000,
-       0xe0524000, 0x731d0000,
-       0xe0524100, 0x731d0100,
-       0xe0524200, 0x731d0200,
-       0xe0524300, 0x731d0300,
-       0xbf8c0f70, 0xd3d94000,
-       0x18000100, 0xd3d94001,
-       0x18000101, 0xd3d94002,
-       0x18000102, 0xd3d94003,
-       0x18000103, 0xe0524000,
-       0x6e1d0000, 0xe0524100,
-       0x6e1d0100, 0xe0524200,
-       0x6e1d0200, 0xe0524300,
-       0x6e1d0300, 0xb8f82a05,
+       0xbf85ffee, 0xbefc0080,
+       0xbf11087c, 0xe0524000,
+       0x781d0000, 0xe0524100,
+       0x781d0100, 0xe0524200,
+       0x781d0200, 0xe0524300,
+       0x781d0300, 0xbf8c0f70,
+       0xd3d94000, 0x18000100,
+       0xd3d94001, 0x18000101,
+       0xd3d94002, 0x18000102,
+       0xd3d94003, 0x18000103,
+       0x807c847c, 0x8078ff78,
+       0x00000400, 0xbf0a6f7c,
+       0xbf85ffea, 0xbf9c0000,
+       0xe0524000, 0x6e1d0000,
+       0xe0524100, 0x6e1d0100,
+       0xe0524200, 0x6e1d0200,
+       0xe0524300, 0x6e1d0300,
+       0xbf8c0f70, 0xb8f82a05,
        0x80788178, 0x8e788a78,
        0x8e788178, 0xb8ee1605,
        0x806e816e, 0x8e6e866e,
@@ -1559,224 +1561,268 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
        0xc00b1c37, 0x00000050,
        0xc00b1d37, 0x00000060,
        0xc0031e77, 0x00000074,
-       0xbf8cc07f, 0x866fff6d,
-       0xf8000000, 0x8f6f9b6f,
-       0x8e6f906f, 0xbeee0080,
-       0x876e6f6e, 0x866fff6d,
-       0x04000000, 0x8f6f9a6f,
-       0x8e6f8f6f, 0x876e6f6e,
-       0x866fff7a, 0x00800000,
-       0x8f6f976f, 0xb96ef807,
-       0x866dff6d, 0x0000ffff,
-       0x86fe7e7e, 0x86ea6a6a,
-       0x8f6e837a, 0xb96ee0c2,
-       0xbf800002, 0xb97a0002,
-       0xbf8a0000, 0x95806f6c,
-       0xbf810000, 0x00000000,
+       0xbf8cc07f, 0x8f6e8b77,
+       0x866eff6e, 0x001f8000,
+       0xb96ef807, 0x866dff6d,
+       0x0000ffff, 0x86fe7e7e,
+       0x86ea6a6a, 0x8f6e837a,
+       0xb96ee0c2, 0xbf800002,
+       0xb97a0002, 0xbf8a0000,
+       0xbe801f6c, 0xbf810000,
 };
 
 static const uint32_t cwsr_trap_aldebaran_hex[] = {
-       0xbf820001, 0xbf8202ce,
-       0xb8f8f802, 0x89788678,
-       0xb8eef801, 0x866eff6e,
-       0x00000800, 0xbf840003,
+       0xbf820001, 0xbf8202db,
+       0xb8f8f802, 0x8978ff78,
+       0x00020006, 0xb8fbf803,
        0x866eff78, 0x00002000,
-       0xbf840016, 0xb8fbf803,
+       0xbf840009, 0x866eff6d,
+       0x00ff0000, 0xbf85001e,
        0x866eff7b, 0x00000400,
-       0xbf85003b, 0x866eff7b,
-       0x00000800, 0xbf850003,
-       0x866eff7b, 0x00000100,
-       0xbf84000c, 0x866eff78,
-       0x00002000, 0xbf840005,
-       0xbf8e0010, 0xb8eef803,
-       0x866eff6e, 0x00000400,
-       0xbf84fffb, 0x8778ff78,
-       0x00002000, 0x80ec886c,
-       0x82ed806d, 0xb8eef807,
-       0x866fff6e, 0x001f8000,
-       0x8e6f8b6f, 0x8977ff77,
-       0xfc000000, 0x87776f77,
-       0x896eff6e, 0x001f8000,
-       0xb96ef807, 0xb8faf812,
+       0xbf850051, 0xbf8e0010,
+       0xb8fbf803, 0xbf82fffa,
+       0x866eff7b, 0x00000900,
+       0xbf850015, 0x866eff7b,
+       0x000071ff, 0xbf840008,
+       0x866fff7b, 0x00007080,
+       0xbf840001, 0xbeee1a87,
+       0xb8eff801, 0x8e6e8c6e,
+       0x866e6f6e, 0xbf85000a,
+       0x866eff6d, 0x00ff0000,
+       0xbf850007, 0xb8eef801,
+       0x866eff6e, 0x00000800,
+       0xbf850003, 0x866eff7b,
+       0x00000400, 0xbf850036,
+       0xb8faf807, 0x867aff7a,
+       0x001f8000, 0x8e7a8b7a,
+       0x8977ff77, 0xfc000000,
+       0x87777a77, 0xba7ff807,
+       0x00000000, 0xb8faf812,
        0xb8fbf813, 0x8efa887a,
-       0xc0071bbd, 0x00000000,
-       0xbf8cc07f, 0xc0071ebd,
-       0x00000008, 0xbf8cc07f,
-       0x86ee6e6e, 0xbf840001,
-       0xbe801d6e, 0xb8fbf803,
-       0x867bff7b, 0x000001ff,
+       0xc0031bbd, 0x00000010,
+       0xbf8cc07f, 0x8e6e976e,
+       0x8977ff77, 0x00800000,
+       0x87776e77, 0xc0071bbd,
+       0x00000000, 0xbf8cc07f,
+       0xc0071ebd, 0x00000008,
+       0xbf8cc07f, 0x86ee6e6e,
+       0xbf840001, 0xbe801d6e,
+       0x866eff6d, 0x01ff0000,
+       0xbf850005, 0x8778ff78,
+       0x00002000, 0x80ec886c,
+       0x82ed806d, 0xbf820005,
+       0x866eff6d, 0x01000000,
        0xbf850002, 0x806c846c,
        0x826d806d, 0x866dff6d,
-       0x0000ffff, 0x8f6e8b77,
-       0x866eff6e, 0x001f8000,
-       0xb96ef807, 0x86fe7e7e,
+       0x0000ffff, 0x8f7a8b77,
+       0x867aff7a, 0x001f8000,
+       0xb97af807, 0x86fe7e7e,
        0x86ea6a6a, 0x8f6e8378,
        0xb96ee0c2, 0xbf800002,
        0xb9780002, 0xbe801f6c,
        0x866dff6d, 0x0000ffff,
        0xbefa0080, 0xb97a0283,
-       0xb8fa2407, 0x8e7a9b7a,
-       0x876d7a6d, 0xb8fa03c7,
-       0x8e7a9a7a, 0x876d7a6d,
        0xb8faf807, 0x867aff7a,
-       0x00007fff, 0xb97af807,
-       0xbeee007e, 0xbeef007f,
-       0xbefe0180, 0xbf900004,
-       0x877a8478, 0xb97af802,
-       0xbf8e0002, 0xbf88fffe,
-       0xb8fa2985, 0x807a817a,
-       0x8e7a8a7a, 0x8e7a817a,
-       0xb8fb1605, 0x807b817b,
-       0x8e7b867b, 0x807a7b7a,
-       0x807a7e7a, 0x827b807f,
-       0x867bff7b, 0x0000ffff,
-       0xc04b1c3d, 0x00000050,
-       0xbf8cc07f, 0xc04b1d3d,
-       0x00000060, 0xbf8cc07f,
-       0xc0431e7d, 0x00000074,
-       0xbf8cc07f, 0xbef4007e,
-       0x8675ff7f, 0x0000ffff,
-       0x8775ff75, 0x00040000,
-       0xbef60080, 0xbef700ff,
-       0x00807fac, 0x867aff7f,
-       0x08000000, 0x8f7a837a,
-       0x87777a77, 0x867aff7f,
-       0x70000000, 0x8f7a817a,
-       0x87777a77, 0xbef1007c,
-       0xbef00080, 0xb8f02985,
-       0x80708170, 0x8e708a70,
-       0x8e708170, 0xb8fa1605,
-       0x807a817a, 0x8e7a867a,
-       0x80707a70, 0xbef60084,
-       0xbef600ff, 0x01000000,
-       0xbefe007c, 0xbefc0070,
-       0xc0611c7a, 0x0000007c,
-       0xbf8cc07f, 0x80708470,
-       0xbefc007e, 0xbefe007c,
-       0xbefc0070, 0xc0611b3a,
+       0x001f8000, 0x8e7a8b7a,
+       0x8977ff77, 0xfc000000,
+       0x87777a77, 0xba7ff807,
+       0x00000000, 0xbeee007e,
+       0xbeef007f, 0xbefe0180,
+       0xbf900004, 0x877a8478,
+       0xb97af802, 0xbf8e0002,
+       0xbf88fffe, 0xb8fa2985,
+       0x807a817a, 0x8e7a8a7a,
+       0x8e7a817a, 0xb8fb1605,
+       0x807b817b, 0x8e7b867b,
+       0x807a7b7a, 0x807a7e7a,
+       0x827b807f, 0x867bff7b,
+       0x0000ffff, 0xc04b1c3d,
+       0x00000050, 0xbf8cc07f,
+       0xc04b1d3d, 0x00000060,
+       0xbf8cc07f, 0xc0431e7d,
+       0x00000074, 0xbf8cc07f,
+       0xbef4007e, 0x8675ff7f,
+       0x0000ffff, 0x8775ff75,
+       0x00040000, 0xbef60080,
+       0xbef700ff, 0x00807fac,
+       0xbef1007c, 0xbef00080,
+       0xb8f02985, 0x80708170,
+       0x8e708a70, 0x8e708170,
+       0xb8fa1605, 0x807a817a,
+       0x8e7a867a, 0x80707a70,
+       0xbef60084, 0xbef600ff,
+       0x01000000, 0xbefe007c,
+       0xbefc0070, 0xc0611c7a,
        0x0000007c, 0xbf8cc07f,
        0x80708470, 0xbefc007e,
        0xbefe007c, 0xbefc0070,
-       0xc0611b7a, 0x0000007c,
+       0xc0611b3a, 0x0000007c,
        0xbf8cc07f, 0x80708470,
        0xbefc007e, 0xbefe007c,
-       0xbefc0070, 0xc0611bba,
+       0xbefc0070, 0xc0611b7a,
        0x0000007c, 0xbf8cc07f,
        0x80708470, 0xbefc007e,
        0xbefe007c, 0xbefc0070,
-       0xc0611bfa, 0x0000007c,
+       0xc0611bba, 0x0000007c,
        0xbf8cc07f, 0x80708470,
        0xbefc007e, 0xbefe007c,
-       0xbefc0070, 0xc0611e3a,
-       0x0000007c, 0xbf8cc07f,
-       0x80708470, 0xbefc007e,
-       0xb8fbf803, 0xbefe007c,
-       0xbefc0070, 0xc0611efa,
+       0xbefc0070, 0xc0611bfa,
        0x0000007c, 0xbf8cc07f,
        0x80708470, 0xbefc007e,
        0xbefe007c, 0xbefc0070,
-       0xc0611a3a, 0x0000007c,
+       0xc0611e3a, 0x0000007c,
+       0xbf8cc07f, 0x80708470,
+       0xbefc007e, 0xb8fbf803,
+       0xbefe007c, 0xbefc0070,
+       0xc0611efa, 0x0000007c,
        0xbf8cc07f, 0x80708470,
        0xbefc007e, 0xbefe007c,
-       0xbefc0070, 0xc0611a7a,
-       0x0000007c, 0xbf8cc07f,
-       0x80708470, 0xbefc007e,
-       0xb8f1f801, 0xbefe007c,
-       0xbefc0070, 0xc0611c7a,
+       0xbefc0070, 0xc0611a3a,
        0x0000007c, 0xbf8cc07f,
        0x80708470, 0xbefc007e,
-       0x867aff7f, 0x04000000,
-       0xbeef0080, 0x876f6f7a,
-       0xb8f02985, 0x80708170,
-       0x8e708a70, 0x8e708170,
-       0xb8fb1605, 0x807b817b,
-       0x8e7b847b, 0x8e76827b,
-       0xbef600ff, 0x01000000,
-       0xbef20174, 0x80747074,
-       0x82758075, 0xbefc0080,
-       0xbf800000, 0xbe802b00,
-       0xbe822b02, 0xbe842b04,
-       0xbe862b06, 0xbe882b08,
-       0xbe8a2b0a, 0xbe8c2b0c,
-       0xbe8e2b0e, 0xc06b003a,
-       0x00000000, 0xbf8cc07f,
-       0xc06b013a, 0x00000010,
-       0xbf8cc07f, 0xc06b023a,
-       0x00000020, 0xbf8cc07f,
-       0xc06b033a, 0x00000030,
-       0xbf8cc07f, 0x8074c074,
-       0x82758075, 0x807c907c,
-       0xbf0a7b7c, 0xbf85ffe7,
-       0xbef40172, 0xbef00080,
-       0xbefe00c1, 0xbeff00c1,
-       0xbee80080, 0xbee90080,
-       0xbef600ff, 0x01000000,
-       0x867aff78, 0x00400000,
-       0xbf850003, 0xb8faf803,
-       0x897a7aff, 0x10000000,
-       0xbf85004d, 0xbe840080,
-       0xd2890000, 0x00000900,
-       0x80048104, 0xd2890001,
+       0xbefe007c, 0xbefc0070,
+       0xc0611a7a, 0x0000007c,
+       0xbf8cc07f, 0x80708470,
+       0xbefc007e, 0xb8f1f801,
+       0xbefe007c, 0xbefc0070,
+       0xc0611c7a, 0x0000007c,
+       0xbf8cc07f, 0x80708470,
+       0xbefc007e, 0x867aff7f,
+       0x04000000, 0xbeef0080,
+       0x876f6f7a, 0xb8f02985,
+       0x80708170, 0x8e708a70,
+       0x8e708170, 0xb8fb1605,
+       0x807b817b, 0x8e7b847b,
+       0x8e76827b, 0xbef600ff,
+       0x01000000, 0xbef20174,
+       0x80747074, 0x82758075,
+       0xbefc0080, 0xbf800000,
+       0xbe802b00, 0xbe822b02,
+       0xbe842b04, 0xbe862b06,
+       0xbe882b08, 0xbe8a2b0a,
+       0xbe8c2b0c, 0xbe8e2b0e,
+       0xc06b003a, 0x00000000,
+       0xbf8cc07f, 0xc06b013a,
+       0x00000010, 0xbf8cc07f,
+       0xc06b023a, 0x00000020,
+       0xbf8cc07f, 0xc06b033a,
+       0x00000030, 0xbf8cc07f,
+       0x8074c074, 0x82758075,
+       0x807c907c, 0xbf0a7b7c,
+       0xbf85ffe7, 0xbef40172,
+       0xbef00080, 0xbefe00c1,
+       0xbeff00c1, 0xbee80080,
+       0xbee90080, 0xbef600ff,
+       0x01000000, 0x867aff78,
+       0x00400000, 0xbf850003,
+       0xb8faf803, 0x897a7aff,
+       0x10000000, 0xbf85004d,
+       0xbe840080, 0xd2890000,
        0x00000900, 0x80048104,
-       0xd2890002, 0x00000900,
-       0x80048104, 0xd2890003,
+       0xd2890001, 0x00000900,
+       0x80048104, 0xd2890002,
        0x00000900, 0x80048104,
+       0xd2890003, 0x00000900,
+       0x80048104, 0xc069003a,
+       0x00000070, 0xbf8cc07f,
+       0x80709070, 0xbf06c004,
+       0xbf84ffee, 0xbe840080,
+       0xd2890000, 0x00000901,
+       0x80048104, 0xd2890001,
+       0x00000901, 0x80048104,
+       0xd2890002, 0x00000901,
+       0x80048104, 0xd2890003,
+       0x00000901, 0x80048104,
        0xc069003a, 0x00000070,
        0xbf8cc07f, 0x80709070,
        0xbf06c004, 0xbf84ffee,
        0xbe840080, 0xd2890000,
-       0x00000901, 0x80048104,
-       0xd2890001, 0x00000901,
+       0x00000902, 0x80048104,
+       0xd2890001, 0x00000902,
        0x80048104, 0xd2890002,
-       0x00000901, 0x80048104,
-       0xd2890003, 0x00000901,
+       0x00000902, 0x80048104,
+       0xd2890003, 0x00000902,
        0x80048104, 0xc069003a,
        0x00000070, 0xbf8cc07f,
        0x80709070, 0xbf06c004,
        0xbf84ffee, 0xbe840080,
-       0xd2890000, 0x00000902,
+       0xd2890000, 0x00000903,
        0x80048104, 0xd2890001,
-       0x00000902, 0x80048104,
-       0xd2890002, 0x00000902,
+       0x00000903, 0x80048104,
+       0xd2890002, 0x00000903,
        0x80048104, 0xd2890003,
-       0x00000902, 0x80048104,
+       0x00000903, 0x80048104,
+       0xc069003a, 0x00000070,
+       0xbf8cc07f, 0x80709070,
+       0xbf06c004, 0xbf84ffee,
+       0xbf820008, 0xe0724000,
+       0x701d0000, 0xe0724100,
+       0x701d0100, 0xe0724200,
+       0x701d0200, 0xe0724300,
+       0x701d0300, 0xbefe00c1,
+       0xbeff00c1, 0xb8fb4306,
+       0x867bc17b, 0xbf840064,
+       0xbf8a0000, 0x867aff6f,
+       0x04000000, 0xbf840060,
+       0x8e7b867b, 0x8e7b827b,
+       0xbef6007b, 0xb8f02985,
+       0x80708170, 0x8e708a70,
+       0x8e708170, 0xb8fa1605,
+       0x807a817a, 0x8e7a867a,
+       0x80707a70, 0x8070ff70,
+       0x00000080, 0xbef600ff,
+       0x01000000, 0xbefc0080,
+       0xd28c0002, 0x000100c1,
+       0xd28d0003, 0x000204c1,
+       0x867aff78, 0x00400000,
+       0xbf850003, 0xb8faf803,
+       0x897a7aff, 0x10000000,
+       0xbf850030, 0x24040682,
+       0xd86e4000, 0x00000002,
+       0xbf8cc07f, 0xbe840080,
+       0xd2890000, 0x00000900,
+       0x80048104, 0xd2890001,
+       0x00000900, 0x80048104,
+       0xd2890002, 0x00000900,
+       0x80048104, 0xd2890003,
+       0x00000900, 0x80048104,
        0xc069003a, 0x00000070,
        0xbf8cc07f, 0x80709070,
        0xbf06c004, 0xbf84ffee,
        0xbe840080, 0xd2890000,
-       0x00000903, 0x80048104,
-       0xd2890001, 0x00000903,
+       0x00000901, 0x80048104,
+       0xd2890001, 0x00000901,
        0x80048104, 0xd2890002,
-       0x00000903, 0x80048104,
-       0xd2890003, 0x00000903,
+       0x00000901, 0x80048104,
+       0xd2890003, 0x00000901,
        0x80048104, 0xc069003a,
        0x00000070, 0xbf8cc07f,
        0x80709070, 0xbf06c004,
-       0xbf84ffee, 0xbf820008,
-       0xe0724000, 0x701d0000,
-       0xe0724100, 0x701d0100,
-       0xe0724200, 0x701d0200,
-       0xe0724300, 0x701d0300,
+       0xbf84ffee, 0x680404ff,
+       0x00000200, 0xd0c9006a,
+       0x0000f702, 0xbf87ffd2,
+       0xbf820015, 0xd1060002,
+       0x00011103, 0x7e0602ff,
+       0x00000200, 0xbefc00ff,
+       0x00010000, 0xbe800077,
+       0x8677ff77, 0xff7fffff,
+       0x8777ff77, 0x00058000,
+       0xd8ec0000, 0x00000002,
+       0xbf8cc07f, 0xe0765000,
+       0x701d0002, 0x68040702,
+       0xd0c9006a, 0x0000f702,
+       0xbf87fff7, 0xbef70000,
+       0xbef000ff, 0x00000400,
        0xbefe00c1, 0xbeff00c1,
-       0xb8fb4306, 0x867bc17b,
-       0xbf840064, 0xbf8a0000,
-       0x867aff6f, 0x04000000,
-       0xbf840060, 0x8e7b867b,
-       0x8e7b827b, 0xbef6007b,
-       0xb8f02985, 0x80708170,
-       0x8e708a70, 0x8e708170,
-       0xb8fa1605, 0x807a817a,
-       0x8e7a867a, 0x80707a70,
-       0x8070ff70, 0x00000080,
-       0xbef600ff, 0x01000000,
-       0xbefc0080, 0xd28c0002,
-       0x000100c1, 0xd28d0003,
-       0x000204c1, 0x867aff78,
+       0xb8fb2b05, 0x807b817b,
+       0x8e7b827b, 0xbef600ff,
+       0x01000000, 0xbefc0084,
+       0xbf0a7b7c, 0xbf84006d,
+       0xbf11017c, 0x807bff7b,
+       0x00001000, 0x867aff78,
        0x00400000, 0xbf850003,
        0xb8faf803, 0x897a7aff,
-       0x10000000, 0xbf850030,
-       0x24040682, 0xd86e4000,
-       0x00000002, 0xbf8cc07f,
+       0x10000000, 0xbf850051,
        0xbe840080, 0xd2890000,
        0x00000900, 0x80048104,
        0xd2890001, 0x00000900,
@@ -1796,31 +1842,51 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
        0xc069003a, 0x00000070,
        0xbf8cc07f, 0x80709070,
        0xbf06c004, 0xbf84ffee,
-       0x680404ff, 0x00000200,
-       0xd0c9006a, 0x0000f702,
-       0xbf87ffd2, 0xbf820015,
-       0xd1060002, 0x00011103,
-       0x7e0602ff, 0x00000200,
-       0xbefc00ff, 0x00010000,
-       0xbe800077, 0x8677ff77,
-       0xff7fffff, 0x8777ff77,
-       0x00058000, 0xd8ec0000,
-       0x00000002, 0xbf8cc07f,
-       0xe0765000, 0x701d0002,
-       0x68040702, 0xd0c9006a,
-       0x0000f702, 0xbf87fff7,
-       0xbef70000, 0xbef000ff,
-       0x00000400, 0xbefe00c1,
-       0xbeff00c1, 0xb8fb2b05,
-       0x807b817b, 0x8e7b827b,
-       0xbef600ff, 0x01000000,
-       0xbefc0084, 0xbf0a7b7c,
-       0xbf84006d, 0xbf11017c,
+       0xbe840080, 0xd2890000,
+       0x00000902, 0x80048104,
+       0xd2890001, 0x00000902,
+       0x80048104, 0xd2890002,
+       0x00000902, 0x80048104,
+       0xd2890003, 0x00000902,
+       0x80048104, 0xc069003a,
+       0x00000070, 0xbf8cc07f,
+       0x80709070, 0xbf06c004,
+       0xbf84ffee, 0xbe840080,
+       0xd2890000, 0x00000903,
+       0x80048104, 0xd2890001,
+       0x00000903, 0x80048104,
+       0xd2890002, 0x00000903,
+       0x80048104, 0xd2890003,
+       0x00000903, 0x80048104,
+       0xc069003a, 0x00000070,
+       0xbf8cc07f, 0x80709070,
+       0xbf06c004, 0xbf84ffee,
+       0x807c847c, 0xbf0a7b7c,
+       0xbf85ffb1, 0xbf9c0000,
+       0xbf820012, 0x7e000300,
+       0x7e020301, 0x7e040302,
+       0x7e060303, 0xe0724000,
+       0x701d0000, 0xe0724100,
+       0x701d0100, 0xe0724200,
+       0x701d0200, 0xe0724300,
+       0x701d0300, 0x807c847c,
+       0x8070ff70, 0x00000400,
+       0xbf0a7b7c, 0xbf85ffef,
+       0xbf9c0000, 0xb8fb2985,
+       0x807b817b, 0x8e7b837b,
+       0xb8fa2b05, 0x807a817a,
+       0x8e7a827a, 0x80fb7a7b,
+       0x867b7b7b, 0xbf84007a,
        0x807bff7b, 0x00001000,
+       0xbefc0080, 0xbf11017c,
        0x867aff78, 0x00400000,
        0xbf850003, 0xb8faf803,
        0x897a7aff, 0x10000000,
-       0xbf850051, 0xbe840080,
+       0xbf850059, 0xd3d84000,
+       0x18000100, 0xd3d84001,
+       0x18000101, 0xd3d84002,
+       0x18000102, 0xd3d84003,
+       0x18000103, 0xbe840080,
        0xd2890000, 0x00000900,
        0x80048104, 0xd2890001,
        0x00000900, 0x80048104,
@@ -1859,233 +1925,178 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
        0x00000070, 0xbf8cc07f,
        0x80709070, 0xbf06c004,
        0xbf84ffee, 0x807c847c,
-       0xbf0a7b7c, 0xbf85ffb1,
-       0xbf9c0000, 0xbf820012,
-       0x7e000300, 0x7e020301,
-       0x7e040302, 0x7e060303,
+       0xbf0a7b7c, 0xbf85ffa9,
+       0xbf9c0000, 0xbf820016,
+       0xd3d84000, 0x18000100,
+       0xd3d84001, 0x18000101,
+       0xd3d84002, 0x18000102,
+       0xd3d84003, 0x18000103,
        0xe0724000, 0x701d0000,
        0xe0724100, 0x701d0100,
        0xe0724200, 0x701d0200,
        0xe0724300, 0x701d0300,
        0x807c847c, 0x8070ff70,
        0x00000400, 0xbf0a7b7c,
-       0xbf85ffef, 0xbf9c0000,
-       0xb8fb2985, 0x807b817b,
-       0x8e7b837b, 0xb8fa2b05,
-       0x807a817a, 0x8e7a827a,
-       0x80fb7a7b, 0x867b7b7b,
-       0xbf84007a, 0x807bff7b,
-       0x00001000, 0xbefc0080,
-       0xbf11017c, 0x867aff78,
-       0x00400000, 0xbf850003,
-       0xb8faf803, 0x897a7aff,
-       0x10000000, 0xbf850059,
-       0xd3d84000, 0x18000100,
-       0xd3d84001, 0x18000101,
-       0xd3d84002, 0x18000102,
-       0xd3d84003, 0x18000103,
-       0xbe840080, 0xd2890000,
-       0x00000900, 0x80048104,
-       0xd2890001, 0x00000900,
-       0x80048104, 0xd2890002,
-       0x00000900, 0x80048104,
-       0xd2890003, 0x00000900,
-       0x80048104, 0xc069003a,
-       0x00000070, 0xbf8cc07f,
-       0x80709070, 0xbf06c004,
-       0xbf84ffee, 0xbe840080,
-       0xd2890000, 0x00000901,
-       0x80048104, 0xd2890001,
-       0x00000901, 0x80048104,
-       0xd2890002, 0x00000901,
-       0x80048104, 0xd2890003,
-       0x00000901, 0x80048104,
-       0xc069003a, 0x00000070,
-       0xbf8cc07f, 0x80709070,
-       0xbf06c004, 0xbf84ffee,
-       0xbe840080, 0xd2890000,
-       0x00000902, 0x80048104,
-       0xd2890001, 0x00000902,
-       0x80048104, 0xd2890002,
-       0x00000902, 0x80048104,
-       0xd2890003, 0x00000902,
-       0x80048104, 0xc069003a,
-       0x00000070, 0xbf8cc07f,
-       0x80709070, 0xbf06c004,
-       0xbf84ffee, 0xbe840080,
-       0xd2890000, 0x00000903,
-       0x80048104, 0xd2890001,
-       0x00000903, 0x80048104,
-       0xd2890002, 0x00000903,
-       0x80048104, 0xd2890003,
-       0x00000903, 0x80048104,
-       0xc069003a, 0x00000070,
-       0xbf8cc07f, 0x80709070,
-       0xbf06c004, 0xbf84ffee,
-       0x807c847c, 0xbf0a7b7c,
-       0xbf85ffa9, 0xbf9c0000,
-       0xbf820016, 0xd3d84000,
-       0x18000100, 0xd3d84001,
-       0x18000101, 0xd3d84002,
-       0x18000102, 0xd3d84003,
-       0x18000103, 0xe0724000,
-       0x701d0000, 0xe0724100,
-       0x701d0100, 0xe0724200,
-       0x701d0200, 0xe0724300,
-       0x701d0300, 0x807c847c,
-       0x8070ff70, 0x00000400,
-       0xbf0a7b7c, 0xbf85ffeb,
-       0xbf9c0000, 0xbf820101,
-       0xbef4007e, 0x8675ff7f,
-       0x0000ffff, 0x8775ff75,
-       0x00040000, 0xbef60080,
-       0xbef700ff, 0x00807fac,
-       0x866eff7f, 0x08000000,
-       0x8f6e836e, 0x87776e77,
-       0x866eff7f, 0x70000000,
-       0x8f6e816e, 0x87776e77,
-       0x866eff7f, 0x04000000,
-       0xbf84001f, 0xbefe00c1,
-       0xbeff00c1, 0xb8ef4306,
-       0x866fc16f, 0xbf84001a,
-       0x8e6f866f, 0x8e6f826f,
-       0xbef6006f, 0xb8f82985,
-       0x80788178, 0x8e788a78,
-       0x8e788178, 0xb8ee1605,
-       0x806e816e, 0x8e6e866e,
-       0x80786e78, 0x8078ff78,
-       0x00000080, 0xbef600ff,
-       0x01000000, 0xbefc0080,
-       0xe0510000, 0x781d0000,
-       0xe0510100, 0x781d0000,
-       0x807cff7c, 0x00000200,
-       0x8078ff78, 0x00000200,
-       0xbf0a6f7c, 0xbf85fff6,
+       0xbf85ffeb, 0xbf9c0000,
+       0xbf8200ee, 0xbef4007e,
+       0x8675ff7f, 0x0000ffff,
+       0x8775ff75, 0x00040000,
+       0xbef60080, 0xbef700ff,
+       0x00807fac, 0x866eff7f,
+       0x04000000, 0xbf84001f,
        0xbefe00c1, 0xbeff00c1,
+       0xb8ef4306, 0x866fc16f,
+       0xbf84001a, 0x8e6f866f,
+       0x8e6f826f, 0xbef6006f,
+       0xb8f82985, 0x80788178,
+       0x8e788a78, 0x8e788178,
+       0xb8ee1605, 0x806e816e,
+       0x8e6e866e, 0x80786e78,
+       0x8078ff78, 0x00000080,
        0xbef600ff, 0x01000000,
-       0xb8ef2b05, 0x806f816f,
-       0x8e6f826f, 0x806fff6f,
-       0x00008000, 0xbef80080,
-       0xbeee0078, 0x8078ff78,
-       0x00000400, 0xbefc0084,
+       0xbefc0080, 0xe0510000,
+       0x781d0000, 0xe0510100,
+       0x781d0000, 0x807cff7c,
+       0x00000200, 0x8078ff78,
+       0x00000200, 0xbf0a6f7c,
+       0xbf85fff6, 0xbefe00c1,
+       0xbeff00c1, 0xbef600ff,
+       0x01000000, 0xb8ef2b05,
+       0x806f816f, 0x8e6f826f,
+       0x806fff6f, 0x00008000,
+       0xbef80080, 0xbeee0078,
+       0x8078ff78, 0x00000400,
+       0xbefc0084, 0xbf11087c,
+       0xe0524000, 0x781d0000,
+       0xe0524100, 0x781d0100,
+       0xe0524200, 0x781d0200,
+       0xe0524300, 0x781d0300,
+       0xbf8c0f70, 0x7e000300,
+       0x7e020301, 0x7e040302,
+       0x7e060303, 0x807c847c,
+       0x8078ff78, 0x00000400,
+       0xbf0a6f7c, 0xbf85ffee,
+       0xb8ef2985, 0x806f816f,
+       0x8e6f836f, 0xb8f92b05,
+       0x80798179, 0x8e798279,
+       0x80ef796f, 0x866f6f6f,
+       0xbf84001a, 0x806fff6f,
+       0x00008000, 0xbefc0080,
        0xbf11087c, 0xe0524000,
        0x781d0000, 0xe0524100,
        0x781d0100, 0xe0524200,
        0x781d0200, 0xe0524300,
        0x781d0300, 0xbf8c0f70,
-       0x7e000300, 0x7e020301,
-       0x7e040302, 0x7e060303,
+       0xd3d94000, 0x18000100,
+       0xd3d94001, 0x18000101,
+       0xd3d94002, 0x18000102,
+       0xd3d94003, 0x18000103,
        0x807c847c, 0x8078ff78,
        0x00000400, 0xbf0a6f7c,
-       0xbf85ffee, 0xb8ef2985,
-       0x806f816f, 0x8e6f836f,
-       0xb8f92b05, 0x80798179,
-       0x8e798279, 0x80ef796f,
-       0x866f6f6f, 0xbf84001a,
-       0x806fff6f, 0x00008000,
-       0xbefc0080, 0xbf11087c,
-       0xe0524000, 0x781d0000,
-       0xe0524100, 0x781d0100,
-       0xe0524200, 0x781d0200,
-       0xe0524300, 0x781d0300,
-       0xbf8c0f70, 0xd3d94000,
-       0x18000100, 0xd3d94001,
-       0x18000101, 0xd3d94002,
-       0x18000102, 0xd3d94003,
-       0x18000103, 0x807c847c,
-       0x8078ff78, 0x00000400,
-       0xbf0a6f7c, 0xbf85ffea,
-       0xbf9c0000, 0xe0524000,
-       0x6e1d0000, 0xe0524100,
-       0x6e1d0100, 0xe0524200,
-       0x6e1d0200, 0xe0524300,
-       0x6e1d0300, 0xbf8c0f70,
-       0xb8f82985, 0x80788178,
-       0x8e788a78, 0x8e788178,
-       0xb8ee1605, 0x806e816e,
-       0x8e6e866e, 0x80786e78,
-       0x80f8c078, 0xb8ef1605,
-       0x806f816f, 0x8e6f846f,
-       0x8e76826f, 0xbef600ff,
-       0x01000000, 0xbefc006f,
-       0xc031003a, 0x00000078,
-       0x80f8c078, 0xbf8cc07f,
-       0x80fc907c, 0xbf800000,
-       0xbe802d00, 0xbe822d02,
-       0xbe842d04, 0xbe862d06,
-       0xbe882d08, 0xbe8a2d0a,
-       0xbe8c2d0c, 0xbe8e2d0e,
-       0xbf06807c, 0xbf84fff0,
-       0xb8f82985, 0x80788178,
-       0x8e788a78, 0x8e788178,
-       0xb8ee1605, 0x806e816e,
-       0x8e6e866e, 0x80786e78,
-       0xbef60084, 0xbef600ff,
-       0x01000000, 0xc0211bfa,
+       0xbf85ffea, 0xbf9c0000,
+       0xe0524000, 0x6e1d0000,
+       0xe0524100, 0x6e1d0100,
+       0xe0524200, 0x6e1d0200,
+       0xe0524300, 0x6e1d0300,
+       0xbf8c0f70, 0xb8f82985,
+       0x80788178, 0x8e788a78,
+       0x8e788178, 0xb8ee1605,
+       0x806e816e, 0x8e6e866e,
+       0x80786e78, 0x80f8c078,
+       0xb8ef1605, 0x806f816f,
+       0x8e6f846f, 0x8e76826f,
+       0xbef600ff, 0x01000000,
+       0xbefc006f, 0xc031003a,
+       0x00000078, 0x80f8c078,
+       0xbf8cc07f, 0x80fc907c,
+       0xbf800000, 0xbe802d00,
+       0xbe822d02, 0xbe842d04,
+       0xbe862d06, 0xbe882d08,
+       0xbe8a2d0a, 0xbe8c2d0c,
+       0xbe8e2d0e, 0xbf06807c,
+       0xbf84fff0, 0xb8f82985,
+       0x80788178, 0x8e788a78,
+       0x8e788178, 0xb8ee1605,
+       0x806e816e, 0x8e6e866e,
+       0x80786e78, 0xbef60084,
+       0xbef600ff, 0x01000000,
+       0xc0211bfa, 0x00000078,
+       0x80788478, 0xc0211b3a,
        0x00000078, 0x80788478,
-       0xc0211b3a, 0x00000078,
-       0x80788478, 0xc0211b7a,
+       0xc0211b7a, 0x00000078,
+       0x80788478, 0xc0211c3a,
        0x00000078, 0x80788478,
-       0xc0211c3a, 0x00000078,
-       0x80788478, 0xc0211c7a,
+       0xc0211c7a, 0x00000078,
+       0x80788478, 0xc0211eba,
        0x00000078, 0x80788478,
-       0xc0211eba, 0x00000078,
-       0x80788478, 0xc0211efa,
+       0xc0211efa, 0x00000078,
+       0x80788478, 0xc0211a3a,
        0x00000078, 0x80788478,
-       0xc0211a3a, 0x00000078,
-       0x80788478, 0xc0211a7a,
+       0xc0211a7a, 0x00000078,
+       0x80788478, 0xc0211cfa,
        0x00000078, 0x80788478,
-       0xc0211cfa, 0x00000078,
-       0x80788478, 0xbf8cc07f,
-       0xbefc006f, 0xbefe0070,
-       0xbeff0071, 0x866f7bff,
-       0x000003ff, 0xb96f4803,
-       0x866f7bff, 0xfffff800,
-       0x8f6f8b6f, 0xb96fa2c3,
-       0xb973f801, 0xb8ee2985,
-       0x806e816e, 0x8e6e8a6e,
-       0x8e6e816e, 0xb8ef1605,
-       0x806f816f, 0x8e6f866f,
-       0x806e6f6e, 0x806e746e,
-       0x826f8075, 0x866fff6f,
-       0x0000ffff, 0xc00b1c37,
-       0x00000050, 0xc00b1d37,
-       0x00000060, 0xc0031e77,
-       0x00000074, 0xbf8cc07f,
-       0x866fff6d, 0xf8000000,
-       0x8f6f9b6f, 0x8e6f906f,
-       0xbeee0080, 0x876e6f6e,
-       0x866fff6d, 0x04000000,
-       0x8f6f9a6f, 0x8e6f8f6f,
-       0x876e6f6e, 0x866fff7a,
-       0x00800000, 0x8f6f976f,
+       0xbf8cc07f, 0xbefc006f,
+       0xbefe0070, 0xbeff0071,
+       0x866f7bff, 0x000003ff,
+       0xb96f4803, 0x866f7bff,
+       0xfffff800, 0x8f6f8b6f,
+       0xb96fa2c3, 0xb973f801,
+       0xb8ee2985, 0x806e816e,
+       0x8e6e8a6e, 0x8e6e816e,
+       0xb8ef1605, 0x806f816f,
+       0x8e6f866f, 0x806e6f6e,
+       0x806e746e, 0x826f8075,
+       0x866fff6f, 0x0000ffff,
+       0xc00b1c37, 0x00000050,
+       0xc00b1d37, 0x00000060,
+       0xc0031e77, 0x00000074,
+       0xbf8cc07f, 0x8f6e8b77,
+       0x866eff6e, 0x001f8000,
        0xb96ef807, 0x866dff6d,
        0x0000ffff, 0x86fe7e7e,
        0x86ea6a6a, 0x8f6e837a,
        0xb96ee0c2, 0xbf800002,
        0xb97a0002, 0xbf8a0000,
-       0x95806f6c, 0xbf810000,
+       0xbe801f6c, 0xbf810000,
 };
 
 static const uint32_t cwsr_trap_gfx10_hex[] = {
-       0xbf820001, 0xbf8201cf,
+       0xbf820001, 0xbf82021c,
        0xb0804004, 0xb978f802,
-       0x8a788678, 0xb96ef801,
-       0x876eff6e, 0x00000800,
-       0xbf840003, 0x876eff78,
+       0x8a78ff78, 0x00020006,
+       0xb97bf803, 0x876eff78,
        0x00002000, 0xbf840009,
-       0xb97bf803, 0x876eff7b,
-       0x00000400, 0xbf85001d,
-       0x876eff7b, 0x00000100,
-       0xbf840002, 0x8878ff78,
-       0x00002000, 0xb97af812,
+       0x876eff6d, 0x00ff0000,
+       0xbf85001e, 0x876eff7b,
+       0x00000400, 0xbf850041,
+       0xbf8e0010, 0xb97bf803,
+       0xbf82fffa, 0x876eff7b,
+       0x00000900, 0xbf850015,
+       0x876eff7b, 0x000071ff,
+       0xbf840008, 0x876fff7b,
+       0x00007080, 0xbf840001,
+       0xbeee1d87, 0xb96ff801,
+       0x8f6e8c6e, 0x876e6f6e,
+       0xbf85000a, 0x876eff6d,
+       0x00ff0000, 0xbf850007,
+       0xb96ef801, 0x876eff6e,
+       0x00000800, 0xbf850003,
+       0x876eff7b, 0x00000400,
+       0xbf850026, 0xb97af812,
        0xb97bf813, 0x8ffa887a,
-       0xf4051bbd, 0xfa000000,
-       0xbf8cc07f, 0xf4051ebd,
-       0xfa000008, 0xbf8cc07f,
-       0x87ee6e6e, 0xbf840001,
-       0xbe80206e, 0xb97bf803,
-       0x877bff7b, 0x000001ff,
+       0xf4011bbd, 0xfa000010,
+       0xbf8cc07f, 0x8f6e976e,
+       0x8a77ff77, 0x00800000,
+       0x88776e77, 0xf4051bbd,
+       0xfa000000, 0xbf8cc07f,
+       0xf4051ebd, 0xfa000008,
+       0xbf8cc07f, 0x87ee6e6e,
+       0xbf840001, 0xbe80206e,
+       0x876eff6d, 0x01ff0000,
+       0xbf850005, 0x8878ff78,
+       0x00002000, 0x80ec886c,
+       0x82ed806d, 0xbf820005,
+       0x876eff6d, 0x01000000,
        0xbf850002, 0x806c846c,
        0x826d806d, 0x876dff6d,
        0x0000ffff, 0x87fe7e7e,
@@ -2095,37 +2106,55 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
        0xb9fa0283, 0xbeee037e,
        0xbeef037f, 0xbefe0480,
        0xbf900004, 0xbf8cc07f,
+       0x877aff7f, 0x04000000,
+       0x8f7a857a, 0x886d7a6d,
+       0xbefa037e, 0x877bff7f,
+       0x0000ffff, 0xbefe03c1,
+       0xbeff03c1, 0xdc5f8000,
+       0x007a0000, 0x7e000280,
+       0xbefe037a, 0xbeff037b,
        0xb97b02dc, 0x8f7b997b,
-       0x887b7b7f, 0xb97a2a05,
-       0x807a817a, 0xbf0d997b,
-       0xbf850002, 0x8f7a897a,
-       0xbf820001, 0x8f7a8a7a,
+       0xb97a2a05, 0x807a817a,
+       0xbf0d997b, 0xbf850002,
+       0x8f7a897a, 0xbf820001,
+       0x8f7a8a7a, 0xb97b1e06,
+       0x8f7b8a7b, 0x807a7b7a,
        0x877bff7f, 0x0000ffff,
        0x807aff7a, 0x00000200,
        0x807a7e7a, 0x827b807b,
-       0xbef4037e, 0x8775ff7f,
-       0x0000ffff, 0x8875ff75,
-       0x00040000, 0xbef60380,
-       0xbef703ff, 0x10807fac,
-       0x877aff7f, 0x08000000,
-       0x907a837a, 0x88777a77,
-       0x877aff7f, 0x70000000,
-       0x907a817a, 0x88777a77,
-       0xbef1037c, 0xbef00380,
-       0xb97302dc, 0x8f739973,
-       0x8873737f, 0xbefe03c1,
+       0xd7610000, 0x00010870,
+       0xd7610000, 0x00010a71,
+       0xd7610000, 0x00010c72,
+       0xd7610000, 0x00010e73,
+       0xd7610000, 0x00011074,
+       0xd7610000, 0x00011275,
+       0xd7610000, 0x00011476,
+       0xd7610000, 0x00011677,
+       0xd7610000, 0x00011a79,
+       0xd7610000, 0x00011c7e,
+       0xd7610000, 0x00011e7f,
+       0xbefe03ff, 0x00003fff,
+       0xbeff0380, 0xdc5f8040,
+       0x007a0000, 0xd760007a,
+       0x00011d00, 0xd760007b,
+       0x00011f00, 0xbefe037a,
+       0xbeff037b, 0xbef4037e,
+       0x8775ff7f, 0x0000ffff,
+       0x8875ff75, 0x00040000,
+       0xbef60380, 0xbef703ff,
+       0x10807fac, 0xbef1037c,
+       0xbef00380, 0xb97302dc,
+       0x8f739973, 0xbefe03c1,
        0x907c9973, 0x877c817c,
        0xbf06817c, 0xbf850002,
        0xbeff0380, 0xbf820002,
-       0xbeff03c1, 0xbf82000b,
+       0xbeff03c1, 0xbf820009,
        0xbef603ff, 0x01000000,
-       0xe0704000, 0x705d0000,
        0xe0704080, 0x705d0100,
        0xe0704100, 0x705d0200,
        0xe0704180, 0x705d0300,
-       0xbf82000a, 0xbef603ff,
-       0x01000000, 0xe0704000,
-       0x705d0000, 0xe0704100,
+       0xbf820008, 0xbef603ff,
+       0x01000000, 0xe0704100,
        0x705d0100, 0xe0704200,
        0x705d0200, 0xe0704300,
        0x705d0300, 0xb9702a05,
@@ -2140,8 +2169,9 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
        0xbefc0380, 0xd7610002,
        0x0000f871, 0x807c817c,
        0xd7610002, 0x0000f86c,
-       0x807c817c, 0xd7610002,
-       0x0000f86d, 0x807c817c,
+       0x807c817c, 0x8a7aff6d,
+       0x80000000, 0xd7610002,
+       0x0000f87a, 0x807c817c,
        0xd7610002, 0x0000f86e,
        0x807c817c, 0xd7610002,
        0x0000f86f, 0x807c817c,
@@ -2156,160 +2186,157 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
        0x0000f871, 0x807c817c,
        0xb971f815, 0xd7610002,
        0x0000f871, 0x807c817c,
+       0xbefe03ff, 0x0000ffff,
        0xbeff0380, 0xe0704000,
-       0x705d0200, 0xb9702a05,
-       0x80708170, 0xbf0d9973,
-       0xbf850002, 0x8f708970,
-       0xbf820001, 0x8f708a70,
-       0xb97a1e06, 0x8f7a8a7a,
-       0x80707a70, 0xbef603ff,
-       0x01000000, 0xbef90380,
-       0xbefc0380, 0xbf800000,
-       0xbe802f00, 0xbe822f02,
-       0xbe842f04, 0xbe862f06,
-       0xbe882f08, 0xbe8a2f0a,
-       0xbe8c2f0c, 0xbe8e2f0e,
-       0xd7610002, 0x0000f200,
-       0x80798179, 0xd7610002,
-       0x0000f201, 0x80798179,
-       0xd7610002, 0x0000f202,
-       0x80798179, 0xd7610002,
-       0x0000f203, 0x80798179,
-       0xd7610002, 0x0000f204,
+       0x705d0200, 0xbefe03c1,
+       0xb9702a05, 0x80708170,
+       0xbf0d9973, 0xbf850002,
+       0x8f708970, 0xbf820001,
+       0x8f708a70, 0xb97a1e06,
+       0x8f7a8a7a, 0x80707a70,
+       0xbef603ff, 0x01000000,
+       0xbef90380, 0xbefc0380,
+       0xbf800000, 0xbe802f00,
+       0xbe822f02, 0xbe842f04,
+       0xbe862f06, 0xbe882f08,
+       0xbe8a2f0a, 0xbe8c2f0c,
+       0xbe8e2f0e, 0xd7610002,
+       0x0000f200, 0x80798179,
+       0xd7610002, 0x0000f201,
        0x80798179, 0xd7610002,
-       0x0000f205, 0x80798179,
-       0xd7610002, 0x0000f206,
+       0x0000f202, 0x80798179,
+       0xd7610002, 0x0000f203,
        0x80798179, 0xd7610002,
-       0x0000f207, 0x80798179,
-       0xd7610002, 0x0000f208,
+       0x0000f204, 0x80798179,
+       0xd7610002, 0x0000f205,
        0x80798179, 0xd7610002,
-       0x0000f209, 0x80798179,
-       0xd7610002, 0x0000f20a,
+       0x0000f206, 0x80798179,
+       0xd7610002, 0x0000f207,
        0x80798179, 0xd7610002,
-       0x0000f20b, 0x80798179,
-       0xd7610002, 0x0000f20c,
+       0x0000f208, 0x80798179,
+       0xd7610002, 0x0000f209,
        0x80798179, 0xd7610002,
-       0x0000f20d, 0x80798179,
-       0xd7610002, 0x0000f20e,
+       0x0000f20a, 0x80798179,
+       0xd7610002, 0x0000f20b,
        0x80798179, 0xd7610002,
-       0x0000f20f, 0x80798179,
-       0xbf06a079, 0xbf840006,
-       0xe0704000, 0x705d0200,
-       0x8070ff70, 0x00000080,
-       0xbef90380, 0x7e040280,
-       0x807c907c, 0xbf0aff7c,
-       0x00000060, 0xbf85ffbc,
-       0xbe802f00, 0xbe822f02,
-       0xbe842f04, 0xbe862f06,
-       0xbe882f08, 0xbe8a2f0a,
-       0xd7610002, 0x0000f200,
+       0x0000f20c, 0x80798179,
+       0xd7610002, 0x0000f20d,
        0x80798179, 0xd7610002,
-       0x0000f201, 0x80798179,
-       0xd7610002, 0x0000f202,
+       0x0000f20e, 0x80798179,
+       0xd7610002, 0x0000f20f,
+       0x80798179, 0xbf06a079,
+       0xbf840006, 0xe0704000,
+       0x705d0200, 0x8070ff70,
+       0x00000080, 0xbef90380,
+       0x7e040280, 0x807c907c,
+       0xbf0aff7c, 0x00000060,
+       0xbf85ffbc, 0xbe802f00,
+       0xbe822f02, 0xbe842f04,
+       0xbe862f06, 0xbe882f08,
+       0xbe8a2f0a, 0xd7610002,
+       0x0000f200, 0x80798179,
+       0xd7610002, 0x0000f201,
        0x80798179, 0xd7610002,
-       0x0000f203, 0x80798179,
-       0xd7610002, 0x0000f204,
+       0x0000f202, 0x80798179,
+       0xd7610002, 0x0000f203,
        0x80798179, 0xd7610002,
-       0x0000f205, 0x80798179,
-       0xd7610002, 0x0000f206,
+       0x0000f204, 0x80798179,
+       0xd7610002, 0x0000f205,
        0x80798179, 0xd7610002,
-       0x0000f207, 0x80798179,
-       0xd7610002, 0x0000f208,
+       0x0000f206, 0x80798179,
+       0xd7610002, 0x0000f207,
        0x80798179, 0xd7610002,
-       0x0000f209, 0x80798179,
-       0xd7610002, 0x0000f20a,
+       0x0000f208, 0x80798179,
+       0xd7610002, 0x0000f209,
        0x80798179, 0xd7610002,
-       0x0000f20b, 0x80798179,
-       0xe0704000, 0x705d0200,
+       0x0000f20a, 0x80798179,
+       0xd7610002, 0x0000f20b,
+       0x80798179, 0xe0704000,
+       0x705d0200, 0xbefe03c1,
+       0x907c9973, 0x877c817c,
+       0xbf06817c, 0xbf850002,
+       0xbeff0380, 0xbf820001,
+       0xbeff03c1, 0xb97b4306,
+       0x877bc17b, 0xbf840044,
+       0xbf8a0000, 0x877aff6d,
+       0x80000000, 0xbf840040,
+       0x8f7b867b, 0x8f7b827b,
+       0xbef6037b, 0xb9703a05,
+       0x80708170, 0xbf0d9973,
+       0xbf850002, 0x8f708970,
+       0xbf820001, 0x8f708a70,
+       0xb97a1e06, 0x8f7a8a7a,
+       0x80707a70, 0x8070ff70,
+       0x00000200, 0x8070ff70,
+       0x00000080, 0xbef603ff,
+       0x01000000, 0xd7650000,
+       0x000100c1, 0xd7660000,
+       0x000200c1, 0x16000084,
+       0x907c9973, 0x877c817c,
+       0xbf06817c, 0xbefc0380,
+       0xbf850012, 0xbe8303ff,
+       0x00000080, 0xbf800000,
+       0xbf800000, 0xbf800000,
+       0xd8d80000, 0x01000000,
+       0xbf8c0000, 0xe0704000,
+       0x705d0100, 0x807c037c,
+       0x80700370, 0xd5250000,
+       0x0001ff00, 0x00000080,
+       0xbf0a7b7c, 0xbf85fff4,
+       0xbf820011, 0xbe8303ff,
+       0x00000100, 0xbf800000,
+       0xbf800000, 0xbf800000,
+       0xd8d80000, 0x01000000,
+       0xbf8c0000, 0xe0704000,
+       0x705d0100, 0x807c037c,
+       0x80700370, 0xd5250000,
+       0x0001ff00, 0x00000100,
+       0xbf0a7b7c, 0xbf85fff4,
        0xbefe03c1, 0x907c9973,
        0x877c817c, 0xbf06817c,
-       0xbf850002, 0xbeff0380,
-       0xbf820001, 0xbeff03c1,
-       0xb97b4306, 0x877bc17b,
-       0xbf840044, 0xbf8a0000,
-       0x877aff73, 0x04000000,
-       0xbf840040, 0x8f7b867b,
-       0x8f7b827b, 0xbef6037b,
-       0xb9702a05, 0x80708170,
-       0xbf0d9973, 0xbf850002,
-       0x8f708970, 0xbf820001,
-       0x8f708a70, 0xb97a1e06,
-       0x8f7a8a7a, 0x80707a70,
-       0x8070ff70, 0x00000200,
-       0x8070ff70, 0x00000080,
-       0xbef603ff, 0x01000000,
-       0xd7650000, 0x000100c1,
-       0xd7660000, 0x000200c1,
-       0x16000084, 0x907c9973,
+       0xbf850004, 0xbef003ff,
+       0x00000200, 0xbeff0380,
+       0xbf820003, 0xbef003ff,
+       0x00000400, 0xbeff03c1,
+       0xb97b3a05, 0x807b817b,
+       0x8f7b827b, 0x907c9973,
        0x877c817c, 0xbf06817c,
-       0xbefc0380, 0xbf850012,
-       0xbe8303ff, 0x00000080,
-       0xbf800000, 0xbf800000,
-       0xbf800000, 0xd8d80000,
-       0x01000000, 0xbf8c0000,
-       0xe0704000, 0x705d0100,
-       0x807c037c, 0x80700370,
-       0xd5250000, 0x0001ff00,
-       0x00000080, 0xbf0a7b7c,
-       0xbf85fff4, 0xbf820011,
-       0xbe8303ff, 0x00000100,
-       0xbf800000, 0xbf800000,
-       0xbf800000, 0xd8d80000,
-       0x01000000, 0xbf8c0000,
-       0xe0704000, 0x705d0100,
-       0x807c037c, 0x80700370,
-       0xd5250000, 0x0001ff00,
-       0x00000100, 0xbf0a7b7c,
-       0xbf85fff4, 0xbefe03c1,
-       0x907c9973, 0x877c817c,
-       0xbf06817c, 0xbf850004,
-       0xbef003ff, 0x00000200,
-       0xbeff0380, 0xbf820003,
-       0xbef003ff, 0x00000400,
-       0xbeff03c1, 0xb97b2a05,
-       0x807b817b, 0x8f7b827b,
-       0x907c9973, 0x877c817c,
-       0xbf06817c, 0xbf850017,
+       0xbf850017, 0xbef603ff,
+       0x01000000, 0xbefc0384,
+       0xbf0a7b7c, 0xbf840037,
+       0x7e008700, 0x7e028701,
+       0x7e048702, 0x7e068703,
+       0xe0704000, 0x705d0000,
+       0xe0704080, 0x705d0100,
+       0xe0704100, 0x705d0200,
+       0xe0704180, 0x705d0300,
+       0x807c847c, 0x8070ff70,
+       0x00000200, 0xbf0a7b7c,
+       0xbf85ffef, 0xbf820025,
        0xbef603ff, 0x01000000,
        0xbefc0384, 0xbf0a7b7c,
-       0xbf840037, 0x7e008700,
+       0xbf840011, 0x7e008700,
        0x7e028701, 0x7e048702,
        0x7e068703, 0xe0704000,
-       0x705d0000, 0xe0704080,
-       0x705d0100, 0xe0704100,
-       0x705d0200, 0xe0704180,
+       0x705d0000, 0xe0704100,
+       0x705d0100, 0xe0704200,
+       0x705d0200, 0xe0704300,
        0x705d0300, 0x807c847c,
-       0x8070ff70, 0x00000200,
+       0x8070ff70, 0x00000400,
        0xbf0a7b7c, 0xbf85ffef,
-       0xbf820025, 0xbef603ff,
-       0x01000000, 0xbefc0384,
-       0xbf0a7b7c, 0xbf840020,
-       0x7e008700, 0x7e028701,
-       0x7e048702, 0x7e068703,
+       0xb97b1e06, 0x877bc17b,
+       0xbf84000c, 0x8f7b837b,
+       0x807b7c7b, 0xbefe03c1,
+       0xbeff0380, 0x7e008700,
        0xe0704000, 0x705d0000,
-       0xe0704100, 0x705d0100,
-       0xe0704200, 0x705d0200,
-       0xe0704300, 0x705d0300,
-       0x807c847c, 0x8070ff70,
-       0x00000400, 0xbf0a7b7c,
-       0xbf85ffef, 0xb97b1e06,
-       0x877bc17b, 0xbf84000c,
-       0x8f7b837b, 0x807b7c7b,
-       0xbefe03c1, 0xbeff0380,
-       0x7e008700, 0xe0704000,
-       0x705d0000, 0x807c817c,
-       0x8070ff70, 0x00000080,
-       0xbf0a7b7c, 0xbf85fff8,
-       0xbf82013c, 0xbef4037e,
-       0x8775ff7f, 0x0000ffff,
-       0x8875ff75, 0x00040000,
-       0xbef60380, 0xbef703ff,
-       0x10807fac, 0x876eff7f,
-       0x08000000, 0x906e836e,
-       0x88776e77, 0x876eff7f,
-       0x70000000, 0x906e816e,
-       0x88776e77, 0xb97202dc,
-       0x8f729972, 0x8872727f,
+       0x807c817c, 0x8070ff70,
+       0x00000080, 0xbf0a7b7c,
+       0xbf85fff8, 0xbf82013b,
+       0xbef4037e, 0x8775ff7f,
+       0x0000ffff, 0x8875ff75,
+       0x00040000, 0xbef60380,
+       0xbef703ff, 0x10807fac,
+       0xb97202dc, 0x8f729972,
        0x876eff7f, 0x04000000,
        0xbf840034, 0xbefe03c1,
        0x907c9972, 0x877c817c,
@@ -2318,7 +2345,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
        0xbeff03c1, 0xb96f4306,
        0x876fc16f, 0xbf840029,
        0x8f6f866f, 0x8f6f826f,
-       0xbef6036f, 0xb9782a05,
+       0xbef6036f, 0xb9783a05,
        0x80788178, 0xbf0d9972,
        0xbf850002, 0x8f788978,
        0xbf820001, 0x8f788a78,
@@ -2342,13 +2369,14 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
        0x877c817c, 0xbf06817c,
        0xbf850002, 0xbeff0380,
        0xbf820001, 0xbeff03c1,
-       0xb96f2a05, 0x806f816f,
+       0xb96f3a05, 0x806f816f,
        0x8f6f826f, 0x907c9972,
        0x877c817c, 0xbf06817c,
-       0xbf850021, 0xbef603ff,
+       0xbf850024, 0xbef603ff,
        0x01000000, 0xbeee0378,
        0x8078ff78, 0x00000200,
-       0xbefc0384, 0xe0304000,
+       0xbefc0384, 0xbf0a6f7c,
+       0xbf840050, 0xe0304000,
        0x785d0000, 0xe0304080,
        0x785d0100, 0xe0304100,
        0x785d0200, 0xe0304180,
@@ -2361,94 +2389,97 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
        0x6e5d0000, 0xe0304080,
        0x6e5d0100, 0xe0304100,
        0x6e5d0200, 0xe0304180,
-       0x6e5d0300, 0xbf820032,
-       0xbef603ff, 0x01000000,
-       0xbeee0378, 0x8078ff78,
-       0x00000400, 0xbefc0384,
+       0x6e5d0300, 0xbf8c3f70,
+       0xbf820034, 0xbef603ff,
+       0x01000000, 0xbeee0378,
+       0x8078ff78, 0x00000400,
+       0xbefc0384, 0xbf0a6f7c,
+       0xbf840012, 0xe0304000,
+       0x785d0000, 0xe0304100,
+       0x785d0100, 0xe0304200,
+       0x785d0200, 0xe0304300,
+       0x785d0300, 0xbf8c3f70,
+       0x7e008500, 0x7e028501,
+       0x7e048502, 0x7e068503,
+       0x807c847c, 0x8078ff78,
+       0x00000400, 0xbf0a6f7c,
+       0xbf85ffee, 0xb96f1e06,
+       0x876fc16f, 0xbf84000e,
+       0x8f6f836f, 0x806f7c6f,
+       0xbefe03c1, 0xbeff0380,
        0xe0304000, 0x785d0000,
-       0xe0304100, 0x785d0100,
-       0xe0304200, 0x785d0200,
-       0xe0304300, 0x785d0300,
        0xbf8c3f70, 0x7e008500,
-       0x7e028501, 0x7e048502,
-       0x7e068503, 0x807c847c,
-       0x8078ff78, 0x00000400,
-       0xbf0a6f7c, 0xbf85ffee,
-       0xb96f1e06, 0x876fc16f,
-       0xbf84000e, 0x8f6f836f,
-       0x806f7c6f, 0xbefe03c1,
-       0xbeff0380, 0xe0304000,
-       0x785d0000, 0xbf8c3f70,
-       0x7e008500, 0x807c817c,
-       0x8078ff78, 0x00000080,
-       0xbf0a6f7c, 0xbf85fff7,
-       0xbeff03c1, 0xe0304000,
-       0x6e5d0000, 0xe0304100,
-       0x6e5d0100, 0xe0304200,
-       0x6e5d0200, 0xe0304300,
-       0x6e5d0300, 0xbf8c3f70,
-       0xb9782a05, 0x80788178,
+       0x807c817c, 0x8078ff78,
+       0x00000080, 0xbf0a6f7c,
+       0xbf85fff7, 0xbeff03c1,
+       0xe0304000, 0x6e5d0000,
+       0xe0304100, 0x6e5d0100,
+       0xe0304200, 0x6e5d0200,
+       0xe0304300, 0x6e5d0300,
+       0xbf8c3f70, 0xb9783a05,
+       0x80788178, 0xbf0d9972,
+       0xbf850002, 0x8f788978,
+       0xbf820001, 0x8f788a78,
+       0xb96e1e06, 0x8f6e8a6e,
+       0x80786e78, 0x8078ff78,
+       0x00000200, 0x80f8ff78,
+       0x00000050, 0xbef603ff,
+       0x01000000, 0xbefc03ff,
+       0x0000006c, 0x80f89078,
+       0xf429003a, 0xf0000000,
+       0xbf8cc07f, 0x80fc847c,
+       0xbf800000, 0xbe803100,
+       0xbe823102, 0x80f8a078,
+       0xf42d003a, 0xf0000000,
+       0xbf8cc07f, 0x80fc887c,
+       0xbf800000, 0xbe803100,
+       0xbe823102, 0xbe843104,
+       0xbe863106, 0x80f8c078,
+       0xf431003a, 0xf0000000,
+       0xbf8cc07f, 0x80fc907c,
+       0xbf800000, 0xbe803100,
+       0xbe823102, 0xbe843104,
+       0xbe863106, 0xbe883108,
+       0xbe8a310a, 0xbe8c310c,
+       0xbe8e310e, 0xbf06807c,
+       0xbf84fff0, 0xba80f801,
+       0x00000000, 0xbf8a0000,
+       0xb9783a05, 0x80788178,
        0xbf0d9972, 0xbf850002,
        0x8f788978, 0xbf820001,
        0x8f788a78, 0xb96e1e06,
        0x8f6e8a6e, 0x80786e78,
        0x8078ff78, 0x00000200,
-       0x80f8ff78, 0x00000050,
        0xbef603ff, 0x01000000,
-       0xbefc03ff, 0x0000006c,
-       0x80f89078, 0xf429003a,
-       0xf0000000, 0xbf8cc07f,
-       0x80fc847c, 0xbf800000,
-       0xbe803100, 0xbe823102,
-       0x80f8a078, 0xf42d003a,
-       0xf0000000, 0xbf8cc07f,
-       0x80fc887c, 0xbf800000,
-       0xbe803100, 0xbe823102,
-       0xbe843104, 0xbe863106,
-       0x80f8c078, 0xf431003a,
-       0xf0000000, 0xbf8cc07f,
-       0x80fc907c, 0xbf800000,
-       0xbe803100, 0xbe823102,
-       0xbe843104, 0xbe863106,
-       0xbe883108, 0xbe8a310a,
-       0xbe8c310c, 0xbe8e310e,
-       0xbf06807c, 0xbf84fff0,
-       0xba80f801, 0x00000000,
-       0xbf8a0000, 0xb9782a05,
-       0x80788178, 0xbf0d9972,
-       0xbf850002, 0x8f788978,
-       0xbf820001, 0x8f788a78,
-       0xb96e1e06, 0x8f6e8a6e,
-       0x80786e78, 0x8078ff78,
-       0x00000200, 0xbef603ff,
-       0x01000000, 0xf4211bfa,
+       0xf4211bfa, 0xf0000000,
+       0x80788478, 0xf4211b3a,
        0xf0000000, 0x80788478,
-       0xf4211b3a, 0xf0000000,
-       0x80788478, 0xf4211b7a,
+       0xf4211b7a, 0xf0000000,
+       0x80788478, 0xf4211c3a,
        0xf0000000, 0x80788478,
-       0xf4211c3a, 0xf0000000,
-       0x80788478, 0xf4211c7a,
+       0xf4211c7a, 0xf0000000,
+       0x80788478, 0xf4211eba,
        0xf0000000, 0x80788478,
-       0xf4211eba, 0xf0000000,
-       0x80788478, 0xf4211efa,
+       0xf4211efa, 0xf0000000,
+       0x80788478, 0xf4211e7a,
        0xf0000000, 0x80788478,
-       0xf4211e7a, 0xf0000000,
-       0x80788478, 0xf4211cfa,
+       0xf4211cfa, 0xf0000000,
+       0x80788478, 0xf4211bba,
        0xf0000000, 0x80788478,
+       0xbf8cc07f, 0xb9eef814,
        0xf4211bba, 0xf0000000,
        0x80788478, 0xbf8cc07f,
-       0xb9eef814, 0xf4211bba,
-       0xf0000000, 0x80788478,
-       0xbf8cc07f, 0xb9eef815,
-       0xbefc036f, 0xbefe0370,
-       0xbeff0371, 0x876f7bff,
-       0x000003ff, 0xb9ef4803,
-       0x876f7bff, 0xfffff800,
-       0x906f8b6f, 0xb9efa2c3,
-       0xb9f3f801, 0xb96e2a05,
-       0x806e816e, 0xbf0d9972,
-       0xbf850002, 0x8f6e896e,
-       0xbf820001, 0x8f6e8a6e,
+       0xb9eef815, 0xbefc036f,
+       0xbefe0370, 0xbeff0371,
+       0x876f7bff, 0x000003ff,
+       0xb9ef4803, 0x876f7bff,
+       0xfffff800, 0x906f8b6f,
+       0xb9efa2c3, 0xb9f3f801,
+       0xb96e2a05, 0x806e816e,
+       0xbf0d9972, 0xbf850002,
+       0x8f6e896e, 0xbf820001,
+       0x8f6e8a6e, 0xb96f1e06,
+       0x8f6f8a6f, 0x806e6f6e,
        0x806eff6e, 0x00000200,
        0x806e746e, 0x826f8075,
        0x876fff6f, 0x0000ffff,
@@ -2463,3 +2494,440 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
        0xbf9f0000, 0xbf9f0000,
        0xbf9f0000, 0x00000000,
 };
+
+static const uint32_t cwsr_trap_gfx11_hex[] = {
+       0xbfa00001, 0xbfa0021b,
+       0xb0804006, 0xb8f8f802,
+       0x91788678, 0xb8fbf803,
+       0x8b6eff78, 0x00002000,
+       0xbfa10009, 0x8b6eff6d,
+       0x00ff0000, 0xbfa2001e,
+       0x8b6eff7b, 0x00000400,
+       0xbfa20041, 0xbf830010,
+       0xb8fbf803, 0xbfa0fffa,
+       0x8b6eff7b, 0x00000900,
+       0xbfa20015, 0x8b6eff7b,
+       0x000071ff, 0xbfa10008,
+       0x8b6fff7b, 0x00007080,
+       0xbfa10001, 0xbeee1287,
+       0xb8eff801, 0x846e8c6e,
+       0x8b6e6f6e, 0xbfa2000a,
+       0x8b6eff6d, 0x00ff0000,
+       0xbfa20007, 0xb8eef801,
+       0x8b6eff6e, 0x00000800,
+       0xbfa20003, 0x8b6eff7b,
+       0x00000400, 0xbfa20026,
+       0xbefa4d82, 0xbf89fc07,
+       0x84fa887a, 0xf4005bbd,
+       0xf8000010, 0xbf89fc07,
+       0x846e976e, 0x9177ff77,
+       0x00800000, 0x8c776e77,
+       0xf4045bbd, 0xf8000000,
+       0xbf89fc07, 0xf4045ebd,
+       0xf8000008, 0xbf89fc07,
+       0x8bee6e6e, 0xbfa10001,
+       0xbe80486e, 0x8b6eff6d,
+       0x01ff0000, 0xbfa20005,
+       0x8c78ff78, 0x00002000,
+       0x80ec886c, 0x82ed806d,
+       0xbfa00005, 0x8b6eff6d,
+       0x01000000, 0xbfa20002,
+       0x806c846c, 0x826d806d,
+       0x8b6dff6d, 0x0000ffff,
+       0x8bfe7e7e, 0x8bea6a6a,
+       0xb978f802, 0xbe804a6c,
+       0x8b6dff6d, 0x0000ffff,
+       0xbefa0080, 0xb97a0283,
+       0xbeee007e, 0xbeef007f,
+       0xbefe0180, 0xbefe4d84,
+       0xbf89fc07, 0x8b7aff7f,
+       0x04000000, 0x847a857a,
+       0x8c6d7a6d, 0xbefa007e,
+       0x8b7bff7f, 0x0000ffff,
+       0xbefe00c1, 0xbeff00c1,
+       0xdca6c000, 0x007a0000,
+       0x7e000280, 0xbefe007a,
+       0xbeff007b, 0xb8fb02dc,
+       0x847b997b, 0xb8fa3b05,
+       0x807a817a, 0xbf0d997b,
+       0xbfa20002, 0x847a897a,
+       0xbfa00001, 0x847a8a7a,
+       0xb8fb1e06, 0x847b8a7b,
+       0x807a7b7a, 0x8b7bff7f,
+       0x0000ffff, 0x807aff7a,
+       0x00000200, 0x807a7e7a,
+       0x827b807b, 0xd7610000,
+       0x00010870, 0xd7610000,
+       0x00010a71, 0xd7610000,
+       0x00010c72, 0xd7610000,
+       0x00010e73, 0xd7610000,
+       0x00011074, 0xd7610000,
+       0x00011275, 0xd7610000,
+       0x00011476, 0xd7610000,
+       0x00011677, 0xd7610000,
+       0x00011a79, 0xd7610000,
+       0x00011c7e, 0xd7610000,
+       0x00011e7f, 0xbefe00ff,
+       0x00003fff, 0xbeff0080,
+       0xdca6c040, 0x007a0000,
+       0xd760007a, 0x00011d00,
+       0xd760007b, 0x00011f00,
+       0xbefe007a, 0xbeff007b,
+       0xbef4007e, 0x8b75ff7f,
+       0x0000ffff, 0x8c75ff75,
+       0x00040000, 0xbef60080,
+       0xbef700ff, 0x10807fac,
+       0xbef1007d, 0xbef00080,
+       0xb8f302dc, 0x84739973,
+       0xbefe00c1, 0x857d9973,
+       0x8b7d817d, 0xbf06817d,
+       0xbfa20002, 0xbeff0080,
+       0xbfa00002, 0xbeff00c1,
+       0xbfa00009, 0xbef600ff,
+       0x01000000, 0xe0685080,
+       0x701d0100, 0xe0685100,
+       0x701d0200, 0xe0685180,
+       0x701d0300, 0xbfa00008,
+       0xbef600ff, 0x01000000,
+       0xe0685100, 0x701d0100,
+       0xe0685200, 0x701d0200,
+       0xe0685300, 0x701d0300,
+       0xb8f03b05, 0x80708170,
+       0xbf0d9973, 0xbfa20002,
+       0x84708970, 0xbfa00001,
+       0x84708a70, 0xb8fa1e06,
+       0x847a8a7a, 0x80707a70,
+       0x8070ff70, 0x00000200,
+       0xbef600ff, 0x01000000,
+       0x7e000280, 0x7e020280,
+       0x7e040280, 0xbefd0080,
+       0xd7610002, 0x0000fa71,
+       0x807d817d, 0xd7610002,
+       0x0000fa6c, 0x807d817d,
+       0x917aff6d, 0x80000000,
+       0xd7610002, 0x0000fa7a,
+       0x807d817d, 0xd7610002,
+       0x0000fa6e, 0x807d817d,
+       0xd7610002, 0x0000fa6f,
+       0x807d817d, 0xd7610002,
+       0x0000fa78, 0x807d817d,
+       0xb8faf803, 0xd7610002,
+       0x0000fa7a, 0x807d817d,
+       0xd7610002, 0x0000fa7b,
+       0x807d817d, 0xb8f1f801,
+       0xd7610002, 0x0000fa71,
+       0x807d817d, 0xb8f1f814,
+       0xd7610002, 0x0000fa71,
+       0x807d817d, 0xb8f1f815,
+       0xd7610002, 0x0000fa71,
+       0x807d817d, 0xbefe00ff,
+       0x0000ffff, 0xbeff0080,
+       0xe0685000, 0x701d0200,
+       0xbefe00c1, 0xb8f03b05,
+       0x80708170, 0xbf0d9973,
+       0xbfa20002, 0x84708970,
+       0xbfa00001, 0x84708a70,
+       0xb8fa1e06, 0x847a8a7a,
+       0x80707a70, 0xbef600ff,
+       0x01000000, 0xbef90080,
+       0xbefd0080, 0xbf800000,
+       0xbe804100, 0xbe824102,
+       0xbe844104, 0xbe864106,
+       0xbe884108, 0xbe8a410a,
+       0xbe8c410c, 0xbe8e410e,
+       0xd7610002, 0x0000f200,
+       0x80798179, 0xd7610002,
+       0x0000f201, 0x80798179,
+       0xd7610002, 0x0000f202,
+       0x80798179, 0xd7610002,
+       0x0000f203, 0x80798179,
+       0xd7610002, 0x0000f204,
+       0x80798179, 0xd7610002,
+       0x0000f205, 0x80798179,
+       0xd7610002, 0x0000f206,
+       0x80798179, 0xd7610002,
+       0x0000f207, 0x80798179,
+       0xd7610002, 0x0000f208,
+       0x80798179, 0xd7610002,
+       0x0000f209, 0x80798179,
+       0xd7610002, 0x0000f20a,
+       0x80798179, 0xd7610002,
+       0x0000f20b, 0x80798179,
+       0xd7610002, 0x0000f20c,
+       0x80798179, 0xd7610002,
+       0x0000f20d, 0x80798179,
+       0xd7610002, 0x0000f20e,
+       0x80798179, 0xd7610002,
+       0x0000f20f, 0x80798179,
+       0xbf06a079, 0xbfa10006,
+       0xe0685000, 0x701d0200,
+       0x8070ff70, 0x00000080,
+       0xbef90080, 0x7e040280,
+       0x807d907d, 0xbf0aff7d,
+       0x00000060, 0xbfa2ffbc,
+       0xbe804100, 0xbe824102,
+       0xbe844104, 0xbe864106,
+       0xbe884108, 0xbe8a410a,
+       0xd7610002, 0x0000f200,
+       0x80798179, 0xd7610002,
+       0x0000f201, 0x80798179,
+       0xd7610002, 0x0000f202,
+       0x80798179, 0xd7610002,
+       0x0000f203, 0x80798179,
+       0xd7610002, 0x0000f204,
+       0x80798179, 0xd7610002,
+       0x0000f205, 0x80798179,
+       0xd7610002, 0x0000f206,
+       0x80798179, 0xd7610002,
+       0x0000f207, 0x80798179,
+       0xd7610002, 0x0000f208,
+       0x80798179, 0xd7610002,
+       0x0000f209, 0x80798179,
+       0xd7610002, 0x0000f20a,
+       0x80798179, 0xd7610002,
+       0x0000f20b, 0x80798179,
+       0xe0685000, 0x701d0200,
+       0xbefe00c1, 0x857d9973,
+       0x8b7d817d, 0xbf06817d,
+       0xbfa20002, 0xbeff0080,
+       0xbfa00001, 0xbeff00c1,
+       0xb8fb4306, 0x8b7bc17b,
+       0xbfa10044, 0xbfbd0000,
+       0x8b7aff6d, 0x80000000,
+       0xbfa10040, 0x847b867b,
+       0x847b827b, 0xbef6007b,
+       0xb8f03b05, 0x80708170,
+       0xbf0d9973, 0xbfa20002,
+       0x84708970, 0xbfa00001,
+       0x84708a70, 0xb8fa1e06,
+       0x847a8a7a, 0x80707a70,
+       0x8070ff70, 0x00000200,
+       0x8070ff70, 0x00000080,
+       0xbef600ff, 0x01000000,
+       0xd71f0000, 0x000100c1,
+       0xd7200000, 0x000200c1,
+       0x16000084, 0x857d9973,
+       0x8b7d817d, 0xbf06817d,
+       0xbefd0080, 0xbfa20012,
+       0xbe8300ff, 0x00000080,
+       0xbf800000, 0xbf800000,
+       0xbf800000, 0xd8d80000,
+       0x01000000, 0xbf890000,
+       0xe0685000, 0x701d0100,
+       0x807d037d, 0x80700370,
+       0xd5250000, 0x0001ff00,
+       0x00000080, 0xbf0a7b7d,
+       0xbfa2fff4, 0xbfa00011,
+       0xbe8300ff, 0x00000100,
+       0xbf800000, 0xbf800000,
+       0xbf800000, 0xd8d80000,
+       0x01000000, 0xbf890000,
+       0xe0685000, 0x701d0100,
+       0x807d037d, 0x80700370,
+       0xd5250000, 0x0001ff00,
+       0x00000100, 0xbf0a7b7d,
+       0xbfa2fff4, 0xbefe00c1,
+       0x857d9973, 0x8b7d817d,
+       0xbf06817d, 0xbfa20004,
+       0xbef000ff, 0x00000200,
+       0xbeff0080, 0xbfa00003,
+       0xbef000ff, 0x00000400,
+       0xbeff00c1, 0xb8fb3b05,
+       0x807b817b, 0x847b827b,
+       0x857d9973, 0x8b7d817d,
+       0xbf06817d, 0xbfa20017,
+       0xbef600ff, 0x01000000,
+       0xbefd0084, 0xbf0a7b7d,
+       0xbfa10037, 0x7e008700,
+       0x7e028701, 0x7e048702,
+       0x7e068703, 0xe0685000,
+       0x701d0000, 0xe0685080,
+       0x701d0100, 0xe0685100,
+       0x701d0200, 0xe0685180,
+       0x701d0300, 0x807d847d,
+       0x8070ff70, 0x00000200,
+       0xbf0a7b7d, 0xbfa2ffef,
+       0xbfa00025, 0xbef600ff,
+       0x01000000, 0xbefd0084,
+       0xbf0a7b7d, 0xbfa10011,
+       0x7e008700, 0x7e028701,
+       0x7e048702, 0x7e068703,
+       0xe0685000, 0x701d0000,
+       0xe0685100, 0x701d0100,
+       0xe0685200, 0x701d0200,
+       0xe0685300, 0x701d0300,
+       0x807d847d, 0x8070ff70,
+       0x00000400, 0xbf0a7b7d,
+       0xbfa2ffef, 0xb8fb1e06,
+       0x8b7bc17b, 0xbfa1000c,
+       0x847b837b, 0x807b7d7b,
+       0xbefe00c1, 0xbeff0080,
+       0x7e008700, 0xe0685000,
+       0x701d0000, 0x807d817d,
+       0x8070ff70, 0x00000080,
+       0xbf0a7b7d, 0xbfa2fff8,
+       0xbfa00141, 0xbef4007e,
+       0x8b75ff7f, 0x0000ffff,
+       0x8c75ff75, 0x00040000,
+       0xbef60080, 0xbef700ff,
+       0x10807fac, 0xb8f202dc,
+       0x84729972, 0x8b6eff7f,
+       0x04000000, 0xbfa1003a,
+       0xbefe00c1, 0x857d9972,
+       0x8b7d817d, 0xbf06817d,
+       0xbfa20002, 0xbeff0080,
+       0xbfa00001, 0xbeff00c1,
+       0xb8ef4306, 0x8b6fc16f,
+       0xbfa1002f, 0x846f866f,
+       0x846f826f, 0xbef6006f,
+       0xb8f83b05, 0x80788178,
+       0xbf0d9972, 0xbfa20002,
+       0x84788978, 0xbfa00001,
+       0x84788a78, 0xb8ee1e06,
+       0x846e8a6e, 0x80786e78,
+       0x8078ff78, 0x00000200,
+       0x8078ff78, 0x00000080,
+       0xbef600ff, 0x01000000,
+       0x857d9972, 0x8b7d817d,
+       0xbf06817d, 0xbefd0080,
+       0xbfa2000c, 0xe0500000,
+       0x781d0000, 0xbf8903f7,
+       0xdac00000, 0x00000000,
+       0x807dff7d, 0x00000080,
+       0x8078ff78, 0x00000080,
+       0xbf0a6f7d, 0xbfa2fff5,
+       0xbfa0000b, 0xe0500000,
+       0x781d0000, 0xbf8903f7,
+       0xdac00000, 0x00000000,
+       0x807dff7d, 0x00000100,
+       0x8078ff78, 0x00000100,
+       0xbf0a6f7d, 0xbfa2fff5,
+       0xbef80080, 0xbefe00c1,
+       0x857d9972, 0x8b7d817d,
+       0xbf06817d, 0xbfa20002,
+       0xbeff0080, 0xbfa00001,
+       0xbeff00c1, 0xb8ef3b05,
+       0x806f816f, 0x846f826f,
+       0x857d9972, 0x8b7d817d,
+       0xbf06817d, 0xbfa20024,
+       0xbef600ff, 0x01000000,
+       0xbeee0078, 0x8078ff78,
+       0x00000200, 0xbefd0084,
+       0xbf0a6f7d, 0xbfa10050,
+       0xe0505000, 0x781d0000,
+       0xe0505080, 0x781d0100,
+       0xe0505100, 0x781d0200,
+       0xe0505180, 0x781d0300,
+       0xbf8903f7, 0x7e008500,
+       0x7e028501, 0x7e048502,
+       0x7e068503, 0x807d847d,
+       0x8078ff78, 0x00000200,
+       0xbf0a6f7d, 0xbfa2ffee,
+       0xe0505000, 0x6e1d0000,
+       0xe0505080, 0x6e1d0100,
+       0xe0505100, 0x6e1d0200,
+       0xe0505180, 0x6e1d0300,
+       0xbf8903f7, 0xbfa00034,
+       0xbef600ff, 0x01000000,
+       0xbeee0078, 0x8078ff78,
+       0x00000400, 0xbefd0084,
+       0xbf0a6f7d, 0xbfa10012,
+       0xe0505000, 0x781d0000,
+       0xe0505100, 0x781d0100,
+       0xe0505200, 0x781d0200,
+       0xe0505300, 0x781d0300,
+       0xbf8903f7, 0x7e008500,
+       0x7e028501, 0x7e048502,
+       0x7e068503, 0x807d847d,
+       0x8078ff78, 0x00000400,
+       0xbf0a6f7d, 0xbfa2ffee,
+       0xb8ef1e06, 0x8b6fc16f,
+       0xbfa1000e, 0x846f836f,
+       0x806f7d6f, 0xbefe00c1,
+       0xbeff0080, 0xe0505000,
+       0x781d0000, 0xbf8903f7,
+       0x7e008500, 0x807d817d,
+       0x8078ff78, 0x00000080,
+       0xbf0a6f7d, 0xbfa2fff7,
+       0xbeff00c1, 0xe0505000,
+       0x6e1d0000, 0xe0505100,
+       0x6e1d0100, 0xe0505200,
+       0x6e1d0200, 0xe0505300,
+       0x6e1d0300, 0xbf8903f7,
+       0xb8f83b05, 0x80788178,
+       0xbf0d9972, 0xbfa20002,
+       0x84788978, 0xbfa00001,
+       0x84788a78, 0xb8ee1e06,
+       0x846e8a6e, 0x80786e78,
+       0x8078ff78, 0x00000200,
+       0x80f8ff78, 0x00000050,
+       0xbef600ff, 0x01000000,
+       0xbefd00ff, 0x0000006c,
+       0x80f89078, 0xf428403a,
+       0xf0000000, 0xbf89fc07,
+       0x80fd847d, 0xbf800000,
+       0xbe804300, 0xbe824302,
+       0x80f8a078, 0xf42c403a,
+       0xf0000000, 0xbf89fc07,
+       0x80fd887d, 0xbf800000,
+       0xbe804300, 0xbe824302,
+       0xbe844304, 0xbe864306,
+       0x80f8c078, 0xf430403a,
+       0xf0000000, 0xbf89fc07,
+       0x80fd907d, 0xbf800000,
+       0xbe804300, 0xbe824302,
+       0xbe844304, 0xbe864306,
+       0xbe884308, 0xbe8a430a,
+       0xbe8c430c, 0xbe8e430e,
+       0xbf06807d, 0xbfa1fff0,
+       0xb980f801, 0x00000000,
+       0xbfbd0000, 0xb8f83b05,
+       0x80788178, 0xbf0d9972,
+       0xbfa20002, 0x84788978,
+       0xbfa00001, 0x84788a78,
+       0xb8ee1e06, 0x846e8a6e,
+       0x80786e78, 0x8078ff78,
+       0x00000200, 0xbef600ff,
+       0x01000000, 0xf4205bfa,
+       0xf0000000, 0x80788478,
+       0xf4205b3a, 0xf0000000,
+       0x80788478, 0xf4205b7a,
+       0xf0000000, 0x80788478,
+       0xf4205c3a, 0xf0000000,
+       0x80788478, 0xf4205c7a,
+       0xf0000000, 0x80788478,
+       0xf4205eba, 0xf0000000,
+       0x80788478, 0xf4205efa,
+       0xf0000000, 0x80788478,
+       0xf4205e7a, 0xf0000000,
+       0x80788478, 0xf4205cfa,
+       0xf0000000, 0x80788478,
+       0xf4205bba, 0xf0000000,
+       0x80788478, 0xbf89fc07,
+       0xb96ef814, 0xf4205bba,
+       0xf0000000, 0x80788478,
+       0xbf89fc07, 0xb96ef815,
+       0xbefd006f, 0xbefe0070,
+       0xbeff0071, 0x8b6f7bff,
+       0x000003ff, 0xb96f4803,
+       0x8b6f7bff, 0xfffff800,
+       0x856f8b6f, 0xb96fa2c3,
+       0xb973f801, 0xb8ee3b05,
+       0x806e816e, 0xbf0d9972,
+       0xbfa20002, 0x846e896e,
+       0xbfa00001, 0x846e8a6e,
+       0xb8ef1e06, 0x846f8a6f,
+       0x806e6f6e, 0x806eff6e,
+       0x00000200, 0x806e746e,
+       0x826f8075, 0x8b6fff6f,
+       0x0000ffff, 0xf4085c37,
+       0xf8000050, 0xf4085d37,
+       0xf8000060, 0xf4005e77,
+       0xf8000074, 0xbf89fc07,
+       0x8b6dff6d, 0x0000ffff,
+       0x8bfe7e7e, 0x8bea6a6a,
+       0xb97af802, 0xbe804a6c,
+       0xbfb00000, 0xbf9f0000,
+       0xbf9f0000, 0xbf9f0000,
+       0xbf9f0000, 0xbf9f0000,
+};
index 5081f91..250ab00 100644
 /* To compile this assembly code:
  *
  * Navi1x:
- *   cpp -DASIC_TARGET_NAVI1X=1 cwsr_trap_handler_gfx10.asm -P -o nv1x.sp3
- *   sp3-nv1x nv1x.sp3 -hex nv1x.hex
+ *   cpp -DASIC_FAMILY=CHIP_NAVI10 cwsr_trap_handler_gfx10.asm -P -o nv1x.sp3
+ *   sp3 nv1x.sp3 -hex nv1x.hex
  *
- * Others:
- *   cpp -DASIC_TARGET_NAVI1X=0 cwsr_trap_handler_gfx10.asm -P -o gfx10.sp3
- *   sp3-gfx10 gfx10.sp3 -hex gfx10.hex
+ * gfx10:
+ *   cpp -DASIC_FAMILY=CHIP_SIENNA_CICHLID cwsr_trap_handler_gfx10.asm -P -o gfx10.sp3
+ *   sp3 gfx10.sp3 -hex gfx10.hex
+ *
+ * gfx11:
+ *   cpp -DASIC_FAMILY=CHIP_PLUM_BONITO cwsr_trap_handler_gfx10.asm -P -o gfx11.sp3
+ *   sp3 gfx11.sp3 -hex gfx11.hex
  */
 
-#define NO_SQC_STORE !ASIC_TARGET_NAVI1X
+#define CHIP_NAVI10 26
+#define CHIP_SIENNA_CICHLID 30
+#define CHIP_PLUM_BONITO 36
+
+#define NO_SQC_STORE (ASIC_FAMILY >= CHIP_SIENNA_CICHLID)
+#define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID)
+#define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO)
+#define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO)
 
 var SINGLE_STEP_MISSED_WORKAROUND              = 1     //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
 
-var SQ_WAVE_STATUS_INST_ATC_SHIFT              = 23
-var SQ_WAVE_STATUS_INST_ATC_MASK               = 0x00800000
 var SQ_WAVE_STATUS_SPI_PRIO_MASK               = 0x00000006
 var SQ_WAVE_STATUS_HALT_MASK                   = 0x2000
+var SQ_WAVE_STATUS_ECC_ERR_MASK                        = 0x20000
 
 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT           = 12
 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE            = 9
-var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT          = 8
-var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE           = 6
-var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT          = 24
-var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE           = 4
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE           = 8
 var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT   = 24
 var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE    = 4
 var SQ_WAVE_IB_STS2_WAVE64_SHIFT               = 11
 var SQ_WAVE_IB_STS2_WAVE64_SIZE                        = 1
 
+#if ASIC_FAMILY < CHIP_PLUM_BONITO
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT          = 8
+#else
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT          = 12
+#endif
+
 var SQ_WAVE_TRAPSTS_SAVECTX_MASK               = 0x400
-var SQ_WAVE_TRAPSTS_EXCE_MASK                  = 0x1FF
+var SQ_WAVE_TRAPSTS_EXCP_MASK                  = 0x1FF
 var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT              = 10
+var SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK            = 0x80
+var SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT           = 7
 var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK              = 0x100
 var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT             = 8
 var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK           = 0x3FF
@@ -63,46 +78,37 @@ var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK               = 0xFFFFF800
 var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT         = 11
 var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE          = 21
 var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK          = 0x800
+var SQ_WAVE_TRAPSTS_EXCP_HI_MASK               = 0x7000
+
+var SQ_WAVE_MODE_EXCP_EN_SHIFT                 = 12
+var SQ_WAVE_MODE_EXCP_EN_ADDR_WATCH_SHIFT      = 19
 
-var SQ_WAVE_IB_STS_RCNT_SHIFT                  = 16
 var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT          = 15
 var SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT           = 25
-var SQ_WAVE_IB_STS_REPLAY_W64H_SIZE            = 1
 var SQ_WAVE_IB_STS_REPLAY_W64H_MASK            = 0x02000000
-var SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE           = 1
-var SQ_WAVE_IB_STS_RCNT_SIZE                   = 6
 var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK      = 0x003F8000
-var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG  = 0x00007FFF
 
 var SQ_WAVE_MODE_DEBUG_EN_MASK                 = 0x800
 
-var SQ_BUF_RSRC_WORD1_ATC_SHIFT                        = 24
-var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT              = 27
-
 // bits [31:24] unused by SPI debug data
 var TTMP11_SAVE_REPLAY_W64H_SHIFT              = 31
 var TTMP11_SAVE_REPLAY_W64H_MASK               = 0x80000000
 var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT                = 24
 var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK         = 0x7F000000
+var TTMP11_DEBUG_TRAP_ENABLED_SHIFT            = 23
+var TTMP11_DEBUG_TRAP_ENABLED_MASK             = 0x800000
 
 // SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14]
 // when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
 var S_SAVE_BUF_RSRC_WORD1_STRIDE               = 0x00040000
 var S_SAVE_BUF_RSRC_WORD3_MISC                 = 0x10807FAC
-
-var S_SAVE_SPI_INIT_ATC_MASK                   = 0x08000000
-var S_SAVE_SPI_INIT_ATC_SHIFT                  = 27
-var S_SAVE_SPI_INIT_MTYPE_MASK                 = 0x70000000
-var S_SAVE_SPI_INIT_MTYPE_SHIFT                        = 28
+var S_SAVE_PC_HI_TRAP_ID_MASK                  = 0x00FF0000
+var S_SAVE_PC_HI_HT_MASK                       = 0x01000000
 var S_SAVE_SPI_INIT_FIRST_WAVE_MASK            = 0x04000000
 var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT           = 26
 
-var S_SAVE_PC_HI_RCNT_SHIFT                    = 26
-var S_SAVE_PC_HI_RCNT_MASK                     = 0xFC000000
-var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT            = 25
-var S_SAVE_PC_HI_FIRST_REPLAY_MASK             = 0x02000000
-var S_SAVE_PC_HI_REPLAY_W64H_SHIFT             = 24
-var S_SAVE_PC_HI_REPLAY_W64H_MASK              = 0x01000000
+var S_SAVE_PC_HI_FIRST_WAVE_MASK               = 0x80000000
+var S_SAVE_PC_HI_FIRST_WAVE_SHIFT              = 31
 
 var s_sgpr_save_num                            = 108
 
@@ -130,19 +136,10 @@ var s_save_ttmps_hi                               = s_save_trapsts
 var S_RESTORE_BUF_RSRC_WORD1_STRIDE            = S_SAVE_BUF_RSRC_WORD1_STRIDE
 var S_RESTORE_BUF_RSRC_WORD3_MISC              = S_SAVE_BUF_RSRC_WORD3_MISC
 
-var S_RESTORE_SPI_INIT_ATC_MASK                        = 0x08000000
-var S_RESTORE_SPI_INIT_ATC_SHIFT               = 27
-var S_RESTORE_SPI_INIT_MTYPE_MASK              = 0x70000000
-var S_RESTORE_SPI_INIT_MTYPE_SHIFT             = 28
 var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK         = 0x04000000
 var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT                = 26
 var S_WAVE_SIZE                                        = 25
 
-var S_RESTORE_PC_HI_RCNT_SHIFT                 = S_SAVE_PC_HI_RCNT_SHIFT
-var S_RESTORE_PC_HI_RCNT_MASK                  = S_SAVE_PC_HI_RCNT_MASK
-var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT         = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
-var S_RESTORE_PC_HI_FIRST_REPLAY_MASK          = S_SAVE_PC_HI_FIRST_REPLAY_MASK
-
 var s_restore_spi_init_lo                      = exec_lo
 var s_restore_spi_init_hi                      = exec_hi
 var s_restore_mem_offset                       = ttmp12
@@ -179,84 +176,133 @@ L_JUMP_TO_RESTORE:
 
 L_SKIP_RESTORE:
        s_getreg_b32    s_save_status, hwreg(HW_REG_STATUS)                     //save STATUS since we will change SCC
-       s_andn2_b32     s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK
 
-if SINGLE_STEP_MISSED_WORKAROUND
-       // No single step exceptions if MODE.DEBUG_EN=0.
-       s_getreg_b32    ttmp2, hwreg(HW_REG_MODE)
-       s_and_b32       ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
-       s_cbranch_scc0  L_NO_SINGLE_STEP_WORKAROUND
+       // Clear SPI_PRIO: do not save with elevated priority.
+       // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd.
+       s_andn2_b32     s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK|SQ_WAVE_STATUS_ECC_ERR_MASK
+
+       s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
 
-       // Second-level trap already handled exception if STATUS.HALT=1.
        s_and_b32       ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+       s_cbranch_scc0  L_NOT_HALTED
+
+L_HALTED:
+       // Host trap may occur while wave is halted.
+       s_and_b32       ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+       s_cbranch_scc1  L_FETCH_2ND_TRAP
 
+L_CHECK_SAVE:
+       s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK
+       s_cbranch_scc1  L_SAVE
+
+       // Wave is halted but neither host trap nor SAVECTX is raised.
+       // Caused by instruction fetch memory violation.
+       // Spin wait until context saved to prevent interrupt storm.
+       s_sleep         0x10
+       s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+       s_branch        L_CHECK_SAVE
+
+L_NOT_HALTED:
+       // Let second-level handle non-SAVECTX exception or trap.
+       // Any concurrent SAVECTX will be handled upon re-entry once halted.
+
+       // Check non-maskable exceptions. memory_violation, illegal_instruction
+       // and xnack_error exceptions always cause the wave to enter the trap
+       // handler.
+       s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+       s_cbranch_scc1  L_FETCH_2ND_TRAP
+
+       // Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
+       // Maskable exceptions only cause the wave to enter the trap handler if
+       // their respective bit in mode.excp_en is set.
+       s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCP_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
+       s_cbranch_scc0  L_CHECK_TRAP_ID
+
+       s_and_b32       ttmp3, s_save_trapsts, SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
+       s_cbranch_scc0  L_NOT_ADDR_WATCH
+       s_bitset1_b32   ttmp2, SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT // Check all addr_watch[123] exceptions against excp_en.addr_watch
+
+L_NOT_ADDR_WATCH:
+       s_getreg_b32    ttmp3, hwreg(HW_REG_MODE)
+       s_lshl_b32      ttmp2, ttmp2, SQ_WAVE_MODE_EXCP_EN_SHIFT
+       s_and_b32       ttmp2, ttmp2, ttmp3
+       s_cbranch_scc1  L_FETCH_2ND_TRAP
+
+L_CHECK_TRAP_ID:
+       // Check trap_id != 0
+       s_and_b32       ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+       s_cbranch_scc1  L_FETCH_2ND_TRAP
+
+if SINGLE_STEP_MISSED_WORKAROUND
        // Prioritize single step exception over context save.
        // Second-level trap will halt wave and RFE, re-entering for SAVECTX.
-       s_cbranch_scc0  L_FETCH_2ND_TRAP
-
-L_NO_SINGLE_STEP_WORKAROUND:
+       s_getreg_b32    ttmp2, hwreg(HW_REG_MODE)
+       s_and_b32       ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
+       s_cbranch_scc1  L_FETCH_2ND_TRAP
 end
 
-
-       s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
-       s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK     //check whether this is for save
+       s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK
        s_cbranch_scc1  L_SAVE
 
-       // If STATUS.MEM_VIOL is asserted then halt the wave to prevent
-       // the exception raising again and blocking context save.
-       s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
-       s_cbranch_scc0  L_FETCH_2ND_TRAP
-       s_or_b32        s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
-
 L_FETCH_2ND_TRAP:
-
-#if ASIC_TARGET_NAVI1X
-       // Preserve and clear scalar XNACK state before issuing scalar loads.
-       // Save IB_STS.REPLAY_W64H[25], RCNT[21:16], FIRST_REPLAY[15] into
-       // unused space ttmp11[31:24].
-       s_andn2_b32     ttmp11, ttmp11, (TTMP11_SAVE_REPLAY_W64H_MASK | TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK)
-       s_getreg_b32    ttmp2, hwreg(HW_REG_IB_STS)
-       s_and_b32       ttmp3, ttmp2, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
-       s_lshl_b32      ttmp3, ttmp3, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
-       s_or_b32        ttmp11, ttmp11, ttmp3
-       s_and_b32       ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
-       s_lshl_b32      ttmp3, ttmp3, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
-       s_or_b32        ttmp11, ttmp11, ttmp3
-       s_andn2_b32     ttmp2, ttmp2, (SQ_WAVE_IB_STS_REPLAY_W64H_MASK | SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK)
-       s_setreg_b32    hwreg(HW_REG_IB_STS), ttmp2
+#if HAVE_XNACK
+       save_and_clear_ib_sts(ttmp14, ttmp15)
 #endif
 
        // Read second-level TBA/TMA from first-level TMA and jump if available.
        // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
        // ttmp12 holds SQ_WAVE_STATUS
+#if HAVE_SENDMSG_RTN
+       s_sendmsg_rtn_b64       [ttmp14, ttmp15], sendmsg(MSG_RTN_GET_TMA)
+       s_waitcnt       lgkmcnt(0)
+#else
        s_getreg_b32    ttmp14, hwreg(HW_REG_SHADER_TMA_LO)
        s_getreg_b32    ttmp15, hwreg(HW_REG_SHADER_TMA_HI)
+#endif
        s_lshl_b64      [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+
+       s_load_dword    ttmp2, [ttmp14, ttmp15], 0x10 glc:1                     // debug trap enabled flag
+       s_waitcnt       lgkmcnt(0)
+       s_lshl_b32      ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT
+       s_andn2_b32     ttmp11, ttmp11, TTMP11_DEBUG_TRAP_ENABLED_MASK
+       s_or_b32        ttmp11, ttmp11, ttmp2
+
        s_load_dwordx2  [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 glc:1             // second-level TBA
        s_waitcnt       lgkmcnt(0)
        s_load_dwordx2  [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 glc:1           // second-level TMA
        s_waitcnt       lgkmcnt(0)
+
        s_and_b64       [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
        s_cbranch_scc0  L_NO_NEXT_TRAP                                          // second-level trap handler has not been set
        s_setpc_b64     [ttmp2, ttmp3]                                          // jump to second-level trap handler
 
 L_NO_NEXT_TRAP:
-       s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
-       s_and_b32       s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK
-       s_cbranch_scc1  L_EXCP_CASE                                             // Exception, jump back to the shader program directly.
-       s_add_u32       ttmp0, ttmp0, 4                                         // S_TRAP case, add 4 to ttmp0
-       s_addc_u32      ttmp1, ttmp1, 0
-L_EXCP_CASE:
+       // If not caused by trap then halt wave to prevent re-entry.
+       s_and_b32       ttmp2, s_save_pc_hi, (S_SAVE_PC_HI_TRAP_ID_MASK|S_SAVE_PC_HI_HT_MASK)
+       s_cbranch_scc1  L_TRAP_CASE
+       s_or_b32        s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+
+       // If the PC points to S_ENDPGM then context save will fail if STATUS.HALT is set.
+       // Rewind the PC to prevent this from occurring.
+       s_sub_u32       ttmp0, ttmp0, 0x8
+       s_subb_u32      ttmp1, ttmp1, 0x0
+
+       s_branch        L_EXIT_TRAP
+
+L_TRAP_CASE:
+       // Host trap will not cause trap re-entry.
+       s_and_b32       ttmp2, s_save_pc_hi, S_SAVE_PC_HI_HT_MASK
+       s_cbranch_scc1  L_EXIT_TRAP
+
+       // Advance past trap instruction to prevent re-entry.
+       s_add_u32       ttmp0, ttmp0, 0x4
+       s_addc_u32      ttmp1, ttmp1, 0x0
+
+L_EXIT_TRAP:
        s_and_b32       ttmp1, ttmp1, 0xFFFF
 
-#if ASIC_TARGET_NAVI1X
-       // Restore SQ_WAVE_IB_STS.
-       s_lshr_b32      ttmp2, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
-       s_and_b32       ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
-       s_lshr_b32      ttmp2, ttmp11, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
-       s_and_b32       ttmp2, ttmp2, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
-       s_or_b32        ttmp2, ttmp2, ttmp3
-       s_setreg_b32    hwreg(HW_REG_IB_STS), ttmp2
+#if HAVE_XNACK
+       restore_ib_sts(ttmp14, ttmp15)
 #endif
 
        // Restore SQ_WAVE_STATUS.
@@ -271,20 +317,8 @@ L_SAVE:
        s_mov_b32       s_save_tmp, 0
        s_setreg_b32    hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp     //clear saveCtx bit
 
-#if ASIC_TARGET_NAVI1X
-       s_getreg_b32    s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE)
-       s_lshl_b32      s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
-       s_or_b32        s_save_pc_hi, s_save_pc_hi, s_save_tmp
-       s_getreg_b32    s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE)
-       s_lshl_b32      s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
-       s_or_b32        s_save_pc_hi, s_save_pc_hi, s_save_tmp
-       s_getreg_b32    s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT, SQ_WAVE_IB_STS_REPLAY_W64H_SIZE)
-       s_lshl_b32      s_save_tmp, s_save_tmp, S_SAVE_PC_HI_REPLAY_W64H_SHIFT
-       s_or_b32        s_save_pc_hi, s_save_pc_hi, s_save_tmp
-       s_getreg_b32    s_save_tmp, hwreg(HW_REG_IB_STS)                        //clear RCNT and FIRST_REPLAY and REPLAY_W64H in IB_STS
-       s_and_b32       s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG
-
-       s_setreg_b32    hwreg(HW_REG_IB_STS), s_save_tmp
+#if HAVE_XNACK
+       save_and_clear_ib_sts(s_save_tmp, s_save_trapsts)
 #endif
 
        /* inform SPI of readiness and wait for SPI's go signal */
@@ -292,9 +326,13 @@ L_SAVE:
        s_mov_b32       s_save_exec_hi, exec_hi
        s_mov_b64       exec, 0x0                                               //clear EXEC to get ready to receive
 
+#if HAVE_SENDMSG_RTN
+       s_sendmsg_rtn_b64       [exec_lo, exec_hi], sendmsg(MSG_RTN_SAVE_WAVE)
+#else
        s_sendmsg       sendmsg(MSG_SAVEWAVE)                                   //send SPI a message and wait for SPI's write to EXEC
+#endif
 
-#if ASIC_TARGET_NAVI1X
+#if ASIC_FAMILY < CHIP_SIENNA_CICHLID
 L_SLEEP:
        // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause
        // SQ hang, since the 7th/8th wave could not get arbitration to exec inst, while
@@ -305,16 +343,57 @@ L_SLEEP:
        s_waitcnt       lgkmcnt(0)
 #endif
 
+       // Save first_wave flag so we can clear high bits of save address.
+       s_and_b32       s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
+       s_lshl_b32      s_save_tmp, s_save_tmp, (S_SAVE_PC_HI_FIRST_WAVE_SHIFT - S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT)
+       s_or_b32        s_save_pc_hi, s_save_pc_hi, s_save_tmp
+
+#if NO_SQC_STORE
+       // Trap temporaries must be saved via VGPR but all VGPRs are in use.
+       // There is no ttmp space to hold the resource constant for VGPR save.
+       // Save v0 by itself since it requires only two SGPRs.
+       s_mov_b32       s_save_ttmps_lo, exec_lo
+       s_and_b32       s_save_ttmps_hi, exec_hi, 0xFFFF
+       s_mov_b32       exec_lo, 0xFFFFFFFF
+       s_mov_b32       exec_hi, 0xFFFFFFFF
+       global_store_dword_addtid       v0, [s_save_ttmps_lo, s_save_ttmps_hi] slc:1 glc:1
+       v_mov_b32       v0, 0x0
+       s_mov_b32       exec_lo, s_save_ttmps_lo
+       s_mov_b32       exec_hi, s_save_ttmps_hi
+#endif
+
        // Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
-       // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
+       // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
        get_wave_size(s_save_ttmps_hi)
        get_vgpr_size_bytes(s_save_ttmps_lo, s_save_ttmps_hi)
+       get_svgpr_size_bytes(s_save_ttmps_hi)
+       s_add_u32       s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi
        s_and_b32       s_save_ttmps_hi, s_save_spi_init_hi, 0xFFFF
        s_add_u32       s_save_ttmps_lo, s_save_ttmps_lo, get_sgpr_size_bytes()
        s_add_u32       s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
        s_addc_u32      s_save_ttmps_hi, s_save_ttmps_hi, 0x0
 
-#if ASIC_TARGET_NAVI1X
+#if NO_SQC_STORE
+       v_writelane_b32 v0, ttmp4, 0x4
+       v_writelane_b32 v0, ttmp5, 0x5
+       v_writelane_b32 v0, ttmp6, 0x6
+       v_writelane_b32 v0, ttmp7, 0x7
+       v_writelane_b32 v0, ttmp8, 0x8
+       v_writelane_b32 v0, ttmp9, 0x9
+       v_writelane_b32 v0, ttmp10, 0xA
+       v_writelane_b32 v0, ttmp11, 0xB
+       v_writelane_b32 v0, ttmp13, 0xD
+       v_writelane_b32 v0, exec_lo, 0xE
+       v_writelane_b32 v0, exec_hi, 0xF
+
+       s_mov_b32       exec_lo, 0x3FFF
+       s_mov_b32       exec_hi, 0x0
+       global_store_dword_addtid       v0, [s_save_ttmps_lo, s_save_ttmps_hi] inst_offset:0x40 slc:1 glc:1
+       v_readlane_b32  ttmp14, v0, 0xE
+       v_readlane_b32  ttmp15, v0, 0xF
+       s_mov_b32       exec_lo, ttmp14
+       s_mov_b32       exec_hi, ttmp15
+#else
        s_store_dwordx4 [ttmp4, ttmp5, ttmp6, ttmp7], [s_save_ttmps_lo, s_save_ttmps_hi], 0x50 glc:1
        s_store_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_save_ttmps_lo, s_save_ttmps_hi], 0x60 glc:1
        s_store_dword   ttmp13, [s_save_ttmps_lo, s_save_ttmps_hi], 0x74 glc:1
@@ -326,12 +405,6 @@ L_SLEEP:
        s_or_b32        s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
        s_mov_b32       s_save_buf_rsrc2, 0                                     //NUM_RECORDS initial value = 0 (in bytes) although not necessarily inited
        s_mov_b32       s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
-       s_and_b32       s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK
-       s_lshr_b32      s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)
-       s_or_b32        s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp          //or ATC
-       s_and_b32       s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK
-       s_lshr_b32      s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)
-       s_or_b32        s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp          //or MTYPE
 
        s_mov_b32       s_save_m0, m0
 
@@ -339,7 +412,7 @@ L_SLEEP:
        s_mov_b32       s_save_mem_offset, 0x0
        get_wave_size(s_wave_size)
 
-#if ASIC_TARGET_NAVI1X
+#if HAVE_XNACK
        // Save and clear vector XNACK state late to free up SGPRs.
        s_getreg_b32    s_save_xnack_mask, hwreg(HW_REG_SHADER_XNACK_MASK)
        s_setreg_imm32_b32      hwreg(HW_REG_SHADER_XNACK_MASK), 0x0
@@ -361,7 +434,9 @@ L_SAVE_4VGPR_WAVE32:
 
        // VGPR Allocated in 4-GPR granularity
 
+#if !NO_SQC_STORE
        buffer_store_dword      v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+#endif
        buffer_store_dword      v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128
        buffer_store_dword      v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*2
        buffer_store_dword      v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*3
@@ -372,7 +447,9 @@ L_SAVE_4VGPR_WAVE64:
 
        // VGPR Allocated in 4-GPR granularity
 
+#if !NO_SQC_STORE
        buffer_store_dword      v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+#endif
        buffer_store_dword      v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
        buffer_store_dword      v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
        buffer_store_dword      v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
@@ -397,7 +474,8 @@ L_SAVE_HWREG:
 
        write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
        write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)
-       write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
+       s_andn2_b32     s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+       write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
        write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset)
        write_hwreg_to_mem(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset)
        write_hwreg_to_mem(s_save_status, s_save_buf_rsrc0, s_save_mem_offset)
@@ -418,9 +496,13 @@ L_SAVE_HWREG:
        write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
 
 #if NO_SQC_STORE
-       // Write HWREG/SGPRs with 32 VGPR lanes, wave32 is common case.
+       // Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
+       s_mov_b32       exec_lo, 0xFFFF
        s_mov_b32       exec_hi, 0x0
        buffer_store_dword      v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+
+       // Write SGPRs with 32 VGPR lanes. This works in wave32 and wave64 mode.
+       s_mov_b32       exec_lo, 0xFFFFFFFF
 #endif
 
        /* save SGPRs */
@@ -506,7 +588,7 @@ L_SAVE_LDS_NORMAL:
        s_cbranch_scc0  L_SAVE_LDS_DONE                                         //no LDS used? jump to L_SAVE_LDS_DONE
 
        s_barrier                                                               //LDS is used? wait for other waves in the same TG
-       s_and_b32       s_save_tmp, s_wave_size, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
+       s_and_b32       s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
        s_cbranch_scc0  L_SAVE_LDS_DONE
 
        // first wave does the LDS save;
@@ -628,7 +710,7 @@ L_SAVE_VGPR_WAVE64:
        // VGPR store using dw burst
        s_mov_b32       m0, 0x4                                                 //VGPR initial index value =4
        s_cmp_lt_u32    m0, s_save_alloc_size
-       s_cbranch_scc0  L_SAVE_VGPR_END
+       s_cbranch_scc0  L_SAVE_SHARED_VGPR
 
 L_SAVE_VGPR_W64_LOOP:
        v_movrels_b32   v0, v0                                                  //v0 = v[0+m0]
@@ -646,6 +728,7 @@ L_SAVE_VGPR_W64_LOOP:
        s_cmp_lt_u32    m0, s_save_alloc_size                                   //scc = (m0 < s_save_alloc_size) ? 1 : 0
        s_cbranch_scc1  L_SAVE_VGPR_W64_LOOP                                    //VGPR save is complete?
 
+L_SAVE_SHARED_VGPR:
        //Below part will be the save shared vgpr part (new for gfx10)
        s_getreg_b32    s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
        s_and_b32       s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF        //shared_vgpr_size is zero?
@@ -674,12 +757,7 @@ L_RESTORE:
        s_or_b32        s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
        s_mov_b32       s_restore_buf_rsrc2, 0                                  //NUM_RECORDS initial value = 0 (in bytes)
        s_mov_b32       s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
-       s_and_b32       s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK
-       s_lshr_b32      s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)
-       s_or_b32        s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC
-       s_and_b32       s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK
-       s_lshr_b32      s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)
-       s_or_b32        s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE
+
        //determine whether it is wave32 or wave64
        get_wave_size(s_restore_size)
 
@@ -722,7 +800,13 @@ L_RESTORE_LDS_NORMAL:
        s_cbranch_scc1  L_RESTORE_LDS_LOOP_W64
 
 L_RESTORE_LDS_LOOP_W32:
+#if HAVE_BUFFER_LDS_LOAD
        buffer_load_dword       v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
+#else
+       buffer_load_dword       v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
+       s_waitcnt       vmcnt(0)
+       ds_store_addtid_b32     v0
+#endif
        s_add_u32       m0, m0, 128                                             // 128 DW
        s_add_u32       s_restore_mem_offset, s_restore_mem_offset, 128         //mem offset increased by 128DW
        s_cmp_lt_u32    m0, s_restore_alloc_size                                //scc=(m0 < s_restore_alloc_size) ? 1 : 0
@@ -730,7 +814,13 @@ L_RESTORE_LDS_LOOP_W32:
        s_branch        L_RESTORE_VGPR
 
 L_RESTORE_LDS_LOOP_W64:
+#if HAVE_BUFFER_LDS_LOAD
        buffer_load_dword       v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
+#else
+       buffer_load_dword       v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
+       s_waitcnt       vmcnt(0)
+       ds_store_addtid_b32     v0
+#endif
        s_add_u32       m0, m0, 256                                             // 256 DW
        s_add_u32       s_restore_mem_offset, s_restore_mem_offset, 256         //mem offset increased by 256DW
        s_cmp_lt_u32    m0, s_restore_alloc_size                                //scc=(m0 < s_restore_alloc_size) ? 1 : 0
@@ -765,6 +855,8 @@ L_RESTORE_VGPR_NORMAL:
        s_mov_b32       s_restore_mem_offset_save, s_restore_mem_offset         // restore start with v1, v0 will be the last
        s_add_u32       s_restore_mem_offset, s_restore_mem_offset, 128*4
        s_mov_b32       m0, 4                                                   //VGPR initial index value = 4
+       s_cmp_lt_u32    m0, s_restore_alloc_size
+       s_cbranch_scc0  L_RESTORE_SGPR
 
 L_RESTORE_VGPR_WAVE32_LOOP:
        buffer_load_dword       v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
@@ -786,6 +878,7 @@ L_RESTORE_VGPR_WAVE32_LOOP:
        buffer_load_dword       v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128
        buffer_load_dword       v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*2
        buffer_load_dword       v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*3
+       s_waitcnt       vmcnt(0)
 
        s_branch        L_RESTORE_SGPR
 
@@ -796,6 +889,8 @@ L_RESTORE_VGPR_WAVE64:
        s_mov_b32       s_restore_mem_offset_save, s_restore_mem_offset         // restore start with v4, v0 will be the last
        s_add_u32       s_restore_mem_offset, s_restore_mem_offset, 256*4
        s_mov_b32       m0, 4                                                   //VGPR initial index value = 4
+       s_cmp_lt_u32    m0, s_restore_alloc_size
+       s_cbranch_scc0  L_RESTORE_SHARED_VGPR
 
 L_RESTORE_VGPR_WAVE64_LOOP:
        buffer_load_dword       v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
@@ -812,6 +907,7 @@ L_RESTORE_VGPR_WAVE64_LOOP:
        s_cmp_lt_u32    m0, s_restore_alloc_size                                //scc = (m0 < s_restore_alloc_size) ? 1 : 0
        s_cbranch_scc1  L_RESTORE_VGPR_WAVE64_LOOP                              //VGPR restore (except v0) is complete?
 
+L_RESTORE_SHARED_VGPR:
        //Below part will be the restore shared vgpr part (new for gfx10)
        s_getreg_b32    s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)  //shared_vgpr_size
        s_and_b32       s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF  //shared_vgpr_size is zero?
@@ -935,7 +1031,7 @@ L_RESTORE_HWREG:
        s_and_b32       s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts
        s_setreg_b32    hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE), s_restore_m0
 
-#if ASIC_TARGET_NAVI1X
+#if HAVE_XNACK
        s_setreg_b32    hwreg(HW_REG_SHADER_XNACK_MASK), s_restore_xnack_mask
 #endif
 
@@ -945,8 +1041,10 @@ L_RESTORE_HWREG:
        s_setreg_b32    hwreg(HW_REG_MODE), s_restore_mode
 
        // Restore trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
-       // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
+       // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
        get_vgpr_size_bytes(s_restore_ttmps_lo, s_restore_size)
+       get_svgpr_size_bytes(s_restore_ttmps_hi)
+       s_add_u32       s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi
        s_add_u32       s_restore_ttmps_lo, s_restore_ttmps_lo, get_sgpr_size_bytes()
        s_add_u32       s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
        s_addc_u32      s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
@@ -956,24 +1054,8 @@ L_RESTORE_HWREG:
        s_load_dword    ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 glc:1
        s_waitcnt       lgkmcnt(0)
 
-#if ASIC_TARGET_NAVI1X
-       s_and_b32       s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK
-       s_lshr_b32      s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
-       s_lshl_b32      s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT
-       s_mov_b32       s_restore_tmp, 0x0
-       s_or_b32        s_restore_tmp, s_restore_tmp, s_restore_m0
-       s_and_b32       s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_FIRST_REPLAY_MASK
-       s_lshr_b32      s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
-       s_lshl_b32      s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT
-       s_or_b32        s_restore_tmp, s_restore_tmp, s_restore_m0
-       s_and_b32       s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_REPLAY_W64H_MASK
-       s_lshr_b32      s_restore_m0, s_restore_m0, S_SAVE_PC_HI_REPLAY_W64H_SHIFT
-       s_lshl_b32      s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT
-       s_or_b32        s_restore_tmp, s_restore_tmp, s_restore_m0
-
-       s_and_b32       s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK
-       s_lshr_b32      s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
-       s_setreg_b32    hwreg(HW_REG_IB_STS), s_restore_tmp
+#if HAVE_XNACK
+       restore_ib_sts(s_restore_tmp, s_restore_m0)
 #endif
 
        s_and_b32       s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff            //pc[47:32] //Do it here in order not to affect STATUS
@@ -1089,5 +1171,29 @@ end
 function get_wave_size(s_reg)
        s_getreg_b32    s_reg, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
        s_lshl_b32      s_reg, s_reg, S_WAVE_SIZE
-       s_or_b32        s_reg, s_save_spi_init_hi, s_reg                        //share with exec_hi, it's at bit25
+end
+
+function save_and_clear_ib_sts(tmp1, tmp2)
+       // Preserve and clear scalar XNACK state before issuing scalar loads.
+       // Save IB_STS.REPLAY_W64H[25], RCNT[21:16], FIRST_REPLAY[15] into
+       // unused space ttmp11[31:24].
+       s_andn2_b32     ttmp11, ttmp11, (TTMP11_SAVE_REPLAY_W64H_MASK | TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK)
+       s_getreg_b32    tmp1, hwreg(HW_REG_IB_STS)
+       s_and_b32       tmp2, tmp1, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
+       s_lshl_b32      tmp2, tmp2, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
+       s_or_b32        ttmp11, ttmp11, tmp2
+       s_and_b32       tmp2, tmp1, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+       s_lshl_b32      tmp2, tmp2, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+       s_or_b32        ttmp11, ttmp11, tmp2
+       s_andn2_b32     tmp1, tmp1, (SQ_WAVE_IB_STS_REPLAY_W64H_MASK | SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK)
+       s_setreg_b32    hwreg(HW_REG_IB_STS), tmp1
+end
+
+function restore_ib_sts(tmp1, tmp2)
+       s_lshr_b32      tmp1, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+       s_and_b32       tmp2, tmp1, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+       s_lshr_b32      tmp1, ttmp11, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
+       s_and_b32       tmp1, tmp1, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
+       s_or_b32        tmp1, tmp1, tmp2
+       s_setreg_b32    hwreg(HW_REG_IB_STS), tmp1
 end
index eed78a0..6770cbe 100644
@@ -46,8 +46,6 @@ var SINGLE_STEP_MISSED_WORKAROUND   = 1                   //workaround for lost MODE.DEBUG_EN
 /**************************************************************************/
 /*                     variables                                         */
 /**************************************************************************/
-var SQ_WAVE_STATUS_INST_ATC_SHIFT  = 23
-var SQ_WAVE_STATUS_INST_ATC_MASK   = 0x00800000
 var SQ_WAVE_STATUS_SPI_PRIO_SHIFT  = 1
 var SQ_WAVE_STATUS_SPI_PRIO_MASK   = 0x00000006
 var SQ_WAVE_STATUS_HALT_MASK       = 0x2000
@@ -56,6 +54,7 @@ var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE    = 1
 var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT  = 3
 var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE   = 29
 var SQ_WAVE_STATUS_ALLOW_REPLAY_MASK    = 0x400000
+var SQ_WAVE_STATUS_ECC_ERR_MASK         = 0x20000
 
 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT   = 12
 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE    = 9
@@ -72,8 +71,10 @@ var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT        = 8
 #endif
 
 var SQ_WAVE_TRAPSTS_SAVECTX_MASK    =  0x400
-var SQ_WAVE_TRAPSTS_EXCE_MASK      =   0x1FF                   // Exception mask
+var SQ_WAVE_TRAPSTS_EXCP_MASK      =   0x1FF
 var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT   =  10
+var SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK =  0x80
+var SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT = 7
 var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK   =  0x100
 var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT  =  8
 var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK   =   0x3FF
@@ -83,37 +84,30 @@ var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK       =   0xFFFFF800
 var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT =   11
 var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE  =   21
 var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK  =   0x800
+var SQ_WAVE_TRAPSTS_EXCP_HI_MASK       =   0x7000
 var SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK   =   0x10000000
 
-var SQ_WAVE_IB_STS_RCNT_SHIFT          =   16                  //FIXME
+var SQ_WAVE_MODE_EXCP_EN_SHIFT         =   12
+var SQ_WAVE_MODE_EXCP_EN_ADDR_WATCH_SHIFT      = 19
+
 var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT  =   15                  //FIXME
 var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK      = 0x1F8000
-var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG  = 0x00007FFF    //FIXME
 
 var SQ_WAVE_MODE_DEBUG_EN_MASK         =   0x800
 
-var SQ_BUF_RSRC_WORD1_ATC_SHIFT            =   24
-var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT   =  27
-
 var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT        =   26                  // bits [31:26] unused by SPI debug data
 var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK =   0xFC000000
+var TTMP11_DEBUG_TRAP_ENABLED_SHIFT    =   23
+var TTMP11_DEBUG_TRAP_ENABLED_MASK     =   0x800000
 
 /*     Save        */
 var S_SAVE_BUF_RSRC_WORD1_STRIDE       =   0x00040000          //stride is 4 bytes
 var S_SAVE_BUF_RSRC_WORD3_MISC         =   0x00807FAC          //SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14] when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
-
-var S_SAVE_SPI_INIT_ATC_MASK           =   0x08000000          //bit[27]: ATC bit
-var S_SAVE_SPI_INIT_ATC_SHIFT          =   27
-var S_SAVE_SPI_INIT_MTYPE_MASK         =   0x70000000          //bit[30:28]: Mtype
-var S_SAVE_SPI_INIT_MTYPE_SHIFT                =   28
+var S_SAVE_PC_HI_TRAP_ID_MASK          =   0x00FF0000
+var S_SAVE_PC_HI_HT_MASK               =   0x01000000
 var S_SAVE_SPI_INIT_FIRST_WAVE_MASK    =   0x04000000          //bit[26]: FirstWaveInTG
 var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT   =   26
 
-var S_SAVE_PC_HI_RCNT_SHIFT            =   27                  //FIXME  check with Brian to ensure all fields other than PC[47:0] can be used
-var S_SAVE_PC_HI_RCNT_MASK             =   0xF8000000          //FIXME
-var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT    =   26                  //FIXME
-var S_SAVE_PC_HI_FIRST_REPLAY_MASK     =   0x04000000          //FIXME
-
 var s_save_spi_init_lo             =   exec_lo
 var s_save_spi_init_hi             =   exec_hi
 
@@ -140,18 +134,9 @@ var s_save_ttmps_hi            =   s_save_trapsts          //no conflict
 var S_RESTORE_BUF_RSRC_WORD1_STRIDE        =   S_SAVE_BUF_RSRC_WORD1_STRIDE
 var S_RESTORE_BUF_RSRC_WORD3_MISC          =   S_SAVE_BUF_RSRC_WORD3_MISC
 
-var S_RESTORE_SPI_INIT_ATC_MASK                    =   0x08000000          //bit[27]: ATC bit
-var S_RESTORE_SPI_INIT_ATC_SHIFT           =   27
-var S_RESTORE_SPI_INIT_MTYPE_MASK          =   0x70000000          //bit[30:28]: Mtype
-var S_RESTORE_SPI_INIT_MTYPE_SHIFT         =   28
 var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK     =   0x04000000          //bit[26]: FirstWaveInTG
 var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT            =   26
 
-var S_RESTORE_PC_HI_RCNT_SHIFT             =   S_SAVE_PC_HI_RCNT_SHIFT
-var S_RESTORE_PC_HI_RCNT_MASK              =   S_SAVE_PC_HI_RCNT_MASK
-var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT     =   S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
-var S_RESTORE_PC_HI_FIRST_REPLAY_MASK      =   S_SAVE_PC_HI_FIRST_REPLAY_MASK
-
 var s_restore_spi_init_lo                  =   exec_lo
 var s_restore_spi_init_hi                  =   exec_hi
 
@@ -199,71 +184,77 @@ L_JUMP_TO_RESTORE:
 L_SKIP_RESTORE:
 
     s_getreg_b32    s_save_status, hwreg(HW_REG_STATUS)                                    //save STATUS since we will change SCC
-    s_andn2_b32            s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK      //check whether this is for save
 
-if SINGLE_STEP_MISSED_WORKAROUND
-    // No single step exceptions if MODE.DEBUG_EN=0.
-    s_getreg_b32    ttmp2, hwreg(HW_REG_MODE)
-    s_and_b32       ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
-    s_cbranch_scc0  L_NO_SINGLE_STEP_WORKAROUND
+    // Clear SPI_PRIO: do not save with elevated priority.
+    // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd.
+    s_andn2_b32     s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK|SQ_WAVE_STATUS_ECC_ERR_MASK
 
-    // Second-level trap already handled exception if STATUS.HALT=1.
-    s_and_b32       ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+    s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
 
-    // Prioritize single step exception over context save.
-    // Second-level trap will halt wave and RFE, re-entering for SAVECTX.
-    s_cbranch_scc0  L_FETCH_2ND_TRAP
+    s_and_b32       ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+    s_cbranch_scc0  L_NOT_HALTED
 
-L_NO_SINGLE_STEP_WORKAROUND:
-end
+L_HALTED:
+    // Host trap may occur while wave is halted.
+    s_and_b32       ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+    s_cbranch_scc1  L_FETCH_2ND_TRAP
 
-    s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+L_CHECK_SAVE:
     s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK    //check whether this is for save
     s_cbranch_scc1  L_SAVE                                     //this is the operation for save
 
-    // *********    Handle non-CWSR traps      *******************
-
-    // Illegal instruction is a non-maskable exception which blocks context save.
-    // Halt the wavefront and return from the trap.
-    s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
-    s_cbranch_scc1  L_HALT_WAVE
-
-    // If STATUS.MEM_VIOL is asserted then we cannot fetch from the TMA.
-    // Instead, halt the wavefront and return from the trap.
-    s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
-    s_cbranch_scc0  L_FETCH_2ND_TRAP
-
-L_HALT_WAVE:
-    // If STATUS.HALT is set then this fault must come from SQC instruction fetch.
-    // We cannot prevent further faults. Spin wait until context saved.
-    s_and_b32       ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
-    s_cbranch_scc0  L_NOT_ALREADY_HALTED
-
-L_WAIT_CTX_SAVE:
+    // Wave is halted but neither host trap nor SAVECTX is raised.
+    // Caused by instruction fetch memory violation.
+    // Spin wait until context saved to prevent interrupt storm.
     s_sleep         0x10
-    s_getreg_b32    ttmp2, hwreg(HW_REG_TRAPSTS)
-    s_and_b32       ttmp2, ttmp2, SQ_WAVE_TRAPSTS_SAVECTX_MASK
-    s_cbranch_scc0  L_WAIT_CTX_SAVE
+    s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+    s_branch        L_CHECK_SAVE
+
+L_NOT_HALTED:
+    // Let second-level handle non-SAVECTX exception or trap.
+    // Any concurrent SAVECTX will be handled upon re-entry once halted.
+
+    // Check non-maskable exceptions. memory_violation, illegal_instruction
+    // and xnack_error exceptions always cause the wave to enter the trap
+    // handler.
+    s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+    s_cbranch_scc1  L_FETCH_2ND_TRAP
+
+    // Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
+    // Maskable exceptions only cause the wave to enter the trap handler if
+    // their respective bit in mode.excp_en is set.
+    s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCP_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
+    s_cbranch_scc0  L_CHECK_TRAP_ID
+
+    s_and_b32       ttmp3, s_save_trapsts, SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
+    s_cbranch_scc0  L_NOT_ADDR_WATCH
+    s_bitset1_b32   ttmp2, SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT // Check all addr_watch[123] exceptions against excp_en.addr_watch
+
+L_NOT_ADDR_WATCH:
+    s_getreg_b32    ttmp3, hwreg(HW_REG_MODE)
+    s_lshl_b32      ttmp2, ttmp2, SQ_WAVE_MODE_EXCP_EN_SHIFT
+    s_and_b32       ttmp2, ttmp2, ttmp3
+    s_cbranch_scc1  L_FETCH_2ND_TRAP
+
+L_CHECK_TRAP_ID:
+    // Check trap_id != 0
+    s_and_b32       ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+    s_cbranch_scc1  L_FETCH_2ND_TRAP
 
-L_NOT_ALREADY_HALTED:
-    s_or_b32        s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+if SINGLE_STEP_MISSED_WORKAROUND
+    // Prioritize single step exception over context save.
+    // Second-level trap will halt wave and RFE, re-entering for SAVECTX.
+    s_getreg_b32    ttmp2, hwreg(HW_REG_MODE)
+    s_and_b32       ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
+    s_cbranch_scc1  L_FETCH_2ND_TRAP
+end
 
-    // If the PC points to S_ENDPGM then context save will fail if STATUS.HALT is set.
-    // Rewind the PC to prevent this from occurring. The debugger compensates for this.
-    s_sub_u32       ttmp0, ttmp0, 0x8
-    s_subb_u32      ttmp1, ttmp1, 0x0
+    s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK
+    s_cbranch_scc1  L_SAVE
 
 L_FETCH_2ND_TRAP:
     // Preserve and clear scalar XNACK state before issuing scalar reads.
-    // Save IB_STS.FIRST_REPLAY[15] and IB_STS.RCNT[20:16] into unused space ttmp11[31:26].
-    s_getreg_b32    ttmp2, hwreg(HW_REG_IB_STS)
-    s_and_b32       ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
-    s_lshl_b32      ttmp3, ttmp3, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
-    s_andn2_b32     ttmp11, ttmp11, TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK
-    s_or_b32        ttmp11, ttmp11, ttmp3
-
-    s_andn2_b32     ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
-    s_setreg_b32    hwreg(HW_REG_IB_STS), ttmp2
+    save_and_clear_ib_sts(ttmp14)
 
     // Read second-level TBA/TMA from first-level TMA and jump if available.
     // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
@@ -271,27 +262,48 @@ L_FETCH_2ND_TRAP:
     s_getreg_b32    ttmp14, hwreg(HW_REG_SQ_SHADER_TMA_LO)
     s_getreg_b32    ttmp15, hwreg(HW_REG_SQ_SHADER_TMA_HI)
     s_lshl_b64      [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+
+    s_load_dword    ttmp2, [ttmp14, ttmp15], 0x10 glc:1 // debug trap enabled flag
+    s_waitcnt       lgkmcnt(0)
+    s_lshl_b32      ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT
+    s_andn2_b32     ttmp11, ttmp11, TTMP11_DEBUG_TRAP_ENABLED_MASK
+    s_or_b32        ttmp11, ttmp11, ttmp2
+
     s_load_dwordx2  [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 glc:1 // second-level TBA
     s_waitcnt       lgkmcnt(0)
     s_load_dwordx2  [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 glc:1 // second-level TMA
     s_waitcnt       lgkmcnt(0)
+
     s_and_b64       [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
     s_cbranch_scc0  L_NO_NEXT_TRAP // second-level trap handler has not been set
     s_setpc_b64     [ttmp2, ttmp3] // jump to second-level trap handler
 
 L_NO_NEXT_TRAP:
-    s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
-    s_and_b32      s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK // Check whether it is an exception
-    s_cbranch_scc1  L_EXCP_CASE          // Exception, jump back to the shader program directly.
-    s_add_u32      ttmp0, ttmp0, 4   // S_TRAP case, add 4 to ttmp0
-    s_addc_u32 ttmp1, ttmp1, 0
-L_EXCP_CASE:
+    // If not caused by trap then halt wave to prevent re-entry.
+    s_and_b32       ttmp2, s_save_pc_hi, (S_SAVE_PC_HI_TRAP_ID_MASK|S_SAVE_PC_HI_HT_MASK)
+    s_cbranch_scc1  L_TRAP_CASE
+    s_or_b32        s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+
+    // If the PC points to S_ENDPGM then context save will fail if STATUS.HALT is set.
+    // Rewind the PC to prevent this from occurring.
+    s_sub_u32       ttmp0, ttmp0, 0x8
+    s_subb_u32      ttmp1, ttmp1, 0x0
+
+    s_branch        L_EXIT_TRAP
+
+L_TRAP_CASE:
+    // Host trap will not cause trap re-entry.
+    s_and_b32       ttmp2, s_save_pc_hi, S_SAVE_PC_HI_HT_MASK
+    s_cbranch_scc1  L_EXIT_TRAP
+
+    // Advance past trap instruction to prevent re-entry.
+    s_add_u32       ttmp0, ttmp0, 0x4
+    s_addc_u32      ttmp1, ttmp1, 0x0
+
+L_EXIT_TRAP:
     s_and_b32  ttmp1, ttmp1, 0xFFFF
 
-    // Restore SQ_WAVE_IB_STS.
-    s_lshr_b32      ttmp2, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
-    s_and_b32       ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
-    s_setreg_b32    hwreg(HW_REG_IB_STS), ttmp2
+    restore_ib_sts(ttmp14)
 
     // Restore SQ_WAVE_STATUS.
     s_and_b64       exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
@@ -312,16 +324,7 @@ L_SAVE:
     s_mov_b32      s_save_tmp, 0                                                           //clear saveCtx bit
     s_setreg_b32    hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp            //clear saveCtx bit
 
-    s_getreg_b32    s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE)                  //save RCNT
-    s_lshl_b32     s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
-    s_or_b32       s_save_pc_hi, s_save_pc_hi, s_save_tmp
-    s_getreg_b32    s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE)   //save FIRST_REPLAY
-    s_lshl_b32     s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
-    s_or_b32       s_save_pc_hi, s_save_pc_hi, s_save_tmp
-    s_getreg_b32    s_save_tmp, hwreg(HW_REG_IB_STS)                                       //clear RCNT and FIRST_REPLAY in IB_STS
-    s_and_b32      s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG
-
-    s_setreg_b32    hwreg(HW_REG_IB_STS), s_save_tmp
+    save_and_clear_ib_sts(s_save_tmp)
 
     /*     inform SPI the readiness and wait for SPI's go signal */
     s_mov_b32      s_save_exec_lo, exec_lo                                                 //save EXEC and use EXEC for the go signal from SPI
@@ -360,12 +363,6 @@ L_SAVE:
     s_or_b32       s_save_buf_rsrc1,   s_save_buf_rsrc1,  S_SAVE_BUF_RSRC_WORD1_STRIDE
     s_mov_b32      s_save_buf_rsrc2,   0                                                                       //NUM_RECORDS initial value = 0 (in bytes) although not necessarily initialized
     s_mov_b32      s_save_buf_rsrc3,   S_SAVE_BUF_RSRC_WORD3_MISC
-    s_and_b32      s_save_tmp,         s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK
-    s_lshr_b32     s_save_tmp,         s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)         //get ATC bit into position
-    s_or_b32       s_save_buf_rsrc3,   s_save_buf_rsrc3,  s_save_tmp                                           //or ATC
-    s_and_b32      s_save_tmp,         s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK
-    s_lshr_b32     s_save_tmp,         s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)     //get MTYPE bits into position
-    s_or_b32       s_save_buf_rsrc3,   s_save_buf_rsrc3,  s_save_tmp                                           //or MTYPE
 
     //FIXME  right now s_save_m0/s_save_mem_offset use tma_lo/tma_hi  (might need to save them before using them?)
     s_mov_b32      s_save_m0,          m0                                                                  //save M0
@@ -690,12 +687,6 @@ L_RESTORE:
     s_or_b32       s_restore_buf_rsrc1,    s_restore_buf_rsrc1,  S_RESTORE_BUF_RSRC_WORD1_STRIDE
     s_mov_b32      s_restore_buf_rsrc2,    0                                                                               //NUM_RECORDS initial value = 0 (in bytes)
     s_mov_b32      s_restore_buf_rsrc3,    S_RESTORE_BUF_RSRC_WORD3_MISC
-    s_and_b32      s_restore_tmp,          s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK
-    s_lshr_b32     s_restore_tmp,          s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)       //get ATC bit into position
-    s_or_b32       s_restore_buf_rsrc3,    s_restore_buf_rsrc3,  s_restore_tmp                                             //or ATC
-    s_and_b32      s_restore_tmp,          s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK
-    s_lshr_b32     s_restore_tmp,          s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)   //get MTYPE bits into position
-    s_or_b32       s_restore_buf_rsrc3,    s_restore_buf_rsrc3,  s_restore_tmp                                             //or MTYPE
 
     /*     global mem offset           */
 //  s_mov_b32      s_restore_mem_offset, 0x0                               //mem offset initial value = 0
@@ -889,19 +880,7 @@ L_RESTORE:
     s_load_dword    ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 glc:1
     s_waitcnt      lgkmcnt(0)
 
-    //reuse s_restore_m0 as a temp register
-    s_and_b32      s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK
-    s_lshr_b32     s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
-    s_lshl_b32     s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT
-    s_mov_b32      s_restore_tmp, 0x0                                                                              //IB_STS is zero
-    s_or_b32       s_restore_tmp, s_restore_tmp, s_restore_m0
-    s_and_b32      s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_FIRST_REPLAY_MASK
-    s_lshr_b32     s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
-    s_lshl_b32     s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT
-    s_or_b32       s_restore_tmp, s_restore_tmp, s_restore_m0
-    s_and_b32      s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK
-    s_lshr_b32     s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
-    s_setreg_b32    hwreg(HW_REG_IB_STS),   s_restore_tmp
+    restore_ib_sts(s_restore_tmp)
 
     s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff     //pc[47:32]        //Do it here in order not to affect STATUS
     s_and_b64   exec, exec, exec  // Restore STATUS.EXECZ, not writable by s_setreg_b32
@@ -910,8 +889,7 @@ L_RESTORE:
 
     s_barrier                                                  //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
 
-//  s_rfe_b64 s_restore_pc_lo                                  //Return to the main shader program and resume execution
-    s_rfe_restore_b64  s_restore_pc_lo, s_restore_m0           // s_restore_m0[0] is used to set STATUS.inst_atc
+    s_rfe_b64 s_restore_pc_lo                                  //Return to the main shader program and resume execution
 
 
 /**************************************************************************/
@@ -1078,3 +1056,19 @@ function set_status_without_spi_prio(status, tmp)
     s_nop           0x2 // avoid S_SETREG => S_SETREG hazard
     s_setreg_b32    hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
 end
+
+function save_and_clear_ib_sts(tmp)
+    // Save IB_STS.FIRST_REPLAY[15] and IB_STS.RCNT[20:16] into unused space ttmp11[31:26].
+    s_getreg_b32    tmp, hwreg(HW_REG_IB_STS)
+    s_and_b32       tmp, tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+    s_lshl_b32      tmp, tmp, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+    s_andn2_b32     ttmp11, ttmp11, TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK
+    s_or_b32        ttmp11, ttmp11, tmp
+    s_setreg_imm32_b32 hwreg(HW_REG_IB_STS), 0x0
+end
+
+function restore_ib_sts(tmp)
+    s_lshr_b32      tmp, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+    s_and_b32       tmp, tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+    s_setreg_b32    hwreg(HW_REG_IB_STS), tmp
+end
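
The L_NOT_HALTED path above routes a wave to the second-level handler in three cases: a non-maskable exception (memory violation or illegal instruction), a maskable exception whose enable bit is set in MODE.EXCP_EN (the excp bits shifted left by SQ_WAVE_MODE_EXCP_EN_SHIFT line up with the enable field, and all addr_watch[123] bits are folded onto the single addr_watch enable at bit 19), or a non-zero trap_id in the saved PC_HI. Below is a host-side C sketch of that predicate using the masks defined in this file; the function name and the collapse into one boolean are illustrative, and the SINGLE_STEP_MISSED_WORKAROUND branch is not modelled.

#include <stdbool.h>
#include <stdint.h>

#define SQ_WAVE_TRAPSTS_EXCP_MASK          0x1FF
#define SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK    0x80
#define SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT   7
#define SQ_WAVE_TRAPSTS_MEM_VIOL_MASK      0x100
#define SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK  0x800
#define SQ_WAVE_TRAPSTS_EXCP_HI_MASK       0x7000
#define SQ_WAVE_MODE_EXCP_EN_SHIFT         12
#define S_SAVE_PC_HI_TRAP_ID_MASK          0x00FF0000

/* Should a non-halted wave be handed to the second-level trap handler? */
static bool forward_to_second_level(uint32_t trapsts, uint32_t mode, uint32_t pc_hi)
{
        uint32_t excp;

        /* Non-maskable: memory violation and illegal instruction always trap. */
        if (trapsts & (SQ_WAVE_TRAPSTS_MEM_VIOL_MASK |
                       SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK))
                return true;

        /* Maskable: only if the matching MODE.EXCP_EN bit is set.  All
         * addr_watch[123] bits in excp_hi are checked against the single
         * excp_en.addr_watch enable. */
        excp = trapsts & (SQ_WAVE_TRAPSTS_EXCP_MASK | SQ_WAVE_TRAPSTS_EXCP_HI_MASK);
        if (excp) {
                if (trapsts & (SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK |
                               SQ_WAVE_TRAPSTS_EXCP_HI_MASK))
                        excp |= 1u << SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT;
                if ((excp << SQ_WAVE_MODE_EXCP_EN_SHIFT) & mode)
                        return true;
        }

        /* An s_trap with non-zero trap_id also enters the second level. */
        return (pc_hi & S_SAVE_PC_HI_TRAP_ID_MASK) != 0;
}
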
index f1a225a..8667e3d 100644 (file)
@@ -441,10 +441,14 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
                        BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
                        kfd->cwsr_isa = cwsr_trap_nv1x_hex;
                        kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
-               } else {
+               } else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
                        BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
                        kfd->cwsr_isa = cwsr_trap_gfx10_hex;
                        kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
+               } else {
+                       BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
+                       kfd->cwsr_isa = cwsr_trap_gfx11_hex;
+                       kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
                }
 
                kfd->cwsr_enabled = true;
index 29e9ebf..2ebf013 100644 (file)
@@ -531,7 +531,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
        bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
        bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
-       bp.flags |= AMDGPU_AMDKFD_CREATE_SVM_BO;
+       bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
        bp.type = ttm_bo_type_device;
        bp.resv = NULL;
 
index 2e20f54..8d50d20 100644 (file)
@@ -1271,6 +1271,12 @@ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
                if (!peer_dev)
                        continue;
 
+               /* Include the CPU peer in GPU hive if connected over xGMI. */
+               if (!peer_dev->gpu && !peer_dev->node_props.hive_id &&
+                               dev->node_props.hive_id &&
+                               dev->gpu->adev->gmc.xgmi.connected_to_cpu)
+                       peer_dev->node_props.hive_id = dev->node_props.hive_id;
+
                list_for_each_entry(inbound_link, &peer_dev->io_link_props,
                                                                        list) {
                        if (inbound_link->node_to != link->node_from)
@@ -1302,22 +1308,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 
        pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
 
-       /* Include the CPU in xGMI hive if xGMI connected by assigning it the hive ID. */
-       if (gpu->hive_id && gpu->adev->gmc.xgmi.connected_to_cpu) {
-               struct kfd_topology_device *top_dev;
-
-               down_read(&topology_lock);
-
-               list_for_each_entry(top_dev, &topology_device_list, list) {
-                       if (top_dev->gpu)
-                               break;
-
-                       top_dev->node_props.hive_id = gpu->hive_id;
-               }
-
-               up_read(&topology_lock);
-       }
-
        /* Check to see if this gpu device exists in the topology_device_list.
         * If so, assign the gpu to that device,
         * else create a Virtual CRAT for this gpu device and then parse that
index a92cfb0..70be67a 100644 (file)
@@ -769,7 +769,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
 
                do {
                        dc_stat_get_dmub_notification(adev->dm.dc, &notify);
-                       if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
+                       if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
                                DRM_ERROR("DM: notify type %d invalid!", notify.type);
                                continue;
                        }
@@ -5381,17 +5381,19 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
 
 static void
 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
-                              bool *per_pixel_alpha, bool *global_alpha,
-                              int *global_alpha_value)
+                              bool *per_pixel_alpha, bool *pre_multiplied_alpha,
+                              bool *global_alpha, int *global_alpha_value)
 {
        *per_pixel_alpha = false;
+       *pre_multiplied_alpha = true;
        *global_alpha = false;
        *global_alpha_value = 0xff;
 
        if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
                return;
 
-       if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+       if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
+               plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
                static const uint32_t alpha_formats[] = {
                        DRM_FORMAT_ARGB8888,
                        DRM_FORMAT_RGBA8888,
@@ -5406,6 +5408,9 @@ fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
                                break;
                        }
                }
+
+               if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
+                       *pre_multiplied_alpha = false;
        }
 
        if (plane_state->alpha < 0xffff) {
@@ -5568,7 +5573,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
                return ret;
 
        fill_blending_from_plane_state(
-               plane_state, &plane_info->per_pixel_alpha,
+               plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
                &plane_info->global_alpha, &plane_info->global_alpha_value);
 
        return 0;
@@ -5615,6 +5620,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
        dc_plane_state->tiling_info = plane_info.tiling_info;
        dc_plane_state->visible = plane_info.visible;
        dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
+       dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
        dc_plane_state->global_alpha = plane_info.global_alpha;
        dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
        dc_plane_state->dcc = plane_info.dcc;
@@ -7911,7 +7917,8 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
        if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
            plane_cap && plane_cap->per_pixel_alpha) {
                unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
-                                         BIT(DRM_MODE_BLEND_PREMULTI);
+                                         BIT(DRM_MODE_BLEND_PREMULTI) |
+                                         BIT(DRM_MODE_BLEND_COVERAGE);
 
                drm_plane_create_alpha_property(plane);
                drm_plane_create_blend_mode_property(plane, blend_caps);
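
The change above widens per-pixel alpha support from pre-multiplied only to pre-multiplied and coverage (straight) alpha, threading a new pre_multiplied_alpha flag from the DRM blend mode down to dc_plane_state. The two modes differ only in whether the source channels still need to be scaled by alpha; here is a short sketch of the per-channel math, assuming normalized [0, 1] values and ignoring global alpha for brevity (the helper name is illustrative, not a DC API).

/* Per-channel blend of an overlay pixel onto the background.
 * Pre-multiplied: source channels already carry their alpha.
 * Coverage (straight): source channels must still be scaled by alpha. */
static float blend_channel(float src, float dst, float alpha,
                           bool pre_multiplied_alpha)
{
        if (pre_multiplied_alpha)
                return src + (1.0f - alpha) * dst;      /* DRM_MODE_BLEND_PREMULTI */

        return alpha * src + (1.0f - alpha) * dst;      /* DRM_MODE_BLEND_COVERAGE */
}
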
index 02943ca..cf1b5f3 100644 (file)
@@ -122,7 +122,7 @@ static void rn_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
                dpp_inst = clk_mgr->base.ctx->dc->res_pool->dpps[i]->inst;
                dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
 
-               prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
+               prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[dpp_inst];
 
                if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
                        clk_mgr->dccg->funcs->update_dpp_dto(
index 27501b7..a2ade6e 100644 (file)
@@ -91,7 +91,8 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
 
                if (pipe->top_pipe || pipe->prev_odm_pipe)
                        continue;
-               if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+               if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
+                                    dc_is_virtual_signal(pipe->stream->signal))) {
                        if (disable)
                                pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
                        else
index 3121dd2..fc3af81 100644 (file)
@@ -122,7 +122,8 @@ static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
 
                if (pipe->top_pipe || pipe->prev_odm_pipe)
                        continue;
-               if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+               if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
+                                    dc_is_virtual_signal(pipe->stream->signal))) {
                        if (disable)
                                pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
                        else
index e41a48f..f144494 100644 (file)
@@ -2901,14 +2901,15 @@ static void commit_planes_for_stream(struct dc *dc,
                                                top_pipe_to_program->stream_res.tg);
                }
 
-       if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
+       if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
                dc->hwss.interdependent_update_lock(dc, context, true);
-       else
+       } else {
                /* Lock the top pipe while updating plane addrs, since freesync requires
                 *  plane addr update event triggers to be synchronized.
                 *  top_pipe_to_program is expected to never be NULL
                 */
                dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
+       }
 
        // Stream updates
        if (stream_update)
@@ -2924,10 +2925,11 @@ static void commit_planes_for_stream(struct dc *dc,
                if (dc->hwss.program_front_end_for_ctx)
                        dc->hwss.program_front_end_for_ctx(dc, context);
 
-               if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
+               if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
                        dc->hwss.interdependent_update_lock(dc, context, false);
-               else
+               } else {
                        dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+               }
                dc->hwss.post_unlock_program_front_end(dc, context);
                return;
        }
@@ -3052,10 +3054,11 @@ static void commit_planes_for_stream(struct dc *dc,
 
        }
 
-       if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
+       if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
                dc->hwss.interdependent_update_lock(dc, context, false);
-       else
+       } else {
                dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+       }
 
        if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
                if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
index 67ef357..a789ea8 100644 (file)
@@ -33,6 +33,7 @@
 #include "gpio_service_interface.h"
 #include "core_status.h"
 #include "dc_link_dp.h"
+#include "dc_link_dpia.h"
 #include "dc_link_ddc.h"
 #include "link_hwss.h"
 #include "opp.h"
@@ -240,7 +241,7 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
 
        /* Link may not have physical HPD pin. */
        if (link->ep_type != DISPLAY_ENDPOINT_PHY) {
-               if (link->is_hpd_pending || !link->hpd_status)
+               if (link->is_hpd_pending || !dc_link_dpia_query_hpd_status(link))
                        *type = dc_connection_none;
                else
                        *type = dc_connection_single;
@@ -1604,8 +1605,25 @@ static bool dc_link_construct_legacy(struct dc_link *link,
                if (link->hpd_gpio) {
                        if (!link->dc->config.allow_edp_hotplug_detection)
                                link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
-                       link->irq_source_hpd_rx =
-                                       dal_irq_get_rx_source(link->hpd_gpio);
+
+                       switch (link->dc->config.allow_edp_hotplug_detection) {
+                       case 1: // only the 1st eDP handles hotplug
+                               if (link->link_index == 0)
+                                       link->irq_source_hpd_rx =
+                                               dal_irq_get_rx_source(link->hpd_gpio);
+                               else
+                                       link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+                               break;
+                       case 2: // only the 2nd eDP handles hotplug
+                               if (link->link_index == 1)
+                                       link->irq_source_hpd_rx =
+                                               dal_irq_get_rx_source(link->hpd_gpio);
+                               else
+                                       link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+                               break;
+                       default:
+                               break;
+                       }
                }
 
                break;
index 340b5f9..dc30ac3 100644 (file)
@@ -2783,31 +2783,37 @@ bool perform_link_training_with_retries(
        struct dc_link *link = stream->link;
        enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
        enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
-       struct dc_link_settings current_setting = *link_setting;
+       struct dc_link_settings cur_link_settings = *link_setting;
        const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
        int fail_count = 0;
+       bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */
+       bool is_link_bw_min = /* RBR x 1 */
+               (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+               (cur_link_settings.lane_count <= LANE_COUNT_ONE);
 
        dp_trace_commit_lt_init(link);
 
 
-       if (dp_get_link_encoding_format(&current_setting) == DP_8b_10b_ENCODING)
+       if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
                /* We need to do this before the link training to ensure the idle
                 * pattern in SST mode will be sent right after the link training
                 */
                link_hwss->setup_stream_encoder(pipe_ctx);
 
        dp_trace_set_lt_start_timestamp(link, false);
-       for (j = 0; j < attempts; ++j) {
+       j = 0;
+       while (j < attempts && fail_count < (attempts * 10)) {
 
-               DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d\n",
-                       __func__, (unsigned int)j + 1, attempts);
+               DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d @ rate(%d) x lane(%d)\n",
+                       __func__, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+                       cur_link_settings.lane_count);
 
                dp_enable_link_phy(
                        link,
                        &pipe_ctx->link_res,
                        signal,
                        pipe_ctx->clock_source->id,
-                       &current_setting);
+                       &cur_link_settings);
 
                if (stream->sink_patches.dppowerup_delay > 0) {
                        int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
@@ -2832,30 +2838,30 @@ bool perform_link_training_with_retries(
                dp_set_panel_mode(link, panel_mode);
 
                if (link->aux_access_disabled) {
-                       dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &current_setting);
+                       dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
                        return true;
                } else {
                        /** @todo Consolidate USB4 DP and DPx.x training. */
                        if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
                                status = dc_link_dpia_perform_link_training(link,
                                                &pipe_ctx->link_res,
-                                               &current_setting,
+                                               &cur_link_settings,
                                                skip_video_pattern);
 
                                /* Transmit idle pattern once training successful. */
-                               if (status == LINK_TRAINING_SUCCESS)
+                               if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
                                        dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
                        } else {
                                status = dc_link_dp_perform_link_training(link,
                                                &pipe_ctx->link_res,
-                                               &current_setting,
+                                               &cur_link_settings,
                                                skip_video_pattern);
                        }
 
                        dp_trace_lt_total_count_increment(link, false);
                        dp_trace_lt_result_update(link, status, false);
                        dp_trace_set_lt_end_timestamp(link, false);
-                       if (status == LINK_TRAINING_SUCCESS)
+                       if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
                                return true;
                }
 
@@ -2866,8 +2872,9 @@ bool perform_link_training_with_retries(
                if (j == (attempts - 1) && link->ep_type == DISPLAY_ENDPOINT_PHY)
                        break;
 
-               DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n",
-                       __func__, (unsigned int)j + 1, attempts);
+               DC_LOG_WARNING("%s: Link training attempt %u of %d failed @ rate(%d) x lane(%d)\n",
+                       __func__, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+                       cur_link_settings.lane_count);
 
                dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
 
@@ -2876,27 +2883,49 @@ bool perform_link_training_with_retries(
                        enum dc_connection_type type = dc_connection_none;
 
                        dc_link_detect_sink(link, &type);
-                       if (type == dc_connection_none)
+                       if (type == dc_connection_none) {
+                               DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__);
                                break;
-               } else if (do_fallback) {
+                       }
+               }
+
+               /* Try to train again at original settings if:
+                * - not falling back between training attempts;
+                * - aborted previous attempt due to reasons other than sink unplug;
+                * - successfully trained but at a link rate lower than that required by stream;
+                * - reached minimum link bandwidth.
+                */
+               if (!do_fallback || (status == LINK_TRAINING_ABORT) ||
+                               (status == LINK_TRAINING_SUCCESS && is_link_bw_low) ||
+                               is_link_bw_min) {
+                       j++;
+                       cur_link_settings = *link_setting;
+                       delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
+                       is_link_bw_low = false;
+                       is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+                               (cur_link_settings.lane_count <= LANE_COUNT_ONE);
+
+               } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */
                        uint32_t req_bw;
                        uint32_t link_bw;
 
-                       decide_fallback_link_setting(link, *link_setting, &current_setting, status);
-                       /* Fail link training if reduced link bandwidth no longer meets
-                        * stream requirements.
+                       decide_fallback_link_setting(link, *link_setting, &cur_link_settings, status);
+                        * Flag if reduced link bandwidth no longer meets stream requirements or has fallen
+                        * back to minimum link bandwidth.
                         */
                        req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
-                       link_bw = dc_link_bandwidth_kbps(link, &current_setting);
-                       if (req_bw > link_bw)
-                               break;
+                       link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings);
+                       is_link_bw_low = (req_bw > link_bw);
+                       is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+                               (cur_link_settings.lane_count <= LANE_COUNT_ONE));
+
+                       if (is_link_bw_low)
+                               DC_LOG_WARNING("%s: Link bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
+                                       __func__, req_bw, link_bw);
                }
 
                msleep(delay_between_attempts);
-
-               delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
        }
-
        return false;
 }
 
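The rewritten retry loop above tracks two conditions after each attempt: is_link_bw_low (the trained link can no longer carry the stream, i.e. req_bw > link_bw) and is_link_bw_min (already at RBR x 1, nothing left to drop). Fallback steps keep lowering cur_link_settings without advancing the attempt counter; the counter only moves when the loop resets to the caller's original settings, and the fail_count < attempts * 10 guard bounds the total number of tries. A compact sketch of just that decision, with a hypothetical helper name and simplified boolean inputs:

#include <stdbool.h>

/* Decide what the next training attempt should use.  Returns true when the
 * loop should restart from the caller's original link settings (counting one
 * more attempt), false when it should keep falling back to lower settings. */
static bool retry_at_original_settings(bool do_fallback, bool aborted,
                                       bool trained_ok, bool is_link_bw_low,
                                       bool is_link_bw_min)
{
        return !do_fallback ||                   /* fallback not requested */
               aborted ||                        /* status == LINK_TRAINING_ABORT */
               (trained_ok && is_link_bw_low) || /* trained, but below stream bw */
               is_link_bw_min;                   /* already at RBR x 1 */
}
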
@@ -5097,13 +5126,16 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
        return true;
 }
 
-void dp_retrieve_lttpr_cap(struct dc_link *link)
+bool dp_retrieve_lttpr_cap(struct dc_link *link)
 {
+       uint8_t lttpr_dpcd_data[8];
        bool allow_lttpr_non_transparent_mode = 0;
+       bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
        bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
        enum dc_status status = DC_ERROR_UNEXPECTED;
+       bool is_lttpr_present = false;
 
-       memset(link->lttpr_dpcd_data, '\0', sizeof(link->lttpr_dpcd_data));
+       memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
 
        if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
                        link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
@@ -5113,116 +5145,82 @@ void dp_retrieve_lttpr_cap(struct dc_link *link)
                allow_lttpr_non_transparent_mode = 1;
        }
 
-       link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
-       link->lttpr_support = LTTPR_UNSUPPORTED;
-
        /*
-        * Logic to determine LTTPR support
+        * Logic to determine LTTPR mode
         */
-       if (vbios_lttpr_interop)
-               link->lttpr_support = LTTPR_SUPPORTED;
-       else if (link->dc->config.allow_lttpr_non_transparent_mode.raw == 0
-                       || !link->dc->caps.extended_aux_timeout_support)
-                       link->lttpr_support = LTTPR_UNSUPPORTED;
-       else
-               link->lttpr_support = LTTPR_CHECK_EXT_SUPPORT;
+       link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+       if (vbios_lttpr_enable && vbios_lttpr_interop)
+               link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+       else if (!vbios_lttpr_enable && vbios_lttpr_interop) {
+               if (allow_lttpr_non_transparent_mode)
+                       link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+               else
+                       link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
+       } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) {
+               if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support)
+                       link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+               else
+                       link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+       }
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
        /* Check DP tunnel LTTPR mode debug option. */
        if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
            link->dc->debug.dpia_debug.bits.force_non_lttpr)
-               link->lttpr_support = LTTPR_UNSUPPORTED;
+               link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+#endif
 
-       if (link->lttpr_support > LTTPR_UNSUPPORTED) {
+       if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
                /* By reading LTTPR capability, RX assumes that we will enable
                 * LTTPR extended aux timeout if LTTPR is present.
                 */
                status = core_link_read_dpcd(
                                link,
                                DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
-                               link->lttpr_dpcd_data,
-                               sizeof(link->lttpr_dpcd_data));
-       }
-}
-
-bool dp_parse_lttpr_mode(struct dc_link *link)
-{
-       bool dpcd_allow_lttpr_non_transparent_mode = false;
-       bool is_lttpr_present = false;
-
-       bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
-
-       if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
-                       link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
-               dpcd_allow_lttpr_non_transparent_mode = true;
-       } else if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
-                       !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
-               dpcd_allow_lttpr_non_transparent_mode = true;
+                               lttpr_dpcd_data,
+                               sizeof(lttpr_dpcd_data));
+
+               link->dpcd_caps.lttpr_caps.revision.raw =
+                               lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
+                                                               DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+               link->dpcd_caps.lttpr_caps.max_link_rate =
+                               lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
+                                                               DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+               link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
+                               lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
+                                                               DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+               link->dpcd_caps.lttpr_caps.max_lane_count =
+                               lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
+                                                               DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+               link->dpcd_caps.lttpr_caps.mode =
+                               lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
+                                                               DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+               link->dpcd_caps.lttpr_caps.max_ext_timeout =
+                               lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
+                                                               DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+               link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
+                               lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+                                                               DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+               link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
+                               lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
+                                                               DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+               /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
+               is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
+                               link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
+                               link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
+               if (is_lttpr_present) {
+                       CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+                       configure_lttpr_mode_transparent(link);
+               } else
+                       link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
        }
-
-       /*
-        * Logic to determine LTTPR mode
-        */
-       if (link->lttpr_support == LTTPR_SUPPORTED)
-               if (vbios_lttpr_enable)
-                       link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
-               else if (dpcd_allow_lttpr_non_transparent_mode)
-                       link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
-               else
-                       link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
-       else    // lttpr_support == LTTPR_CHECK_EXT_SUPPORT
-               if (dpcd_allow_lttpr_non_transparent_mode) {
-                       link->lttpr_support = LTTPR_SUPPORTED;
-                       link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
-               } else {
-                       link->lttpr_support = LTTPR_UNSUPPORTED;
-               }
-
-       if (link->lttpr_support == LTTPR_UNSUPPORTED)
-               return false;
-
-       link->dpcd_caps.lttpr_caps.revision.raw =
-                       link->lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
-                                                       DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
-       link->dpcd_caps.lttpr_caps.max_link_rate =
-                       link->lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
-                                                       DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
-       link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
-                       link->lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
-                                                       DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
-       link->dpcd_caps.lttpr_caps.max_lane_count =
-                       link->lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
-                                                       DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
-       link->dpcd_caps.lttpr_caps.mode =
-                       link->lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
-                                                       DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
-       link->dpcd_caps.lttpr_caps.max_ext_timeout =
-                       link->lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
-                                                       DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
-       link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
-                       link->lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
-                                                       DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
-       link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
-                       link->lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
-                                                       DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
-
-       /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
-       is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
-                       link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
-                       link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
-       if (is_lttpr_present) {
-               CONN_DATA_DETECT(link, link->lttpr_dpcd_data, sizeof(link->lttpr_dpcd_data), "LTTPR Caps: ");
-               configure_lttpr_mode_transparent(link);
-       } else
-               link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
-
        return is_lttpr_present;
 }
 
@@ -5374,8 +5372,7 @@ static bool retrieve_link_cap(struct dc_link *link)
                status = wa_try_to_wake_dprx(link, timeout_ms);
        }
 
-       dp_retrieve_lttpr_cap(link);
-
+       is_lttpr_present = dp_retrieve_lttpr_cap(link);
        /* Read DP tunneling information. */
        status = dpcd_get_tunneling_device_data(link);
 
@@ -5411,9 +5408,6 @@ static bool retrieve_link_cap(struct dc_link *link)
                return false;
        }
 
-       if (link->lttpr_support > LTTPR_UNSUPPORTED)
-               is_lttpr_present = dp_parse_lttpr_mode(link);
-
        if (!is_lttpr_present)
                dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
 
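dp_retrieve_lttpr_cap() now both selects the LTTPR mode and reads the repeater DPCD caps in one pass, returning whether a repeater was actually detected so retrieve_link_cap() can fall back to the default AUX timeout. The mode selection reduces to a small truth table over the VBIOS flags and the driver's non-transparent allowance; the sketch below captures just that selection (the helper name and enum ordering are illustrative, the branches mirror the added code above).

#include <stdbool.h>

enum lttpr_mode {
        LTTPR_MODE_NON_LTTPR,
        LTTPR_MODE_TRANSPARENT,
        LTTPR_MODE_NON_TRANSPARENT,
};

/* Mode selection mirroring the branches added in dp_retrieve_lttpr_cap(). */
static enum lttpr_mode decide_lttpr_mode(bool vbios_lttpr_enable,
                                         bool vbios_lttpr_interop,
                                         bool allow_non_transparent,
                                         bool extended_aux_timeout_support)
{
        if (vbios_lttpr_enable && vbios_lttpr_interop)
                return LTTPR_MODE_NON_TRANSPARENT;

        if (!vbios_lttpr_enable && vbios_lttpr_interop)
                return allow_non_transparent ? LTTPR_MODE_NON_TRANSPARENT
                                             : LTTPR_MODE_TRANSPARENT;

        if (!vbios_lttpr_enable && !vbios_lttpr_interop)
                return (allow_non_transparent && extended_aux_timeout_support)
                        ? LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_NON_LTTPR;

        /* vbios_lttpr_enable && !vbios_lttpr_interop: left at the default. */
        return LTTPR_MODE_NON_LTTPR;
}
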
index a5765f3..1b7a877 100644 (file)
@@ -34,6 +34,7 @@
 #include "dm_helpers.h"
 #include "dmub/inc/dmub_cmd.h"
 #include "inc/link_dpcd.h"
+#include "dc_dmub_srv.h"
 
 #define DC_LOGGER \
        link->ctx->logger
@@ -69,6 +70,24 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
        return status;
 }
 
+bool dc_link_dpia_query_hpd_status(struct dc_link *link)
+{
+       union dmub_rb_cmd cmd = {0};
+       struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
+       bool is_hpd_high = false;
+
+       /* prepare QUERY_HPD command */
+       cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
+       cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
+       cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
+
+       /* Return HPD status reported by DMUB if query successfully executed. */
+       if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
+               is_hpd_high = cmd.query_hpd.data.result;
+
+       return is_hpd_high;
+}
+
 /* Configure link as prescribed in link_setting; set LTTPR mode; and
  * Initialize link training settings.
  * Abort link training if sink unplug detected.
index e6b9c6a..5bc6ff2 100644 (file)
@@ -61,6 +61,8 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl
                plane_state->blend_tf->type = TF_TYPE_BYPASS;
        }
 
+       plane_state->pre_multiplied_alpha = true;
+
 }
 
 static void dc_plane_destruct(struct dc_plane_state *plane_state)
index 26c24db..3960c74 100644 (file)
@@ -47,7 +47,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.185"
+#define DC_VER "3.2.186"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -329,7 +329,7 @@ struct dc_config {
        bool disable_dmcu;
        bool enable_4to1MPC;
        bool enable_windowed_mpo_odm;
-       bool allow_edp_hotplug_detection;
+       uint32_t allow_edp_hotplug_detection;
        bool clamp_min_dcfclk;
        uint64_t vblank_alignment_dto_params;
        uint8_t  vblank_alignment_max_frame_time_diff;
@@ -1011,6 +1011,7 @@ struct dc_plane_state {
 
        bool is_tiling_rotated;
        bool per_pixel_alpha;
+       bool pre_multiplied_alpha;
        bool global_alpha;
        int  global_alpha_value;
        bool visible;
@@ -1045,6 +1046,7 @@ struct dc_plane_info {
        bool horizontal_mirror;
        bool visible;
        bool per_pixel_alpha;
+       bool pre_multiplied_alpha;
        bool global_alpha;
        int  global_alpha_value;
        bool input_csc_enabled;
index 251f2bb..a3c37ee 100644 (file)
@@ -129,8 +129,6 @@ struct dc_link {
        bool link_state_valid;
        bool aux_access_disabled;
        bool sync_lt_in_progress;
-       uint8_t lttpr_dpcd_data[8];
-       enum lttpr_support lttpr_support;
        enum lttpr_mode lttpr_mode;
        bool is_internal_display;
 
index 29e20d9..9e39cd7 100644 (file)
@@ -87,7 +87,8 @@ static void release_engine(
 
        engine->ddc = NULL;
 
-       REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+       REG_UPDATE_2(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1,
+               AUX_SW_USE_AUX_REG_REQ, 0);
 }
 
 #define SW_CAN_ACCESS_AUX 1
index 5e6fea8..845aa8a 100644 (file)
@@ -1101,9 +1101,12 @@ static bool get_pixel_clk_frequency_100hz(
                         * not be programmed equal to DPREFCLK
                         */
                        modulo_hz = REG_READ(MODULO[inst]);
-                       *pixel_clk_khz = div_u64((uint64_t)clock_hz*
-                               clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
-                               modulo_hz);
+                       if (modulo_hz)
+                               *pixel_clk_khz = div_u64((uint64_t)clock_hz*
+                                       clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
+                                       modulo_hz);
+                       else
+                               *pixel_clk_khz = 0;
                } else {
                        /* NOTE: There is agreement with VBIOS here that MODULO is
                         * programmed equal to DPREFCLK, in which case PHASE will be
index e02ac75..e3a6287 100644 (file)
@@ -2550,12 +2550,21 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
        blnd_cfg.overlap_only = false;
        blnd_cfg.global_gain = 0xff;
 
-       if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
-               blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
-       } else if (per_pixel_alpha) {
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+       if (per_pixel_alpha) {
+               /* DCN1.0 has output CM before MPC which seems to screw with
+                * pre-multiplied alpha.
+                */
+               blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
+                               pipe_ctx->stream->output_color_space)
+                                               && pipe_ctx->plane_state->pre_multiplied_alpha);
+               if (pipe_ctx->plane_state->global_alpha) {
+                       blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+                       blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+               } else {
+                       blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+               }
        } else {
+               blnd_cfg.pre_multiplied_alpha = false;
                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
        }
 
@@ -2564,14 +2573,6 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
        else
                blnd_cfg.global_alpha = 0xff;
 
-       /* DCN1.0 has output CM before MPC which seems to screw with
-        * pre-multiplied alpha.
-        */
-       blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
-                       pipe_ctx->stream->output_color_space)
-                                       && per_pixel_alpha;
-
-
        /*
         * TODO: remove hack
         * Note: currently there is a bug in init_hw such that
index e1f87bd..ec6aa8d 100644 (file)
@@ -1773,7 +1773,6 @@ void dcn20_post_unlock_program_front_end(
         */
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
                if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) {
                        struct hubp *hubp = pipe->plane_res.hubp;
                        int j = 0;
@@ -2346,12 +2345,16 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
        blnd_cfg.overlap_only = false;
        blnd_cfg.global_gain = 0xff;
 
-       if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
-               blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
-       } else if (per_pixel_alpha) {
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+       if (per_pixel_alpha) {
+               blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
+               if (pipe_ctx->plane_state->global_alpha) {
+                       blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+                       blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+               } else {
+                       blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+               }
        } else {
+               blnd_cfg.pre_multiplied_alpha = false;
                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
        }
 
@@ -2365,7 +2368,7 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
        blnd_cfg.top_gain = 0x1f000;
        blnd_cfg.bottom_inside_gain = 0x1f000;
        blnd_cfg.bottom_outside_gain = 0x1f000;
-       blnd_cfg.pre_multiplied_alpha = per_pixel_alpha;
+
        if (pipe_ctx->plane_state->format
                        == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
                blnd_cfg.pre_multiplied_alpha = false;
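In both the DCN1.0 and DCN2.0 update_mpcc hunks the blend configuration now branches once on per_pixel_alpha and derives pre_multiplied_alpha from the new plane-state flag; DCN1.0 additionally gates it on an RGB output colour space, and DCN2.0 still forces it off for RGBE_ALPHA surfaces. A condensed sketch of the resulting selection, using simplified stand-in types (field names follow the diff, the helper itself is hypothetical):

	#include <stdbool.h>

	enum alpha_mode {
		PER_PIXEL_COMBINED_GLOBAL_GAIN,
		PER_PIXEL,
		GLOBAL,
	};

	struct blend_sel {
		enum alpha_mode mode;
		bool pre_multiplied_alpha;
		unsigned int global_gain;	/* used only in the combined mode */
	};

	struct plane_blend_state {
		bool per_pixel_alpha;
		bool pre_multiplied_alpha;
		bool global_alpha;
		unsigned int global_alpha_value;
	};

	/* Mirrors the branch structure in dcn10/dcn20_update_mpcc above;
	 * output_is_rgb models the extra DCN1.0-only colour-space gate.
	 */
	struct blend_sel select_blend(const struct plane_blend_state *p, bool output_is_rgb)
	{
		struct blend_sel s = { .mode = GLOBAL, .pre_multiplied_alpha = false, .global_gain = 0xff };

		if (!p->per_pixel_alpha)
			return s;

		s.pre_multiplied_alpha = p->pre_multiplied_alpha && output_is_rgb;
		if (p->global_alpha) {
			s.mode = PER_PIXEL_COMBINED_GLOBAL_GAIN;
			s.global_gain = p->global_alpha_value;
		} else {
			s.mode = PER_PIXEL;
		}
		return s;
	}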
index f5e8916..b604fb2 100644 (file)
@@ -28,6 +28,8 @@
 #include "dc.h"
 #include "dcn_calc_math.h"
 
+#include "dml/dcn30/dcn30_fpu.h"
+
 #define REG(reg)\
        optc1->tg_regs->reg
 
@@ -184,6 +186,14 @@ void optc3_set_dsc_config(struct timing_generator *optc,
 
 }
 
+void optc3_set_vrr_m_const(struct timing_generator *optc,
+               double vtotal_avg)
+{
+       DC_FP_START();
+       optc3_fpu_set_vrr_m_const(optc, vtotal_avg);
+       DC_FP_END();
+}
+
 void optc3_set_odm_bypass(struct timing_generator *optc,
                const struct dc_crtc_timing *dc_crtc_timing)
 {
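optc3_set_vrr_m_const() is reduced to a thin wrapper: the floating-point VRR math moves into dcn30_fpu.c, and every call crosses a DC_FP_START()/DC_FP_END() pair. The same conversion is applied below to the writeback, watermark and bounding-box helpers. A minimal sketch of the split, with hypothetical file and function names (DC_FP_START/END and dc_assert_fp_enabled() are the helpers this series actually uses):

	/* caller.c -- integer-only translation unit */
	void set_vrr_m_const(struct timing_generator *optc, double vtotal_avg)
	{
		DC_FP_START();			/* enter the FP-safe region */
		fpu_set_vrr_m_const(optc, vtotal_avg);
		DC_FP_END();			/* leave the FP-safe region */
	}

	/* xxx_fpu.c -- the only file built with the hard-float dml_ccflags */
	void fpu_set_vrr_m_const(struct timing_generator *optc, double vtotal_avg)
	{
		dc_assert_fp_enabled();		/* catch callers that skipped DC_FP_START() */
		/* ... double-precision VTOTAL/DTO math lives here ... */
	}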
index 336b2ce..1c1a67c 100644 (file)
@@ -84,6 +84,7 @@
 #include "dce/dce_aux.h"
 #include "dce/dce_i2c.h"
 
+#include "dml/dcn30/dcn30_fpu.h"
 #include "dml/dcn30/display_mode_vba_30.h"
 #include "vm_helper.h"
 #include "dcn20/dcn20_vmid.h"
 
 #define DC_LOGGER_INIT(logger)
 
-struct _vcs_dpi_ip_params_st dcn3_0_ip = {
-       .use_min_dcfclk = 0,
-       .clamp_min_dcfclk = 0,
-       .odm_capable = 1,
-       .gpuvm_enable = 0,
-       .hostvm_enable = 0,
-       .gpuvm_max_page_table_levels = 4,
-       .hostvm_max_page_table_levels = 4,
-       .hostvm_cached_page_table_levels = 0,
-       .pte_group_size_bytes = 2048,
-       .num_dsc = 6,
-       .rob_buffer_size_kbytes = 184,
-       .det_buffer_size_kbytes = 184,
-       .dpte_buffer_size_in_pte_reqs_luma = 84,
-       .pde_proc_buffer_size_64k_reqs = 48,
-       .dpp_output_buffer_pixels = 2560,
-       .opp_output_buffer_lines = 1,
-       .pixel_chunk_size_kbytes = 8,
-       .pte_enable = 1,
-       .max_page_table_levels = 2,
-       .pte_chunk_size_kbytes = 2,  // ?
-       .meta_chunk_size_kbytes = 2,
-       .writeback_chunk_size_kbytes = 8,
-       .line_buffer_size_bits = 789504,
-       .is_line_buffer_bpp_fixed = 0,  // ?
-       .line_buffer_fixed_bpp = 0,     // ?
-       .dcc_supported = true,
-       .writeback_interface_buffer_size_kbytes = 90,
-       .writeback_line_buffer_buffer_size = 0,
-       .max_line_buffer_lines = 12,
-       .writeback_luma_buffer_size_kbytes = 12,  // writeback_line_buffer_buffer_size = 656640
-       .writeback_chroma_buffer_size_kbytes = 8,
-       .writeback_chroma_line_buffer_width_pixels = 4,
-       .writeback_max_hscl_ratio = 1,
-       .writeback_max_vscl_ratio = 1,
-       .writeback_min_hscl_ratio = 1,
-       .writeback_min_vscl_ratio = 1,
-       .writeback_max_hscl_taps = 1,
-       .writeback_max_vscl_taps = 1,
-       .writeback_line_buffer_luma_buffer_size = 0,
-       .writeback_line_buffer_chroma_buffer_size = 14643,
-       .cursor_buffer_size = 8,
-       .cursor_chunk_size = 2,
-       .max_num_otg = 6,
-       .max_num_dpp = 6,
-       .max_num_wb = 1,
-       .max_dchub_pscl_bw_pix_per_clk = 4,
-       .max_pscl_lb_bw_pix_per_clk = 2,
-       .max_lb_vscl_bw_pix_per_clk = 4,
-       .max_vscl_hscl_bw_pix_per_clk = 4,
-       .max_hscl_ratio = 6,
-       .max_vscl_ratio = 6,
-       .hscl_mults = 4,
-       .vscl_mults = 4,
-       .max_hscl_taps = 8,
-       .max_vscl_taps = 8,
-       .dispclk_ramp_margin_percent = 1,
-       .underscan_factor = 1.11,
-       .min_vblank_lines = 32,
-       .dppclk_delay_subtotal = 46,
-       .dynamic_metadata_vm_enabled = true,
-       .dppclk_delay_scl_lb_only = 16,
-       .dppclk_delay_scl = 50,
-       .dppclk_delay_cnvc_formatter = 27,
-       .dppclk_delay_cnvc_cursor = 6,
-       .dispclk_delay_subtotal = 119,
-       .dcfclk_cstate_latency = 5.2, // SRExitTime
-       .max_inter_dcn_tile_repeaters = 8,
-       .odm_combine_4to1_supported = true,
-
-       .xfc_supported = false,
-       .xfc_fill_bw_overhead_percent = 10.0,
-       .xfc_fill_constant_bytes = 0,
-       .gfx7_compat_tiling_supported = 0,
-       .number_of_cursors = 1,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
-       .clock_limits = {
-                       {
-                               .state = 0,
-                               .dispclk_mhz = 562.0,
-                               .dppclk_mhz = 300.0,
-                               .phyclk_mhz = 300.0,
-                               .phyclk_d18_mhz = 667.0,
-                               .dscclk_mhz = 405.6,
-                       },
-               },
-       .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
-       .num_states = 1,
-       .sr_exit_time_us = 15.5,
-       .sr_enter_plus_exit_time_us = 20,
-       .urgent_latency_us = 4.0,
-       .urgent_latency_pixel_data_only_us = 4.0,
-       .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
-       .urgent_latency_vm_data_only_us = 4.0,
-       .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
-       .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
-       .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
-       .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
-       .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
-       .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
-       .max_avg_sdp_bw_use_normal_percent = 60.0,
-       .max_avg_dram_bw_use_normal_percent = 40.0,
-       .writeback_latency_us = 12.0,
-       .max_request_size_bytes = 256,
-       .fabric_datapath_to_dcn_data_return_bytes = 64,
-       .dcn_downspread_percent = 0.5,
-       .downspread_percent = 0.38,
-       .dram_page_open_time_ns = 50.0,
-       .dram_rw_turnaround_time_ns = 17.5,
-       .dram_return_buffer_per_channel_bytes = 8192,
-       .round_trip_ping_latency_dcfclk_cycles = 191,
-       .urgent_out_of_order_return_per_channel_bytes = 4096,
-       .channel_interleave_bytes = 256,
-       .num_banks = 8,
-       .gpuvm_min_page_size_bytes = 4096,
-       .hostvm_min_page_size_bytes = 4096,
-       .dram_clock_change_latency_us = 404,
-       .dummy_pstate_latency_us = 5,
-       .writeback_dram_clock_change_latency_us = 23.0,
-       .return_bus_width_bytes = 64,
-       .dispclk_dppclk_vco_speed_mhz = 3650,
-       .xfc_bus_transport_time_us = 20,      // ?
-       .xfc_xbuf_latency_tolerance_us = 4,  // ?
-       .use_urgent_burst_bw = 1,            // ?
-       .do_urgent_latency_adjustment = true,
-       .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
-       .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
-};
-
 enum dcn30_clk_src_array_id {
        DCN30_CLK_SRC_PLL0,
        DCN30_CLK_SRC_PLL1,
@@ -1480,90 +1350,9 @@ int dcn30_populate_dml_pipes_from_context(
 void dcn30_populate_dml_writeback_from_context(
        struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
 {
-       int pipe_cnt, i, j;
-       double max_calc_writeback_dispclk;
-       double writeback_dispclk;
-       struct writeback_st dout_wb;
-
-       for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
-               struct dc_stream_state *stream = res_ctx->pipe_ctx[i].stream;
-
-               if (!stream)
-                       continue;
-               max_calc_writeback_dispclk = 0;
-
-               /* Set writeback information */
-               pipes[pipe_cnt].dout.wb_enable = 0;
-               pipes[pipe_cnt].dout.num_active_wb = 0;
-               for (j = 0; j < stream->num_wb_info; j++) {
-                       struct dc_writeback_info *wb_info = &stream->writeback_info[j];
-
-                       if (wb_info->wb_enabled && wb_info->writeback_source_plane &&
-                                       (wb_info->writeback_source_plane == res_ctx->pipe_ctx[i].plane_state)) {
-                               pipes[pipe_cnt].dout.wb_enable = 1;
-                               pipes[pipe_cnt].dout.num_active_wb++;
-                               dout_wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_en ?
-                                       wb_info->dwb_params.cnv_params.crop_height :
-                                       wb_info->dwb_params.cnv_params.src_height;
-                               dout_wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_en ?
-                                       wb_info->dwb_params.cnv_params.crop_width :
-                                       wb_info->dwb_params.cnv_params.src_width;
-                               dout_wb.wb_dst_width = wb_info->dwb_params.dest_width;
-                               dout_wb.wb_dst_height = wb_info->dwb_params.dest_height;
-
-                               /* For IP that doesn't support WB scaling, set h/v taps to 1 to avoid DML validation failure */
-                               if (dc->dml.ip.writeback_max_hscl_taps > 1) {
-                                       dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps;
-                                       dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;
-                               } else {
-                                       dout_wb.wb_htaps_luma = 1;
-                                       dout_wb.wb_vtaps_luma = 1;
-                               }
-                               dout_wb.wb_htaps_chroma = 0;
-                               dout_wb.wb_vtaps_chroma = 0;
-                               dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ?
-                                       (double)wb_info->dwb_params.cnv_params.crop_width /
-                                               (double)wb_info->dwb_params.dest_width :
-                                       (double)wb_info->dwb_params.cnv_params.src_width /
-                                               (double)wb_info->dwb_params.dest_width;
-                               dout_wb.wb_vratio = wb_info->dwb_params.cnv_params.crop_en ?
-                                       (double)wb_info->dwb_params.cnv_params.crop_height /
-                                               (double)wb_info->dwb_params.dest_height :
-                                       (double)wb_info->dwb_params.cnv_params.src_height /
-                                               (double)wb_info->dwb_params.dest_height;
-                               if (wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
-                                       wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
-                                       dout_wb.wb_pixel_format = dm_444_64;
-                               else
-                                       dout_wb.wb_pixel_format = dm_444_32;
-
-                               /* Workaround for cases where multiple writebacks are connected to same plane
-                                * In which case, need to compute worst case and set the associated writeback parameters
-                                * This workaround is necessary due to DML computation assuming only 1 set of writeback
-                                * parameters per pipe
-                                */
-                               writeback_dispclk = dml30_CalculateWriteBackDISPCLK(
-                                               dout_wb.wb_pixel_format,
-                                               pipes[pipe_cnt].pipe.dest.pixel_rate_mhz,
-                                               dout_wb.wb_hratio,
-                                               dout_wb.wb_vratio,
-                                               dout_wb.wb_htaps_luma,
-                                               dout_wb.wb_vtaps_luma,
-                                               dout_wb.wb_src_width,
-                                               dout_wb.wb_dst_width,
-                                               pipes[pipe_cnt].pipe.dest.htotal,
-                                               dc->current_state->bw_ctx.dml.ip.writeback_line_buffer_buffer_size);
-
-                               if (writeback_dispclk > max_calc_writeback_dispclk) {
-                                       max_calc_writeback_dispclk = writeback_dispclk;
-                                       pipes[pipe_cnt].dout.wb = dout_wb;
-                               }
-                       }
-               }
-
-               pipe_cnt++;
-       }
-
+       DC_FP_START();
+       dcn30_fpu_populate_dml_writeback_from_context(dc, res_ctx, pipes);
+       DC_FP_END();
 }
 
 unsigned int dcn30_calc_max_scaled_time(
@@ -1598,7 +1387,7 @@ void dcn30_set_mcif_arb_params(
        enum mmhubbub_wbif_mode wbif_mode;
        struct display_mode_lib *dml = &context->bw_ctx.dml;
        struct mcif_arb_params *wb_arb_params;
-       int i, j, k, dwb_pipe;
+       int i, j, dwb_pipe;
 
        /* Writeback MCIF_WB arbitration parameters */
        dwb_pipe = 0;
@@ -1622,17 +1411,15 @@ void dcn30_set_mcif_arb_params(
                        else
                                wbif_mode = PACKED_444;
 
-                       for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
-                               wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(dml, pipes, pipe_cnt) * 1000;
-                               wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
-                       }
+                       DC_FP_START();
+                       dcn30_fpu_set_mcif_arb_params(wb_arb_params, dml, pipes, pipe_cnt, j);
+                       DC_FP_END();
                        wb_arb_params->time_per_pixel = (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* time_per_pixel should be in u6.6 format */
                        wb_arb_params->slice_lines = 32;
                        wb_arb_params->arbitration_slice = 2; /* irrelevant since there is no YUV output */
                        wb_arb_params->max_scaled_time = dcn30_calc_max_scaled_time(wb_arb_params->time_per_pixel,
                                        wbif_mode,
                                        wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */
-                       wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[j] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */
 
                        dwb_pipe++;
 
@@ -2111,178 +1898,11 @@ validate_out:
        return out;
 }
 
-/*
- * This must be noinline to ensure anything that deals with FP registers
- * is contained within this call; previously our compiling with hard-float
- * would result in fp instructions being emitted outside of the boundaries
- * of the DC_FP_START/END macros, which makes sense as the compiler has no
- * idea about what is wrapped and what is not
- *
- * This is largely just a workaround to avoid breakage introduced with 5.6,
- * ideally all fp-using code should be moved into its own file, only that
- * should be compiled with hard-float, and all code exported from there
- * should be strictly wrapped with DC_FP_START/END
- */
-static noinline void dcn30_calculate_wm_and_dlg_fp(
-               struct dc *dc, struct dc_state *context,
-               display_e2e_pipe_params_st *pipes,
-               int pipe_cnt,
-               int vlevel)
+void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
 {
-       int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
-       int i, pipe_idx;
-       double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
-       bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
-
-       if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
-               dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
-
-       pipes[0].clks_cfg.voltage = vlevel;
-       pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-       pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-
-       /* Set B:
-        * DCFCLK: 1GHz or min required above 1GHz
-        * FCLK/UCLK: Max
-        */
-       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
-               if (vlevel == 0) {
-                       pipes[0].clks_cfg.voltage = 1;
-                       pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
-               }
-               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
-               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
-               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
-       }
-       context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
-       pipes[0].clks_cfg.voltage = vlevel;
-       pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-
-       /* Set D:
-        * DCFCLK: Min Required
-        * FCLK(proportional to UCLK): 1GHz or Max
-        * MALL stutter, sr_enter_exit = 4, sr_exit = 2us
-        */
-       /*
-       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
-               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
-               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
-               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
-       }
-       context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       */
-
-       /* Set C:
-        * DCFCLK: Min Required
-        * FCLK(proportional to UCLK): 1GHz or Max
-        * pstate latency overridden to 5us
-        */
-       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
-               unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
-               unsigned int min_dram_speed_mts_margin = 160;
-
-               if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
-                       min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
-
-               /* find largest table entry that is lower than dram speed, but lower than DPM0 still uses DPM0 */
-               for (i = 3; i > 0; i--)
-                       if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
-                               break;
-
-               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
-               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
-               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
-       }
-
-       context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
-       if (!pstate_en) {
-               /* The only difference between A and C is p-state latency, if p-state is not supported we want to
-                * calculate DLG based on dummy p-state latency, and max out the set A p-state watermark
-                */
-               context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
-               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
-       } else {
-               /* Set A:
-                * DCFCLK: Min Required
-                * FCLK(proportional to UCLK): 1GHz or Max
-                *
-                * Set A calculated last so that following calculations are based on Set A
-                */
-               dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
-               context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-               context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-               context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-               context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-               context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       }
-
-       context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
-
-       /* Make set D = set A until set D is enabled */
-       context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
-
-       for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
-               if (!context->res_ctx.pipe_ctx[i].stream)
-                       continue;
-
-               pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
-               pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
-               if (dc->config.forced_clocks) {
-                       pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
-                       pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
-               }
-               if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
-                       pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
-               if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
-                       pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
-               pipe_idx++;
-       }
-
        DC_FP_START();
-       dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+       dcn30_fpu_update_soc_for_wm_a(dc, context);
        DC_FP_END();
-
-       if (!pstate_en)
-               /* Restore full p-state latency */
-               context->bw_ctx.dml.soc.dram_clock_change_latency_us =
-                               dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
-}
-
-void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
-{
-       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
-               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
-               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
-               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
-       }
 }
 
 void dcn30_calculate_wm_and_dlg(
@@ -2292,7 +1912,7 @@ void dcn30_calculate_wm_and_dlg(
                int vlevel)
 {
        DC_FP_START();
-       dcn30_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
+       dcn30_fpu_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
        DC_FP_END();
 }
 
@@ -2351,40 +1971,6 @@ validate_out:
        return out;
 }
 
-/*
- * This must be noinline to ensure anything that deals with FP registers
- * is contained within this call; previously our compiling with hard-float
- * would result in fp instructions being emitted outside of the boundaries
- * of the DC_FP_START/END macros, which makes sense as the compiler has no
- * idea about what is wrapped and what is not
- *
- * This is largely just a workaround to avoid breakage introduced with 5.6,
- * ideally all fp-using code should be moved into its own file, only that
- * should be compiled with hard-float, and all code exported from there
- * should be strictly wrapped with DC_FP_START/END
- */
-static noinline void dcn30_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
-               unsigned int *optimal_dcfclk,
-               unsigned int *optimal_fclk)
-{
-       double bw_from_dram, bw_from_dram1, bw_from_dram2;
-
-       bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
-               dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
-       bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
-               dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
-
-       bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
-
-       if (optimal_fclk)
-               *optimal_fclk = bw_from_dram /
-               (dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
-
-       if (optimal_dcfclk)
-               *optimal_dcfclk =  bw_from_dram /
-               (dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
-}
-
 void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
 {
        unsigned int i, j;
@@ -2399,47 +1985,43 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
        unsigned int num_dcfclk_sta_targets = 4;
        unsigned int num_uclk_states;
 
+       struct dc_bounding_box_max_clk dcn30_bb_max_clk;
+
+       memset(&dcn30_bb_max_clk, 0, sizeof(dcn30_bb_max_clk));
+
        if (dc->ctx->dc_bios->vram_info.num_chans)
                dcn3_0_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
 
-       if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
-               dcn3_0_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
-
-       dcn3_0_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
-       dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+       DC_FP_START();
+       dcn30_fpu_update_dram_channel_width_bytes(dc);
+       DC_FP_END();
 
        if (bw_params->clk_table.entries[0].memclk_mhz) {
-               int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
 
                for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
-                       if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
-                               max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
-                       if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
-                               max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
-                       if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
-                               max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
-                       if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
-                               max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+                       if (bw_params->clk_table.entries[i].dcfclk_mhz > dcn30_bb_max_clk.max_dcfclk_mhz)
+                               dcn30_bb_max_clk.max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+                       if (bw_params->clk_table.entries[i].dispclk_mhz > dcn30_bb_max_clk.max_dispclk_mhz)
+                               dcn30_bb_max_clk.max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+                       if (bw_params->clk_table.entries[i].dppclk_mhz > dcn30_bb_max_clk.max_dppclk_mhz)
+                               dcn30_bb_max_clk.max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+                       if (bw_params->clk_table.entries[i].phyclk_mhz > dcn30_bb_max_clk.max_phyclk_mhz)
+                               dcn30_bb_max_clk.max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
                }
 
-               if (!max_dcfclk_mhz)
-                       max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
-               if (!max_dispclk_mhz)
-                       max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
-               if (!max_dppclk_mhz)
-                       max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
-               if (!max_phyclk_mhz)
-                       max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;
+               DC_FP_START();
+               dcn30_fpu_update_max_clk(&dcn30_bb_max_clk);
+               DC_FP_END();
 
-               if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+               if (dcn30_bb_max_clk.max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
                        // If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
-                       dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
+                       dcfclk_sta_targets[num_dcfclk_sta_targets] = dcn30_bb_max_clk.max_dcfclk_mhz;
                        num_dcfclk_sta_targets++;
-               } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+               } else if (dcn30_bb_max_clk.max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
                        // If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
                        for (i = 0; i < num_dcfclk_sta_targets; i++) {
-                               if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
-                                       dcfclk_sta_targets[i] = max_dcfclk_mhz;
+                               if (dcfclk_sta_targets[i] > dcn30_bb_max_clk.max_dcfclk_mhz) {
+                                       dcfclk_sta_targets[i] = dcn30_bb_max_clk.max_dcfclk_mhz;
                                        break;
                                }
                        }
@@ -2452,7 +2034,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
                // Calculate optimal dcfclk for each uclk
                for (i = 0; i < num_uclk_states; i++) {
                        DC_FP_START();
-                       dcn30_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
+                       dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
                                        &optimal_dcfclk_for_uclk[i], NULL);
                        DC_FP_END();
                        if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
@@ -2479,7 +2061,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
                                dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
                                dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
                        } else {
-                               if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+                               if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) {
                                        dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
                                        dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
                                } else {
@@ -2494,33 +2076,15 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
                }
 
                while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
-                               optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+                               optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) {
                        dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
                        dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
                }
 
                dcn3_0_soc.num_states = num_states;
-               for (i = 0; i < dcn3_0_soc.num_states; i++) {
-                       dcn3_0_soc.clock_limits[i].state = i;
-                       dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
-                       dcn3_0_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
-                       dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
-
-                       /* Fill all states with max values of all other clocks */
-                       dcn3_0_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
-                       dcn3_0_soc.clock_limits[i].dppclk_mhz  = max_dppclk_mhz;
-                       dcn3_0_soc.clock_limits[i].phyclk_mhz  = max_phyclk_mhz;
-                       dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
-                       /* These clocks cannot come from bw_params, always fill from dcn3_0_soc[1] */
-                       /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
-                       dcn3_0_soc.clock_limits[i].phyclk_d18_mhz = dcn3_0_soc.clock_limits[0].phyclk_d18_mhz;
-                       dcn3_0_soc.clock_limits[i].socclk_mhz = dcn3_0_soc.clock_limits[0].socclk_mhz;
-                       dcn3_0_soc.clock_limits[i].dscclk_mhz = dcn3_0_soc.clock_limits[0].dscclk_mhz;
-               }
-               /* re-init DML with updated bb */
-               dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
-               if (dc->current_state)
-                       dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+               DC_FP_START();
+               dcn30_fpu_update_bw_bounding_box(dc, bw_params, &dcn30_bb_max_clk, dcfclk_mhz, dram_speed_mts);
+               DC_FP_END();
        }
 }
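dcn30_update_bw_bounding_box() now gathers the DPM-table maxima into a plain integer struct (dc_bounding_box_max_clk) and defers every read or write of the FP bounding-box tables to dcn30_fpu helpers under DC_FP_START/END. The integer scan itself is straightforward; a standalone sketch with a reduced entry/struct shape assumed for illustration:

	struct clk_entry { unsigned int dcfclk_mhz, dispclk_mhz, dppclk_mhz, phyclk_mhz; };
	struct max_clks  { unsigned int max_dcfclk_mhz, max_dispclk_mhz, max_dppclk_mhz, max_phyclk_mhz; };

	/* Collect per-clock maxima over the DPM table, leaving zeroes where the
	 * table reports nothing; the FP-side helper can then substitute the
	 * bounding-box defaults for any zero entry.
	 */
	void collect_max_clks(const struct clk_entry *tbl, int n, struct max_clks *m)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (tbl[i].dcfclk_mhz > m->max_dcfclk_mhz)
				m->max_dcfclk_mhz = tbl[i].dcfclk_mhz;
			if (tbl[i].dispclk_mhz > m->max_dispclk_mhz)
				m->max_dispclk_mhz = tbl[i].dispclk_mhz;
			if (tbl[i].dppclk_mhz > m->max_dppclk_mhz)
				m->max_dppclk_mhz = tbl[i].dppclk_mhz;
			if (tbl[i].phyclk_mhz > m->max_phyclk_mhz)
				m->max_phyclk_mhz = tbl[i].phyclk_mhz;
		}
	}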
 
index b92e4cc..3330a10 100644 (file)
@@ -35,6 +35,9 @@ struct dc;
 struct resource_pool;
 struct _vcs_dpi_display_pipe_params_st;
 
+extern struct _vcs_dpi_ip_params_st dcn3_0_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc;
+
 struct dcn30_resource_pool {
        struct resource_pool base;
 };
@@ -96,4 +99,6 @@ enum dc_status dcn30_add_stream_to_ctx(
 
 void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
 
+void dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context);
+
 #endif /* _DCN30_RESOURCE_H_ */
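The header now exports dcn3_0_ip and dcn3_0_soc as extern so the tables can be defined in the hard-float translation unit (dcn30_fpu.c, added below) while dcn30_resource.c keeps referring to them. A minimal sketch of the one-definition/extern-declaration split, with hypothetical names:

	/* params.h -- declaration shared by all users */
	struct ip_params { int num_dsc; /* ... */ };
	extern struct ip_params ip_defaults;

	/* params_fpu.c -- the single definition, in the unit built with FP flags */
	struct ip_params ip_defaults = {
		.num_dsc = 6,
	};

	/* user.c -- other units reference it through the extern declaration */
	int num_dsc_engines(void) { return ip_defaults.num_dsc; }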
index 4daf893..a5df741 100644 (file)
@@ -81,6 +81,8 @@
 #include "dce/dce_aux.h"
 #include "dce/dce_i2c.h"
 
+#include "dml/dcn30/dcn30_fpu.h"
+
 #include "dml/dcn30/display_mode_vba_30.h"
 #include "dml/dcn301/dcn301_fpu.h"
 #include "vm_helper.h"
index f093865..f537888 100644 (file)
@@ -43,6 +43,8 @@
 #include "dcn20/dcn20_dsc.h"
 #include "dcn20/dcn20_resource.h"
 
+#include "dml/dcn30/dcn30_fpu.h"
+
 #include "dcn10/dcn10_resource.h"
 
 #include "dce/dce_abm.h"
index 4fcbc05..76f863e 100644 (file)
@@ -25,6 +25,8 @@
 #include "dcn20/dcn20_dsc.h"
 #include "dcn20/dcn20_resource.h"
 
+#include "dml/dcn30/dcn30_fpu.h"
+
 #include "dcn10/dcn10_resource.h"
 
 #include "dc_link_ddc.h"
index ccf1b71..3d9f07d 100644 (file)
@@ -36,6 +36,8 @@
 #include "dcn20/dcn20_resource.h"
 #include "dcn30/dcn30_resource.h"
 
+#include "dml/dcn30/dcn30_fpu.h"
+
 #include "dcn10/dcn10_ipp.h"
 #include "dcn30/dcn30_hubbub.h"
 #include "dcn31/dcn31_hubbub.h"
index ee91145..a64b88c 100644 (file)
@@ -71,6 +71,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
@@ -113,7 +114,7 @@ DML += dcn20/dcn20_fpu.o
 DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
 DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
 DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
-DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
+DML += dcn30/dcn30_fpu.o dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
 DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
 DML += dcn31/dcn31_fpu.o
 DML += dcn301/dcn301_fpu.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
new file mode 100644 (file)
index 0000000..574676a
--- /dev/null
@@ -0,0 +1,617 @@
+/*
+ * Copyright 2020-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "resource.h"
+#include "clk_mgr.h"
+#include "reg_helper.h"
+#include "dcn_calc_math.h"
+#include "dcn20/dcn20_resource.h"
+#include "dcn30/dcn30_resource.h"
+
+
+#include "display_mode_vba_30.h"
+#include "dcn30_fpu.h"
+
+#define REG(reg)\
+       optc1->tg_regs->reg
+
+#define CTX \
+       optc1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+       optc1->tg_shift->field_name, optc1->tg_mask->field_name
+
+
+struct _vcs_dpi_ip_params_st dcn3_0_ip = {
+       .use_min_dcfclk = 0,
+       .clamp_min_dcfclk = 0,
+       .odm_capable = 1,
+       .gpuvm_enable = 0,
+       .hostvm_enable = 0,
+       .gpuvm_max_page_table_levels = 4,
+       .hostvm_max_page_table_levels = 4,
+       .hostvm_cached_page_table_levels = 0,
+       .pte_group_size_bytes = 2048,
+       .num_dsc = 6,
+       .rob_buffer_size_kbytes = 184,
+       .det_buffer_size_kbytes = 184,
+       .dpte_buffer_size_in_pte_reqs_luma = 84,
+       .pde_proc_buffer_size_64k_reqs = 48,
+       .dpp_output_buffer_pixels = 2560,
+       .opp_output_buffer_lines = 1,
+       .pixel_chunk_size_kbytes = 8,
+       .pte_enable = 1,
+       .max_page_table_levels = 2,
+       .pte_chunk_size_kbytes = 2,  // ?
+       .meta_chunk_size_kbytes = 2,
+       .writeback_chunk_size_kbytes = 8,
+       .line_buffer_size_bits = 789504,
+       .is_line_buffer_bpp_fixed = 0,  // ?
+       .line_buffer_fixed_bpp = 0,     // ?
+       .dcc_supported = true,
+       .writeback_interface_buffer_size_kbytes = 90,
+       .writeback_line_buffer_buffer_size = 0,
+       .max_line_buffer_lines = 12,
+       .writeback_luma_buffer_size_kbytes = 12,  // writeback_line_buffer_buffer_size = 656640
+       .writeback_chroma_buffer_size_kbytes = 8,
+       .writeback_chroma_line_buffer_width_pixels = 4,
+       .writeback_max_hscl_ratio = 1,
+       .writeback_max_vscl_ratio = 1,
+       .writeback_min_hscl_ratio = 1,
+       .writeback_min_vscl_ratio = 1,
+       .writeback_max_hscl_taps = 1,
+       .writeback_max_vscl_taps = 1,
+       .writeback_line_buffer_luma_buffer_size = 0,
+       .writeback_line_buffer_chroma_buffer_size = 14643,
+       .cursor_buffer_size = 8,
+       .cursor_chunk_size = 2,
+       .max_num_otg = 6,
+       .max_num_dpp = 6,
+       .max_num_wb = 1,
+       .max_dchub_pscl_bw_pix_per_clk = 4,
+       .max_pscl_lb_bw_pix_per_clk = 2,
+       .max_lb_vscl_bw_pix_per_clk = 4,
+       .max_vscl_hscl_bw_pix_per_clk = 4,
+       .max_hscl_ratio = 6,
+       .max_vscl_ratio = 6,
+       .hscl_mults = 4,
+       .vscl_mults = 4,
+       .max_hscl_taps = 8,
+       .max_vscl_taps = 8,
+       .dispclk_ramp_margin_percent = 1,
+       .underscan_factor = 1.11,
+       .min_vblank_lines = 32,
+       .dppclk_delay_subtotal = 46,
+       .dynamic_metadata_vm_enabled = true,
+       .dppclk_delay_scl_lb_only = 16,
+       .dppclk_delay_scl = 50,
+       .dppclk_delay_cnvc_formatter = 27,
+       .dppclk_delay_cnvc_cursor = 6,
+       .dispclk_delay_subtotal = 119,
+       .dcfclk_cstate_latency = 5.2, // SRExitTime
+       .max_inter_dcn_tile_repeaters = 8,
+       .max_num_hdmi_frl_outputs = 1,
+       .odm_combine_4to1_supported = true,
+
+       .xfc_supported = false,
+       .xfc_fill_bw_overhead_percent = 10.0,
+       .xfc_fill_constant_bytes = 0,
+       .gfx7_compat_tiling_supported = 0,
+       .number_of_cursors = 1,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
+       .clock_limits = {
+                       {
+                               .state = 0,
+                               .dispclk_mhz = 562.0,
+                               .dppclk_mhz = 300.0,
+                               .phyclk_mhz = 300.0,
+                               .phyclk_d18_mhz = 667.0,
+                               .dscclk_mhz = 405.6,
+                       },
+               },
+
+       .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
+       .num_states = 1,
+       .sr_exit_time_us = 15.5,
+       .sr_enter_plus_exit_time_us = 20,
+       .urgent_latency_us = 4.0,
+       .urgent_latency_pixel_data_only_us = 4.0,
+       .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+       .urgent_latency_vm_data_only_us = 4.0,
+       .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+       .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+       .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+       .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
+       .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+       .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+       .max_avg_sdp_bw_use_normal_percent = 60.0,
+       .max_avg_dram_bw_use_normal_percent = 40.0,
+       .writeback_latency_us = 12.0,
+       .max_request_size_bytes = 256,
+       .fabric_datapath_to_dcn_data_return_bytes = 64,
+       .dcn_downspread_percent = 0.5,
+       .downspread_percent = 0.38,
+       .dram_page_open_time_ns = 50.0,
+       .dram_rw_turnaround_time_ns = 17.5,
+       .dram_return_buffer_per_channel_bytes = 8192,
+       .round_trip_ping_latency_dcfclk_cycles = 191,
+       .urgent_out_of_order_return_per_channel_bytes = 4096,
+       .channel_interleave_bytes = 256,
+       .num_banks = 8,
+       .gpuvm_min_page_size_bytes = 4096,
+       .hostvm_min_page_size_bytes = 4096,
+       .dram_clock_change_latency_us = 404,
+       .dummy_pstate_latency_us = 5,
+       .writeback_dram_clock_change_latency_us = 23.0,
+       .return_bus_width_bytes = 64,
+       .dispclk_dppclk_vco_speed_mhz = 3650,
+       .xfc_bus_transport_time_us = 20,      // ?
+       .xfc_xbuf_latency_tolerance_us = 4,  // ?
+       .use_urgent_burst_bw = 1,            // ?
+       .do_urgent_latency_adjustment = true,
+       .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
+       .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
+};
+
+
+void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
+               double vtotal_avg)
+{
+       struct optc *optc1 = DCN10TG_FROM_TG(optc);
+       double vtotal_min, vtotal_max;
+       double ratio, modulo, phase;
+       uint32_t vblank_start;
+       uint32_t v_total_mask_value = 0;
+
+       dc_assert_fp_enabled();
+
+       /* Compute VTOTAL_MIN and VTOTAL_MAX, so that
+        * VTOTAL_MAX - VTOTAL_MIN = 1
+        */
+       v_total_mask_value = 16;
+       vtotal_min = dcn_bw_floor(vtotal_avg);
+       vtotal_max = dcn_bw_ceil(vtotal_avg);
+
+       /* Check that bottom VBLANK is at least 2 lines tall when running with
+        * VTOTAL_MIN. Note that VTOTAL registers are defined as 'total number
+        * of lines in a frame - 1'.
+        */
+       REG_GET(OTG_V_BLANK_START_END, OTG_V_BLANK_START,
+               &vblank_start);
+       ASSERT(vtotal_min >= vblank_start + 1);
+
+       /* Special case where the average frame rate can be achieved
+        * without using the DTO
+        */
+       if (vtotal_min == vtotal_max) {
+               REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
+
+               optc->funcs->set_vtotal_min_max(optc, 0, 0);
+               REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
+               REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
+               REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
+                       OTG_V_TOTAL_MIN_SEL, 0,
+                       OTG_V_TOTAL_MAX_SEL, 0,
+                       OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
+               return;
+       }
+
+       ratio = vtotal_max - vtotal_avg;
+       modulo = 65536.0 * 65536.0 - 1.0; /* 2^32 - 1 */
+       phase = ratio * modulo;
+
+       /* Special cases where the DTO phase gets rounded to 0 or
+        * to DTO modulo
+        */
+       if (phase <= 0 || phase >= modulo) {
+               REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL,
+                       phase <= 0 ?
+                               (uint32_t)vtotal_max : (uint32_t)vtotal_min);
+               REG_SET(OTG_V_TOTAL_MIN, 0, OTG_V_TOTAL_MIN, 0);
+               REG_SET(OTG_V_TOTAL_MAX, 0, OTG_V_TOTAL_MAX, 0);
+               REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
+               REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
+               REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
+                       OTG_V_TOTAL_MIN_SEL, 0,
+                       OTG_V_TOTAL_MAX_SEL, 0,
+                       OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
+               return;
+       }
+       REG_UPDATE_6(OTG_V_TOTAL_CONTROL,
+               OTG_V_TOTAL_MIN_SEL, 1,
+               OTG_V_TOTAL_MAX_SEL, 1,
+               OTG_SET_V_TOTAL_MIN_MASK_EN, 1,
+               OTG_SET_V_TOTAL_MIN_MASK, v_total_mask_value,
+               OTG_VTOTAL_MID_REPLACING_MIN_EN, 0,
+               OTG_VTOTAL_MID_REPLACING_MAX_EN, 0);
+       REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
+       optc->funcs->set_vtotal_min_max(optc, vtotal_min, vtotal_max);
+       REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, (uint32_t)phase);
+       REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, (uint32_t)modulo);
+}
+
+void dcn30_fpu_populate_dml_writeback_from_context(
+               struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
+{
+       int pipe_cnt, i, j;
+       double max_calc_writeback_dispclk;
+       double writeback_dispclk;
+       struct writeback_st dout_wb;
+
+       dc_assert_fp_enabled();
+
+       for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+               struct dc_stream_state *stream = res_ctx->pipe_ctx[i].stream;
+
+               if (!stream)
+                       continue;
+               max_calc_writeback_dispclk = 0;
+
+               /* Set writeback information */
+               pipes[pipe_cnt].dout.wb_enable = 0;
+               pipes[pipe_cnt].dout.num_active_wb = 0;
+               for (j = 0; j < stream->num_wb_info; j++) {
+                       struct dc_writeback_info *wb_info = &stream->writeback_info[j];
+
+                       if (wb_info->wb_enabled && wb_info->writeback_source_plane &&
+                                       (wb_info->writeback_source_plane == res_ctx->pipe_ctx[i].plane_state)) {
+                               pipes[pipe_cnt].dout.wb_enable = 1;
+                               pipes[pipe_cnt].dout.num_active_wb++;
+                               dout_wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_en ?
+                                       wb_info->dwb_params.cnv_params.crop_height :
+                                       wb_info->dwb_params.cnv_params.src_height;
+                               dout_wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_en ?
+                                       wb_info->dwb_params.cnv_params.crop_width :
+                                       wb_info->dwb_params.cnv_params.src_width;
+                               dout_wb.wb_dst_width = wb_info->dwb_params.dest_width;
+                               dout_wb.wb_dst_height = wb_info->dwb_params.dest_height;
+
+                               /* For IP that doesn't support WB scaling, set h/v taps to 1 to avoid DML validation failure */
+                               if (dc->dml.ip.writeback_max_hscl_taps > 1) {
+                                       dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps;
+                                       dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;
+                               } else {
+                                       dout_wb.wb_htaps_luma = 1;
+                                       dout_wb.wb_vtaps_luma = 1;
+                               }
+                               dout_wb.wb_htaps_chroma = 0;
+                               dout_wb.wb_vtaps_chroma = 0;
+                               dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ?
+                                       (double)wb_info->dwb_params.cnv_params.crop_width /
+                                               (double)wb_info->dwb_params.dest_width :
+                                       (double)wb_info->dwb_params.cnv_params.src_width /
+                                               (double)wb_info->dwb_params.dest_width;
+                               dout_wb.wb_vratio = wb_info->dwb_params.cnv_params.crop_en ?
+                                       (double)wb_info->dwb_params.cnv_params.crop_height /
+                                               (double)wb_info->dwb_params.dest_height :
+                                       (double)wb_info->dwb_params.cnv_params.src_height /
+                                               (double)wb_info->dwb_params.dest_height;
+                               if (wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
+                                       wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
+                                       dout_wb.wb_pixel_format = dm_444_64;
+                               else
+                                       dout_wb.wb_pixel_format = dm_444_32;
+
+                               /* Workaround for cases where multiple writebacks are connected to the same plane.
+                                * In that case, compute the worst case and set the associated writeback parameters.
+                                * This is necessary because the DML computation assumes only one set of writeback
+                                * parameters per pipe.
+                                */
+                               writeback_dispclk = dml30_CalculateWriteBackDISPCLK(
+                                               dout_wb.wb_pixel_format,
+                                               pipes[pipe_cnt].pipe.dest.pixel_rate_mhz,
+                                               dout_wb.wb_hratio,
+                                               dout_wb.wb_vratio,
+                                               dout_wb.wb_htaps_luma,
+                                               dout_wb.wb_vtaps_luma,
+                                               dout_wb.wb_src_width,
+                                               dout_wb.wb_dst_width,
+                                               pipes[pipe_cnt].pipe.dest.htotal,
+                                               dc->current_state->bw_ctx.dml.ip.writeback_line_buffer_buffer_size);
+
+                               if (writeback_dispclk > max_calc_writeback_dispclk) {
+                                       max_calc_writeback_dispclk = writeback_dispclk;
+                                       pipes[pipe_cnt].dout.wb = dout_wb;
+                               }
+                       }
+               }
+
+               pipe_cnt++;
+       }
+}
+
+void dcn30_fpu_set_mcif_arb_params(struct mcif_arb_params *wb_arb_params,
+       struct display_mode_lib *dml,
+       display_e2e_pipe_params_st *pipes,
+       int pipe_cnt,
+       int cur_pipe)
+{
+       int i;
+
+       dc_assert_fp_enabled();
+
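+       /* Program the same writeback urgent and DRAM-clock-change watermarks for every MCIF client */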
+       for (i = 0; i < ARRAY_SIZE(wb_arb_params->cli_watermark); i++) {
+               wb_arb_params->cli_watermark[i] = get_wm_writeback_urgent(dml, pipes, pipe_cnt) * 1000;
+               wb_arb_params->pstate_watermark[i] = get_wm_writeback_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
+       }
+
+       wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[cur_pipe] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */
+}
+
+void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+{
+       dc_assert_fp_enabled();
+
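+       /* Override the SoC DRAM-clock-change and SR latencies with the WM_A table entry when it is valid */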
+       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+       }
+}
+
+void dcn30_fpu_calculate_wm_and_dlg(
+               struct dc *dc, struct dc_state *context,
+               display_e2e_pipe_params_st *pipes,
+               int pipe_cnt,
+               int vlevel)
+{
+       int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
+       int i, pipe_idx;
+       double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
+       bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
+
+       dc_assert_fp_enabled();
+
+       if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
+               dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
+
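+       /* Seed pipe 0 with the clocks for the selected voltage level */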
+       pipes[0].clks_cfg.voltage = vlevel;
+       pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+       pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+
+       /* Set B:
+        * DCFCLK: 1GHz or min required above 1GHz
+        * FCLK/UCLK: Max
+        */
+       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
+               if (vlevel == 0) {
+                       pipes[0].clks_cfg.voltage = 1;
+                       pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
+               }
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
+               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
+               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
+       }
+       context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
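+       /* Restore the minimum-required clocks before computing the remaining watermark sets */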
+       pipes[0].clks_cfg.voltage = vlevel;
+       pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+
+       /* Set D:
+        * DCFCLK: Min Required
+        * FCLK(proportional to UCLK): 1GHz or Max
+        * MALL stutter, sr_enter_exit = 4, sr_exit = 2us
+        */
+       /*
+       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
+               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
+               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
+       }
+       context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       */
+
+       /* Set C:
+        * DCFCLK: Min Required
+        * FCLK(proportional to UCLK): 1GHz or Max
+        * pstate latency overridden to 5us
+        */
+       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+               unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
+               unsigned int min_dram_speed_mts_margin = 160;
+
+               if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
+                       min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
+
+               /* find the largest table entry that is lower than the dram speed; anything lower than DPM0 still uses DPM0 */
+               for (i = 3; i > 0; i--)
+                       if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
+                               break;
+
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+
+               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
+               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
+       }
+
+       context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+       if (!pstate_en) {
+               /* The only difference between A and C is p-state latency; if p-state is not supported,
+                * calculate DLG based on the dummy p-state latency and max out the set A p-state watermark.
+                */
+               context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
+       } else {
+               /* Set A:
+                * DCFCLK: Min Required
+                * FCLK(proportional to UCLK): 1GHz or Max
+                *
+                * Set A calculated last so that following calculations are based on Set A
+                */
+               dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
+               context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       }
+
+       context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+
+       /* Make set D = set A until set D is enabled */
+       context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
+
+       for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+               if (!context->res_ctx.pipe_ctx[i].stream)
+                       continue;
+
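+               /* Per-pipe DISPCLK/DPPCLK from DML, optionally overridden by forced or minimum debug clocks */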
+               pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+               pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+               if (dc->config.forced_clocks) {
+                       pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+                       pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+               }
+               if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+                       pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+               if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+                       pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+               pipe_idx++;
+       }
+
+       DC_FP_START();
+       dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+       DC_FP_END();
+
+       if (!pstate_en)
+               /* Restore full p-state latency */
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+                               dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+
+}
+
+void dcn30_fpu_update_dram_channel_width_bytes(struct dc *dc)
+{
+       dc_assert_fp_enabled();
+
+       if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+               dcn3_0_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+}
+
+void dcn30_fpu_update_max_clk(struct dc_bounding_box_max_clk *dcn30_bb_max_clk)
+{
+       dc_assert_fp_enabled();
+
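+       /* Fall back to the state-0 SoC clock limits for any max clock that was not provided */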
+       if (!dcn30_bb_max_clk->max_dcfclk_mhz)
+               dcn30_bb_max_clk->max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
+       if (!dcn30_bb_max_clk->max_dispclk_mhz)
+               dcn30_bb_max_clk->max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
+       if (!dcn30_bb_max_clk->max_dppclk_mhz)
+               dcn30_bb_max_clk->max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
+       if (!dcn30_bb_max_clk->max_phyclk_mhz)
+               dcn30_bb_max_clk->max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;
+}
+
+void dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+               unsigned int *optimal_dcfclk,
+               unsigned int *optimal_fclk)
+{
+       double bw_from_dram, bw_from_dram1, bw_from_dram2;
+
+       dc_assert_fp_enabled();
+
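+       /* DRAM-limited bandwidth: take the lower of the DRAM and SDP average-use bandwidth limits */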
+       bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
+               dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
+       bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
+               dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
+
+       bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
+
+       if (optimal_fclk)
+               *optimal_fclk = bw_from_dram /
+               (dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
+
+       if (optimal_dcfclk)
+               *optimal_dcfclk =  bw_from_dram /
+               (dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
+}
+
+void dcn30_fpu_update_bw_bounding_box(struct dc *dc,
+       struct clk_bw_params *bw_params,
+       struct dc_bounding_box_max_clk *dcn30_bb_max_clk,
+       unsigned int *dcfclk_mhz,
+       unsigned int *dram_speed_mts)
+{
+       unsigned int i;
+
+       dc_assert_fp_enabled();
+
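+       /* Derive the DISPCLK/DPPCLK VCO speed (MHz) from the clock manager's DENTIST VCO frequency (kHz) */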
+       dcn3_0_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+       dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+       for (i = 0; i < dcn3_0_soc.num_states; i++) {
+               dcn3_0_soc.clock_limits[i].state = i;
+               dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
+               dcn3_0_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
+               dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
+
+               /* Fill all states with max values of all other clocks */
+               dcn3_0_soc.clock_limits[i].dispclk_mhz = dcn30_bb_max_clk->max_dispclk_mhz;
+               dcn3_0_soc.clock_limits[i].dppclk_mhz  = dcn30_bb_max_clk->max_dppclk_mhz;
+               dcn3_0_soc.clock_limits[i].phyclk_mhz  = dcn30_bb_max_clk->max_phyclk_mhz;
+               dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
+               /* These clocks cannot come from bw_params, always fill from dcn3_0_soc.clock_limits[0] */
+               /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+               dcn3_0_soc.clock_limits[i].phyclk_d18_mhz = dcn3_0_soc.clock_limits[0].phyclk_d18_mhz;
+               dcn3_0_soc.clock_limits[i].socclk_mhz = dcn3_0_soc.clock_limits[0].socclk_mhz;
+               dcn3_0_soc.clock_limits[i].dscclk_mhz = dcn3_0_soc.clock_limits[0].dscclk_mhz;
+       }
+       /* re-init DML with updated bb */
+       dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+       if (dc->current_state)
+               dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
new file mode 100644 (file)
index 0000000..dedfe7b
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2020-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN30_FPU_H__
+#define __DCN30_FPU_H__
+
+#include "core_types.h"
+#include "dcn20/dcn20_optc.h"
+
+void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
+               double vtotal_avg);
+
+void dcn30_fpu_populate_dml_writeback_from_context(
+               struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+
+void dcn30_fpu_set_mcif_arb_params(struct mcif_arb_params *wb_arb_params,
+       struct display_mode_lib *dml,
+       display_e2e_pipe_params_st *pipes,
+       int pipe_cnt,
+       int cur_pipe);
+
+void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
+
+void dcn30_fpu_calculate_wm_and_dlg(
+               struct dc *dc, struct dc_state *context,
+               display_e2e_pipe_params_st *pipes,
+               int pipe_cnt,
+               int vlevel);
+
+void dcn30_fpu_update_dram_channel_width_bytes(struct dc *dc);
+
+void dcn30_fpu_update_max_clk(struct dc_bounding_box_max_clk *dcn30_bb_max_clk);
+
+void dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+               unsigned int *optimal_dcfclk,
+               unsigned int *optimal_fclk);
+
+void dcn30_fpu_update_bw_bounding_box(struct dc *dc,
+       struct clk_bw_params *bw_params,
+       struct dc_bounding_box_max_clk *dcn30_bb_max_clk,
+       unsigned int *dcfclk_mhz,
+       unsigned int *dram_speed_mts);
+
+
+#endif /* __DCN30_FPU_H__*/
index 26f3a55..555d4d9 100644 (file)
@@ -486,4 +486,11 @@ struct dc_state {
        } perf_params;
 };
 
+struct dc_bounding_box_max_clk {
+       int max_dcfclk_mhz;
+       int max_dispclk_mhz;
+       int max_dppclk_mhz;
+       int max_phyclk_mhz;
+};
+
 #endif /* _CORE_TYPES_H_ */
index 851b982..a3c1e9c 100644 (file)
@@ -217,8 +217,7 @@ void disable_dp_hpo_output(struct dc_link *link,
 void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable);
 bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);
 
-void dp_retrieve_lttpr_cap(struct dc_link *link);
-bool dp_apply_lttpr_mode(struct dc_link *link);
+bool dp_retrieve_lttpr_cap(struct dc_link *link);
 void edp_panel_backlight_power_on(struct dc_link *link);
 void dp_receiver_power_ctrl(struct dc_link *link, bool on);
 void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode);
index 74dafd0..39c1d1d 100644 (file)
@@ -87,6 +87,11 @@ union dpia_set_config_data {
  */
 enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
 
+/* Query hot plug status of USB4 DP tunnel.
+ * Returns true if HPD high.
+ */
+bool dc_link_dpia_query_hpd_status(struct dc_link *link);
+
 /* Train DP tunneling link for USB4 DPIA display endpoint.
  * DPIA equivalent of dc_link_dp_perform_link_training.
  * Aborts link training upon detection of sink unplug.
index 9f465b4..447a562 100644 (file)
@@ -80,12 +80,6 @@ enum link_training_result {
        DP_128b_132b_CDS_DONE_TIMEOUT,
 };
 
-enum lttpr_support {
-       LTTPR_UNSUPPORTED,
-       LTTPR_CHECK_EXT_SUPPORT,
-       LTTPR_SUPPORTED,
-};
-
 enum lttpr_mode {
        LTTPR_MODE_NON_LTTPR,
        LTTPR_MODE_TRANSPARENT,
index c755f43..7a2c6b1 100644 (file)
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x200
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x1000
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x3f0000
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
 #define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x1
index 14a3bac..fa1f437 100644 (file)
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x200
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x1000
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x3f0000
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
 #define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x1
index 106094e..39f6fde 100644 (file)
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x200
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x1000
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x3f0000
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
 #define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x1
index bcd190a..c5f4afa 100644 (file)
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT                                                     0x5
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT                                                   0x8
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT                                                   0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT                                                    0xc
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT                                                   0x10
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK                                                     0x00000001L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK                                                       0x00000010L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK                                                       0x00000020L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK                                                     0x00000100L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK                                                     0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK                                                      0x00001000L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK                                                     0x003F0000L
 //DIG0_HDMI_INFOFRAME_CONTROL0
 #define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND__SHIFT                                               0x0
index 9b6825b..2358090 100644 (file)
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x200
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x1000
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x3f0000
 #define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
 #define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x1
index e7c0cad..a788ff3 100644 (file)
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT                                                     0x5
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT                                                   0x8
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT                                                   0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT                                                    0xc
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT                                                   0x10
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK                                                     0x00000001L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK                                                       0x00000010L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK                                                       0x00000020L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK                                                     0x00000100L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK                                                     0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK                                                      0x00001000L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK                                                     0x003F0000L
 //DIG0_HDMI_INFOFRAME_CONTROL0
 #define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT                                             0x4
index dc8ce7a..c70f7ba 100644 (file)
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT                                                     0x5
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT                                                   0x8
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT                                                   0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT                                                    0xc
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT                                                   0x10
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK                                                     0x00000001L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK                                                       0x00000010L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK                                                       0x00000020L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK                                                     0x00000100L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK                                                     0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK                                                      0x00001000L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK                                                     0x003F0000L
 //DIG0_HDMI_INFOFRAME_CONTROL0
 #define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT                                             0x4
index 9196955..ca1e1eb 100755 (executable)
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT                                                     0x5
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT                                                   0x8
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT                                                   0x9
-
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT                                                    0xc
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT                                                   0x10
 
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK                                                     0x00000001L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK                                                       0x00000020L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK                                                     0x00000100L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK                                                     0x00000200L
-
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK                                                      0x00001000L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK                                                     0x003F0000L
 
 //DIG0_HDMI_INFOFRAME_CONTROL0
index 2f780ae..6104ae3 100644 (file)
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT                                                     0x5
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT                                                   0x8
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT                                                   0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT                                                    0xc
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT                                                   0x10
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK                                                     0x00000001L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK                                                       0x00000010L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK                                                       0x00000020L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK                                                     0x00000100L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK                                                     0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK                                                      0x00001000L
 #define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK                                                     0x003F0000L
 //DIG0_HDMI_INFOFRAME_CONTROL0
 #define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT                                             0x4
index 5472f99..d1bf073 100644 (file)
@@ -770,6 +770,9 @@ enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        enum amd_dpm_forced_level level;
 
+       if (!pp_funcs)
+               return AMD_DPM_FORCED_LEVEL_AUTO;
+
        mutex_lock(&adev->pm.mutex);
        if (pp_funcs->get_performance_level)
                level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
index 6016b32..a601024 100644 (file)
@@ -1436,6 +1436,7 @@ static int smu_disable_dpms(struct smu_context *smu)
                case IP_VERSION(11, 0, 0):
                case IP_VERSION(11, 0, 5):
                case IP_VERSION(11, 0, 9):
+               case IP_VERSION(13, 0, 0):
                        return 0;
                default:
                        break;
index ecc6411..c1f7623 100644 (file)
@@ -671,8 +671,8 @@ typedef struct {
   uint16_t               reserved[2];
 
   //Frequency changes
-  uint16_t               GfxclkFmin;           // MHz
-  uint16_t               GfxclkFmax;           // MHz
+  int16_t                GfxclkFmin;           // MHz
+  int16_t                GfxclkFmax;           // MHz
   uint16_t               UclkFmin;             // MHz
   uint16_t               UclkFmax;             // MHz
 
@@ -683,15 +683,14 @@ typedef struct {
   //Fan control
   uint8_t                FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
   uint8_t                FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS];
-  uint16_t               FanMaximumRpm;
   uint16_t               FanMinimumPwm;
-  uint16_t               FanAcousticLimitRpm;
+  uint16_t               AcousticTargetRpmThreshold;
+  uint16_t               AcousticLimitRpmThreshold;
   uint16_t               FanTargetTemperature; // Degree Celsius
   uint8_t                FanZeroRpmEnable;
   uint8_t                FanZeroRpmStopTemp;
   uint8_t                FanMode;
-  uint8_t                Padding[1];
-
+  uint8_t                MaxOpTemp;
 
   uint32_t               Spare[13];
   uint32_t               MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
@@ -719,15 +718,14 @@ typedef struct {
 
   uint8_t                FanLinearPwmPoints;
   uint8_t                FanLinearTempPoints;
-  uint16_t               FanMaximumRpm;
   uint16_t               FanMinimumPwm;
-  uint16_t               FanAcousticLimitRpm;
+  uint16_t               AcousticTargetRpmThreshold;
+  uint16_t               AcousticLimitRpmThreshold;
   uint16_t               FanTargetTemperature; // Degree Celsius
   uint8_t                FanZeroRpmEnable;
   uint8_t                FanZeroRpmStopTemp;
   uint8_t                FanMode;
-  uint8_t                Padding[1];
-
+  uint8_t                MaxOpTemp;
 
   uint32_t               Spare[13];
 
@@ -997,7 +995,8 @@ typedef struct {
   uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
   uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
 
-  uint32_t       SpareVmin[12];
+  QuadraticInt_t Vmin_droop;
+  uint32_t       SpareVmin[9];
 
 
   //SECTION: DPM Configuration 1
@@ -1286,7 +1285,6 @@ typedef struct {
   uint32_t    PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
   uint32_t    BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
 
-
   // SECTION: Board Reserved
   uint32_t     BoardSpare[64];
 
index 2b44d41..afa1991 100644 (file)
@@ -30,7 +30,7 @@
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x04
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x27
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x28
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x28
 
 #define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500  //500ms
index d68be8f..78f3d9e 100644 (file)
@@ -697,12 +697,28 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
        uint32_t apu_percent = 0;
        uint32_t dgpu_percent = 0;
 
-       if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
-               (smu->smc_fw_version >= 0x3A4900))
-               use_metrics_v3 = true;
-       else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
-               (smu->smc_fw_version >= 0x3A4300))
-               use_metrics_v2 =  true;
+       switch (smu->adev->ip_versions[MP1_HWIP][0]) {
+       case IP_VERSION(11, 0, 7):
+               if (smu->smc_fw_version >= 0x3A4900)
+                       use_metrics_v3 = true;
+               else if (smu->smc_fw_version >= 0x3A4300)
+                       use_metrics_v2 = true;
+               break;
+       case IP_VERSION(11, 0, 11):
+               if (smu->smc_fw_version >= 0x412D00)
+                       use_metrics_v2 = true;
+               break;
+       case IP_VERSION(11, 0, 12):
+               if (smu->smc_fw_version >= 0x3B2300)
+                       use_metrics_v2 = true;
+               break;
+       case IP_VERSION(11, 0, 13):
+               if (smu->smc_fw_version >= 0x491100)
+                       use_metrics_v2 = true;
+               break;
+       default:
+               break;
+       }
 
        ret = smu_cmn_get_metrics_table(smu,
                                        NULL,
@@ -3833,13 +3849,28 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
        uint16_t average_gfx_activity;
        int ret = 0;
 
-       if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
-               (smu->smc_fw_version >= 0x3A4900))
-               use_metrics_v3 = true;
-       else if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
-               (smu->smc_fw_version >= 0x3A4300))
-               use_metrics_v2 = true;
-
+       switch (smu->adev->ip_versions[MP1_HWIP][0]) {
+       case IP_VERSION(11, 0, 7):
+               if (smu->smc_fw_version >= 0x3A4900)
+                       use_metrics_v3 = true;
+               else if (smu->smc_fw_version >= 0x3A4300)
+                       use_metrics_v2 = true;
+               break;
+       case IP_VERSION(11, 0, 11):
+               if (smu->smc_fw_version >= 0x412D00)
+                       use_metrics_v2 = true;
+               break;
+       case IP_VERSION(11, 0, 12):
+               if (smu->smc_fw_version >= 0x3B2300)
+                       use_metrics_v2 = true;
+               break;
+       case IP_VERSION(11, 0, 13):
+               if (smu->smc_fw_version >= 0x491100)
+                       use_metrics_v2 = true;
+               break;
+       default:
+               break;
+       }
 
        ret = smu_cmn_get_metrics_table(smu,
                                        &metrics_external,
index b87f550..5f8809f 100644 (file)
@@ -781,7 +781,7 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
                goto failed;
        }
 
-       bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+       bitmap_to_arr32(feature_mask, feature->allowed, 64);
 
        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
                                          feature_mask[1], NULL);
index 38af648..fb13040 100644 (file)
@@ -1666,6 +1666,7 @@ static const struct throttling_logging_label {
        uint32_t feature_mask;
        const char *label;
 } logging_label[] = {
+       {(1U << THROTTLER_TEMP_GPU_BIT), "GPU"},
        {(1U << THROTTLER_TEMP_MEM_BIT), "HBM"},
        {(1U << THROTTLER_TEMP_VR_GFX_BIT), "VR of GFX rail"},
        {(1U << THROTTLER_TEMP_VR_MEM_BIT), "VR of HBM rail"},
index ae6321a..ef9b56d 100644 (file)
@@ -218,13 +218,25 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
                        pptable_id == 3688)
                        pptable_id = 36881;
                /*
-                * Temporary solution for SMU V13.0.0:
-                *   - use 99991 signed pptable when SCPM enabled
-                * TODO: drop this when the pptable carried in vbios
-                * is ready.
+                * Temporary solution for SMU V13.0.0 with SCPM enabled:
+                *   - use 36831 signed pptable when pp_table_id is 3683
+                *   - use 36641 signed pptable when pp_table_id is 3664 or 0
+                * TODO: drop these when the pptable carried in vbios is ready.
                 */
-               if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0))
-                       pptable_id = 99991;
+               if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
+                       switch (pptable_id) {
+                       case 0:
+                       case 3664:
+                               pptable_id = 36641;
+                               break;
+                       case 3683:
+                               pptable_id = 36831;
+                               break;
+                       default:
+                               dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
+                               return -EINVAL;
+                       }
+               }
        }
 
        /* "pptable_id == 0" means vbios carries the pptable. */
@@ -448,13 +460,24 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
                pptable_id = smu->smu_table.boot_values.pp_table_id;
 
                /*
-                * Temporary solution for SMU V13.0.0:
-                *   - use 9999 unsigned pptable when SCPM disabled
-                * TODO: drop this when the pptable carried in vbios
-                * is ready.
+                * Temporary solution for SMU V13.0.0 with SCPM disabled:
+                *   - use 3664 or 3683 on request
+                *   - use 3664 when pptable_id is 0
+                * TODO: drop these when the pptable carried in vbios is ready.
                 */
-               if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0))
-                       pptable_id = 9999;
+               if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
+                       switch (pptable_id) {
+                       case 0:
+                               pptable_id = 3664;
+                               break;
+                       case 3664:
+                       case 3683:
+                               break;
+                       default:
+                               dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
+                               return -EINVAL;
+                       }
+               }
        }
 
        /* force using vbios pptable in sriov mode */
@@ -814,7 +837,7 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu)
            feature->feature_num < 64)
                return -EINVAL;
 
-       bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+       bitmap_to_arr32(feature_mask, feature->allowed, 64);
 
        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
                                              feature_mask[1], NULL);
index 197a0e2..7432b3e 100644 (file)
@@ -275,9 +275,7 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
                *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
        }
 
-#if 0
        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT);
-#endif
 
        if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
                *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
@@ -296,6 +294,12 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
 
        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);
 
+       *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT);
+       *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
+
+       *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
+       *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
+
        return 0;
 }
 
index 7d6ff14..5a17b51 100644 (file)
@@ -644,42 +644,40 @@ static int smu_v13_0_4_set_watermarks_table(struct smu_context *smu,
        if (!table || !clock_ranges)
                return -EINVAL;
 
-       if (clock_ranges) {
-               if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
-                       clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
-                       return -EINVAL;
-
-               for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
-                       table->WatermarkRow[WM_DCFCLK][i].MinClock =
-                               clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
-                       table->WatermarkRow[WM_DCFCLK][i].MaxClock =
-                               clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
-                       table->WatermarkRow[WM_DCFCLK][i].MinMclk =
-                               clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
-                       table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
-                               clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
-
-                       table->WatermarkRow[WM_DCFCLK][i].WmSetting =
-                               clock_ranges->reader_wm_sets[i].wm_inst;
-               }
+       if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
+               clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
+               return -EINVAL;
 
-               for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
-                       table->WatermarkRow[WM_SOCCLK][i].MinClock =
-                               clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
-                       table->WatermarkRow[WM_SOCCLK][i].MaxClock =
-                               clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
-                       table->WatermarkRow[WM_SOCCLK][i].MinMclk =
-                               clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
-                       table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
-                               clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
-
-                       table->WatermarkRow[WM_SOCCLK][i].WmSetting =
-                               clock_ranges->writer_wm_sets[i].wm_inst;
-               }
+       for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
+               table->WatermarkRow[WM_DCFCLK][i].MinClock =
+                       clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
+               table->WatermarkRow[WM_DCFCLK][i].MaxClock =
+                       clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
+               table->WatermarkRow[WM_DCFCLK][i].MinMclk =
+                       clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
+               table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
+                       clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
+
+               table->WatermarkRow[WM_DCFCLK][i].WmSetting =
+                       clock_ranges->reader_wm_sets[i].wm_inst;
+       }
 
-               smu->watermarks_bitmap |= WATERMARKS_EXIST;
+       for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
+               table->WatermarkRow[WM_SOCCLK][i].MinClock =
+                       clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
+               table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+                       clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
+               table->WatermarkRow[WM_SOCCLK][i].MinMclk =
+                       clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
+               table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
+                       clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
+
+               table->WatermarkRow[WM_SOCCLK][i].WmSetting =
+                       clock_ranges->writer_wm_sets[i].wm_inst;
        }
 
+       smu->watermarks_bitmap |= WATERMARKS_EXIST;
+
        /* pass data to smu controller */
        if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
             !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
index 87257b1..feff4f8 100644 (file)
@@ -190,6 +190,9 @@ static int yellow_carp_fini_smc_tables(struct smu_context *smu)
        kfree(smu_table->watermarks_table);
        smu_table->watermarks_table = NULL;
 
+       kfree(smu_table->gpu_metrics_table);
+       smu_table->gpu_metrics_table = NULL;
+
        return 0;
 }
 
index e957d48..f024dc9 100644 (file)
@@ -69,7 +69,7 @@ static pgprot_t drm_io_prot(struct drm_local_map *map,
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 
 #if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
-    defined(__mips__)
+    defined(__mips__) || defined(__loongarch__)
        if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
                tmp = pgprot_noncached(tmp);
        else
index 9c5cc28..b4f6936 100644 (file)
@@ -51,7 +51,7 @@ static int preallocated_oos_pages = 8192;
 
 static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
 {
-       struct kvm *kvm = vgpu->kvm;
+       struct kvm *kvm = vgpu->vfio_device.kvm;
        int idx;
        bool ret;
 
@@ -1185,7 +1185,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
 
        if (!vgpu->attached)
                return -EINVAL;
-       pfn = gfn_to_pfn(vgpu->kvm, ops->get_pfn(entry));
+       pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
        if (is_error_noslot_pfn(pfn))
                return -EINVAL;
        return PageTransHuge(pfn_to_page(pfn));
index 03ecffc..aee1a45 100644 (file)
@@ -227,11 +227,7 @@ struct intel_vgpu {
        struct mutex cache_lock;
 
        struct notifier_block iommu_notifier;
-       struct notifier_block group_notifier;
-       struct kvm *kvm;
-       struct work_struct release_work;
        atomic_t released;
-       struct vfio_group *vfio_group;
 
        struct kvm_page_track_notifier_node track_node;
 #define NR_BKT (1 << 18)
@@ -732,7 +728,7 @@ static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
 {
        if (!vgpu->attached)
                return -ESRCH;
-       return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false);
+       return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
 }
 
 /**
@@ -750,7 +746,7 @@ static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
 {
        if (!vgpu->attached)
                return -ESRCH;
-       return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, true);
+       return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
 }
 
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
index 0787ba5..e2f6c56 100644 (file)
@@ -228,8 +228,6 @@ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
        }
 }
 
-static void intel_vgpu_release_work(struct work_struct *work);
-
 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                unsigned long size)
 {
@@ -243,7 +241,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
        for (npage = 0; npage < total_pages; npage++) {
                unsigned long cur_gfn = gfn + npage;
 
-               ret = vfio_group_unpin_pages(vgpu->vfio_group, &cur_gfn, 1);
+               ret = vfio_unpin_pages(&vgpu->vfio_device, &cur_gfn, 1);
                drm_WARN_ON(&i915->drm, ret != 1);
        }
 }
@@ -266,8 +264,8 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                unsigned long cur_gfn = gfn + npage;
                unsigned long pfn;
 
-               ret = vfio_group_pin_pages(vgpu->vfio_group, &cur_gfn, 1,
-                                          IOMMU_READ | IOMMU_WRITE, &pfn);
+               ret = vfio_pin_pages(&vgpu->vfio_device, &cur_gfn, 1,
+                                    IOMMU_READ | IOMMU_WRITE, &pfn);
                if (ret != 1) {
                        gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
                                     cur_gfn, ret);
@@ -761,23 +759,6 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
-static int intel_vgpu_group_notifier(struct notifier_block *nb,
-                                    unsigned long action, void *data)
-{
-       struct intel_vgpu *vgpu =
-               container_of(nb, struct intel_vgpu, group_notifier);
-
-       /* the only action we care about */
-       if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
-               vgpu->kvm = data;
-
-               if (!data)
-                       schedule_work(&vgpu->release_work);
-       }
-
-       return NOTIFY_OK;
-}
-
 static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
 {
        struct intel_vgpu *itr;
@@ -789,7 +770,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
                if (!itr->attached)
                        continue;
 
-               if (vgpu->kvm == itr->kvm) {
+               if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
                        ret = true;
                        goto out;
                }
@@ -804,61 +785,44 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
        struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
        unsigned long events;
        int ret;
-       struct vfio_group *vfio_group;
 
        vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
-       vgpu->group_notifier.notifier_call = intel_vgpu_group_notifier;
 
        events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
-       ret = vfio_register_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY, &events,
-                               &vgpu->iommu_notifier);
+       ret = vfio_register_notifier(vfio_dev, VFIO_IOMMU_NOTIFY, &events,
+                                    &vgpu->iommu_notifier);
        if (ret != 0) {
                gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
                        ret);
                goto out;
        }
 
-       events = VFIO_GROUP_NOTIFY_SET_KVM;
-       ret = vfio_register_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY, &events,
-                               &vgpu->group_notifier);
-       if (ret != 0) {
-               gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
-                       ret);
-               goto undo_iommu;
-       }
-
-       vfio_group =
-               vfio_group_get_external_user_from_dev(vgpu->vfio_device.dev);
-       if (IS_ERR_OR_NULL(vfio_group)) {
-               ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
-               gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
-               goto undo_register;
-       }
-       vgpu->vfio_group = vfio_group;
-
        ret = -EEXIST;
        if (vgpu->attached)
-               goto undo_group;
+               goto undo_iommu;
 
        ret = -ESRCH;
-       if (!vgpu->kvm || vgpu->kvm->mm != current->mm) {
+       if (!vgpu->vfio_device.kvm ||
+           vgpu->vfio_device.kvm->mm != current->mm) {
                gvt_vgpu_err("KVM is required to use Intel vGPU\n");
-               goto undo_group;
+               goto undo_iommu;
        }
 
+       kvm_get_kvm(vgpu->vfio_device.kvm);
+
        ret = -EEXIST;
        if (__kvmgt_vgpu_exist(vgpu))
-               goto undo_group;
+               goto undo_iommu;
 
        vgpu->attached = true;
-       kvm_get_kvm(vgpu->kvm);
 
        kvmgt_protect_table_init(vgpu);
        gvt_cache_init(vgpu);
 
        vgpu->track_node.track_write = kvmgt_page_track_write;
        vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
-       kvm_page_track_register_notifier(vgpu->kvm, &vgpu->track_node);
+       kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
+                                        &vgpu->track_node);
 
        debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
                             &vgpu->nr_cache_entries);
@@ -868,17 +832,9 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
        atomic_set(&vgpu->released, 0);
        return 0;
 
-undo_group:
-       vfio_group_put_external_user(vgpu->vfio_group);
-       vgpu->vfio_group = NULL;
-
-undo_register:
-       vfio_unregister_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY,
-                                       &vgpu->group_notifier);
-
 undo_iommu:
-       vfio_unregister_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY,
-                                       &vgpu->iommu_notifier);
+       vfio_unregister_notifier(vfio_dev, VFIO_IOMMU_NOTIFY,
+                                &vgpu->iommu_notifier);
 out:
        return ret;
 }
@@ -894,8 +850,9 @@ static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
        }
 }
 
-static void __intel_vgpu_release(struct intel_vgpu *vgpu)
+static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
 {
+       struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
        int ret;
 
@@ -907,41 +864,24 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 
        intel_gvt_release_vgpu(vgpu);
 
-       ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_IOMMU_NOTIFY,
-                                       &vgpu->iommu_notifier);
+       ret = vfio_unregister_notifier(&vgpu->vfio_device, VFIO_IOMMU_NOTIFY,
+                                      &vgpu->iommu_notifier);
        drm_WARN(&i915->drm, ret,
                 "vfio_unregister_notifier for iommu failed: %d\n", ret);
 
-       ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_GROUP_NOTIFY,
-                                       &vgpu->group_notifier);
-       drm_WARN(&i915->drm, ret,
-                "vfio_unregister_notifier for group failed: %d\n", ret);
-
        debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
 
-       kvm_page_track_unregister_notifier(vgpu->kvm, &vgpu->track_node);
-       kvm_put_kvm(vgpu->kvm);
+       kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
+                                          &vgpu->track_node);
        kvmgt_protect_table_destroy(vgpu);
        gvt_cache_destroy(vgpu);
 
        intel_vgpu_release_msi_eventfd_ctx(vgpu);
-       vfio_group_put_external_user(vgpu->vfio_group);
 
-       vgpu->kvm = NULL;
        vgpu->attached = false;
-}
-
-static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
-{
-       __intel_vgpu_release(vfio_dev_to_vgpu(vfio_dev));
-}
-
-static void intel_vgpu_release_work(struct work_struct *work)
-{
-       struct intel_vgpu *vgpu =
-               container_of(work, struct intel_vgpu, release_work);
 
-       __intel_vgpu_release(vgpu);
+       if (vgpu->vfio_device.kvm)
+               kvm_put_kvm(vgpu->vfio_device.kvm);
 }
 
 static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -1690,7 +1630,6 @@ static int intel_vgpu_probe(struct mdev_device *mdev)
                return PTR_ERR(vgpu);
        }
 
-       INIT_WORK(&vgpu->release_work, intel_vgpu_release_work);
        vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev,
                            &intel_vgpu_dev_ops);
 
@@ -1728,7 +1667,7 @@ static struct mdev_driver intel_vgpu_mdev_driver = {
 
 int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
 {
-       struct kvm *kvm = info->kvm;
+       struct kvm *kvm = info->vfio_device.kvm;
        struct kvm_memory_slot *slot;
        int idx;
 
@@ -1758,7 +1697,7 @@ out:
 
 int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
 {
-       struct kvm *kvm = info->kvm;
+       struct kvm *kvm = info->vfio_device.kvm;
        struct kvm_memory_slot *slot;
        int idx;
 
index 3e3b095..958b371 100644 (file)
@@ -1047,7 +1047,7 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
        GEM_BUG_ON(!pmu->base.event_init);
 
        /* Select the first online CPU as a designated reader. */
-       if (!cpumask_weight(&i915_pmu_cpumask))
+       if (cpumask_empty(&i915_pmu_cpumask))
                cpumask_set_cpu(cpu, &i915_pmu_cpumask);
 
        return 0;
index 52516eb..3a462e3 100644 (file)
@@ -541,7 +541,6 @@ static int dpu_encoder_virt_atomic_check(
        struct dpu_encoder_virt *dpu_enc;
        struct msm_drm_private *priv;
        struct dpu_kms *dpu_kms;
-       const struct drm_display_mode *mode;
        struct drm_display_mode *adj_mode;
        struct msm_display_topology topology;
        struct dpu_global_state *global_state;
@@ -559,7 +558,6 @@ static int dpu_encoder_virt_atomic_check(
 
        priv = drm_enc->dev->dev_private;
        dpu_kms = to_dpu_kms(priv->kms);
-       mode = &crtc_state->mode;
        adj_mode = &crtc_state->adjusted_mode;
        global_state = dpu_kms_get_global_state(crtc_state->state);
        if (IS_ERR(global_state))
@@ -1814,7 +1812,6 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
                }
        }
 
-       dsc_common_mode = 0;
        pic_width = dsc->drm->pic_width;
 
        dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
index 4829d1c..59da348 100644 (file)
@@ -574,11 +574,11 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
  */
 static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
 {
-       DPU_DEBUG("[wb:%d]\n", phys_enc->wb_idx - WB_0);
-
        if (!phys_enc)
                return;
 
+       DPU_DEBUG("[wb:%d]\n", phys_enc->wb_idx - WB_0);
+
        kfree(phys_enc);
 }
 
index bce4764..e23e255 100644 (file)
@@ -49,8 +49,6 @@
 #define DPU_DEBUGFS_DIR "msm_dpu"
 #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
 
-#define MIN_IB_BW      400000000ULL /* Min ib vote 400MB */
-
 static int dpu_kms_hw_init(struct msm_kms *kms);
 static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
 
@@ -1305,15 +1303,9 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
        struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
        struct drm_encoder *encoder;
        struct drm_device *ddev;
-       int i;
 
        ddev = dpu_kms->dev;
 
-       WARN_ON(!(dpu_kms->num_paths));
-       /* Min vote of BW is required before turning on AXI clk */
-       for (i = 0; i < dpu_kms->num_paths; i++)
-               icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));
-
        rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
        if (rc) {
                DPU_ERROR("clock enable failed rc:%d\n", rc);
index d21971b..b7f5b8d 100644 (file)
@@ -1390,8 +1390,13 @@ void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable)
 
        dp_catalog_ctrl_reset(ctrl->catalog);
 
-       if (enable)
-               dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
+       /*
+        * The DP controller's programmable registers are not
+        * reset to their default values by DP_SW_RESET,
+        * therefore the interrupt mask bits have to be updated
+        * here to enable/disable interrupts.
+        */
+       dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
 }
 
 void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
index 0454a57..e13c5c1 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/interconnect.h>
 #include <linux/irq.h>
 #include <linux/irqchip.h>
 #include <linux/irqdesc.h>
@@ -25,6 +26,8 @@
 #define UBWC_CTRL_2                    0x150
 #define UBWC_PREDICTION_MODE           0x154
 
+#define MIN_IB_BW      400000000UL /* Min ib vote 400MB */
+
 struct msm_mdss {
        struct device *dev;
 
@@ -36,8 +39,47 @@ struct msm_mdss {
                unsigned long enabled_mask;
                struct irq_domain *domain;
        } irq_controller;
+       struct icc_path *path[2];
+       u32 num_paths;
 };
 
+static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
+                                           struct msm_mdss *msm_mdss)
+{
+       struct icc_path *path0 = of_icc_get(dev, "mdp0-mem");
+       struct icc_path *path1 = of_icc_get(dev, "mdp1-mem");
+
+       if (IS_ERR_OR_NULL(path0))
+               return PTR_ERR_OR_ZERO(path0);
+
+       msm_mdss->path[0] = path0;
+       msm_mdss->num_paths = 1;
+
+       if (!IS_ERR_OR_NULL(path1)) {
+               msm_mdss->path[1] = path1;
+               msm_mdss->num_paths++;
+       }
+
+       return 0;
+}
+
+static void msm_mdss_put_icc_path(void *data)
+{
+       struct msm_mdss *msm_mdss = data;
+       int i;
+
+       for (i = 0; i < msm_mdss->num_paths; i++)
+               icc_put(msm_mdss->path[i]);
+}
+
+static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
+{
+       int i;
+
+       for (i = 0; i < msm_mdss->num_paths; i++)
+               icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
+}
+
 static void msm_mdss_irq(struct irq_desc *desc)
 {
        struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
@@ -136,6 +178,13 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
 {
        int ret;
 
+       /*
+        * Several components have AXI clocks that can only be turned on if
+        * the interconnect is enabled (non-zero bandwidth). Make sure the
+        * interconnects carry at least a minimum bandwidth before enabling the clocks.
+        */
+       msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);
+
        ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
        if (ret) {
                dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
@@ -178,6 +227,7 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
 static int msm_mdss_disable(struct msm_mdss *msm_mdss)
 {
        clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
+       msm_mdss_icc_request_bw(msm_mdss, 0);
 
        return 0;
 }
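
With the hunks above, the minimum interconnect vote moves from dpu_kms into the mdss enable/disable path. A minimal sketch of the same voting pattern for a hypothetical driver is shown below; the path name and the surrounding functions are assumptions, while icc_set_bw(), Bps_to_icc() and the clk_bulk calls are the ones used in these hunks (the path itself would be obtained once with of_icc_get() and released with icc_put(), as the following hunk does via devm_add_action_or_reset()):

    static int example_resume(struct icc_path *path,
                              struct clk_bulk_data *clks, int num_clks)
    {
            /* Vote a minimum bandwidth before the AXI clocks are switched on. */
            icc_set_bw(path, 0, Bps_to_icc(400000000UL));

            return clk_bulk_prepare_enable(num_clks, clks);
    }

    static void example_suspend(struct icc_path *path,
                                struct clk_bulk_data *clks, int num_clks)
    {
            clk_bulk_disable_unprepare(num_clks, clks);

            /* Drop the vote once nothing depends on the bus any more. */
            icc_set_bw(path, 0, 0);
    }
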
@@ -271,6 +321,13 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
 
        dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
 
+       ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
+       if (ret)
+               return ERR_PTR(ret);
+       ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
+       if (ret)
+               return ERR_PTR(ret);
+
        if (is_mdp5)
                ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
        else
index a16892c..58db799 100644 (file)
@@ -473,6 +473,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
            native_mode->vdisplay != 0 &&
            native_mode->clock != 0) {
                mode = drm_mode_duplicate(dev, native_mode);
+               if (!mode)
+                       return NULL;
                mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
                drm_mode_set_name(mode);
 
@@ -487,6 +489,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
                 * simpler.
                 */
                mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+               if (!mode)
+                       return NULL;
                mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
                DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
        }
index a3ad7c9..b3fffe7 100644 (file)
@@ -74,7 +74,7 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
 #endif /* CONFIG_UML */
 #endif /* __i386__ || __x86_64__ */
 #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
-       defined(__powerpc__) || defined(__mips__)
+       defined(__powerpc__) || defined(__mips__) || defined(__loongarch__)
        if (caching == ttm_write_combined)
                tmp = pgprot_writecombine(tmp);
        else
index 6815b4d..1861a81 100644 (file)
@@ -1,8 +1,13 @@
 # SPDX-License-Identifier: GPL-2.0-only
+
+config TEGRA_HOST1X_CONTEXT_BUS
+       bool
+
 config TEGRA_HOST1X
        tristate "NVIDIA Tegra host1x driver"
        depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
        select DMA_SHARED_BUFFER
+       select TEGRA_HOST1X_CONTEXT_BUS
        select IOMMU_IOVA
        help
          Driver for the NVIDIA Tegra host1x hardware.
index d2b6f7d..c891a3e 100644 (file)
@@ -18,3 +18,4 @@ host1x-y = \
        hw/host1x07.o
 
 obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
+obj-$(CONFIG_TEGRA_HOST1X_CONTEXT_BUS) += context_bus.o
diff --git a/drivers/gpu/host1x/context_bus.c b/drivers/gpu/host1x/context_bus.c
new file mode 100644 (file)
index 0000000..b0d35b2
--- /dev/null
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, NVIDIA Corporation.
+ */
+
+#include <linux/device.h>
+#include <linux/of.h>
+
+struct bus_type host1x_context_device_bus_type = {
+       .name = "host1x-context",
+};
+EXPORT_SYMBOL_GPL(host1x_context_device_bus_type);
+
+static int __init host1x_context_device_bus_init(void)
+{
+       int err;
+
+       if (!of_machine_is_compatible("nvidia,tegra186") &&
+           !of_machine_is_compatible("nvidia,tegra194") &&
+           !of_machine_is_compatible("nvidia,tegra234"))
+               return 0;
+
+       err = bus_register(&host1x_context_device_bus_type);
+       if (err < 0) {
+               pr_err("bus type registration failed: %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+postcore_initcall(host1x_context_device_bus_init);
index 54752c8..4490e2f 100644 (file)
@@ -387,7 +387,7 @@ static int hid_submit_ctrl(struct hid_device *hid)
 
                usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0);
                maxpacket = usb_maxpacket(hid_to_usb_dev(hid),
-                                         usbhid->urbctrl->pipe, 0);
+                                         usbhid->urbctrl->pipe);
                len += (len == 0);      /* Don't allow 0-length reports */
                len = round_up(len, maxpacket);
                if (len > usbhid->bufsize)
index df02002..b4b007c 100644 (file)
@@ -279,7 +279,7 @@ static int usb_kbd_probe(struct usb_interface *iface,
                return -ENODEV;
 
        pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
-       maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+       maxp = usb_maxpacket(dev, pipe);
 
        kbd = kzalloc(sizeof(struct usb_kbd), GFP_KERNEL);
        input_dev = input_allocate_device();
index c893320..fb1d7d1 100644 (file)
@@ -123,7 +123,7 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
                return -ENODEV;
 
        pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
-       maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+       maxp = usb_maxpacket(dev, pipe);
 
        mouse = kzalloc(sizeof(struct usb_mouse), GFP_KERNEL);
        input_dev = input_allocate_device();
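
Both HID hunks above adapt to the usb_maxpacket() signature that dropped its pipe-direction argument; the direction is now derived from the pipe itself. A minimal sketch with a hypothetical helper:

    static int example_int_in_maxpacket(struct usb_device *udev,
                                        const struct usb_endpoint_descriptor *ep)
    {
            unsigned int pipe = usb_rcvintpipe(udev, ep->bEndpointAddress);

            /* Previously: usb_maxpacket(udev, pipe, usb_pipeout(pipe)); */
            return usb_maxpacket(udev, pipe);
    }
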
diff --git a/drivers/hte/Kconfig b/drivers/hte/Kconfig
new file mode 100644 (file)
index 0000000..cf29e02
--- /dev/null
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig HTE
+       bool "Hardware Timestamping Engine (HTE) Support"
+       help
+         Hardware Timestamping Engine (HTE) Support.
+
+         Some devices provide a hardware timestamping engine which can
+         timestamp certain device lines/signals in realtime. This benefits
+         applications that need accurate event timestamps with low jitter.
+         This framework provides a generic interface to such HTE providers
+         and consumer devices.
+
+         If unsure, say no.
+
+if HTE
+
+config HTE_TEGRA194
+       tristate "NVIDIA Tegra194 HTE Support"
+       depends on ARCH_TEGRA_194_SOC
+       help
+         Enable this option for the integrated hardware timestamping engine,
+         also known as the generic timestamping engine (GTE), on NVIDIA
+         Tegra194 systems-on-chip. The driver supports 352 LIC IRQs and
+         39 AON GPIO lines for timestamping in realtime.
+
+config HTE_TEGRA194_TEST
+        tristate "NVIDIA Tegra194 HTE Test"
+        depends on HTE_TEGRA194
+        help
+         The NVIDIA Tegra194 GTE test driver demonstrates how to use the HTE
+         framework to timestamp GPIO and LIC IRQ lines.
+
+endif
diff --git a/drivers/hte/Makefile b/drivers/hte/Makefile
new file mode 100644 (file)
index 0000000..8cca124
--- /dev/null
@@ -0,0 +1,3 @@
+obj-$(CONFIG_HTE)              += hte.o
+obj-$(CONFIG_HTE_TEGRA194)     += hte-tegra194.o
+obj-$(CONFIG_HTE_TEGRA194_TEST) += hte-tegra194-test.o
diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c
new file mode 100644 (file)
index 0000000..5d776a1
--- /dev/null
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022 NVIDIA Corporation
+ *
+ * Author: Dipen Patel <dipenp@nvidia.com>
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/hte.h>
+
+/*
+ * This sample HTE GPIO test driver demonstrates HTE API usage by enabling
+ * hardware timestamps on the gpio_in and specified LIC IRQ lines.
+ *
+ * Note: gpio_out and gpio_in need to be shorted externally in order for this
+ * test driver to work for GPIO monitoring. The test driver has been tested
+ * on the Jetson AGX Xavier platform by shorting pins 32 and 16 on the
+ * 40-pin header.
+ *
+ * Device tree snippet to activate this driver:
+ *     tegra_hte_test {
+ *             compatible = "nvidia,tegra194-hte-test";
+ *             in-gpio = <&gpio_aon TEGRA194_AON_GPIO(BB, 1)>;
+ *             out-gpio = <&gpio_aon TEGRA194_AON_GPIO(BB, 0)>;
+ *             timestamps = <&tegra_hte_aon TEGRA194_AON_GPIO(BB, 1)>,
+ *                          <&tegra_hte_lic 0x19>;
+ *             timestamp-names = "hte-gpio", "hte-i2c-irq";
+ *             status = "okay";
+ *     };
+ *
+ * How to run the test driver:
+ * - Load the test driver.
+ * - For the GPIO, the gpio_out pin toggles at a regular interval, triggering
+ *   HTE for the rising edge on the gpio_in pin.
+ *
+ * - For the LIC IRQ line, it uses interrupt 0x19, which is i2c controller 1.
+ * - Run i2cdetect -y 1 1>/dev/null; this command generates i2c bus
+ *   transactions which create timestamp data.
+ * - The driver prints the message below for both lines:
+ *   HW timestamp(<line id>:<ts seq number>): <timestamp>, edge: <edge>.
+ * - Unloading the driver disables and deallocates the HTE.
+ */
+
+static struct tegra_hte_test {
+       int gpio_in_irq;
+       struct device *pdev;
+       struct gpio_desc *gpio_in;
+       struct gpio_desc *gpio_out;
+       struct hte_ts_desc *desc;
+       struct timer_list timer;
+       struct kobject *kobj;
+} hte;
+
+static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
+{
+       char *edge;
+       struct hte_ts_desc *desc = p;
+
+       if (!ts || !p)
+               return HTE_CB_HANDLED;
+
+       if (ts->raw_level < 0)
+               edge = "Unknown";
+
+       pr_info("HW timestamp(%u: %llu): %llu, edge: %s\n",
+               desc->attr.line_id, ts->seq, ts->tsc,
+               (ts->raw_level >= 0) ? ((ts->raw_level == 0) ?
+                                       "falling" : "rising") : edge);
+
+       return HTE_CB_HANDLED;
+}
+
+static void gpio_timer_cb(struct timer_list *t)
+{
+       (void)t;
+
+       gpiod_set_value(hte.gpio_out, !gpiod_get_value(hte.gpio_out));
+       mod_timer(&hte.timer, jiffies + msecs_to_jiffies(8000));
+}
+
+static irqreturn_t tegra_hte_test_gpio_isr(int irq, void *data)
+{
+       (void)irq;
+       (void)data;
+
+       return IRQ_HANDLED;
+}
+
+static const struct of_device_id tegra_hte_test_of_match[] = {
+       { .compatible = "nvidia,tegra194-hte-test"},
+       { }
+};
+MODULE_DEVICE_TABLE(of, tegra_hte_test_of_match);
+
+static int tegra_hte_test_probe(struct platform_device *pdev)
+{
+       int ret = 0;
+       int i, cnt;
+
+       dev_set_drvdata(&pdev->dev, &hte);
+       hte.pdev = &pdev->dev;
+
+       hte.gpio_out = gpiod_get(&pdev->dev, "out", 0);
+       if (IS_ERR(hte.gpio_out)) {
+               dev_err(&pdev->dev, "failed to get gpio out\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       hte.gpio_in = gpiod_get(&pdev->dev, "in", 0);
+       if (IS_ERR(hte.gpio_in)) {
+               dev_err(&pdev->dev, "failed to get gpio in\n");
+               ret = -EINVAL;
+               goto free_gpio_out;
+       }
+
+       ret = gpiod_direction_output(hte.gpio_out, 0);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to set output\n");
+               ret = -EINVAL;
+               goto free_gpio_in;
+       }
+
+       ret = gpiod_direction_input(hte.gpio_in);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to set input\n");
+               ret = -EINVAL;
+               goto free_gpio_in;
+       }
+
+       ret = gpiod_to_irq(hte.gpio_in);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "failed to map GPIO to IRQ: %d\n", ret);
+               ret = -ENXIO;
+               goto free_gpio_in;
+       }
+
+       hte.gpio_in_irq = ret;
+       ret = request_irq(ret, tegra_hte_test_gpio_isr,
+                         IRQF_TRIGGER_RISING,
+                         "tegra_hte_gpio_test_isr", &hte);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to acquire IRQ\n");
+               ret = -ENXIO;
+               goto free_irq;
+       }
+
+       cnt = of_hte_req_count(hte.pdev);
+       if (cnt < 0)
+               goto free_irq;
+
+       dev_info(&pdev->dev, "Total requested lines:%d\n", cnt);
+
+       hte.desc = devm_kzalloc(hte.pdev, sizeof(*hte.desc) * cnt, GFP_KERNEL);
+       if (!hte.desc) {
+               ret = -ENOMEM;
+               goto free_irq;
+       }
+
+       for (i = 0; i < cnt; i++) {
+               if (i == 0)
+                       /*
+                        * GPIO hte init, line_id and name will be parsed from
+                        * the device tree node. The edge_flag is implicitly
+                        * set by the request_irq call. Only line_data needs
+                        * to be set.
+                        */
+                       hte_init_line_attr(&hte.desc[i], 0, 0, NULL,
+                                          hte.gpio_in);
+               else
+                       /*
+                        * Same comment as above, except that the IRQ does not need
+                        * line data.
+                        */
+                       hte_init_line_attr(&hte.desc[i], 0, 0, NULL, NULL);
+
+               ret = hte_ts_get(hte.pdev, &hte.desc[i], i);
+               if (ret)
+                       goto ts_put;
+
+               ret = devm_hte_request_ts_ns(hte.pdev, &hte.desc[i],
+                                            process_hw_ts, NULL,
+                                            &hte.desc[i]);
+               if (ret) /* no need to ts_put, request API takes care */
+                       goto free_irq;
+       }
+
+       timer_setup(&hte.timer, gpio_timer_cb, 0);
+       mod_timer(&hte.timer, jiffies + msecs_to_jiffies(5000));
+
+       return 0;
+
+ts_put:
+       cnt = i;
+       for (i = 0; i < cnt; i++)
+               hte_ts_put(&hte.desc[i]);
+free_irq:
+       free_irq(hte.gpio_in_irq, &hte);
+free_gpio_in:
+       gpiod_put(hte.gpio_in);
+free_gpio_out:
+       gpiod_put(hte.gpio_out);
+out:
+
+       return ret;
+}
+
+static int tegra_hte_test_remove(struct platform_device *pdev)
+{
+       (void)pdev;
+
+       free_irq(hte.gpio_in_irq, &hte);
+       gpiod_put(hte.gpio_in);
+       gpiod_put(hte.gpio_out);
+       del_timer_sync(&hte.timer);
+
+       return 0;
+}
+
+static struct platform_driver tegra_hte_test_driver = {
+       .probe = tegra_hte_test_probe,
+       .remove = tegra_hte_test_remove,
+       .driver = {
+               .name = "tegra_hte_test",
+               .of_match_table = tegra_hte_test_of_match,
+       },
+};
+module_platform_driver(tegra_hte_test_driver);
+
+MODULE_AUTHOR("Dipen Patel <dipenp@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hte/hte-tegra194.c b/drivers/hte/hte-tegra194.c
new file mode 100644 (file)
index 0000000..49a27af
--- /dev/null
@@ -0,0 +1,730 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022 NVIDIA Corporation
+ *
+ * Author: Dipen Patel <dipenp@nvidia.com>
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/hte.h>
+#include <linux/uaccess.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
+
+#define HTE_SUSPEND    0
+
+/* HTE source clock TSC is 31.25MHz */
+#define HTE_TS_CLK_RATE_HZ     31250000ULL
+#define HTE_CLK_RATE_NS                32
+#define HTE_TS_NS_SHIFT        __builtin_ctz(HTE_CLK_RATE_NS)
+
+#define NV_AON_SLICE_INVALID   -1
+#define NV_LINES_IN_SLICE      32
+
+/* AON HTE line map For slice 1 */
+#define NV_AON_HTE_SLICE1_IRQ_GPIO_28  12
+#define NV_AON_HTE_SLICE1_IRQ_GPIO_29  13
+
+/* AON HTE line map For slice 2 */
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_0   0
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_1   1
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_2   2
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_3   3
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_4   4
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_5   5
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_6   6
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_7   7
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_8   8
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_9   9
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_10  10
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_11  11
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_12  12
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_13  13
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_14  14
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_15  15
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_16  16
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_17  17
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_18  18
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_19  19
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_20  20
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_21  21
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_22  22
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_23  23
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_24  24
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_25  25
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_26  26
+#define NV_AON_HTE_SLICE2_IRQ_GPIO_27  27
+
+#define HTE_TECTRL             0x0
+#define HTE_TETSCH             0x4
+#define HTE_TETSCL             0x8
+#define HTE_TESRC              0xC
+#define HTE_TECCV              0x10
+#define HTE_TEPCV              0x14
+#define HTE_TECMD              0x1C
+#define HTE_TESTATUS           0x20
+#define HTE_SLICE0_TETEN       0x40
+#define HTE_SLICE1_TETEN       0x60
+
+#define HTE_SLICE_SIZE         (HTE_SLICE1_TETEN - HTE_SLICE0_TETEN)
+
+#define HTE_TECTRL_ENABLE_ENABLE       0x1
+
+#define HTE_TECTRL_OCCU_SHIFT          0x8
+#define HTE_TECTRL_INTR_SHIFT          0x1
+#define HTE_TECTRL_INTR_ENABLE         0x1
+
+#define HTE_TESRC_SLICE_SHIFT          16
+#define HTE_TESRC_SLICE_DEFAULT_MASK   0xFF
+
+#define HTE_TECMD_CMD_POP              0x1
+
+#define HTE_TESTATUS_OCCUPANCY_SHIFT   8
+#define HTE_TESTATUS_OCCUPANCY_MASK    0xFF
+
+enum tegra_hte_type {
+       HTE_TEGRA_TYPE_GPIO = 1U << 0,
+       HTE_TEGRA_TYPE_LIC = 1U << 1,
+};
+
+struct hte_slices {
+       u32 r_val;
+       unsigned long flags;
+       /* to prevent lines mapped to the same slice from racing on its register */
+       spinlock_t s_lock;
+};
+
+struct tegra_hte_line_mapped {
+       int slice;
+       u32 bit_index;
+};
+
+struct tegra_hte_line_data {
+       unsigned long flags;
+       void *data;
+};
+
+struct tegra_hte_data {
+       enum tegra_hte_type type;
+       u32 map_sz;
+       u32 sec_map_sz;
+       const struct tegra_hte_line_mapped *map;
+       const struct tegra_hte_line_mapped *sec_map;
+};
+
+struct tegra_hte_soc {
+       int hte_irq;
+       u32 itr_thrshld;
+       u32 conf_rval;
+       struct hte_slices *sl;
+       const struct tegra_hte_data *prov_data;
+       struct tegra_hte_line_data *line_data;
+       struct hte_chip *chip;
+       struct gpio_chip *c;
+       void __iomem *regs;
+};
+
+static const struct tegra_hte_line_mapped tegra194_aon_gpio_map[] = {
+       /* gpio, slice, bit_index */
+       /* AA port */
+       [0]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_11},
+       [1]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_10},
+       [2]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_9},
+       [3]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_8},
+       [4]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_7},
+       [5]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_6},
+       [6]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_5},
+       [7]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_4},
+       /* BB port */
+       [8]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_3},
+       [9]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_2},
+       [10] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_1},
+       [11] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_0},
+       /* CC port */
+       [12] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_22},
+       [13] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_21},
+       [14] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_20},
+       [15] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_19},
+       [16] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_18},
+       [17] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_17},
+       [18] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_16},
+       [19] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_15},
+       /* DD port */
+       [20] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_14},
+       [21] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_13},
+       [22] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_12},
+       /* EE port */
+       [23] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_29},
+       [24] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_28},
+       [25] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_27},
+       [26] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_26},
+       [27] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_25},
+       [28] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_24},
+       [29] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_23},
+};
+
+static const struct tegra_hte_line_mapped tegra194_aon_gpio_sec_map[] = {
+       /* gpio, slice, bit_index */
+       /* AA port */
+       [0]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_11},
+       [1]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_10},
+       [2]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_9},
+       [3]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_8},
+       [4]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_7},
+       [5]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_6},
+       [6]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_5},
+       [7]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_4},
+       /* BB port */
+       [8]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_3},
+       [9]  = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_2},
+       [10] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_1},
+       [11] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_0},
+       [12]  = {NV_AON_SLICE_INVALID, 0},
+       [13]  = {NV_AON_SLICE_INVALID, 0},
+       [14] = {NV_AON_SLICE_INVALID, 0},
+       [15] = {NV_AON_SLICE_INVALID, 0},
+       /* CC port */
+       [16] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_22},
+       [17] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_21},
+       [18] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_20},
+       [19] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_19},
+       [20] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_18},
+       [21] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_17},
+       [22] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_16},
+       [23] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_15},
+       /* DD port */
+       [24] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_14},
+       [25] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_13},
+       [26] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_12},
+       [27] = {NV_AON_SLICE_INVALID, 0},
+       [28] = {NV_AON_SLICE_INVALID, 0},
+       [29] = {NV_AON_SLICE_INVALID, 0},
+       [30] = {NV_AON_SLICE_INVALID, 0},
+       [31] = {NV_AON_SLICE_INVALID, 0},
+       /* EE port */
+       [32] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_29},
+       [33] = {1, NV_AON_HTE_SLICE1_IRQ_GPIO_28},
+       [34] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_27},
+       [35] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_26},
+       [36] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_25},
+       [37] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_24},
+       [38] = {2, NV_AON_HTE_SLICE2_IRQ_GPIO_23},
+       [39] = {NV_AON_SLICE_INVALID, 0},
+};
+
+static const struct tegra_hte_data aon_hte = {
+       .map_sz = ARRAY_SIZE(tegra194_aon_gpio_map),
+       .map = tegra194_aon_gpio_map,
+       .sec_map_sz = ARRAY_SIZE(tegra194_aon_gpio_sec_map),
+       .sec_map = tegra194_aon_gpio_sec_map,
+       .type = HTE_TEGRA_TYPE_GPIO,
+};
+
+static const struct tegra_hte_data lic_hte = {
+       .map_sz = 0,
+       .map = NULL,
+       .type = HTE_TEGRA_TYPE_LIC,
+};
+
+static inline u32 tegra_hte_readl(struct tegra_hte_soc *hte, u32 reg)
+{
+       return readl(hte->regs + reg);
+}
+
+static inline void tegra_hte_writel(struct tegra_hte_soc *hte, u32 reg,
+                                   u32 val)
+{
+       writel(val, hte->regs + reg);
+}
+
+static int tegra_hte_map_to_line_id(u32 eid,
+                                   const struct tegra_hte_line_mapped *m,
+                                   u32 map_sz, u32 *mapped)
+{
+
+       if (m) {
+               if (eid > map_sz)
+                       return -EINVAL;
+               if (m[eid].slice == NV_AON_SLICE_INVALID)
+                       return -EINVAL;
+
+               *mapped = (m[eid].slice << 5) + m[eid].bit_index;
+       } else {
+               *mapped = eid;
+       }
+
+       return 0;
+}
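
As a worked example of the mapping above: AON GPIO 25 in tegra194_aon_gpio_map is {2, NV_AON_HTE_SLICE2_IRQ_GPIO_27}, so its translated id is (2 << 5) + 27 = 91, i.e. bit 27 within slice 2's 32-line window.
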
+
+static int tegra_hte_line_xlate(struct hte_chip *gc,
+                               const struct of_phandle_args *args,
+                               struct hte_ts_desc *desc, u32 *xlated_id)
+{
+       int ret = 0;
+       u32 line_id;
+       struct tegra_hte_soc *gs;
+       const struct tegra_hte_line_mapped *map = NULL;
+       u32 map_sz = 0;
+
+       if (!gc || !desc || !xlated_id)
+               return -EINVAL;
+
+       if (args) {
+               if (gc->of_hte_n_cells < 1)
+                       return -EINVAL;
+
+               if (args->args_count != gc->of_hte_n_cells)
+                       return -EINVAL;
+
+               desc->attr.line_id = args->args[0];
+       }
+
+       gs = gc->data;
+       if (!gs || !gs->prov_data)
+               return -EINVAL;
+
+       /*
+        * There are two paths a GPIO consumer can take:
+        *
+        * 1) The consumer (gpiolib-cdev for example) uses the global GPIO
+        * number, which is assigned at run time.
+        * 2) The consumer passes a GPIO from the DT, which is assigned
+        * statically, for example by using the TEGRA194_AON_GPIO DT binding.
+        *
+        * The code below addresses both consumer use cases and maps them
+        * into the HTE/GTE namespace.
+        */
+       if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO && !args) {
+               line_id = desc->attr.line_id - gs->c->base;
+               map = gs->prov_data->map;
+               map_sz = gs->prov_data->map_sz;
+       } else if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO && args) {
+               line_id = desc->attr.line_id;
+               map = gs->prov_data->sec_map;
+               map_sz = gs->prov_data->sec_map_sz;
+       } else {
+               line_id = desc->attr.line_id;
+       }
+
+       ret = tegra_hte_map_to_line_id(line_id, map, map_sz, xlated_id);
+       if (ret < 0) {
+               dev_err(gc->dev, "line_id:%u mapping failed\n",
+                       desc->attr.line_id);
+               return ret;
+       }
+
+       if (*xlated_id > gc->nlines)
+               return -EINVAL;
+
+       dev_dbg(gc->dev, "requested id:%u, xlated id:%u\n",
+               desc->attr.line_id, *xlated_id);
+
+       return 0;
+}
+
+static int tegra_hte_line_xlate_plat(struct hte_chip *gc,
+                                    struct hte_ts_desc *desc, u32 *xlated_id)
+{
+       return tegra_hte_line_xlate(gc, NULL, desc, xlated_id);
+}
+
+static int tegra_hte_en_dis_common(struct hte_chip *chip, u32 line_id, bool en)
+{
+       u32 slice, sl_bit_shift, line_bit, val, reg;
+       struct tegra_hte_soc *gs;
+
+       sl_bit_shift = __builtin_ctz(HTE_SLICE_SIZE);
+
+       if (!chip)
+               return -EINVAL;
+
+       gs = chip->data;
+
+       if (line_id > chip->nlines) {
+               dev_err(chip->dev,
+                       "line id: %u is not supported by this controller\n",
+                       line_id);
+               return -EINVAL;
+       }
+
+       slice = line_id >> sl_bit_shift;
+       line_bit = line_id & (HTE_SLICE_SIZE - 1);
+       reg = (slice << sl_bit_shift) + HTE_SLICE0_TETEN;
+
+       spin_lock(&gs->sl[slice].s_lock);
+
+       if (test_bit(HTE_SUSPEND, &gs->sl[slice].flags)) {
+               spin_unlock(&gs->sl[slice].s_lock);
+               dev_dbg(chip->dev, "device suspended");
+               return -EBUSY;
+       }
+
+       val = tegra_hte_readl(gs, reg);
+       if (en)
+               val = val | (1 << line_bit);
+       else
+               val = val & (~(1 << line_bit));
+       tegra_hte_writel(gs, reg, val);
+
+       spin_unlock(&gs->sl[slice].s_lock);
+
+       dev_dbg(chip->dev, "line: %u, slice %u, line_bit %u, reg:0x%x\n",
+               line_id, slice, line_bit, reg);
+
+       return 0;
+}
+
+static int tegra_hte_enable(struct hte_chip *chip, u32 line_id)
+{
+       if (!chip)
+               return -EINVAL;
+
+       return tegra_hte_en_dis_common(chip, line_id, true);
+}
+
+static int tegra_hte_disable(struct hte_chip *chip, u32 line_id)
+{
+       if (!chip)
+               return -EINVAL;
+
+       return tegra_hte_en_dis_common(chip, line_id, false);
+}
+
+static int tegra_hte_request(struct hte_chip *chip, struct hte_ts_desc *desc,
+                            u32 line_id)
+{
+       int ret;
+       struct tegra_hte_soc *gs;
+       struct hte_line_attr *attr;
+
+       if (!chip || !chip->data || !desc)
+               return -EINVAL;
+
+       gs = chip->data;
+       attr = &desc->attr;
+
+       if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+               if (!attr->line_data)
+                       return -EINVAL;
+
+               ret = gpiod_enable_hw_timestamp_ns(attr->line_data,
+                                                  attr->edge_flags);
+               if (ret)
+                       return ret;
+
+               gs->line_data[line_id].data = attr->line_data;
+               gs->line_data[line_id].flags = attr->edge_flags;
+       }
+
+       return tegra_hte_en_dis_common(chip, line_id, true);
+}
+
+static int tegra_hte_release(struct hte_chip *chip, struct hte_ts_desc *desc,
+                            u32 line_id)
+{
+       struct tegra_hte_soc *gs;
+       struct hte_line_attr *attr;
+       int ret;
+
+       if (!chip || !chip->data || !desc)
+               return -EINVAL;
+
+       gs = chip->data;
+       attr = &desc->attr;
+
+       if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+               ret = gpiod_disable_hw_timestamp_ns(attr->line_data,
+                                                   gs->line_data[line_id].flags);
+               if (ret)
+                       return ret;
+
+               gs->line_data[line_id].data = NULL;
+               gs->line_data[line_id].flags = 0;
+       }
+
+       return tegra_hte_en_dis_common(chip, line_id, false);
+}
+
+static int tegra_hte_clk_src_info(struct hte_chip *chip,
+                                 struct hte_clk_info *ci)
+{
+       (void)chip;
+
+       if (!ci)
+               return -EINVAL;
+
+       ci->hz = HTE_TS_CLK_RATE_HZ;
+       ci->type = CLOCK_MONOTONIC;
+
+       return 0;
+}
+
+static int tegra_hte_get_level(struct tegra_hte_soc *gs, u32 line_id)
+{
+       struct gpio_desc *desc;
+
+       if (gs->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+               desc = gs->line_data[line_id].data;
+               if (desc)
+                       return gpiod_get_raw_value(desc);
+       }
+
+       return -1;
+}
+
+static void tegra_hte_read_fifo(struct tegra_hte_soc *gs)
+{
+       u32 tsh, tsl, src, pv, cv, acv, slice, bit_index, line_id;
+       u64 tsc;
+       struct hte_ts_data el;
+
+       while ((tegra_hte_readl(gs, HTE_TESTATUS) >>
+               HTE_TESTATUS_OCCUPANCY_SHIFT) &
+               HTE_TESTATUS_OCCUPANCY_MASK) {
+               tsh = tegra_hte_readl(gs, HTE_TETSCH);
+               tsl = tegra_hte_readl(gs, HTE_TETSCL);
+               tsc = (((u64)tsh << 32) | tsl);
+
+               src = tegra_hte_readl(gs, HTE_TESRC);
+               slice = (src >> HTE_TESRC_SLICE_SHIFT) &
+                           HTE_TESRC_SLICE_DEFAULT_MASK;
+
+               pv = tegra_hte_readl(gs, HTE_TEPCV);
+               cv = tegra_hte_readl(gs, HTE_TECCV);
+               acv = pv ^ cv;
+               while (acv) {
+                       bit_index = __builtin_ctz(acv);
+                       line_id = bit_index + (slice << 5);
+                       el.tsc = tsc << HTE_TS_NS_SHIFT;
+                       el.raw_level = tegra_hte_get_level(gs, line_id);
+                       hte_push_ts_ns(gs->chip, line_id, &el);
+                       acv &= ~BIT(bit_index);
+               }
+               tegra_hte_writel(gs, HTE_TECMD, HTE_TECMD_CMD_POP);
+       }
+}
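
The left shift applied to el.tsc follows from the clock definitions at the top of this file: the TSC runs at 31.25 MHz, so one tick is HTE_CLK_RATE_NS = 32 ns and HTE_TS_NS_SHIFT = __builtin_ctz(32) = 5; shifting the raw counter left by 5 multiplies it by 32 and converts ticks to nanoseconds (for example, a raw value of 1,000,000 ticks becomes 32,000,000 ns, i.e. 32 ms).
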
+
+static irqreturn_t tegra_hte_isr(int irq, void *dev_id)
+{
+       struct tegra_hte_soc *gs = dev_id;
+       (void)irq;
+
+       tegra_hte_read_fifo(gs);
+
+       return IRQ_HANDLED;
+}
+
+static bool tegra_hte_match_from_linedata(const struct hte_chip *chip,
+                                         const struct hte_ts_desc *hdesc)
+{
+       struct tegra_hte_soc *hte_dev = chip->data;
+
+       if (!hte_dev || (hte_dev->prov_data->type != HTE_TEGRA_TYPE_GPIO))
+               return false;
+
+       return hte_dev->c == gpiod_to_chip(hdesc->attr.line_data);
+}
+
+static const struct of_device_id tegra_hte_of_match[] = {
+       { .compatible = "nvidia,tegra194-gte-lic", .data = &lic_hte},
+       { .compatible = "nvidia,tegra194-gte-aon", .data = &aon_hte},
+       { }
+};
+MODULE_DEVICE_TABLE(of, tegra_hte_of_match);
+
+static const struct hte_ops g_ops = {
+       .request = tegra_hte_request,
+       .release = tegra_hte_release,
+       .enable = tegra_hte_enable,
+       .disable = tegra_hte_disable,
+       .get_clk_src_info = tegra_hte_clk_src_info,
+};
+
+static void tegra_gte_disable(void *data)
+{
+       struct platform_device *pdev = data;
+       struct tegra_hte_soc *gs = dev_get_drvdata(&pdev->dev);
+
+       tegra_hte_writel(gs, HTE_TECTRL, 0);
+}
+
+static int tegra_get_gpiochip_from_name(struct gpio_chip *chip, void *data)
+{
+       return !strcmp(chip->label, data);
+}
+
+static int tegra_hte_probe(struct platform_device *pdev)
+{
+       int ret;
+       u32 i, slices, val = 0;
+       u32 nlines;
+       struct device *dev;
+       struct tegra_hte_soc *hte_dev;
+       struct hte_chip *gc;
+
+       dev = &pdev->dev;
+
+       ret = of_property_read_u32(dev->of_node, "nvidia,slices", &slices);
+       if (ret != 0) {
+               dev_err(dev, "Could not read slices\n");
+               return -EINVAL;
+       }
+       nlines = slices << 5;
+
+       hte_dev = devm_kzalloc(dev, sizeof(*hte_dev), GFP_KERNEL);
+       if (!hte_dev)
+               return -ENOMEM;
+
+       gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+       if (!gc)
+               return -ENOMEM;
+
+       dev_set_drvdata(&pdev->dev, hte_dev);
+       hte_dev->prov_data = of_device_get_match_data(&pdev->dev);
+
+       hte_dev->regs = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(hte_dev->regs))
+               return PTR_ERR(hte_dev->regs);
+
+       ret = of_property_read_u32(dev->of_node, "nvidia,int-threshold",
+                                  &hte_dev->itr_thrshld);
+       if (ret != 0)
+               hte_dev->itr_thrshld = 1;
+
+       hte_dev->sl = devm_kcalloc(dev, slices, sizeof(*hte_dev->sl),
+                                  GFP_KERNEL);
+       if (!hte_dev->sl)
+               return -ENOMEM;
+
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0) {
+               dev_err_probe(dev, ret, "failed to get irq\n");
+               return ret;
+       }
+       hte_dev->hte_irq = ret;
+       ret = devm_request_irq(dev, hte_dev->hte_irq, tegra_hte_isr, 0,
+                              dev_name(dev), hte_dev);
+       if (ret < 0) {
+               dev_err(dev, "request irq failed.\n");
+               return ret;
+       }
+
+       gc->nlines = nlines;
+       gc->ops = &g_ops;
+       gc->dev = dev;
+       gc->data = hte_dev;
+       gc->xlate_of = tegra_hte_line_xlate;
+       gc->xlate_plat = tegra_hte_line_xlate_plat;
+       gc->of_hte_n_cells = 1;
+
+       if (hte_dev->prov_data &&
+           hte_dev->prov_data->type == HTE_TEGRA_TYPE_GPIO) {
+               hte_dev->line_data = devm_kcalloc(dev, nlines,
+                                                 sizeof(*hte_dev->line_data),
+                                                 GFP_KERNEL);
+               if (!hte_dev->line_data)
+                       return -ENOMEM;
+
+               gc->match_from_linedata = tegra_hte_match_from_linedata;
+
+               hte_dev->c = gpiochip_find("tegra194-gpio-aon",
+                                          tegra_get_gpiochip_from_name);
+               if (!hte_dev->c)
+                       return dev_err_probe(dev, -EPROBE_DEFER,
+                                            "wait for gpio controller\n");
+       }
+
+       hte_dev->chip = gc;
+
+       ret = devm_hte_register_chip(hte_dev->chip);
+       if (ret) {
+               dev_err(gc->dev, "hte chip register failed");
+               return ret;
+       }
+
+       for (i = 0; i < slices; i++) {
+               hte_dev->sl[i].flags = 0;
+               spin_lock_init(&hte_dev->sl[i].s_lock);
+       }
+
+       val = HTE_TECTRL_ENABLE_ENABLE |
+             (HTE_TECTRL_INTR_ENABLE << HTE_TECTRL_INTR_SHIFT) |
+             (hte_dev->itr_thrshld << HTE_TECTRL_OCCU_SHIFT);
+       tegra_hte_writel(hte_dev, HTE_TECTRL, val);
+
+       ret = devm_add_action_or_reset(&pdev->dev, tegra_gte_disable, pdev);
+       if (ret)
+               return ret;
+
+       dev_dbg(gc->dev, "lines: %d, slices:%d", gc->nlines, slices);
+
+       return 0;
+}
+
+static int __maybe_unused tegra_hte_resume_early(struct device *dev)
+{
+       u32 i;
+       struct tegra_hte_soc *gs = dev_get_drvdata(dev);
+       u32 slices = gs->chip->nlines / NV_LINES_IN_SLICE;
+       u32 sl_bit_shift = __builtin_ctz(HTE_SLICE_SIZE);
+
+       tegra_hte_writel(gs, HTE_TECTRL, gs->conf_rval);
+
+       for (i = 0; i < slices; i++) {
+               spin_lock(&gs->sl[i].s_lock);
+               tegra_hte_writel(gs,
+                                ((i << sl_bit_shift) + HTE_SLICE0_TETEN),
+                                gs->sl[i].r_val);
+               clear_bit(HTE_SUSPEND, &gs->sl[i].flags);
+               spin_unlock(&gs->sl[i].s_lock);
+       }
+
+       return 0;
+}
+
+static int __maybe_unused tegra_hte_suspend_late(struct device *dev)
+{
+       u32 i;
+       struct tegra_hte_soc *gs = dev_get_drvdata(dev);
+       u32 slices = gs->chip->nlines / NV_LINES_IN_SLICE;
+       u32 sl_bit_shift = __builtin_ctz(HTE_SLICE_SIZE);
+
+       gs->conf_rval = tegra_hte_readl(gs, HTE_TECTRL);
+       for (i = 0; i < slices; i++) {
+               spin_lock(&gs->sl[i].s_lock);
+               gs->sl[i].r_val = tegra_hte_readl(gs,
+                               ((i << sl_bit_shift) + HTE_SLICE0_TETEN));
+               set_bit(HTE_SUSPEND, &gs->sl[i].flags);
+               spin_unlock(&gs->sl[i].s_lock);
+       }
+
+       return 0;
+}
+
+static const struct dev_pm_ops tegra_hte_pm = {
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(tegra_hte_suspend_late,
+                                    tegra_hte_resume_early)
+};
+
+static struct platform_driver tegra_hte_driver = {
+       .probe = tegra_hte_probe,
+       .driver = {
+               .name = "tegra_hte",
+               .pm = &tegra_hte_pm,
+               .of_match_table = tegra_hte_of_match,
+       },
+};
+
+module_platform_driver(tegra_hte_driver);
+
+MODULE_AUTHOR("Dipen Patel <dipenp@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra HTE (Hardware Timestamping Engine) driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hte/hte.c b/drivers/hte/hte.c
new file mode 100644 (file)
index 0000000..7c3b447
--- /dev/null
@@ -0,0 +1,947 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022 NVIDIA Corporation
+ *
+ * Author: Dipen Patel <dipenp@nvidia.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/hte.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+
+#define HTE_TS_NAME_LEN                10
+
+/* Global list of the HTE devices */
+static DEFINE_SPINLOCK(hte_lock);
+static LIST_HEAD(hte_devices);
+
+enum {
+       HTE_TS_REGISTERED,
+       HTE_TS_REQ,
+       HTE_TS_DISABLE,
+       HTE_TS_QUEUE_WK,
+};
+
+/**
+ * struct hte_ts_info - Information related to requested timestamp.
+ *
+ * @xlated_id: Timestamp ID as understood between the HTE subsystem and the
+ * HTE provider; see the xlate callback API.
+ * @flags: Flags holding state information.
+ * @hte_cb_flags: Callback related flags.
+ * @seq: Timestamp sequence counter.
+ * @line_name: HTE allocated line name.
+ * @free_attr_name: If set, free the attr name.
+ * @cb: A nonsleeping callback function provided by clients.
+ * @tcb: A secondary sleeping callback function provided by clients.
+ * @dropped_ts: Dropped timestamps.
+ * @slock: Spin lock to synchronize between the disable/enable and
+ * request/release APIs.
+ * @cb_work: Callback work item, used when tcb is specified.
+ * @req_mlock: Lock during timestamp request/release APIs.
+ * @ts_dbg_root: Root for the debug fs.
+ * @gdev: HTE abstract device that this timestamp information belongs to.
+ * @cl_data: Client specific data.
+ */
+struct hte_ts_info {
+       u32 xlated_id;
+       unsigned long flags;
+       unsigned long hte_cb_flags;
+       u64 seq;
+       char *line_name;
+       bool free_attr_name;
+       hte_ts_cb_t cb;
+       hte_ts_sec_cb_t tcb;
+       atomic_t dropped_ts;
+       spinlock_t slock;
+       struct work_struct cb_work;
+       struct mutex req_mlock;
+       struct dentry *ts_dbg_root;
+       struct hte_device *gdev;
+       void *cl_data;
+};
+
+/**
+ * struct hte_device - HTE abstract device
+ * @nlines: Number of entities this device supports.
+ * @ts_req: Total number of entities requested.
+ * @sdev: Device used at various debug prints.
+ * @dbg_root: Root directory for debug fs.
+ * @list: List node to store hte_device for each provider.
+ * @chip: HTE chip providing this HTE device.
+ * @owner: helps prevent removal of modules when in use.
+ * @ei: Timestamp information.
+ */
+struct hte_device {
+       u32 nlines;
+       atomic_t ts_req;
+       struct device *sdev;
+       struct dentry *dbg_root;
+       struct list_head list;
+       struct hte_chip *chip;
+       struct module *owner;
+       struct hte_ts_info ei[];
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *hte_root;
+
+static int __init hte_subsys_dbgfs_init(void)
+{
+       /* creates /sys/kernel/debug/hte/ */
+       hte_root = debugfs_create_dir("hte", NULL);
+
+       return 0;
+}
+subsys_initcall(hte_subsys_dbgfs_init);
+
+static void hte_chip_dbgfs_init(struct hte_device *gdev)
+{
+       const struct hte_chip *chip = gdev->chip;
+       const char *name = chip->name ? chip->name : dev_name(chip->dev);
+
+       gdev->dbg_root = debugfs_create_dir(name, hte_root);
+
+       debugfs_create_atomic_t("ts_requested", 0444, gdev->dbg_root,
+                               &gdev->ts_req);
+       debugfs_create_u32("total_ts", 0444, gdev->dbg_root,
+                          &gdev->nlines);
+}
+
+static void hte_ts_dbgfs_init(const char *name, struct hte_ts_info *ei)
+{
+       if (!ei->gdev->dbg_root || !name)
+               return;
+
+       ei->ts_dbg_root = debugfs_create_dir(name, ei->gdev->dbg_root);
+
+       debugfs_create_atomic_t("dropped_timestamps", 0444, ei->ts_dbg_root,
+                               &ei->dropped_ts);
+}
+
+#else
+
+static void hte_chip_dbgfs_init(struct hte_device *gdev)
+{
+}
+
+static void hte_ts_dbgfs_init(const char *name, struct hte_ts_info *ei)
+{
+}
+
+#endif
+
+/**
+ * hte_ts_put() - Release and disable timestamp for the given desc.
+ *
+ * @desc: timestamp descriptor.
+ *
+ * Context: The debugfs_remove_recursive() call may use sleeping locks, so
+ *         this is not suitable for atomic context.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_ts_put(struct hte_ts_desc *desc)
+{
+       int ret = 0;
+       unsigned long flag;
+       struct hte_device *gdev;
+       struct hte_ts_info *ei;
+
+       if (!desc)
+               return -EINVAL;
+
+       ei = desc->hte_data;
+
+       if (!ei || !ei->gdev)
+               return -EINVAL;
+
+       gdev = ei->gdev;
+
+       mutex_lock(&ei->req_mlock);
+
+       if (unlikely(!test_bit(HTE_TS_REQ, &ei->flags) &&
+           !test_bit(HTE_TS_REGISTERED, &ei->flags))) {
+               dev_info(gdev->sdev, "id:%d is not requested\n",
+                        desc->attr.line_id);
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       if (unlikely(!test_bit(HTE_TS_REQ, &ei->flags) &&
+           test_bit(HTE_TS_REGISTERED, &ei->flags))) {
+               dev_info(gdev->sdev, "id:%d is registered but not requested\n",
+                        desc->attr.line_id);
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       if (test_bit(HTE_TS_REQ, &ei->flags) &&
+           !test_bit(HTE_TS_REGISTERED, &ei->flags)) {
+               clear_bit(HTE_TS_REQ, &ei->flags);
+               desc->hte_data = NULL;
+               ret = 0;
+               goto mod_put;
+       }
+
+       ret = gdev->chip->ops->release(gdev->chip, desc, ei->xlated_id);
+       if (ret) {
+               dev_err(gdev->sdev, "id: %d free failed\n",
+                       desc->attr.line_id);
+               goto unlock;
+       }
+
+       kfree(ei->line_name);
+       if (ei->free_attr_name)
+               kfree_const(desc->attr.name);
+
+       debugfs_remove_recursive(ei->ts_dbg_root);
+
+       spin_lock_irqsave(&ei->slock, flag);
+
+       if (test_bit(HTE_TS_QUEUE_WK, &ei->flags)) {
+               spin_unlock_irqrestore(&ei->slock, flag);
+               flush_work(&ei->cb_work);
+               spin_lock_irqsave(&ei->slock, flag);
+       }
+
+       atomic_dec(&gdev->ts_req);
+       atomic_set(&ei->dropped_ts, 0);
+
+       ei->seq = 1;
+       ei->flags = 0;
+       desc->hte_data = NULL;
+
+       spin_unlock_irqrestore(&ei->slock, flag);
+
+       ei->cb = NULL;
+       ei->tcb = NULL;
+       ei->cl_data = NULL;
+
+mod_put:
+       module_put(gdev->owner);
+unlock:
+       mutex_unlock(&ei->req_mlock);
+       dev_dbg(gdev->sdev, "release id: %d\n", desc->attr.line_id);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(hte_ts_put);
+
+static int hte_ts_dis_en_common(struct hte_ts_desc *desc, bool en)
+{
+       u32 ts_id;
+       struct hte_device *gdev;
+       struct hte_ts_info *ei;
+       int ret;
+       unsigned long flag;
+
+       if (!desc)
+               return -EINVAL;
+
+       ei = desc->hte_data;
+
+       if (!ei || !ei->gdev)
+               return -EINVAL;
+
+       gdev = ei->gdev;
+       ts_id = desc->attr.line_id;
+
+       mutex_lock(&ei->req_mlock);
+
+       if (!test_bit(HTE_TS_REGISTERED, &ei->flags)) {
+               dev_dbg(gdev->sdev, "id:%d is not registered", ts_id);
+               ret = -EUSERS;
+               goto out;
+       }
+
+       spin_lock_irqsave(&ei->slock, flag);
+
+       if (en) {
+               if (!test_bit(HTE_TS_DISABLE, &ei->flags)) {
+                       ret = 0;
+                       goto out_unlock;
+               }
+
+               spin_unlock_irqrestore(&ei->slock, flag);
+               ret = gdev->chip->ops->enable(gdev->chip, ei->xlated_id);
+               if (ret) {
+                       dev_warn(gdev->sdev, "id: %d enable failed\n",
+                                ts_id);
+                       goto out;
+               }
+
+               spin_lock_irqsave(&ei->slock, flag);
+               clear_bit(HTE_TS_DISABLE, &ei->flags);
+       } else {
+               if (test_bit(HTE_TS_DISABLE, &ei->flags)) {
+                       ret = 0;
+                       goto out_unlock;
+               }
+
+               spin_unlock_irqrestore(&ei->slock, flag);
+               ret = gdev->chip->ops->disable(gdev->chip, ei->xlated_id);
+               if (ret) {
+                       dev_warn(gdev->sdev, "id: %d disable failed\n",
+                                ts_id);
+                       goto out;
+               }
+
+               spin_lock_irqsave(&ei->slock, flag);
+               set_bit(HTE_TS_DISABLE, &ei->flags);
+       }
+
+out_unlock:
+       spin_unlock_irqrestore(&ei->slock, flag);
+out:
+       mutex_unlock(&ei->req_mlock);
+       return ret;
+}
+
+/**
+ * hte_disable_ts() - Disable timestamp on given descriptor.
+ *
+ * The API does not release any resources associated with the desc.
+ *
+ * @desc: ts descriptor, the same descriptor that was passed to the request API.
+ *
+ * Context: Takes a mutex lock; not suitable for atomic context.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_disable_ts(struct hte_ts_desc *desc)
+{
+       return hte_ts_dis_en_common(desc, false);
+}
+EXPORT_SYMBOL_GPL(hte_disable_ts);
+
+/**
+ * hte_enable_ts() - Enable timestamp on given descriptor.
+ *
+ * @desc: ts descriptor, the same descriptor that was passed to the request API.
+ *
+ * Context: Takes a mutex lock; not suitable for atomic context.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_enable_ts(struct hte_ts_desc *desc)
+{
+       return hte_ts_dis_en_common(desc, true);
+}
+EXPORT_SYMBOL_GPL(hte_enable_ts);
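+
+/*
+ * For example, a consumer can pause and resume timestamping on a line it
+ * already owns without releasing it. A minimal sketch, assuming desc was set
+ * up with hte_ts_get() and one of the request APIs; reconfigure_consumer()
+ * is a hypothetical placeholder:
+ *
+ *     ret = hte_disable_ts(desc);
+ *     if (ret)
+ *             return ret;
+ *
+ *     reconfigure_consumer();
+ *
+ *     ret = hte_enable_ts(desc);
+ *     if (ret)
+ *             return ret;
+ */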
+
+static void hte_do_cb_work(struct work_struct *w)
+{
+       unsigned long flag;
+       struct hte_ts_info *ei = container_of(w, struct hte_ts_info, cb_work);
+
+       if (unlikely(!ei->tcb))
+               return;
+
+       ei->tcb(ei->cl_data);
+
+       spin_lock_irqsave(&ei->slock, flag);
+       clear_bit(HTE_TS_QUEUE_WK, &ei->flags);
+       spin_unlock_irqrestore(&ei->slock, flag);
+}
+
+static int __hte_req_ts(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+                       hte_ts_sec_cb_t tcb, void *data)
+{
+       int ret;
+       struct hte_device *gdev;
+       struct hte_ts_info *ei = desc->hte_data;
+
+       gdev = ei->gdev;
+       /*
+        * Multiple consumers may request the same entity concurrently,
+        * so take the lock here.
+        */
+       mutex_lock(&ei->req_mlock);
+
+       if (test_bit(HTE_TS_REGISTERED, &ei->flags) ||
+           !test_bit(HTE_TS_REQ, &ei->flags)) {
+               dev_dbg(gdev->chip->dev, "id:%u req failed\n",
+                       desc->attr.line_id);
+               ret = -EUSERS;
+               goto unlock;
+       }
+
+       ei->cb = cb;
+       ei->tcb = tcb;
+       if (tcb)
+               INIT_WORK(&ei->cb_work, hte_do_cb_work);
+
+       ret = gdev->chip->ops->request(gdev->chip, desc, ei->xlated_id);
+       if (ret < 0) {
+               dev_err(gdev->chip->dev, "ts request failed\n");
+               goto unlock;
+       }
+
+       ei->cl_data = data;
+       ei->seq = 1;
+
+       atomic_inc(&gdev->ts_req);
+
+       ei->line_name = NULL;
+       if (!desc->attr.name) {
+               ei->line_name = kzalloc(HTE_TS_NAME_LEN, GFP_KERNEL);
+               if (ei->line_name)
+                       scnprintf(ei->line_name, HTE_TS_NAME_LEN, "ts_%u",
+                                 desc->attr.line_id);
+       }
+
+       hte_ts_dbgfs_init(desc->attr.name == NULL ?
+                         ei->line_name : desc->attr.name, ei);
+       set_bit(HTE_TS_REGISTERED, &ei->flags);
+
+       dev_dbg(gdev->chip->dev, "id: %u, xlated id:%u",
+               desc->attr.line_id, ei->xlated_id);
+
+       ret = 0;
+
+unlock:
+       mutex_unlock(&ei->req_mlock);
+
+       return ret;
+}
+
+static int hte_bind_ts_info_locked(struct hte_ts_info *ei,
+                                  struct hte_ts_desc *desc, u32 x_id)
+{
+       int ret = 0;
+
+       mutex_lock(&ei->req_mlock);
+
+       if (test_bit(HTE_TS_REQ, &ei->flags)) {
+               dev_dbg(ei->gdev->chip->dev, "id:%u is already requested\n",
+                       desc->attr.line_id);
+               ret = -EUSERS;
+               goto out;
+       }
+
+       set_bit(HTE_TS_REQ, &ei->flags);
+       desc->hte_data = ei;
+       ei->xlated_id = x_id;
+
+out:
+       mutex_unlock(&ei->req_mlock);
+
+       return ret;
+}
+
+static struct hte_device *of_node_to_htedevice(struct device_node *np)
+{
+       struct hte_device *gdev;
+
+       spin_lock(&hte_lock);
+
+       list_for_each_entry(gdev, &hte_devices, list)
+               if (gdev->chip && gdev->chip->dev &&
+                   gdev->chip->dev->of_node == np) {
+                       spin_unlock(&hte_lock);
+                       return gdev;
+               }
+
+       spin_unlock(&hte_lock);
+
+       return ERR_PTR(-ENODEV);
+}
+
+static struct hte_device *hte_find_dev_from_linedata(struct hte_ts_desc *desc)
+{
+       struct hte_device *gdev;
+
+       spin_lock(&hte_lock);
+
+       list_for_each_entry(gdev, &hte_devices, list)
+               if (gdev->chip && gdev->chip->match_from_linedata) {
+                       if (!gdev->chip->match_from_linedata(gdev->chip, desc))
+                               continue;
+                       spin_unlock(&hte_lock);
+                       return gdev;
+               }
+
+       spin_unlock(&hte_lock);
+
+       return ERR_PTR(-ENODEV);
+}
+
+/**
+ * of_hte_req_count() - Return the number of entities to timestamp.
+ *
+ * The function returns the total count of the requested entities to timestamp
+ * by parsing the consumer's device tree node.
+ *
+ * @dev: The HTE consumer.
+ *
+ * Returns: Positive number on success, -ENOENT if no "timestamps" entries are
+ * found, or another negative error code on failure.
+ */
+int of_hte_req_count(struct device *dev)
+{
+       int count;
+
+       if (!dev || !dev->of_node)
+               return -EINVAL;
+
+       count = of_count_phandle_with_args(dev->of_node, "timestamps",
+                                          "#timestamp-cells");
+
+       return count ? count : -ENOENT;
+}
+EXPORT_SYMBOL_GPL(of_hte_req_count);
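+
+/*
+ * For example, a device tree based consumer would typically size its
+ * descriptor array from of_hte_req_count() and then initialize each entry
+ * with hte_ts_get(). A minimal sketch, with error unwinding omitted and the
+ * descs/ret variables assumed to be declared by the hypothetical caller:
+ *
+ *     int i, nr;
+ *
+ *     nr = of_hte_req_count(dev);
+ *     if (nr < 0)
+ *             return nr;
+ *
+ *     descs = devm_kcalloc(dev, nr, sizeof(*descs), GFP_KERNEL);
+ *     if (!descs)
+ *             return -ENOMEM;
+ *
+ *     for (i = 0; i < nr; i++) {
+ *             ret = hte_ts_get(dev, &descs[i], i);
+ *             if (ret)
+ *                     return ret;
+ *     }
+ */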
+
+static inline struct hte_device *hte_get_dev(struct hte_ts_desc *desc)
+{
+       return hte_find_dev_from_linedata(desc);
+}
+
+static struct hte_device *hte_of_get_dev(struct device *dev,
+                                        struct hte_ts_desc *desc,
+                                        int index,
+                                        struct of_phandle_args *args,
+                                        bool *free_name)
+{
+       int ret;
+       struct device_node *np;
+       char *temp;
+
+       if (!dev->of_node)
+               return ERR_PTR(-EINVAL);
+
+       np = dev->of_node;
+
+       if (!of_find_property(np, "timestamp-names", NULL)) {
+               /* Let hte core construct it during request time */
+               desc->attr.name = NULL;
+       } else {
+               ret = of_property_read_string_index(np, "timestamp-names",
+                                                   index, &desc->attr.name);
+               if (ret) {
+                       pr_err("can't parse \"timestamp-names\" property\n");
+                       return ERR_PTR(ret);
+               }
+               *free_name = false;
+               if (desc->attr.name) {
+                       temp = skip_spaces(desc->attr.name);
+                       if (!*temp)
+                               desc->attr.name = NULL;
+               }
+       }
+
+       ret = of_parse_phandle_with_args(np, "timestamps", "#timestamp-cells",
+                                        index, args);
+       if (ret) {
+               pr_err("%s(): can't parse \"timestamps\" property\n",
+                      __func__);
+               return ERR_PTR(ret);
+       }
+
+       of_node_put(args->np);
+
+       return of_node_to_htedevice(args->np);
+}
+
+/**
+ * hte_ts_get() - Initialize and obtain the HTE descriptor.
+ *
+ * The function initializes the consumer-provided HTE descriptor. If the
+ * consumer has a device tree node, @index is used to parse the line id and
+ * other details. The function must be called before using any request APIs.
+ *
+ * @dev: HTE consumer/client device, used when parsing the device tree node.
+ * @desc: Pre-allocated timestamp descriptor.
+ * @index: Index used to parse line_id from the device tree node, if the node
+ * is present.
+ *
+ * Context: Takes a mutex lock.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_ts_get(struct device *dev, struct hte_ts_desc *desc, int index)
+{
+       struct hte_device *gdev;
+       struct hte_ts_info *ei;
+       const struct fwnode_handle *fwnode;
+       struct of_phandle_args args;
+       u32 xlated_id;
+       int ret;
+       bool free_name = false;
+
+       if (!desc)
+               return -EINVAL;
+
+       fwnode = dev ? dev_fwnode(dev) : NULL;
+
+       if (is_of_node(fwnode))
+               gdev = hte_of_get_dev(dev, desc, index, &args, &free_name);
+       else
+               gdev = hte_get_dev(desc);
+
+       if (IS_ERR(gdev)) {
+               pr_err("%s() no hte dev found\n", __func__);
+               return PTR_ERR(gdev);
+       }
+
+       if (!try_module_get(gdev->owner))
+               return -ENODEV;
+
+       if (!gdev->chip) {
+               pr_err("%s(): requested id does not have provider\n",
+                      __func__);
+               ret = -ENODEV;
+               goto put;
+       }
+
+       if (is_of_node(fwnode)) {
+               if (!gdev->chip->xlate_of)
+                       ret = -EINVAL;
+               else
+                       ret = gdev->chip->xlate_of(gdev->chip, &args,
+                                                  desc, &xlated_id);
+       } else {
+               if (!gdev->chip->xlate_plat)
+                       ret = -EINVAL;
+               else
+                       ret = gdev->chip->xlate_plat(gdev->chip, desc,
+                                                    &xlated_id);
+       }
+
+       if (ret < 0)
+               goto put;
+
+       ei = &gdev->ei[xlated_id];
+
+       ret = hte_bind_ts_info_locked(ei, desc, xlated_id);
+       if (ret)
+               goto put;
+
+       ei->free_attr_name = free_name;
+
+       return 0;
+
+put:
+       module_put(gdev->owner);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(hte_ts_get);
+
+static void __devm_hte_release_ts(void *res)
+{
+       hte_ts_put(res);
+}
+
+/**
+ * hte_request_ts_ns() - Request and enable a hardware timestamp in
+ * nanoseconds.
+ *
+ * The entity is provider specific, for example GPIO lines, signals, buses,
+ * etc. The API allocates the necessary resources and enables the timestamp.
+ *
+ * @desc: Pre-allocated and initialized timestamp descriptor.
+ * @cb: Callback to push the timestamp data to the consumer.
+ * @tcb: Optional callback. If it is provided, the subsystem initializes a
+ * workqueue. It is called when @cb returns HTE_RUN_SECOND_CB.
+ * @data: Client data, passed to the @cb and @tcb callbacks.
+ *
+ * Context: Takes a mutex lock.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+                     hte_ts_sec_cb_t tcb, void *data)
+{
+       int ret;
+       struct hte_ts_info *ei;
+
+       if (!desc || !desc->hte_data || !cb)
+               return -EINVAL;
+
+       ei = desc->hte_data;
+       if (!ei || !ei->gdev)
+               return -EINVAL;
+
+       ret = __hte_req_ts(desc, cb, tcb, data);
+       if (ret < 0) {
+               dev_err(ei->gdev->chip->dev,
+                       "failed to request id: %d\n", desc->attr.line_id);
+               return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(hte_request_ts_ns);
+
+/**
+ * devm_hte_request_ts_ns() - Resource-managed API to request and enable a
+ * hardware timestamp in nanoseconds.
+ *
+ * The entity is provider specific, for example GPIO lines, signals, buses,
+ * etc. The API allocates the necessary resources and enables the timestamp.
+ * It deallocates and disables the timestamp automatically when the consumer
+ * exits.
+ *
+ * @dev: HTE consumer/client device.
+ * @desc: Pre-allocated and initialized timestamp descriptor.
+ * @cb: Callback to push the timestamp data to the consumer.
+ * @tcb: Optional callback. If it is provided, the subsystem initializes a
+ * workqueue. It is called when @cb returns HTE_RUN_SECOND_CB.
+ * @data: Client data, passed to the @cb and @tcb callbacks.
+ *
+ * Context: Takes a mutex lock.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int devm_hte_request_ts_ns(struct device *dev, struct hte_ts_desc *desc,
+                          hte_ts_cb_t cb, hte_ts_sec_cb_t tcb,
+                          void *data)
+{
+       int err;
+
+       if (!dev)
+               return -EINVAL;
+
+       err = hte_request_ts_ns(desc, cb, tcb, data);
+       if (err)
+               return err;
+
+       err = devm_add_action_or_reset(dev, __devm_hte_release_ts, desc);
+       if (err)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(devm_hte_request_ts_ns);
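+
+/*
+ * For example, a consumer provides a callback and lets devm manage the
+ * release. A minimal sketch; foo_* names are hypothetical and the tval/seq
+ * fields and HTE_CB_HANDLED value are assumed from include/linux/hte.h:
+ *
+ *     static enum hte_return foo_ts_cb(struct hte_ts_data *ts, void *data)
+ *     {
+ *             struct foo_priv *priv = data;
+ *
+ *             priv->last_ts_ns = ts->tval;
+ *             priv->last_seq = ts->seq;
+ *
+ *             return HTE_CB_HANDLED;
+ *     }
+ *
+ *     ret = devm_hte_request_ts_ns(dev, &priv->desc, foo_ts_cb, NULL, priv);
+ *     if (ret)
+ *             return ret;
+ */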
+
+/**
+ * hte_init_line_attr() - Initialize line attributes.
+ *
+ * Zeroes out the line attributes and initializes them with the provided
+ * arguments. The function needs to be called before calling any
+ * consumer-facing functions.
+ *
+ * @desc: Pre-allocated timestamp descriptor.
+ * @line_id: line id.
+ * @edge_flags: edge flags related to line_id.
+ * @name: name of the line.
+ * @data: line data related to line_id.
+ *
+ * Context: Any.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+                      unsigned long edge_flags, const char *name, void *data)
+{
+       if (!desc)
+               return -EINVAL;
+
+       memset(&desc->attr, 0, sizeof(desc->attr));
+
+       desc->attr.edge_flags = edge_flags;
+       desc->attr.line_id = line_id;
+       desc->attr.line_data = data;
+       if (name) {
+               name =  kstrdup_const(name, GFP_KERNEL);
+               if (!name)
+                       return -ENOMEM;
+       }
+
+       desc->attr.name = name;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(hte_init_line_attr);
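+
+/*
+ * For example, a consumer without a device tree node fills the attributes by
+ * hand and lets the subsystem find a provider via match_from_linedata(). A
+ * minimal sketch; foo_* names are hypothetical and the HTE_RISING_EDGE_TS
+ * flag is assumed from include/linux/hte.h:
+ *
+ *     ret = hte_init_line_attr(&desc, line_id, HTE_RISING_EDGE_TS,
+ *                              "foo-line", foo_line_data);
+ *     if (ret)
+ *             return ret;
+ *
+ *     ret = hte_ts_get(NULL, &desc, 0);
+ *     if (ret)
+ *             return ret;
+ */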
+
+/**
+ * hte_get_clk_src_info() - Get the clock source information for a ts
+ * descriptor.
+ *
+ * @desc: ts descriptor, same as returned from request API.
+ * @ci: The API fills this structure with the clock information data.
+ *
+ * Context: Any context.
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+                        struct hte_clk_info *ci)
+{
+       struct hte_chip *chip;
+       struct hte_ts_info *ei;
+
+       if (!desc || !desc->hte_data || !ci) {
+               pr_debug("%s:%d\n", __func__, __LINE__);
+               return -EINVAL;
+       }
+
+       ei = desc->hte_data;
+       if (!ei->gdev || !ei->gdev->chip)
+               return -EINVAL;
+
+       chip = ei->gdev->chip;
+       if (!chip->ops->get_clk_src_info)
+               return -EOPNOTSUPP;
+
+       return chip->ops->get_clk_src_info(chip, ci);
+}
+EXPORT_SYMBOL_GPL(hte_get_clk_src_info);
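+
+/*
+ * For example, a consumer can query the provider's timestamp clock after a
+ * successful request. A minimal sketch; the hz and type fields are assumed
+ * from struct hte_clk_info in include/linux/hte.h:
+ *
+ *     struct hte_clk_info ci;
+ *
+ *     ret = hte_get_clk_src_info(&desc, &ci);
+ *     if (!ret)
+ *             dev_dbg(dev, "ts clock: %llu Hz, clockid: %d\n", ci.hz, ci.type);
+ */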
+
+/**
+ * hte_push_ts_ns() - Push timestamp data in nanoseconds.
+ *
+ * It is used by the provider to push timestamp data.
+ *
+ * @chip: The HTE chip, used during the registration.
+ * @xlated_id: entity id understood by both the subsystem and the provider; it
+ * is obtained from the xlate callback during the request API.
+ * @data: timestamp data.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int hte_push_ts_ns(const struct hte_chip *chip, u32 xlated_id,
+                  struct hte_ts_data *data)
+{
+       enum hte_return ret;
+       int st = 0;
+       struct hte_ts_info *ei;
+       unsigned long flag;
+
+       if (!chip || !data || !chip->gdev)
+               return -EINVAL;
+
+       if (xlated_id >= chip->nlines)
+               return -EINVAL;
+
+       ei = &chip->gdev->ei[xlated_id];
+
+       spin_lock_irqsave(&ei->slock, flag);
+
+       /* timestamp sequence counter */
+       data->seq = ei->seq++;
+
+       if (!test_bit(HTE_TS_REGISTERED, &ei->flags) ||
+           test_bit(HTE_TS_DISABLE, &ei->flags)) {
+               dev_dbg(chip->dev, "Unknown timestamp push\n");
+               atomic_inc(&ei->dropped_ts);
+               st = -EINVAL;
+               goto unlock;
+       }
+
+       ret = ei->cb(data, ei->cl_data);
+       if (ret == HTE_RUN_SECOND_CB && ei->tcb) {
+               queue_work(system_unbound_wq, &ei->cb_work);
+               set_bit(HTE_TS_QUEUE_WK, &ei->flags);
+       }
+
+unlock:
+       spin_unlock_irqrestore(&ei->slock, flag);
+
+       return st;
+}
+EXPORT_SYMBOL_GPL(hte_push_ts_ns);
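+
+/*
+ * For example, a provider typically pushes timestamps from its interrupt
+ * handler. A minimal sketch; foo_* names are hypothetical and the tval field
+ * is assumed from struct hte_ts_data in include/linux/hte.h:
+ *
+ *     static irqreturn_t foo_hte_isr(int irq, void *dev_id)
+ *     {
+ *             struct foo_hte_provider *hdev = dev_id;
+ *             struct hte_ts_data el = { 0 };
+ *
+ *             el.tval = foo_read_hw_timestamp_ns(hdev);
+ *             hte_push_ts_ns(hdev->chip, hdev->xlated_id, &el);
+ *
+ *             return IRQ_HANDLED;
+ *     }
+ */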
+
+static int hte_register_chip(struct hte_chip *chip)
+{
+       struct hte_device *gdev;
+       u32 i;
+
+       if (!chip || !chip->dev || !chip->dev->of_node)
+               return -EINVAL;
+
+       if (!chip->ops || !chip->ops->request || !chip->ops->release) {
+               dev_err(chip->dev, "Driver needs to provide ops\n");
+               return -EINVAL;
+       }
+
+       gdev = kzalloc(struct_size(gdev, ei, chip->nlines), GFP_KERNEL);
+       if (!gdev)
+               return -ENOMEM;
+
+       gdev->chip = chip;
+       chip->gdev = gdev;
+       gdev->nlines = chip->nlines;
+       gdev->sdev = chip->dev;
+
+       for (i = 0; i < chip->nlines; i++) {
+               gdev->ei[i].gdev = gdev;
+               mutex_init(&gdev->ei[i].req_mlock);
+               spin_lock_init(&gdev->ei[i].slock);
+       }
+
+       if (chip->dev->driver)
+               gdev->owner = chip->dev->driver->owner;
+       else
+               gdev->owner = THIS_MODULE;
+
+       of_node_get(chip->dev->of_node);
+
+       INIT_LIST_HEAD(&gdev->list);
+
+       spin_lock(&hte_lock);
+       list_add_tail(&gdev->list, &hte_devices);
+       spin_unlock(&hte_lock);
+
+       hte_chip_dbgfs_init(gdev);
+
+       dev_dbg(chip->dev, "Added hte chip\n");
+
+       return 0;
+}
+
+static int hte_unregister_chip(struct hte_chip *chip)
+{
+       struct hte_device *gdev;
+
+       if (!chip)
+               return -EINVAL;
+
+       gdev = chip->gdev;
+
+       spin_lock(&hte_lock);
+       list_del(&gdev->list);
+       spin_unlock(&hte_lock);
+
+       gdev->chip = NULL;
+
+       of_node_put(chip->dev->of_node);
+       debugfs_remove_recursive(gdev->dbg_root);
+       kfree(gdev);
+
+       dev_dbg(chip->dev, "Removed hte chip\n");
+
+       return 0;
+}
+
+static void _hte_devm_unregister_chip(void *chip)
+{
+       hte_unregister_chip(chip);
+}
+
+/**
+ * devm_hte_register_chip() - Resource managed API to register HTE chip.
+ *
+ * It is used by the provider to register itself with the HTE subsystem.
+ * The unregistration is done automatically when the provider exits.
+ *
+ * @chip: the HTE chip to add to the subsystem.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int devm_hte_register_chip(struct hte_chip *chip)
+{
+       int err;
+
+       err = hte_register_chip(chip);
+       if (err)
+               return err;
+
+       err = devm_add_action_or_reset(chip->dev, _hte_devm_unregister_chip,
+                                      chip);
+       if (err)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(devm_hte_register_chip);
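+
+/*
+ * For example, a provider fills in struct hte_chip and its ops in probe()
+ * before registering. A minimal sketch; foo_* names are hypothetical and the
+ * struct hte_ops name is assumed from include/linux/hte.h (request and
+ * release are the mandatory ops checked above):
+ *
+ *     static const struct hte_ops foo_hte_ops = {
+ *             .request = foo_hte_request,
+ *             .release = foo_hte_release,
+ *             .enable = foo_hte_enable,
+ *             .disable = foo_hte_disable,
+ *     };
+ *
+ *     chip->dev = &pdev->dev;
+ *     chip->ops = &foo_hte_ops;
+ *     chip->nlines = FOO_NR_LINES;
+ *     chip->xlate_of = foo_hte_xlate_of;
+ *
+ *     ret = devm_hte_register_chip(chip);
+ *     if (ret)
+ *             return ret;
+ */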
index 9c1b362..714d549 100644 (file)
@@ -575,31 +575,11 @@ static ssize_t driver_override_store(struct device *dev,
                                     const char *buf, size_t count)
 {
        struct hv_device *hv_dev = device_to_hv_device(dev);
-       char *driver_override, *old, *cp;
-
-       /* We need to keep extra room for a newline */
-       if (count >= (PAGE_SIZE - 1))
-               return -EINVAL;
-
-       driver_override = kstrndup(buf, count, GFP_KERNEL);
-       if (!driver_override)
-               return -ENOMEM;
-
-       cp = strchr(driver_override, '\n');
-       if (cp)
-               *cp = '\0';
-
-       device_lock(dev);
-       old = hv_dev->driver_override;
-       if (strlen(driver_override)) {
-               hv_dev->driver_override = driver_override;
-       } else {
-               kfree(driver_override);
-               hv_dev->driver_override = NULL;
-       }
-       device_unlock(dev);
+       int ret;
 
-       kfree(old);
+       ret = driver_set_override(dev, &hv_dev->driver_override, buf, count);
+       if (ret)
+               return ret;
 
        return count;
 }
index af00dca..ee6ce92 100644 (file)
@@ -1379,7 +1379,7 @@ static int coresight_fixup_device_conns(struct coresight_device *csdev)
                        continue;
                conn->child_dev =
                        coresight_find_csdev_by_fwnode(conn->child_fwnode);
-               if (conn->child_dev) {
+               if (conn->child_dev && conn->child_dev->has_conns_grp) {
                        ret = coresight_make_links(csdev, conn,
                                                   conn->child_dev);
                        if (ret)
@@ -1571,6 +1571,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
        int nr_refcnts = 1;
        atomic_t *refcnts = NULL;
        struct coresight_device *csdev;
+       bool registered = false;
 
        csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
        if (!csdev) {
@@ -1591,7 +1592,8 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
        refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL);
        if (!refcnts) {
                ret = -ENOMEM;
-               goto err_free_csdev;
+               kfree(csdev);
+               goto err_out;
        }
 
        csdev->refcnt = refcnts;
@@ -1616,6 +1618,13 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
        csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev));
        dev_set_name(&csdev->dev, "%s", desc->name);
 
+       /*
+        * Make sure the device registration and the connection fixup
+        * are synchronised, so that we don't see uninitialised devices
+        * on the coresight bus while trying to resolve the connections.
+        */
+       mutex_lock(&coresight_mutex);
+
        ret = device_register(&csdev->dev);
        if (ret) {
                put_device(&csdev->dev);
@@ -1623,7 +1632,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
                 * All resources are free'd explicitly via
                 * coresight_device_release(), triggered from put_device().
                 */
-               goto err_out;
+               goto out_unlock;
        }
 
        if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
@@ -1638,11 +1647,11 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
                         * from put_device(), which is in turn called from
                         * function device_unregister().
                         */
-                       goto err_out;
+                       goto out_unlock;
                }
        }
-
-       mutex_lock(&coresight_mutex);
+       /* Device is now registered */
+       registered = true;
 
        ret = coresight_create_conns_sysfs_group(csdev);
        if (!ret)
@@ -1652,16 +1661,18 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
        if (!ret && cti_assoc_ops && cti_assoc_ops->add)
                cti_assoc_ops->add(csdev);
 
+out_unlock:
        mutex_unlock(&coresight_mutex);
-       if (ret) {
+       /* Success */
+       if (!ret)
+               return csdev;
+
+       /* Unregister the device if needed */
+       if (registered) {
                coresight_unregister(csdev);
                return ERR_PTR(ret);
        }
 
-       return csdev;
-
-err_free_csdev:
-       kfree(csdev);
 err_out:
        /* Cleanup the connection information */
        coresight_release_platform_data(NULL, desc->pdata);
index 8845ec4..1874df7 100644 (file)
@@ -380,9 +380,10 @@ static int debug_notifier_call(struct notifier_block *self,
        int cpu;
        struct debug_drvdata *drvdata;
 
-       mutex_lock(&debug_lock);
+       /* Bail out if we can't acquire the mutex or the functionality is off */
+       if (!mutex_trylock(&debug_lock))
+               return NOTIFY_DONE;
 
-       /* Bail out if the functionality is disabled */
        if (!debug_enable)
                goto skip_dump;
 
@@ -401,7 +402,7 @@ static int debug_notifier_call(struct notifier_block *self,
 
 skip_dump:
        mutex_unlock(&debug_lock);
-       return 0;
+       return NOTIFY_DONE;
 }
 
 static struct notifier_block debug_notifier = {
index 7d413ba..d0ab993 100644 (file)
@@ -204,7 +204,7 @@ void etm_set_default(struct etm_config *config)
         *  set all bits in register 0x007, the ETMTECR2, to 0
         *  set register 0x008, the ETMTEEVR, to 0x6F (TRUE).
         */
-       config->enable_ctrl1 = BIT(24);
+       config->enable_ctrl1 = ETMTECR1_INC_EXC;
        config->enable_ctrl2 = 0x0;
        config->enable_event = ETM_HARD_WIRE_RES_A;
 
index e8c7649..68fcbf4 100644 (file)
@@ -474,7 +474,7 @@ static ssize_t addr_start_store(struct device *dev,
        config->addr_val[idx] = val;
        config->addr_type[idx] = ETM_ADDR_TYPE_START;
        config->startstop_ctrl |= (1 << idx);
-       config->enable_ctrl1 |= BIT(25);
+       config->enable_ctrl1 |= ETMTECR1_START_STOP;
        spin_unlock(&drvdata->spinlock);
 
        return size;
index 7f416a1..87299e9 100644 (file)
@@ -443,7 +443,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
        for (i = 0; i < drvdata->nr_ss_cmp; i++) {
                /* always clear status bit on restart if using single-shot */
                if (config->ss_ctrl[i] || config->ss_pe_cmp[i])
-                       config->ss_status[i] &= ~BIT(31);
+                       config->ss_status[i] &= ~TRCSSCSRn_STATUS;
                etm4x_relaxed_write32(csa, config->ss_ctrl[i], TRCSSCCRn(i));
                etm4x_relaxed_write32(csa, config->ss_status[i], TRCSSCSRn(i));
                if (etm4x_sspcicrn_present(drvdata, i))
@@ -633,7 +633,7 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
 
        /* Go from generic option to ETMv4 specifics */
        if (attr->config & BIT(ETM_OPT_CYCACC)) {
-               config->cfg |= BIT(4);
+               config->cfg |= TRCCONFIGR_CCI;
                /* TRM: Must program this for cycacc to work */
                config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
        }
@@ -653,14 +653,14 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
                        goto out;
 
                /* bit[11], Global timestamp tracing bit */
-               config->cfg |= BIT(11);
+               config->cfg |= TRCCONFIGR_TS;
        }
 
        /* Only trace contextID when runs in root PID namespace */
        if ((attr->config & BIT(ETM_OPT_CTXTID)) &&
            task_is_in_init_pid_ns(current))
                /* bit[6], Context ID tracing bit */
-               config->cfg |= BIT(ETM4_CFG_BIT_CTXTID);
+               config->cfg |= TRCCONFIGR_CID;
 
        /*
         * If set bit ETM_OPT_CTXTID2 in perf config, this asks to trace VMID
@@ -672,17 +672,15 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
                        ret = -EINVAL;
                        goto out;
                }
-
                /* Only trace virtual contextID when runs in root PID namespace */
                if (task_is_in_init_pid_ns(current))
-                       config->cfg |= BIT(ETM4_CFG_BIT_VMID) |
-                                      BIT(ETM4_CFG_BIT_VMID_OPT);
+                       config->cfg |= TRCCONFIGR_VMID | TRCCONFIGR_VMIDOPT;
        }
 
        /* return stack - enable if selected and supported */
        if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
                /* bit[12], Return stack enable bit */
-               config->cfg |= BIT(12);
+               config->cfg |= TRCCONFIGR_RS;
 
        /*
         * Set any selected configuration and preset.
@@ -1097,107 +1095,67 @@ static void etm4_init_arch_data(void *info)
        etmidr0 = etm4x_relaxed_read32(csa, TRCIDR0);
 
        /* INSTP0, bits[2:1] P0 tracing support field */
-       if (BMVAL(etmidr0, 1, 2) == 0b11)
-               drvdata->instrp0 = true;
-       else
-               drvdata->instrp0 = false;
-
+       drvdata->instrp0 = !!(FIELD_GET(TRCIDR0_INSTP0_MASK, etmidr0) == 0b11);
        /* TRCBB, bit[5] Branch broadcast tracing support bit */
-       if (BMVAL(etmidr0, 5, 5))
-               drvdata->trcbb = true;
-       else
-               drvdata->trcbb = false;
-
+       drvdata->trcbb = !!(etmidr0 & TRCIDR0_TRCBB);
        /* TRCCOND, bit[6] Conditional instruction tracing support bit */
-       if (BMVAL(etmidr0, 6, 6))
-               drvdata->trccond = true;
-       else
-               drvdata->trccond = false;
-
+       drvdata->trccond = !!(etmidr0 & TRCIDR0_TRCCOND);
        /* TRCCCI, bit[7] Cycle counting instruction bit */
-       if (BMVAL(etmidr0, 7, 7))
-               drvdata->trccci = true;
-       else
-               drvdata->trccci = false;
-
+       drvdata->trccci = !!(etmidr0 & TRCIDR0_TRCCCI);
        /* RETSTACK, bit[9] Return stack bit */
-       if (BMVAL(etmidr0, 9, 9))
-               drvdata->retstack = true;
-       else
-               drvdata->retstack = false;
-
+       drvdata->retstack = !!(etmidr0 & TRCIDR0_RETSTACK);
        /* NUMEVENT, bits[11:10] Number of events field */
-       drvdata->nr_event = BMVAL(etmidr0, 10, 11);
+       drvdata->nr_event = FIELD_GET(TRCIDR0_NUMEVENT_MASK, etmidr0);
        /* QSUPP, bits[16:15] Q element support field */
-       drvdata->q_support = BMVAL(etmidr0, 15, 16);
+       drvdata->q_support = FIELD_GET(TRCIDR0_QSUPP_MASK, etmidr0);
        /* TSSIZE, bits[28:24] Global timestamp size field */
-       drvdata->ts_size = BMVAL(etmidr0, 24, 28);
+       drvdata->ts_size = FIELD_GET(TRCIDR0_TSSIZE_MASK, etmidr0);
 
        /* maximum size of resources */
        etmidr2 = etm4x_relaxed_read32(csa, TRCIDR2);
        /* CIDSIZE, bits[9:5] Indicates the Context ID size */
-       drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
+       drvdata->ctxid_size = FIELD_GET(TRCIDR2_CIDSIZE_MASK, etmidr2);
        /* VMIDSIZE, bits[14:10] Indicates the VMID size */
-       drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
+       drvdata->vmid_size = FIELD_GET(TRCIDR2_VMIDSIZE_MASK, etmidr2);
        /* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
-       drvdata->ccsize = BMVAL(etmidr2, 25, 28);
+       drvdata->ccsize = FIELD_GET(TRCIDR2_CCSIZE_MASK, etmidr2);
 
        etmidr3 = etm4x_relaxed_read32(csa, TRCIDR3);
        /* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
-       drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
+       drvdata->ccitmin = FIELD_GET(TRCIDR3_CCITMIN_MASK, etmidr3);
        /* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
-       drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
+       drvdata->s_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_S_MASK, etmidr3);
        drvdata->config.s_ex_level = drvdata->s_ex_level;
        /* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
-       drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
-
+       drvdata->ns_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_NS_MASK, etmidr3);
        /*
         * TRCERR, bit[24] whether a trace unit can trace a
         * system error exception.
         */
-       if (BMVAL(etmidr3, 24, 24))
-               drvdata->trc_error = true;
-       else
-               drvdata->trc_error = false;
-
+       drvdata->trc_error = !!(etmidr3 & TRCIDR3_TRCERR);
        /* SYNCPR, bit[25] implementation has a fixed synchronization period? */
-       if (BMVAL(etmidr3, 25, 25))
-               drvdata->syncpr = true;
-       else
-               drvdata->syncpr = false;
-
+       drvdata->syncpr = !!(etmidr3 & TRCIDR3_SYNCPR);
        /* STALLCTL, bit[26] is stall control implemented? */
-       if (BMVAL(etmidr3, 26, 26))
-               drvdata->stallctl = true;
-       else
-               drvdata->stallctl = false;
-
+       drvdata->stallctl = !!(etmidr3 & TRCIDR3_STALLCTL);
        /* SYSSTALL, bit[27] implementation can support stall control? */
-       if (BMVAL(etmidr3, 27, 27))
-               drvdata->sysstall = true;
-       else
-               drvdata->sysstall = false;
-
+       drvdata->sysstall = !!(etmidr3 & TRCIDR3_SYSSTALL);
        /*
         * NUMPROC - the number of PEs available for tracing, 5bits
         *         = TRCIDR3.bits[13:12]bits[30:28]
         *  bits[4:3] = TRCIDR3.bits[13:12] (since etm-v4.2, otherwise RES0)
         *  bits[3:0] = TRCIDR3.bits[30:28]
         */
-       drvdata->nr_pe = (BMVAL(etmidr3, 12, 13) << 3) | BMVAL(etmidr3, 28, 30);
-
+       drvdata->nr_pe =  (FIELD_GET(TRCIDR3_NUMPROC_HI_MASK, etmidr3) << 3) |
+                          FIELD_GET(TRCIDR3_NUMPROC_LO_MASK, etmidr3);
        /* NOOVERFLOW, bit[31] is trace overflow prevention supported */
-       if (BMVAL(etmidr3, 31, 31))
-               drvdata->nooverflow = true;
-       else
-               drvdata->nooverflow = false;
+       drvdata->nooverflow = !!(etmidr3 & TRCIDR3_NOOVERFLOW);
 
        /* number of resources trace unit supports */
        etmidr4 = etm4x_relaxed_read32(csa, TRCIDR4);
        /* NUMACPAIRS, bits[0:3] number of addr comparator pairs for tracing */
-       drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
+       drvdata->nr_addr_cmp = FIELD_GET(TRCIDR4_NUMACPAIRS_MASK, etmidr4);
        /* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
-       drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
+       drvdata->nr_pe_cmp = FIELD_GET(TRCIDR4_NUMPC_MASK, etmidr4);
        /*
         * NUMRSPAIR, bits[19:16]
         * The number of resource pairs conveyed by the HW starts at 0, i.e a
@@ -1208,7 +1166,7 @@ static void etm4_init_arch_data(void *info)
         * the default TRUE and FALSE resource selectors are omitted.
         * Otherwise for values 0x1 and above the number is N + 1 as per v4.2.
         */
-       drvdata->nr_resource = BMVAL(etmidr4, 16, 19);
+       drvdata->nr_resource = FIELD_GET(TRCIDR4_NUMRSPAIR_MASK, etmidr4);
        if ((drvdata->arch < ETM_ARCH_V4_3) || (drvdata->nr_resource > 0))
                drvdata->nr_resource += 1;
        /*
@@ -1216,45 +1174,39 @@ static void etm4_init_arch_data(void *info)
         * comparator control for tracing. Read any status regs as these
         * also contain RO capability data.
         */
-       drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
+       drvdata->nr_ss_cmp = FIELD_GET(TRCIDR4_NUMSSCC_MASK, etmidr4);
        for (i = 0; i < drvdata->nr_ss_cmp; i++) {
                drvdata->config.ss_status[i] =
                        etm4x_relaxed_read32(csa, TRCSSCSRn(i));
        }
        /* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
-       drvdata->numcidc = BMVAL(etmidr4, 24, 27);
+       drvdata->numcidc = FIELD_GET(TRCIDR4_NUMCIDC_MASK, etmidr4);
        /* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
-       drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
+       drvdata->numvmidc = FIELD_GET(TRCIDR4_NUMVMIDC_MASK, etmidr4);
 
        etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
        /* NUMEXTIN, bits[8:0] number of external inputs implemented */
-       drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
+       drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5);
        /* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
-       drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
+       drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5);
        /* ATBTRIG, bit[22] implementation can support ATB triggers? */
-       if (BMVAL(etmidr5, 22, 22))
-               drvdata->atbtrig = true;
-       else
-               drvdata->atbtrig = false;
+       drvdata->atbtrig = !!(etmidr5 & TRCIDR5_ATBTRIG);
        /*
         * LPOVERRIDE, bit[23] implementation supports
         * low-power state override
         */
-       if (BMVAL(etmidr5, 23, 23) && (!drvdata->skip_power_up))
-               drvdata->lpoverride = true;
-       else
-               drvdata->lpoverride = false;
+       drvdata->lpoverride = (etmidr5 & TRCIDR5_LPOVERRIDE) && (!drvdata->skip_power_up);
        /* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
-       drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
+       drvdata->nrseqstate = FIELD_GET(TRCIDR5_NUMSEQSTATE_MASK, etmidr5);
        /* NUMCNTR, bits[30:28] number of counters available for tracing */
-       drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
+       drvdata->nr_cntr = FIELD_GET(TRCIDR5_NUMCNTR_MASK, etmidr5);
        etm4_cs_lock(drvdata, csa);
        cpu_detect_trace_filtering(drvdata);
 }
 
 static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config)
 {
-       return etm4_get_access_type(config) << TRCVICTLR_EXLEVEL_SHIFT;
+       return etm4_get_access_type(config) << __bf_shf(TRCVICTLR_EXLEVEL_MASK);
 }
 
 /* Set ELx trace filter access in the TRCVICTLR register */
@@ -1280,7 +1232,7 @@ static void etm4_set_default_config(struct etmv4_config *config)
        config->ts_ctrl = 0x0;
 
        /* TRCVICTLR::EVENT = 0x01, select the always on logic */
-       config->vinst_ctrl = BIT(0);
+       config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
 
        /* TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering */
        etm4_set_victlr_access(config);
@@ -1389,7 +1341,7 @@ static void etm4_set_default_filter(struct etmv4_config *config)
         * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
         * in the started state
         */
-       config->vinst_ctrl |= BIT(9);
+       config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
        config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
 
        /* No start-stop filtering for ViewInst */
@@ -1493,7 +1445,7 @@ static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
                         * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
                         * in the started state
                         */
-                       config->vinst_ctrl |= BIT(9);
+                       config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
 
                        /* No start-stop filtering for ViewInst */
                        config->vissctlr = 0x0;
@@ -1521,7 +1473,7 @@ static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
                         * etm4_disable_perf().
                         */
                        if (filters->ssstatus)
-                               config->vinst_ctrl |= BIT(9);
+                               config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
 
                        /* No include/exclude filtering for ViewInst */
                        config->viiectlr = 0x0;
index 21687cc..6ea8181 100644 (file)
@@ -22,7 +22,7 @@ static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
         * TRCACATRn.TYPE bit[1:0]: type of comparison
         * the trace unit performs
         */
-       if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
+       if (FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]) == TRCACATRn_TYPE_ADDR) {
                if (idx % 2 != 0)
                        return -EINVAL;
 
@@ -180,12 +180,12 @@ static ssize_t reset_store(struct device *dev,
 
        /* Disable data tracing: do not trace load and store data transfers */
        config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
-       config->cfg &= ~(BIT(1) | BIT(2));
+       config->cfg &= ~(TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE);
 
        /* Disable data value and data address tracing */
        config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
                           ETM_MODE_DATA_TRACE_VAL);
-       config->cfg &= ~(BIT(16) | BIT(17));
+       config->cfg &= ~(TRCCONFIGR_DA | TRCCONFIGR_DV);
 
        /* Disable all events tracing */
        config->eventctrl0 = 0x0;
@@ -206,11 +206,11 @@ static ssize_t reset_store(struct device *dev,
         * started state. ARM recommends start-stop logic is set before
         * each trace run.
         */
-       config->vinst_ctrl = BIT(0);
+       config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
        if (drvdata->nr_addr_cmp > 0) {
                config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
                /* SSSTATUS, bit[9] */
-               config->vinst_ctrl |= BIT(9);
+               config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
        }
 
        /* No address range filtering for ViewInst */
@@ -304,134 +304,134 @@ static ssize_t mode_store(struct device *dev,
 
        if (drvdata->instrp0 == true) {
                /* start by clearing instruction P0 field */
-               config->cfg  &= ~(BIT(1) | BIT(2));
+               config->cfg  &= ~TRCCONFIGR_INSTP0_LOAD_STORE;
                if (config->mode & ETM_MODE_LOAD)
                        /* 0b01 Trace load instructions as P0 instructions */
-                       config->cfg  |= BIT(1);
+                       config->cfg  |= TRCCONFIGR_INSTP0_LOAD;
                if (config->mode & ETM_MODE_STORE)
                        /* 0b10 Trace store instructions as P0 instructions */
-                       config->cfg  |= BIT(2);
+                       config->cfg  |= TRCCONFIGR_INSTP0_STORE;
                if (config->mode & ETM_MODE_LOAD_STORE)
                        /*
                         * 0b11 Trace load and store instructions
                         * as P0 instructions
                         */
-                       config->cfg  |= BIT(1) | BIT(2);
+                       config->cfg  |= TRCCONFIGR_INSTP0_LOAD_STORE;
        }
 
        /* bit[3], Branch broadcast mode */
        if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
-               config->cfg |= BIT(3);
+               config->cfg |= TRCCONFIGR_BB;
        else
-               config->cfg &= ~BIT(3);
+               config->cfg &= ~TRCCONFIGR_BB;
 
        /* bit[4], Cycle counting instruction trace bit */
        if ((config->mode & ETMv4_MODE_CYCACC) &&
                (drvdata->trccci == true))
-               config->cfg |= BIT(4);
+               config->cfg |= TRCCONFIGR_CCI;
        else
-               config->cfg &= ~BIT(4);
+               config->cfg &= ~TRCCONFIGR_CCI;
 
        /* bit[6], Context ID tracing bit */
        if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
-               config->cfg |= BIT(6);
+               config->cfg |= TRCCONFIGR_CID;
        else
-               config->cfg &= ~BIT(6);
+               config->cfg &= ~TRCCONFIGR_CID;
 
        if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
-               config->cfg |= BIT(7);
+               config->cfg |= TRCCONFIGR_VMID;
        else
-               config->cfg &= ~BIT(7);
+               config->cfg &= ~TRCCONFIGR_VMID;
 
        /* bits[10:8], Conditional instruction tracing bit */
        mode = ETM_MODE_COND(config->mode);
        if (drvdata->trccond == true) {
-               config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
-               config->cfg |= mode << 8;
+               config->cfg &= ~TRCCONFIGR_COND_MASK;
+               config->cfg |= mode << __bf_shf(TRCCONFIGR_COND_MASK);
        }
 
        /* bit[11], Global timestamp tracing bit */
        if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
-               config->cfg |= BIT(11);
+               config->cfg |= TRCCONFIGR_TS;
        else
-               config->cfg &= ~BIT(11);
+               config->cfg &= ~TRCCONFIGR_TS;
 
        /* bit[12], Return stack enable bit */
        if ((config->mode & ETM_MODE_RETURNSTACK) &&
                                        (drvdata->retstack == true))
-               config->cfg |= BIT(12);
+               config->cfg |= TRCCONFIGR_RS;
        else
-               config->cfg &= ~BIT(12);
+               config->cfg &= ~TRCCONFIGR_RS;
 
        /* bits[14:13], Q element enable field */
        mode = ETM_MODE_QELEM(config->mode);
        /* start by clearing QE bits */
-       config->cfg &= ~(BIT(13) | BIT(14));
+       config->cfg &= ~(TRCCONFIGR_QE_W_COUNTS | TRCCONFIGR_QE_WO_COUNTS);
        /*
         * if supported, Q elements with instruction counts are enabled.
         * Always set the low bit for any requested mode. Valid combos are
         * 0b00, 0b01 and 0b11.
         */
        if (mode && drvdata->q_support)
-               config->cfg |= BIT(13);
+               config->cfg |= TRCCONFIGR_QE_W_COUNTS;
        /*
         * if supported, Q elements with and without instruction
         * counts are enabled
         */
        if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
-               config->cfg |= BIT(14);
+               config->cfg |= TRCCONFIGR_QE_WO_COUNTS;
 
        /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
        if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
            (drvdata->atbtrig == true))
-               config->eventctrl1 |= BIT(11);
+               config->eventctrl1 |= TRCEVENTCTL1R_ATB;
        else
-               config->eventctrl1 &= ~BIT(11);
+               config->eventctrl1 &= ~TRCEVENTCTL1R_ATB;
 
        /* bit[12], Low-power state behavior override bit */
        if ((config->mode & ETM_MODE_LPOVERRIDE) &&
            (drvdata->lpoverride == true))
-               config->eventctrl1 |= BIT(12);
+               config->eventctrl1 |= TRCEVENTCTL1R_LPOVERRIDE;
        else
-               config->eventctrl1 &= ~BIT(12);
+               config->eventctrl1 &= ~TRCEVENTCTL1R_LPOVERRIDE;
 
        /* bit[8], Instruction stall bit */
        if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
-               config->stall_ctrl |= BIT(8);
+               config->stall_ctrl |= TRCSTALLCTLR_ISTALL;
        else
-               config->stall_ctrl &= ~BIT(8);
+               config->stall_ctrl &= ~TRCSTALLCTLR_ISTALL;
 
        /* bit[10], Prioritize instruction trace bit */
        if (config->mode & ETM_MODE_INSTPRIO)
-               config->stall_ctrl |= BIT(10);
+               config->stall_ctrl |= TRCSTALLCTLR_INSTPRIORITY;
        else
-               config->stall_ctrl &= ~BIT(10);
+               config->stall_ctrl &= ~TRCSTALLCTLR_INSTPRIORITY;
 
        /* bit[13], Trace overflow prevention bit */
        if ((config->mode & ETM_MODE_NOOVERFLOW) &&
                (drvdata->nooverflow == true))
-               config->stall_ctrl |= BIT(13);
+               config->stall_ctrl |= TRCSTALLCTLR_NOOVERFLOW;
        else
-               config->stall_ctrl &= ~BIT(13);
+               config->stall_ctrl &= ~TRCSTALLCTLR_NOOVERFLOW;
 
        /* bit[9] Start/stop logic control bit */
        if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
-               config->vinst_ctrl |= BIT(9);
+               config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
        else
-               config->vinst_ctrl &= ~BIT(9);
+               config->vinst_ctrl &= ~TRCVICTLR_SSSTATUS;
 
        /* bit[10], Whether a trace unit must trace a Reset exception */
        if (config->mode & ETM_MODE_TRACE_RESET)
-               config->vinst_ctrl |= BIT(10);
+               config->vinst_ctrl |= TRCVICTLR_TRCRESET;
        else
-               config->vinst_ctrl &= ~BIT(10);
+               config->vinst_ctrl &= ~TRCVICTLR_TRCRESET;
 
        /* bit[11], Whether a trace unit must trace a system error exception */
        if ((config->mode & ETM_MODE_TRACE_ERR) &&
                (drvdata->trc_error == true))
-               config->vinst_ctrl |= BIT(11);
+               config->vinst_ctrl |= TRCVICTLR_TRCERR;
        else
-               config->vinst_ctrl &= ~BIT(11);
+               config->vinst_ctrl &= ~TRCVICTLR_TRCERR;
 
        if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
                etm4_config_trace_mode(config);
@@ -534,7 +534,7 @@ static ssize_t event_instren_show(struct device *dev,
        struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
        struct etmv4_config *config = &drvdata->config;
 
-       val = BMVAL(config->eventctrl1, 0, 3);
+       val = FIELD_GET(TRCEVENTCTL1R_INSTEN_MASK, config->eventctrl1);
        return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 }
 
@@ -551,23 +551,28 @@ static ssize_t event_instren_store(struct device *dev,
 
        spin_lock(&drvdata->spinlock);
        /* start by clearing all instruction event enable bits */
-       config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
+       config->eventctrl1 &= ~TRCEVENTCTL1R_INSTEN_MASK;
        switch (drvdata->nr_event) {
        case 0x0:
                /* generate Event element for event 1 */
-               config->eventctrl1 |= val & BIT(1);
+               config->eventctrl1 |= val & TRCEVENTCTL1R_INSTEN_1;
                break;
        case 0x1:
                /* generate Event element for event 1 and 2 */
-               config->eventctrl1 |= val & (BIT(0) | BIT(1));
+               config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 | TRCEVENTCTL1R_INSTEN_1);
                break;
        case 0x2:
                /* generate Event element for event 1, 2 and 3 */
-               config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
+               config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
+                                            TRCEVENTCTL1R_INSTEN_1 |
+                                            TRCEVENTCTL1R_INSTEN_2);
                break;
        case 0x3:
                /* generate Event element for all 4 events */
-               config->eventctrl1 |= val & 0xF;
+               config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
+                                            TRCEVENTCTL1R_INSTEN_1 |
+                                            TRCEVENTCTL1R_INSTEN_2 |
+                                            TRCEVENTCTL1R_INSTEN_3);
                break;
        default:
                break;
@@ -702,10 +707,10 @@ static ssize_t bb_ctrl_store(struct device *dev,
         * individual range comparators. If include then at least 1
         * range must be selected.
         */
-       if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
+       if ((val & TRCBBCTLR_MODE) && (FIELD_GET(TRCBBCTLR_RANGE_MASK, val) == 0))
                return -EINVAL;
 
-       config->bb_ctrl = val & GENMASK(8, 0);
+       config->bb_ctrl = val & (TRCBBCTLR_MODE | TRCBBCTLR_RANGE_MASK);
        return size;
 }
 static DEVICE_ATTR_RW(bb_ctrl);
@@ -718,7 +723,7 @@ static ssize_t event_vinst_show(struct device *dev,
        struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
        struct etmv4_config *config = &drvdata->config;
 
-       val = config->vinst_ctrl & ETMv4_EVENT_MASK;
+       val = FIELD_GET(TRCVICTLR_EVENT_MASK, config->vinst_ctrl);
        return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 }
 
@@ -734,9 +739,9 @@ static ssize_t event_vinst_store(struct device *dev,
                return -EINVAL;
 
        spin_lock(&drvdata->spinlock);
-       val &= ETMv4_EVENT_MASK;
-       config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
-       config->vinst_ctrl |= val;
+       val &= TRCVICTLR_EVENT_MASK >> __bf_shf(TRCVICTLR_EVENT_MASK);
+       config->vinst_ctrl &= ~TRCVICTLR_EVENT_MASK;
+       config->vinst_ctrl |= FIELD_PREP(TRCVICTLR_EVENT_MASK, val);
        spin_unlock(&drvdata->spinlock);
        return size;
 }
@@ -750,7 +755,7 @@ static ssize_t s_exlevel_vinst_show(struct device *dev,
        struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
        struct etmv4_config *config = &drvdata->config;
 
-       val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_S_MASK) >> TRCVICTLR_EXLEVEL_S_SHIFT;
+       val = FIELD_GET(TRCVICTLR_EXLEVEL_S_MASK, config->vinst_ctrl);
        return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 }
 
@@ -767,10 +772,10 @@ static ssize_t s_exlevel_vinst_store(struct device *dev,
 
        spin_lock(&drvdata->spinlock);
        /* clear all EXLEVEL_S bits  */
-       config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_S_MASK);
+       config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_S_MASK;
        /* enable instruction tracing for corresponding exception level */
        val &= drvdata->s_ex_level;
-       config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_S_SHIFT);
+       config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_S_MASK);
        spin_unlock(&drvdata->spinlock);
        return size;
 }
@@ -785,7 +790,7 @@ static ssize_t ns_exlevel_vinst_show(struct device *dev,
        struct etmv4_config *config = &drvdata->config;
 
        /* EXLEVEL_NS, bits[23:20] */
-       val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_NS_MASK) >> TRCVICTLR_EXLEVEL_NS_SHIFT;
+       val = FIELD_GET(TRCVICTLR_EXLEVEL_NS_MASK, config->vinst_ctrl);
        return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 }
 
@@ -802,10 +807,10 @@ static ssize_t ns_exlevel_vinst_store(struct device *dev,
 
        spin_lock(&drvdata->spinlock);
        /* clear EXLEVEL_NS bits  */
-       config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_NS_MASK);
+       config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_NS_MASK;
        /* enable instruction tracing for corresponding exception level */
        val &= drvdata->ns_ex_level;
-       config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_NS_SHIFT);
+       config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK);
        spin_unlock(&drvdata->spinlock);
        return size;
 }
@@ -858,11 +863,11 @@ static ssize_t addr_instdatatype_show(struct device *dev,
 
        spin_lock(&drvdata->spinlock);
        idx = config->addr_idx;
-       val = BMVAL(config->addr_acc[idx], 0, 1);
+       val = FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]);
        len = scnprintf(buf, PAGE_SIZE, "%s\n",
-                       val == ETM_INSTR_ADDR ? "instr" :
-                       (val == ETM_DATA_LOAD_ADDR ? "data_load" :
-                       (val == ETM_DATA_STORE_ADDR ? "data_store" :
+                       val == TRCACATRn_TYPE_ADDR ? "instr" :
+                       (val == TRCACATRn_TYPE_DATA_LOAD_ADDR ? "data_load" :
+                       (val == TRCACATRn_TYPE_DATA_STORE_ADDR ? "data_store" :
                        "data_load_store")));
        spin_unlock(&drvdata->spinlock);
        return len;
@@ -886,7 +891,7 @@ static ssize_t addr_instdatatype_store(struct device *dev,
        idx = config->addr_idx;
        if (!strcmp(str, "instr"))
                /* TYPE, bits[1:0] */
-               config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
+               config->addr_acc[idx] &= ~TRCACATRn_TYPE_MASK;
 
        spin_unlock(&drvdata->spinlock);
        return size;
@@ -1144,7 +1149,7 @@ static ssize_t addr_ctxtype_show(struct device *dev,
        spin_lock(&drvdata->spinlock);
        idx = config->addr_idx;
        /* CONTEXTTYPE, bits[3:2] */
-       val = BMVAL(config->addr_acc[idx], 2, 3);
+       val = FIELD_GET(TRCACATRn_CONTEXTTYPE_MASK, config->addr_acc[idx]);
        len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
                        (val == ETM_CTX_CTXID ? "ctxid" :
                        (val == ETM_CTX_VMID ? "vmid" : "all")));
@@ -1170,18 +1175,18 @@ static ssize_t addr_ctxtype_store(struct device *dev,
        idx = config->addr_idx;
        if (!strcmp(str, "none"))
                /* start by clearing context type bits */
-               config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
+               config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_MASK;
        else if (!strcmp(str, "ctxid")) {
                /* 0b01 The trace unit performs a Context ID */
                if (drvdata->numcidc) {
-                       config->addr_acc[idx] |= BIT(2);
-                       config->addr_acc[idx] &= ~BIT(3);
+                       config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
+                       config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_VMID;
                }
        } else if (!strcmp(str, "vmid")) {
                /* 0b10 The trace unit performs a VMID */
                if (drvdata->numvmidc) {
-                       config->addr_acc[idx] &= ~BIT(2);
-                       config->addr_acc[idx] |= BIT(3);
+                       config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_CTXID;
+                       config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
                }
        } else if (!strcmp(str, "all")) {
                /*
@@ -1189,9 +1194,9 @@ static ssize_t addr_ctxtype_store(struct device *dev,
                 * comparison and a VMID
                 */
                if (drvdata->numcidc)
-                       config->addr_acc[idx] |= BIT(2);
+                       config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
                if (drvdata->numvmidc)
-                       config->addr_acc[idx] |= BIT(3);
+                       config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
        }
        spin_unlock(&drvdata->spinlock);
        return size;
@@ -1210,7 +1215,7 @@ static ssize_t addr_context_show(struct device *dev,
        spin_lock(&drvdata->spinlock);
        idx = config->addr_idx;
        /* context ID comparator bits[6:4] */
-       val = BMVAL(config->addr_acc[idx], 4, 6);
+       val = FIELD_GET(TRCACATRn_CONTEXT_MASK, config->addr_acc[idx]);
        spin_unlock(&drvdata->spinlock);
        return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 }
@@ -1235,8 +1240,8 @@ static ssize_t addr_context_store(struct device *dev,
        spin_lock(&drvdata->spinlock);
        idx = config->addr_idx;
        /* clear context ID comparator bits[6:4] */
-       config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
-       config->addr_acc[idx] |= (val << 4);
+       config->addr_acc[idx] &= ~TRCACATRn_CONTEXT_MASK;
+       config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_CONTEXT_MASK);
        spin_unlock(&drvdata->spinlock);
        return size;
 }
@@ -1253,7 +1258,7 @@ static ssize_t addr_exlevel_s_ns_show(struct device *dev,
 
        spin_lock(&drvdata->spinlock);
        idx = config->addr_idx;
-       val = BMVAL(config->addr_acc[idx], 8, 14);
+       val = FIELD_GET(TRCACATRn_EXLEVEL_MASK, config->addr_acc[idx]);
        spin_unlock(&drvdata->spinlock);
        return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 }
@@ -1270,14 +1275,14 @@ static ssize_t addr_exlevel_s_ns_store(struct device *dev,
        if (kstrtoul(buf, 0, &val))
                return -EINVAL;
 
-       if (val & ~((GENMASK(14, 8) >> 8)))
+       if (val & ~(TRCACATRn_EXLEVEL_MASK >> __bf_shf(TRCACATRn_EXLEVEL_MASK)))
                return -EINVAL;
 
        spin_lock(&drvdata->spinlock);
        idx = config->addr_idx;
        /* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
-       config->addr_acc[idx] &= ~(GENMASK(14, 8));
-       config->addr_acc[idx] |= (val << 8);
+       config->addr_acc[idx] &= ~TRCACATRn_EXLEVEL_MASK;
+       config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_EXLEVEL_MASK);
        spin_unlock(&drvdata->spinlock);
        return size;
 }
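
The hunks above replace open-coded shift/mask arithmetic with GENMASK()-based masks plus FIELD_GET()/FIELD_PREP() and __bf_shf(). A minimal standalone sketch of that arithmetic, using simplified stand-ins for the include/linux/bitfield.h helpers (the macro bodies below are illustrative, not the kernel definitions):

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-ins for the kernel's GENMASK()/bitfield helpers. */
	#define GENMASK(h, l)       (((1ULL << ((h) - (l) + 1)) - 1) << (l))
	#define __bf_shf(mask)      (__builtin_ffsll(mask) - 1)   /* lowest set bit */
	#define FIELD_GET(mask, v)  (((v) & (mask)) >> __bf_shf(mask))
	#define FIELD_PREP(mask, v) (((uint64_t)(v) << __bf_shf(mask)) & (mask))

	#define TRCACATRn_EXLEVEL_MASK  GENMASK(14, 8)

	int main(void)
	{
		uint64_t acc = 0;

		/* Equivalent of: acc &= ~GENMASK(14, 8); acc |= val << 8; */
		acc &= ~TRCACATRn_EXLEVEL_MASK;
		acc |= FIELD_PREP(TRCACATRn_EXLEVEL_MASK, 0x5f);

		/* Read the field back without spelling out the shift by hand. */
		printf("EXLEVEL = %#llx\n",
		       (unsigned long long)FIELD_GET(TRCACATRn_EXLEVEL_MASK, acc));
		return 0;
	}

The point of the conversion is that the shift no longer has to be tracked separately from the mask; __bf_shf() derives it from the mask's lowest set bit, which is why the _SHIFT defines can be dropped from the header below.
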
@@ -1721,8 +1726,11 @@ static ssize_t res_ctrl_store(struct device *dev,
        /* For an odd idx the pair inversion bit is RES0 */
        if (idx % 2 != 0)
                /* PAIRINV, bit[21] */
-               val &= ~BIT(21);
-       config->res_ctrl[idx] = val & GENMASK(21, 0);
+               val &= ~TRCRSCTLRn_PAIRINV;
+       config->res_ctrl[idx] = val & (TRCRSCTLRn_PAIRINV |
+                                      TRCRSCTLRn_INV |
+                                      TRCRSCTLRn_GROUP_MASK |
+                                      TRCRSCTLRn_SELECT_MASK);
        spin_unlock(&drvdata->spinlock);
        return size;
 }
@@ -1787,9 +1795,9 @@ static ssize_t sshot_ctrl_store(struct device *dev,
 
        spin_lock(&drvdata->spinlock);
        idx = config->ss_idx;
-       config->ss_ctrl[idx] = val & GENMASK(24, 0);
+       config->ss_ctrl[idx] = FIELD_PREP(TRCSSCCRn_SAC_ARC_RST_MASK, val);
        /* must clear bit 31 in related status register on programming */
-       config->ss_status[idx] &= ~BIT(31);
+       config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
        spin_unlock(&drvdata->spinlock);
        return size;
 }
@@ -1837,9 +1845,9 @@ static ssize_t sshot_pe_ctrl_store(struct device *dev,
 
        spin_lock(&drvdata->spinlock);
        idx = config->ss_idx;
-       config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
+       config->ss_pe_cmp[idx] = FIELD_PREP(TRCSSPCICRn_PC_MASK, val);
        /* must clear bit 31 in related status register on programming */
-       config->ss_status[idx] &= ~BIT(31);
+       config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
        spin_unlock(&drvdata->spinlock);
        return size;
 }
index 3c4d69b..33869c1 100644 (file)
 #define TRCRSR_TA                      BIT(12)
 
 /*
+ * Bit positions of registers that are defined above, in the sysreg.h style
+ * of _MASK for multi bit fields and BIT() for single bits.
+ */
+#define TRCIDR0_INSTP0_MASK                    GENMASK(2, 1)
+#define TRCIDR0_TRCBB                          BIT(5)
+#define TRCIDR0_TRCCOND                                BIT(6)
+#define TRCIDR0_TRCCCI                         BIT(7)
+#define TRCIDR0_RETSTACK                       BIT(9)
+#define TRCIDR0_NUMEVENT_MASK                  GENMASK(11, 10)
+#define TRCIDR0_QSUPP_MASK                     GENMASK(16, 15)
+#define TRCIDR0_TSSIZE_MASK                    GENMASK(28, 24)
+
+#define TRCIDR2_CIDSIZE_MASK                   GENMASK(9, 5)
+#define TRCIDR2_VMIDSIZE_MASK                  GENMASK(14, 10)
+#define TRCIDR2_CCSIZE_MASK                    GENMASK(28, 25)
+
+#define TRCIDR3_CCITMIN_MASK                   GENMASK(11, 0)
+#define TRCIDR3_EXLEVEL_S_MASK                 GENMASK(19, 16)
+#define TRCIDR3_EXLEVEL_NS_MASK                        GENMASK(23, 20)
+#define TRCIDR3_TRCERR                         BIT(24)
+#define TRCIDR3_SYNCPR                         BIT(25)
+#define TRCIDR3_STALLCTL                       BIT(26)
+#define TRCIDR3_SYSSTALL                       BIT(27)
+#define TRCIDR3_NUMPROC_LO_MASK                        GENMASK(30, 28)
+#define TRCIDR3_NUMPROC_HI_MASK                        GENMASK(13, 12)
+#define TRCIDR3_NOOVERFLOW                     BIT(31)
+
+#define TRCIDR4_NUMACPAIRS_MASK                        GENMASK(3, 0)
+#define TRCIDR4_NUMPC_MASK                     GENMASK(15, 12)
+#define TRCIDR4_NUMRSPAIR_MASK                 GENMASK(19, 16)
+#define TRCIDR4_NUMSSCC_MASK                   GENMASK(23, 20)
+#define TRCIDR4_NUMCIDC_MASK                   GENMASK(27, 24)
+#define TRCIDR4_NUMVMIDC_MASK                  GENMASK(31, 28)
+
+#define TRCIDR5_NUMEXTIN_MASK                  GENMASK(8, 0)
+#define TRCIDR5_TRACEIDSIZE_MASK               GENMASK(21, 16)
+#define TRCIDR5_ATBTRIG                                BIT(22)
+#define TRCIDR5_LPOVERRIDE                     BIT(23)
+#define TRCIDR5_NUMSEQSTATE_MASK               GENMASK(27, 25)
+#define TRCIDR5_NUMCNTR_MASK                   GENMASK(30, 28)
+
+#define TRCCONFIGR_INSTP0_LOAD                 BIT(1)
+#define TRCCONFIGR_INSTP0_STORE                        BIT(2)
+#define TRCCONFIGR_INSTP0_LOAD_STORE           (TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE)
+#define TRCCONFIGR_BB                          BIT(3)
+#define TRCCONFIGR_CCI                         BIT(4)
+#define TRCCONFIGR_CID                         BIT(6)
+#define TRCCONFIGR_VMID                                BIT(7)
+#define TRCCONFIGR_COND_MASK                   GENMASK(10, 8)
+#define TRCCONFIGR_TS                          BIT(11)
+#define TRCCONFIGR_RS                          BIT(12)
+#define TRCCONFIGR_QE_W_COUNTS                 BIT(13)
+#define TRCCONFIGR_QE_WO_COUNTS                        BIT(14)
+#define TRCCONFIGR_VMIDOPT                     BIT(15)
+#define TRCCONFIGR_DA                          BIT(16)
+#define TRCCONFIGR_DV                          BIT(17)
+
+#define TRCEVENTCTL1R_INSTEN_MASK              GENMASK(3, 0)
+#define TRCEVENTCTL1R_INSTEN_0                 BIT(0)
+#define TRCEVENTCTL1R_INSTEN_1                 BIT(1)
+#define TRCEVENTCTL1R_INSTEN_2                 BIT(2)
+#define TRCEVENTCTL1R_INSTEN_3                 BIT(3)
+#define TRCEVENTCTL1R_ATB                      BIT(11)
+#define TRCEVENTCTL1R_LPOVERRIDE               BIT(12)
+
+#define TRCSTALLCTLR_ISTALL                    BIT(8)
+#define TRCSTALLCTLR_INSTPRIORITY              BIT(10)
+#define TRCSTALLCTLR_NOOVERFLOW                        BIT(13)
+
+#define TRCVICTLR_EVENT_MASK                   GENMASK(7, 0)
+#define TRCVICTLR_SSSTATUS                     BIT(9)
+#define TRCVICTLR_TRCRESET                     BIT(10)
+#define TRCVICTLR_TRCERR                       BIT(11)
+#define TRCVICTLR_EXLEVEL_MASK                 GENMASK(22, 16)
+#define TRCVICTLR_EXLEVEL_S_MASK               GENMASK(19, 16)
+#define TRCVICTLR_EXLEVEL_NS_MASK              GENMASK(22, 20)
+
+#define TRCACATRn_TYPE_MASK                    GENMASK(1, 0)
+#define TRCACATRn_CONTEXTTYPE_MASK             GENMASK(3, 2)
+#define TRCACATRn_CONTEXTTYPE_CTXID            BIT(2)
+#define TRCACATRn_CONTEXTTYPE_VMID             BIT(3)
+#define TRCACATRn_CONTEXT_MASK                 GENMASK(6, 4)
+#define TRCACATRn_EXLEVEL_MASK                 GENMASK(14, 8)
+
+#define TRCSSCSRn_STATUS                       BIT(31)
+#define TRCSSCCRn_SAC_ARC_RST_MASK             GENMASK(24, 0)
+
+#define TRCSSPCICRn_PC_MASK                    GENMASK(7, 0)
+
+#define TRCBBCTLR_MODE                         BIT(8)
+#define TRCBBCTLR_RANGE_MASK                   GENMASK(7, 0)
+
+#define TRCRSCTLRn_PAIRINV                     BIT(21)
+#define TRCRSCTLRn_INV                         BIT(20)
+#define TRCRSCTLRn_GROUP_MASK                  GENMASK(19, 16)
+#define TRCRSCTLRn_SELECT_MASK                 GENMASK(15, 0)
+
+/*
  * System instructions to access ETM registers.
  * See ETMv4.4 spec ARM IHI0064F section 4.3.6 System instructions
  */
 #define ETM_EXLEVEL_NS_OS              BIT(5)  /* NonSecure EL1        */
 #define ETM_EXLEVEL_NS_HYP             BIT(6)  /* NonSecure EL2        */
 
-#define ETM_EXLEVEL_MASK               (GENMASK(6, 0))
-#define ETM_EXLEVEL_S_MASK             (GENMASK(3, 0))
-#define ETM_EXLEVEL_NS_MASK            (GENMASK(6, 4))
-
 /* access level controls in TRCACATRn */
 #define TRCACATR_EXLEVEL_SHIFT         8
 
-/* access level control in TRCVICTLR */
-#define TRCVICTLR_EXLEVEL_SHIFT                16
-#define TRCVICTLR_EXLEVEL_S_SHIFT      16
-#define TRCVICTLR_EXLEVEL_NS_SHIFT     20
-
-/* secure / non secure masks - TRCVICTLR, IDR3 */
-#define TRCVICTLR_EXLEVEL_MASK         (ETM_EXLEVEL_MASK << TRCVICTLR_EXLEVEL_SHIFT)
-#define TRCVICTLR_EXLEVEL_S_MASK       (ETM_EXLEVEL_S_MASK << TRCVICTLR_EXLEVEL_SHIFT)
-#define TRCVICTLR_EXLEVEL_NS_MASK      (ETM_EXLEVEL_NS_MASK << TRCVICTLR_EXLEVEL_SHIFT)
-
 #define ETM_TRCIDR1_ARCH_MAJOR_SHIFT   8
 #define ETM_TRCIDR1_ARCH_MAJOR_MASK    (0xfU << ETM_TRCIDR1_ARCH_MAJOR_SHIFT)
 #define ETM_TRCIDR1_ARCH_MAJOR(x)      \
@@ -986,10 +1070,10 @@ struct etmv4_drvdata {
 
 /* Address comparator access types */
 enum etm_addr_acctype {
-       ETM_INSTR_ADDR,
-       ETM_DATA_LOAD_ADDR,
-       ETM_DATA_STORE_ADDR,
-       ETM_DATA_LOAD_STORE_ADDR,
+       TRCACATRn_TYPE_ADDR,
+       TRCACATRn_TYPE_DATA_LOAD_ADDR,
+       TRCACATRn_TYPE_DATA_STORE_ADDR,
+       TRCACATRn_TYPE_DATA_LOAD_STORE_ADDR,
 };
 
 /* Address comparator context types */
index b0eae94..c0c3578 100644 (file)
@@ -656,6 +656,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
        unsigned int_addr_flag = 0;
        struct i2c_msg *m_start = msg;
        bool is_read;
+       u8 *dma_buf = NULL;
 
        dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
 
@@ -703,7 +704,17 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
        dev->msg = m_start;
        dev->recv_len_abort = false;
 
+       if (dev->use_dma) {
+               dma_buf = i2c_get_dma_safe_msg_buf(m_start, 1);
+               if (!dma_buf) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               dev->buf = dma_buf;
+       }
+
        ret = at91_do_twi_transfer(dev);
+       i2c_put_dma_safe_msg_buf(dma_buf, m_start, !ret);
 
        ret = (ret < 0) ? ret : num;
 out:
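
The at91 change above routes the message buffer through i2c_get_dma_safe_msg_buf()/i2c_put_dma_safe_msg_buf() when DMA is used, passing !ret so read data is only copied back on a successful transfer. A rough userspace model of the bounce-buffer idea behind such helpers; the struct and functions below are illustrative stand-ins, not the i2c-core implementation:

	#include <stdlib.h>
	#include <string.h>

	/* Toy message; bit 0 of flags marks a read, loosely mirroring struct i2c_msg. */
	struct msg {
		unsigned int flags;
		unsigned short len;
		unsigned char *buf;     /* caller's buffer, possibly not DMA-capable */
	};

	/* Hand back a buffer that is safe to map for DMA (here: always a copy). */
	static unsigned char *get_dma_safe_buf(struct msg *m)
	{
		unsigned char *bounce = malloc(m->len);

		if (bounce && !(m->flags & 1))
			memcpy(bounce, m->buf, m->len);  /* write: copy data out */
		return bounce;
	}

	/* Release it; copy read data back only if the transfer succeeded. */
	static void put_dma_safe_buf(unsigned char *bounce, struct msg *m, int ok)
	{
		if (ok && (m->flags & 1))
			memcpy(m->buf, bounce, m->len);
		free(bounce);
	}

	int main(void)
	{
		unsigned char payload[4] = { 0xde, 0xad, 0xbe, 0xef };
		struct msg m = { .flags = 0, .len = sizeof(payload), .buf = payload };
		unsigned char *bounce = get_dma_safe_buf(&m);

		if (!bounce)
			return 1;
		/* ... the DMA engine would run against 'bounce' here ... */
		put_dma_safe_buf(bounce, &m, 1);
		return 0;
	}
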
index 805c771..b4c1ad1 100644 (file)
@@ -760,7 +760,7 @@ static void cdns_i2c_master_reset(struct i2c_adapter *adap)
 static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
                struct i2c_adapter *adap)
 {
-       unsigned long time_left;
+       unsigned long time_left, msg_timeout;
        u32 reg;
 
        id->p_msg = msg;
@@ -785,8 +785,16 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
        else
                cdns_i2c_msend(id);
 
+       /* Minimal time to execute this message */
+       msg_timeout = msecs_to_jiffies((1000 * msg->len * BITS_PER_BYTE) / id->i2c_clk);
+       /* Plus some wiggle room */
+       msg_timeout += msecs_to_jiffies(500);
+
+       if (msg_timeout < adap->timeout)
+               msg_timeout = adap->timeout;
+
        /* Wait for the signal of completion */
-       time_left = wait_for_completion_timeout(&id->xfer_done, adap->timeout);
+       time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout);
        if (time_left == 0) {
                cdns_i2c_master_reset(adap);
                dev_err(id->adap.dev.parent,
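
The cdns timeout is now derived from the message length and the bus clock instead of a fixed adapter timeout. The same arithmetic as a standalone sketch, working in milliseconds rather than jiffies (the 4 KiB/100 kHz numbers are an assumed example):

	#include <stdio.h>

	#define BITS_PER_BYTE 8

	/* Minimum time for one message plus wiggle room, in milliseconds. */
	static unsigned long msg_timeout_ms(unsigned int len, unsigned int i2c_clk_hz,
					    unsigned long adap_timeout_ms)
	{
		unsigned long ms = (1000UL * len * BITS_PER_BYTE) / i2c_clk_hz;

		ms += 500;                    /* wiggle room, as in the hunk above */
		if (ms < adap_timeout_ms)     /* never go below the adapter timeout */
			ms = adap_timeout_ms;
		return ms;
	}

	int main(void)
	{
		/* 4 KiB at 100 kHz needs ~327 ms on the wire, ~827 ms with margin,
		 * which exceeds a 500 ms adapter timeout. */
		printf("%lu ms\n", msg_timeout_ms(4096, 100000, 500));
		return 0;
	}
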
index e9d0732..9e09db3 100644 (file)
@@ -539,10 +539,9 @@ i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
 
        dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
 
-       ret = pm_runtime_get_sync(dev->dev);
+       ret = pm_runtime_resume_and_get(dev->dev);
        if (ret < 0) {
                dev_err(dev->dev, "Failed to runtime_get device: %d\n", ret);
-               pm_runtime_put_noidle(dev->dev);
                return ret;
        }
 
@@ -821,10 +820,9 @@ static int davinci_i2c_probe(struct platform_device *pdev)
 
        pm_runtime_enable(dev->dev);
 
-       r = pm_runtime_get_sync(dev->dev);
+       r = pm_runtime_resume_and_get(dev->dev);
        if (r < 0) {
                dev_err(dev->dev, "failed to runtime_get device: %d\n", r);
-               pm_runtime_put_noidle(dev->dev);
                return r;
        }
 
@@ -898,11 +896,9 @@ static int davinci_i2c_remove(struct platform_device *pdev)
 
        i2c_del_adapter(&dev->adapter);
 
-       ret = pm_runtime_get_sync(&pdev->dev);
-       if (ret < 0) {
-               pm_runtime_put_noidle(&pdev->dev);
+       ret = pm_runtime_resume_and_get(&pdev->dev);
+       if (ret < 0)
                return ret;
-       }
 
        davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
 
index 9b37f2b..b624356 100644 (file)
@@ -16,8 +16,8 @@
 #define PSP_CMD_TIMEOUT_US     (500 * USEC_PER_MSEC)
 
 #define PSP_I2C_REQ_BUS_CMD            0x64
-#define PSP_I2C_REQ_RETRY_CNT          10
-#define PSP_I2C_REQ_RETRY_DELAY_US     (50 * USEC_PER_MSEC)
+#define PSP_I2C_REQ_RETRY_CNT          400
+#define PSP_I2C_REQ_RETRY_DELAY_US     (25 * USEC_PER_MSEC)
 #define PSP_I2C_REQ_STS_OK             0x0
 #define PSP_I2C_REQ_STS_BUS_BUSY       0x1
 #define PSP_I2C_REQ_STS_INV_PARAM      0x3
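
The new constants change the worst-case wait for the PSP from 10 retries of 50 ms to 400 retries of 25 ms, i.e. roughly 0.5 s to 10 s. A trivial standalone check of that budget:

	#include <stdio.h>

	#define USEC_PER_MSEC 1000UL

	int main(void)
	{
		unsigned long old_us = 10 * (50 * USEC_PER_MSEC);
		unsigned long new_us = 400 * (25 * USEC_PER_MSEC);

		/* 500000 us vs 10000000 us total retry budget */
		printf("old %lu us, new %lu us\n", old_us, new_us);
		return 0;
	}
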
index 9f85743..e7d316b 100644 (file)
@@ -266,9 +266,9 @@ int i2c_dw_acpi_configure(struct device *device)
         * selected speed modes.
         */
        i2c_dw_acpi_params(device, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, &ss_ht);
+       i2c_dw_acpi_params(device, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht);
        i2c_dw_acpi_params(device, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, &fp_ht);
        i2c_dw_acpi_params(device, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht);
-       i2c_dw_acpi_params(device, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht);
 
        switch (t->bus_freq_hz) {
        case I2C_MAX_STANDARD_MODE_FREQ:
index c16157e..6078fa0 100644 (file)
@@ -528,6 +528,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
 
        case I2C_SMBUS_BLOCK_PROC_CALL:
                dev_dbg(dev, "I2C_SMBUS_BLOCK_PROC_CALL\n");
+               if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
+                       return -EINVAL;
+
                dma_size = I2C_SMBUS_BLOCK_MAX;
                desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, 1);
                desc->wr_len_cmd = data->block[0] + 1;
index 07eb819..61cc5b2 100644 (file)
 #define REG_TOK_RDATA1         0x1c
 
 /* Control register fields */
-#define REG_CTRL_START         BIT(0)
-#define REG_CTRL_ACK_IGNORE    BIT(1)
-#define REG_CTRL_STATUS                BIT(2)
-#define REG_CTRL_ERROR         BIT(3)
-#define REG_CTRL_CLKDIV                GENMASK(21, 12)
-#define REG_CTRL_CLKDIVEXT     GENMASK(29, 28)
-
-#define REG_SLV_ADDR           GENMASK(7, 0)
-#define REG_SLV_SDA_FILTER     GENMASK(10, 8)
-#define REG_SLV_SCL_FILTER     GENMASK(13, 11)
-#define REG_SLV_SCL_LOW                GENMASK(27, 16)
-#define REG_SLV_SCL_LOW_EN     BIT(28)
+#define REG_CTRL_START                 BIT(0)
+#define REG_CTRL_ACK_IGNORE            BIT(1)
+#define REG_CTRL_STATUS                        BIT(2)
+#define REG_CTRL_ERROR                 BIT(3)
+#define REG_CTRL_CLKDIV_SHIFT          12
+#define REG_CTRL_CLKDIV_MASK           GENMASK(21, REG_CTRL_CLKDIV_SHIFT)
+#define REG_CTRL_CLKDIVEXT_SHIFT       28
+#define REG_CTRL_CLKDIVEXT_MASK                GENMASK(29, REG_CTRL_CLKDIVEXT_SHIFT)
+
+#define REG_SLV_ADDR_MASK              GENMASK(7, 0)
+#define REG_SLV_SDA_FILTER_MASK                GENMASK(10, 8)
+#define REG_SLV_SCL_FILTER_MASK                GENMASK(13, 11)
+#define REG_SLV_SCL_LOW_SHIFT          16
+#define REG_SLV_SCL_LOW_MASK           GENMASK(27, REG_SLV_SCL_LOW_SHIFT)
+#define REG_SLV_SCL_LOW_EN             BIT(28)
 
 #define I2C_TIMEOUT_MS         500
 #define FILTER_DELAY           15
@@ -62,10 +65,6 @@ enum {
        STATE_WRITE,
 };
 
-struct meson_i2c_data {
-       unsigned char div_factor;
-};
-
 /**
  * struct meson_i2c - Meson I2C device private data
  *
@@ -83,7 +82,7 @@ struct meson_i2c_data {
  * @done:      Completion used to wait for transfer termination
  * @tokens:    Sequence of tokens to be written to the device
  * @num_tokens:        Number of tokens
- * @data:      Pointer to the controlller's platform data
+ * @data:      Pointer to the controller's platform data
  */
 struct meson_i2c {
        struct i2c_adapter      adap;
@@ -106,6 +105,10 @@ struct meson_i2c {
        const struct meson_i2c_data *data;
 };
 
+struct meson_i2c_data {
+       void (*set_clk_div)(struct meson_i2c *i2c, unsigned int freq);
+};
+
 static void meson_i2c_set_mask(struct meson_i2c *i2c, int reg, u32 mask,
                               u32 val)
 {
@@ -134,14 +137,62 @@ static void meson_i2c_add_token(struct meson_i2c *i2c, int token)
        i2c->num_tokens++;
 }
 
-static void meson_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq)
+static void meson_gxbb_axg_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq)
+{
+       unsigned long clk_rate = clk_get_rate(i2c->clk);
+       unsigned int div_h, div_l;
+
+       /* According to I2C-Bus Spec 2.1, in Fast-mode the minimum LOW period is 1.3us and the
+        * minimum HIGH period is 0.6us.
+        * For a 400000 Hz bus the period is 2.5us. To stay within the spec, give 40% of the
+        * period to HIGH and 60% to LOW, i.e. HIGH at 1.0us and LOW at 1.5us.
+        * The same applies for Fast-mode plus, where LOW is 0.5us and HIGH is 0.26us.
+        * Duty = H/(H + L) = 2/5
+        */
+       if (freq <= I2C_MAX_STANDARD_MODE_FREQ) {
+               div_h = DIV_ROUND_UP(clk_rate, freq);
+               div_l = DIV_ROUND_UP(div_h, 4);
+               div_h = DIV_ROUND_UP(div_h, 2) - FILTER_DELAY;
+       } else {
+               div_h = DIV_ROUND_UP(clk_rate * 2, freq * 5) - FILTER_DELAY;
+               div_l = DIV_ROUND_UP(clk_rate * 3, freq * 5 * 2);
+       }
+
+       /* clock divider has 12 bits */
+       if (div_h > GENMASK(11, 0)) {
+               dev_err(i2c->dev, "requested bus frequency too low\n");
+               div_h = GENMASK(11, 0);
+       }
+       if (div_l > GENMASK(11, 0)) {
+               dev_err(i2c->dev, "requested bus frequency too low\n");
+               div_l = GENMASK(11, 0);
+       }
+
+       meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV_MASK,
+                          FIELD_PREP(REG_CTRL_CLKDIV_MASK, div_h & GENMASK(9, 0)));
+
+       meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT_MASK,
+                          FIELD_PREP(REG_CTRL_CLKDIVEXT_MASK, div_h >> 10));
+
+       /* set SCL low delay */
+       meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_MASK,
+                          FIELD_PREP(REG_SLV_SCL_LOW_MASK, div_l));
+
+       /* Enable HIGH/LOW mode */
+       meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_EN, REG_SLV_SCL_LOW_EN);
+
+       dev_dbg(i2c->dev, "%s: clk %lu, freq %u, divh %u, divl %u\n", __func__,
+               clk_rate, freq, div_h, div_l);
+}
+
+static void meson6_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq)
 {
        unsigned long clk_rate = clk_get_rate(i2c->clk);
        unsigned int div;
 
        div = DIV_ROUND_UP(clk_rate, freq);
        div -= FILTER_DELAY;
-       div = DIV_ROUND_UP(div, i2c->data->div_factor);
+       div = DIV_ROUND_UP(div, 4);
 
        /* clock divider has 12 bits */
        if (div > GENMASK(11, 0)) {
@@ -149,11 +200,11 @@ static void meson_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq)
                div = GENMASK(11, 0);
        }
 
-       meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV,
-                          FIELD_PREP(REG_CTRL_CLKDIV, div & GENMASK(9, 0)));
+       meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV_MASK,
+                          FIELD_PREP(REG_CTRL_CLKDIV_MASK, div & GENMASK(9, 0)));
 
-       meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT,
-                          FIELD_PREP(REG_CTRL_CLKDIVEXT, div >> 10));
+       meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT_MASK,
+                          FIELD_PREP(REG_CTRL_CLKDIVEXT_MASK, div >> 10));
 
        /* Disable HIGH/LOW mode */
        meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_EN, 0);
@@ -292,8 +343,8 @@ static void meson_i2c_do_start(struct meson_i2c *i2c, struct i2c_msg *msg)
                TOKEN_SLAVE_ADDR_WRITE;
 
 
-       meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_ADDR,
-                          FIELD_PREP(REG_SLV_ADDR, msg->addr << 1));
+       meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_ADDR_MASK,
+                          FIELD_PREP(REG_SLV_ADDR_MASK, msg->addr << 1));
 
        meson_i2c_add_token(i2c, TOKEN_START);
        meson_i2c_add_token(i2c, token);
@@ -467,9 +518,13 @@ static int meson_i2c_probe(struct platform_device *pdev)
 
        /* Disable filtering */
        meson_i2c_set_mask(i2c, REG_SLAVE_ADDR,
-                          REG_SLV_SDA_FILTER | REG_SLV_SCL_FILTER, 0);
+                          REG_SLV_SDA_FILTER_MASK | REG_SLV_SCL_FILTER_MASK, 0);
 
-       meson_i2c_set_clk_div(i2c, timings.bus_freq_hz);
+       if (!i2c->data->set_clk_div) {
+               clk_disable_unprepare(i2c->clk);
+               return -EINVAL;
+       }
+       i2c->data->set_clk_div(i2c, timings.bus_freq_hz);
 
        ret = i2c_add_adapter(&i2c->adap);
        if (ret < 0) {
@@ -491,15 +546,15 @@ static int meson_i2c_remove(struct platform_device *pdev)
 }
 
 static const struct meson_i2c_data i2c_meson6_data = {
-       .div_factor = 4,
+       .set_clk_div = meson6_i2c_set_clk_div,
 };
 
 static const struct meson_i2c_data i2c_gxbb_data = {
-       .div_factor = 4,
+       .set_clk_div = meson_gxbb_axg_i2c_set_clk_div,
 };
 
 static const struct meson_i2c_data i2c_axg_data = {
-       .div_factor = 3,
+       .set_clk_div = meson_gxbb_axg_i2c_set_clk_div,
 };
 
 static const struct of_device_id meson_i2c_match[] = {
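
For the GXBB/AXG callback above, the divider is split into separate HIGH and LOW counts so the 2/5 duty cycle can be programmed directly. The same arithmetic, reproduced as a standalone sketch (the formulas are copied from the hunk; the 166 MHz clock rate is only an assumed example, not taken from the patch):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)          (((n) + (d) - 1) / (d))
	#define FILTER_DELAY                15
	#define I2C_MAX_STANDARD_MODE_FREQ  100000

	static void calc_div(unsigned long clk_rate, unsigned int freq,
			     unsigned int *div_h, unsigned int *div_l)
	{
		if (freq <= I2C_MAX_STANDARD_MODE_FREQ) {
			*div_h = DIV_ROUND_UP(clk_rate, freq);
			*div_l = DIV_ROUND_UP(*div_h, 4);
			*div_h = DIV_ROUND_UP(*div_h, 2) - FILTER_DELAY;
		} else {
			/* Fast-mode and Fast-mode plus: duty = H/(H + L) = 2/5 */
			*div_h = DIV_ROUND_UP(clk_rate * 2, freq * 5) - FILTER_DELAY;
			*div_l = DIV_ROUND_UP(clk_rate * 3, freq * 5 * 2);
		}
	}

	int main(void)
	{
		unsigned int h, l;

		/* assumed 166 MHz peripheral clock, 400 kHz bus */
		calc_div(166000000UL, 400000, &h, &l);
		printf("div_h=%u div_l=%u\n", h, l);
		return 0;
	}
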
index f651d3e..bdecb78 100644 (file)
@@ -1177,7 +1177,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
        int left_num = num;
        struct mtk_i2c *i2c = i2c_get_adapdata(adap);
 
-       ret = clk_bulk_prepare_enable(I2C_MT65XX_CLK_MAX, i2c->clocks);
+       ret = clk_bulk_enable(I2C_MT65XX_CLK_MAX, i2c->clocks);
        if (ret)
                return ret;
 
@@ -1231,7 +1231,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
        ret = num;
 
 err_exit:
-       clk_bulk_disable_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
+       clk_bulk_disable(I2C_MT65XX_CLK_MAX, i2c->clocks);
        return ret;
 }
 
@@ -1412,7 +1412,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
                return ret;
        }
        mtk_i2c_init_hw(i2c);
-       clk_bulk_disable_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
+       clk_bulk_disable(I2C_MT65XX_CLK_MAX, i2c->clocks);
 
        ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq,
                               IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
@@ -1439,6 +1439,8 @@ static int mtk_i2c_remove(struct platform_device *pdev)
 
        i2c_del_adapter(&i2c->adap);
 
+       clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
+
        return 0;
 }
 
@@ -1448,6 +1450,7 @@ static int mtk_i2c_suspend_noirq(struct device *dev)
        struct mtk_i2c *i2c = dev_get_drvdata(dev);
 
        i2c_mark_adapter_suspended(&i2c->adap);
+       clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
 
        return 0;
 }
@@ -1465,7 +1468,7 @@ static int mtk_i2c_resume_noirq(struct device *dev)
 
        mtk_i2c_init_hw(i2c);
 
-       clk_bulk_disable_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
+       clk_bulk_disable(I2C_MT65XX_CLK_MAX, i2c->clocks);
 
        i2c_mark_adapter_resumed(&i2c->adap);
 
index 901f0fb..cfe6de8 100644 (file)
@@ -270,18 +270,15 @@ static void mtk_i2c_init(struct mtk_i2c *i2c)
 
 static int mtk_i2c_probe(struct platform_device *pdev)
 {
-       struct resource *res;
        struct mtk_i2c *i2c;
        struct i2c_adapter *adap;
        int ret;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
        i2c = devm_kzalloc(&pdev->dev, sizeof(struct mtk_i2c), GFP_KERNEL);
        if (!i2c)
                return -ENOMEM;
 
-       i2c->base = devm_ioremap_resource(&pdev->dev, res);
+       i2c->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
        if (IS_ERR(i2c->base))
                return PTR_ERR(i2c->base);
 
index 71aad02..5960ccd 100644 (file)
@@ -314,6 +314,7 @@ struct npcm_i2c {
        u64 rec_fail_cnt;
        u64 nack_cnt;
        u64 timeout_cnt;
+       u64 tx_complete_cnt;
 };
 
 static inline void npcm_i2c_select_bank(struct npcm_i2c *bus,
@@ -359,14 +360,14 @@ static int npcm_i2c_get_SCL(struct i2c_adapter *_adap)
 {
        struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap);
 
-       return !!(I2CCTL3_SCL_LVL & ioread32(bus->reg + NPCM_I2CCTL3));
+       return !!(I2CCTL3_SCL_LVL & ioread8(bus->reg + NPCM_I2CCTL3));
 }
 
 static int npcm_i2c_get_SDA(struct i2c_adapter *_adap)
 {
        struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap);
 
-       return !!(I2CCTL3_SDA_LVL & ioread32(bus->reg + NPCM_I2CCTL3));
+       return !!(I2CCTL3_SDA_LVL & ioread8(bus->reg + NPCM_I2CCTL3));
 }
 
 static inline u16 npcm_i2c_get_index(struct npcm_i2c *bus)
@@ -563,6 +564,15 @@ static inline void npcm_i2c_nack(struct npcm_i2c *bus)
        iowrite8(val, bus->reg + NPCM_I2CCTL1);
 }
 
+static inline void npcm_i2c_clear_master_status(struct npcm_i2c *bus)
+{
+       u8 val;
+
+       /* Clear NEGACK, STASTR and BER bits */
+       val = NPCM_I2CST_BER | NPCM_I2CST_NEGACK | NPCM_I2CST_STASTR;
+       iowrite8(val, bus->reg + NPCM_I2CST);
+}
+
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
 static void npcm_i2c_slave_int_enable(struct npcm_i2c *bus, bool enable)
 {
@@ -642,8 +652,8 @@ static void npcm_i2c_reset(struct npcm_i2c *bus)
        iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST);
        iowrite8(0xFF, bus->reg + NPCM_I2CST);
 
-       /* Clear EOB bit */
-       iowrite8(NPCM_I2CCST3_EO_BUSY, bus->reg + NPCM_I2CCST3);
+       /* Clear and disable EOB */
+       npcm_i2c_eob_int(bus, false);
 
        /* Clear all fifo bits: */
        iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS);
@@ -655,6 +665,9 @@ static void npcm_i2c_reset(struct npcm_i2c *bus)
        }
 #endif
 
+       /* clear status bits for spurious interrupts */
+       npcm_i2c_clear_master_status(bus);
+
        bus->state = I2C_IDLE;
 }
 
@@ -684,6 +697,8 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
        switch (op_status) {
        case I2C_MASTER_DONE_IND:
                bus->cmd_err = bus->msgs_num;
+               if (bus->tx_complete_cnt < ULLONG_MAX)
+                       bus->tx_complete_cnt++;
                fallthrough;
        case I2C_BLOCK_BYTES_ERR_IND:
                /* Master tx finished and all transmit bytes were sent */
@@ -815,15 +830,6 @@ static void npcm_i2c_read_fifo(struct npcm_i2c *bus, u8 bytes_in_fifo)
        }
 }
 
-static inline void npcm_i2c_clear_master_status(struct npcm_i2c *bus)
-{
-       u8 val;
-
-       /* Clear NEGACK, STASTR and BER bits */
-       val = NPCM_I2CST_BER | NPCM_I2CST_NEGACK | NPCM_I2CST_STASTR;
-       iowrite8(val, bus->reg + NPCM_I2CST);
-}
-
 static void npcm_i2c_master_abort(struct npcm_i2c *bus)
 {
        /* Only current master is allowed to issue a stop condition */
@@ -1231,7 +1237,16 @@ static irqreturn_t npcm_i2c_int_slave_handler(struct npcm_i2c *bus)
                ret = IRQ_HANDLED;
        } /* SDAST */
 
-       return ret;
+       /*
+        * if irq is not one of the above, make sure EOB is disabled and all
+        * status bits are cleared.
+        */
+       if (ret == IRQ_NONE) {
+               npcm_i2c_eob_int(bus, false);
+               npcm_i2c_clear_master_status(bus);
+       }
+
+       return IRQ_HANDLED;
 }
 
 static int npcm_i2c_reg_slave(struct i2c_client *client)
@@ -1467,6 +1482,9 @@ static void npcm_i2c_irq_handle_nack(struct npcm_i2c *bus)
                npcm_i2c_eob_int(bus, false);
                npcm_i2c_master_stop(bus);
 
+               /* Clear SDA Status bit (by reading dummy byte) */
+               npcm_i2c_rd_byte(bus);
+
                /*
                 * The bus is released from stall only after the SW clears
                 * NEGACK bit. Then a Stop condition is sent.
@@ -1474,6 +1492,8 @@ static void npcm_i2c_irq_handle_nack(struct npcm_i2c *bus)
                npcm_i2c_clear_master_status(bus);
                readx_poll_timeout_atomic(ioread8, bus->reg + NPCM_I2CCST, val,
                                          !(val & NPCM_I2CCST_BUSY), 10, 200);
+               /* verify no status bits are still set after bus is released */
+               npcm_i2c_clear_master_status(bus);
        }
        bus->state = I2C_IDLE;
 
@@ -1672,10 +1692,10 @@ static int npcm_i2c_recovery_tgclk(struct i2c_adapter *_adap)
        int              iter = 27;
 
        if ((npcm_i2c_get_SDA(_adap) == 1) && (npcm_i2c_get_SCL(_adap) == 1)) {
-               dev_dbg(bus->dev, "bus%d recovery skipped, bus not stuck",
-                       bus->num);
+               dev_dbg(bus->dev, "bus%d-0x%x recovery skipped, bus not stuck",
+                       bus->num, bus->dest_addr);
                npcm_i2c_reset(bus);
-               return status;
+               return 0;
        }
 
        npcm_i2c_int_enable(bus, false);
@@ -1909,6 +1929,7 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode,
            bus_freq_hz < I2C_FREQ_MIN_HZ || bus_freq_hz > I2C_FREQ_MAX_HZ)
                return -EINVAL;
 
+       npcm_i2c_int_enable(bus, false);
        npcm_i2c_disable(bus);
 
        /* Configure FIFO mode : */
@@ -1937,10 +1958,17 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode,
        val = (val | NPCM_I2CCTL1_NMINTE) & ~NPCM_I2CCTL1_RWS;
        iowrite8(val, bus->reg + NPCM_I2CCTL1);
 
-       npcm_i2c_int_enable(bus, true);
-
        npcm_i2c_reset(bus);
 
+       /* check HW is OK: SDA and SCL should be high at this point. */
+       if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) {
+               dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num);
+               dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap),
+                       npcm_i2c_get_SCL(&bus->adap));
+               return -ENXIO;
+       }
+
+       npcm_i2c_int_enable(bus, true);
        return 0;
 }
 
@@ -1988,10 +2016,14 @@ static irqreturn_t npcm_i2c_bus_irq(int irq, void *dev_id)
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
        if (bus->slave) {
                bus->master_or_slave = I2C_SLAVE;
-               return npcm_i2c_int_slave_handler(bus);
+               if (npcm_i2c_int_slave_handler(bus))
+                       return IRQ_HANDLED;
        }
 #endif
-       return IRQ_NONE;
+       /* clear status bits for spurious interrupts */
+       npcm_i2c_clear_master_status(bus);
+
+       return IRQ_HANDLED;
 }
 
 static bool npcm_i2c_master_start_xmit(struct npcm_i2c *bus,
@@ -2047,8 +2079,7 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
        u16 nwrite, nread;
        u8 *write_data, *read_data;
        u8 slave_addr;
-       int timeout;
-       int ret = 0;
+       unsigned long timeout;
        bool read_block = false;
        bool read_PEC = false;
        u8 bus_busy;
@@ -2099,13 +2130,13 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
         * 9: bits per transaction (including the ack/nack)
         */
        timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite);
-       timeout = max(msecs_to_jiffies(35), usecs_to_jiffies(timeout_usec));
+       timeout = max_t(unsigned long, bus->adap.timeout, usecs_to_jiffies(timeout_usec));
        if (nwrite >= 32 * 1024 || nread >= 32 * 1024) {
                dev_err(bus->dev, "i2c%d buffer too big\n", bus->num);
                return -EINVAL;
        }
 
-       time_left = jiffies + msecs_to_jiffies(DEFAULT_STALL_COUNT) + 1;
+       time_left = jiffies + timeout + 1;
        do {
                /*
                 * we must clear slave address immediately when the bus is not
@@ -2138,12 +2169,12 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
        bus->read_block_use = read_block;
 
        reinit_completion(&bus->cmd_complete);
-       if (!npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread,
-                                       write_data, read_data, read_PEC,
-                                       read_block))
-               ret = -EBUSY;
 
-       if (ret != -EBUSY) {
+       npcm_i2c_int_enable(bus, true);
+
+       if (npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread,
+                                      write_data, read_data, read_PEC,
+                                      read_block)) {
                time_left = wait_for_completion_timeout(&bus->cmd_complete,
                                                        timeout);
 
@@ -2157,26 +2188,31 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
                        }
                }
        }
-       ret = bus->cmd_err;
 
        /* if there was BER, check if need to recover the bus: */
        if (bus->cmd_err == -EAGAIN)
-               ret = i2c_recover_bus(adap);
+               bus->cmd_err = i2c_recover_bus(adap);
 
        /*
         * After any type of error, check if LAST bit is still set,
         * due to a HW issue.
         * It cannot be cleared without resetting the module.
         */
-       if (bus->cmd_err &&
-           (NPCM_I2CRXF_CTL_LAST_PEC & ioread8(bus->reg + NPCM_I2CRXF_CTL)))
+       else if (bus->cmd_err &&
+                (NPCM_I2CRXF_CTL_LAST_PEC & ioread8(bus->reg + NPCM_I2CRXF_CTL)))
                npcm_i2c_reset(bus);
 
+       /* after any xfer, successful or not, stall and EOB must be disabled */
+       npcm_i2c_stall_after_start(bus, false);
+       npcm_i2c_eob_int(bus, false);
+
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
        /* reenable slave if it was enabled */
        if (bus->slave)
                iowrite8((bus->slave->addr & 0x7F) | NPCM_I2CADDR_SAEN,
                         bus->reg + NPCM_I2CADDR1);
+#else
+       npcm_i2c_int_enable(bus, false);
 #endif
        return bus->cmd_err;
 }
@@ -2223,17 +2259,18 @@ static void npcm_i2c_init_debugfs(struct platform_device *pdev,
        debugfs_create_u64("rec_succ_cnt", 0444, d, &bus->rec_succ_cnt);
        debugfs_create_u64("rec_fail_cnt", 0444, d, &bus->rec_fail_cnt);
        debugfs_create_u64("timeout_cnt", 0444, d, &bus->timeout_cnt);
+       debugfs_create_u64("tx_complete_cnt", 0444, d, &bus->tx_complete_cnt);
 
        bus->debugfs = d;
 }
 
 static int npcm_i2c_probe_bus(struct platform_device *pdev)
 {
-       struct npcm_i2c *bus;
+       struct device_node *np = pdev->dev.of_node;
+       static struct regmap *gcr_regmap;
        struct i2c_adapter *adap;
+       struct npcm_i2c *bus;
        struct clk *i2c_clk;
-       static struct regmap *gcr_regmap;
-       static struct regmap *clk_regmap;
        int irq;
        int ret;
 
@@ -2250,15 +2287,14 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
                return PTR_ERR(i2c_clk);
        bus->apb_clk = clk_get_rate(i2c_clk);
 
-       gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
+       gcr_regmap = syscon_regmap_lookup_by_phandle(np, "nuvoton,sys-mgr");
+       if (IS_ERR(gcr_regmap))
+               gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
+
        if (IS_ERR(gcr_regmap))
                return PTR_ERR(gcr_regmap);
        regmap_write(gcr_regmap, NPCM_I2CSEGCTL, NPCM_I2CSEGCTL_INIT_VAL);
 
-       clk_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-clk");
-       if (IS_ERR(clk_regmap))
-               return PTR_ERR(clk_regmap);
-
        bus->reg = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(bus->reg))
                return PTR_ERR(bus->reg);
@@ -2269,7 +2305,7 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
        adap = &bus->adap;
        adap->owner = THIS_MODULE;
        adap->retries = 3;
-       adap->timeout = HZ;
+       adap->timeout = msecs_to_jiffies(35);
        adap->algo = &npcm_i2c_algo;
        adap->quirks = &npcm_i2c_quirks;
        adap->algo_data = bus;
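
The npcm transfer timeout above is now derived from the transfer length and the bus frequency instead of a fixed stall count, and then clamped to at least the adapter timeout. The same formula in a standalone sketch:

	#include <stdio.h>

	#define USEC_PER_SEC 1000000UL

	/* Worst-case transfer time in microseconds, as computed in the hunk above:
	 * 9 bits per byte (data + ack/nack), doubled for margin, covering the
	 * address bytes plus the read and write payloads. */
	static unsigned long xfer_timeout_us(unsigned int bus_freq_hz,
					     unsigned int nwrite, unsigned int nread)
	{
		return (2 * 9 * USEC_PER_SEC / bus_freq_hz) * (2 + nread + nwrite);
	}

	int main(void)
	{
		/* 32-byte write + 32-byte read at 100 kHz: ~12 ms */
		printf("%lu us\n", xfer_timeout_us(100000, 32, 32));
		return 0;
	}
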
index 5241e6f..2e74747 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/of_irq.h>
-#include <asm/prom.h>
+
 #include <asm/pmac_low_i2c.h>
 
 MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
index 5b920f0..6ac402e 100644 (file)
@@ -727,16 +727,14 @@ static int setup_gpi_dma(struct geni_i2c_dev *gi2c)
        if (IS_ERR(gi2c->tx_c)) {
                ret = dev_err_probe(gi2c->se.dev, PTR_ERR(gi2c->tx_c),
                                    "Failed to get tx DMA ch\n");
-               if (ret < 0)
-                       goto err_tx;
+               goto err_tx;
        }
 
        gi2c->rx_c = dma_request_chan(gi2c->se.dev, "rx");
        if (IS_ERR(gi2c->rx_c)) {
                ret = dev_err_probe(gi2c->se.dev, PTR_ERR(gi2c->rx_c),
                                    "Failed to get rx DMA ch\n");
-               if (ret < 0)
-                       goto err_rx;
+               goto err_rx;
        }
 
        dev_dbg(gi2c->se.dev, "Grabbed GPI dma channels\n");
index 0db3d75..6e7be9d 100644 (file)
 #define ICDMAER        0x3c    /* DMA enable (Gen3) */
 
 /* ICSCR */
-#define SDBS   (1 << 3)        /* slave data buffer select */
-#define SIE    (1 << 2)        /* slave interface enable */
-#define GCAE   (1 << 1)        /* general call address enable */
-#define FNA    (1 << 0)        /* forced non acknowledgment */
+#define SDBS   BIT(3)  /* slave data buffer select */
+#define SIE    BIT(2)  /* slave interface enable */
+#define GCAE   BIT(1)  /* general call address enable */
+#define FNA    BIT(0)  /* forced non acknowledgment */
 
 /* ICMCR */
-#define MDBS   (1 << 7)        /* non-fifo mode switch */
-#define FSCL   (1 << 6)        /* override SCL pin */
-#define FSDA   (1 << 5)        /* override SDA pin */
-#define OBPC   (1 << 4)        /* override pins */
-#define MIE    (1 << 3)        /* master if enable */
-#define TSBE   (1 << 2)
-#define FSB    (1 << 1)        /* force stop bit */
-#define ESG    (1 << 0)        /* enable start bit gen */
+#define MDBS   BIT(7)  /* non-fifo mode switch */
+#define FSCL   BIT(6)  /* override SCL pin */
+#define FSDA   BIT(5)  /* override SDA pin */
+#define OBPC   BIT(4)  /* override pins */
+#define MIE    BIT(3)  /* master if enable */
+#define TSBE   BIT(2)
+#define FSB    BIT(1)  /* force stop bit */
+#define ESG    BIT(0)  /* enable start bit gen */
 
 /* ICSSR (also for ICSIER) */
-#define GCAR   (1 << 6)        /* general call received */
-#define STM    (1 << 5)        /* slave transmit mode */
-#define SSR    (1 << 4)        /* stop received */
-#define SDE    (1 << 3)        /* slave data empty */
-#define SDT    (1 << 2)        /* slave data transmitted */
-#define SDR    (1 << 1)        /* slave data received */
-#define SAR    (1 << 0)        /* slave addr received */
+#define GCAR   BIT(6)  /* general call received */
+#define STM    BIT(5)  /* slave transmit mode */
+#define SSR    BIT(4)  /* stop received */
+#define SDE    BIT(3)  /* slave data empty */
+#define SDT    BIT(2)  /* slave data transmitted */
+#define SDR    BIT(1)  /* slave data received */
+#define SAR    BIT(0)  /* slave addr received */
 
 /* ICMSR (also for ICMIE) */
-#define MNR    (1 << 6)        /* nack received */
-#define MAL    (1 << 5)        /* arbitration lost */
-#define MST    (1 << 4)        /* sent a stop */
-#define MDE    (1 << 3)
-#define MDT    (1 << 2)
-#define MDR    (1 << 1)
-#define MAT    (1 << 0)        /* slave addr xfer done */
+#define MNR    BIT(6)  /* nack received */
+#define MAL    BIT(5)  /* arbitration lost */
+#define MST    BIT(4)  /* sent a stop */
+#define MDE    BIT(3)
+#define MDT    BIT(2)
+#define MDR    BIT(1)
+#define MAT    BIT(0)  /* slave addr xfer done */
 
 /* ICDMAER */
-#define RSDMAE (1 << 3)        /* DMA Slave Received Enable */
-#define TSDMAE (1 << 2)        /* DMA Slave Transmitted Enable */
-#define RMDMAE (1 << 1)        /* DMA Master Received Enable */
-#define TMDMAE (1 << 0)        /* DMA Master Transmitted Enable */
+#define RSDMAE BIT(3)  /* DMA Slave Received Enable */
+#define TSDMAE BIT(2)  /* DMA Slave Transmitted Enable */
+#define RMDMAE BIT(1)  /* DMA Master Received Enable */
+#define TMDMAE BIT(0)  /* DMA Master Transmitted Enable */
 
 /* ICFBSCR */
 #define TCYC17 0x0f            /* 17*Tcyc delay 1st bit between SDA and SCL */
 #define RCAR_IRQ_RECV  (MNR | MAL | MST | MAT | MDR)
 #define RCAR_IRQ_STOP  (MST)
 
-#define RCAR_IRQ_ACK_SEND      (~(MAT | MDE) & 0x7F)
-#define RCAR_IRQ_ACK_RECV      (~(MAT | MDR) & 0x7F)
-
-#define ID_LAST_MSG    (1 << 0)
-#define ID_FIRST_MSG   (1 << 1)
-#define ID_DONE                (1 << 2)
-#define ID_ARBLOST     (1 << 3)
-#define ID_NACK                (1 << 4)
+#define ID_LAST_MSG            BIT(0)
+#define ID_REP_AFTER_RD                BIT(1)
+#define ID_DONE                        BIT(2)
+#define ID_ARBLOST             BIT(3)
+#define ID_NACK                        BIT(4)
+#define ID_EPROTO              BIT(5)
 /* persistent flags */
-#define ID_P_HOST_NOTIFY       BIT(28)
-#define ID_P_REP_AFTER_RD      BIT(29)
+#define ID_P_NOT_ATOMIC                BIT(28)
+#define ID_P_HOST_NOTIFY       BIT(29)
 #define ID_P_NO_RXDMA          BIT(30) /* HW forbids RXDMA sometimes */
 #define ID_P_PM_BLOCKED                BIT(31)
 #define ID_P_MASK              GENMASK(31, 28)
@@ -141,7 +139,6 @@ struct rcar_i2c_priv {
        enum dma_data_direction dma_direction;
 
        struct reset_control *rstc;
-       bool atomic_xfer;
        int irq;
 
        struct i2c_client *host_notify_client;
@@ -160,6 +157,11 @@ static u32 rcar_i2c_read(struct rcar_i2c_priv *priv, int reg)
        return readl(priv->io + reg);
 }
 
+static void rcar_i2c_clear_irq(struct rcar_i2c_priv *priv, u32 val)
+{
+       writel(~val & 0x7f, priv->io + ICMSR);
+}
+
 static int rcar_i2c_get_scl(struct i2c_adapter *adap)
 {
        struct rcar_i2c_priv *priv = i2c_get_adapdata(adap);
@@ -330,41 +332,46 @@ scgd_find:
        return 0;
 }
 
+/*
+ * We don't have a test case but the HW engineers say that the write order of
+ * ICMSR and ICMCR depends on whether we issue START or REP_START. So, ICMSR
+ * handling is outside of this function. First messages clear ICMSR before this
+ * function, interrupt handlers clear the relevant bits after this function.
+ */
 static void rcar_i2c_prepare_msg(struct rcar_i2c_priv *priv)
 {
        int read = !!rcar_i2c_is_recv(priv);
+       bool rep_start = !(priv->flags & ID_REP_AFTER_RD);
 
        priv->pos = 0;
+       priv->flags &= ID_P_MASK;
+
        if (priv->msgs_left == 1)
                priv->flags |= ID_LAST_MSG;
 
        rcar_i2c_write(priv, ICMAR, i2c_8bit_addr_from_msg(priv->msg));
-       if (!priv->atomic_xfer)
+       if (priv->flags & ID_P_NOT_ATOMIC)
                rcar_i2c_write(priv, ICMIER, read ? RCAR_IRQ_RECV : RCAR_IRQ_SEND);
 
-       /*
-        * We don't have a test case but the HW engineers say that the write order
-        * of ICMSR and ICMCR depends on whether we issue START or REP_START. Since
-        * it didn't cause a drawback for me, let's rather be safe than sorry.
-        */
-       if (priv->flags & ID_FIRST_MSG) {
-               rcar_i2c_write(priv, ICMSR, 0);
+       if (rep_start)
                rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
-       } else {
-               if (priv->flags & ID_P_REP_AFTER_RD)
-                       priv->flags &= ~ID_P_REP_AFTER_RD;
-               else
-                       rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
-               rcar_i2c_write(priv, ICMSR, 0);
-       }
+}
+
+static void rcar_i2c_first_msg(struct rcar_i2c_priv *priv,
+                              struct i2c_msg *msgs, int num)
+{
+       priv->msg = msgs;
+       priv->msgs_left = num;
+       rcar_i2c_write(priv, ICMSR, 0); /* must be before preparing msg */
+       rcar_i2c_prepare_msg(priv);
 }
 
 static void rcar_i2c_next_msg(struct rcar_i2c_priv *priv)
 {
        priv->msg++;
        priv->msgs_left--;
-       priv->flags &= ID_P_MASK;
        rcar_i2c_prepare_msg(priv);
+       /* ICMSR handling must come afterwards in the irq handler */
 }
 
 static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv, bool terminate)
@@ -413,7 +420,7 @@ static bool rcar_i2c_dma(struct rcar_i2c_priv *priv)
        int len;
 
        /* Do various checks to see if DMA is feasible at all */
-       if (priv->atomic_xfer || IS_ERR(chan) || msg->len < RCAR_MIN_DMA_LEN ||
+       if (!(priv->flags & ID_P_NOT_ATOMIC) || IS_ERR(chan) || msg->len < RCAR_MIN_DMA_LEN ||
            !(msg->flags & I2C_M_DMA_SAFE) || (read && priv->flags & ID_P_NO_RXDMA))
                return false;
 
@@ -475,11 +482,15 @@ static bool rcar_i2c_dma(struct rcar_i2c_priv *priv)
 static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
 {
        struct i2c_msg *msg = priv->msg;
+       u32 irqs_to_clear = MDE;
 
        /* FIXME: sometimes, unknown interrupt happened. Do nothing */
        if (!(msr & MDE))
                return;
 
+       if (msr & MAT)
+               irqs_to_clear |= MAT;
+
        /* Check if DMA can be enabled and take over */
        if (priv->pos == 1 && rcar_i2c_dma(priv))
                return;
@@ -503,31 +514,32 @@ static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
                 * [ICRXTX] -> [SHIFT] -> [I2C bus]
                 */
 
-               if (priv->flags & ID_LAST_MSG) {
+               if (priv->flags & ID_LAST_MSG)
                        /*
                         * If current msg is the _LAST_ msg,
                         * prepare stop condition here.
                         * ID_DONE will be set on STOP irq.
                         */
                        rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP);
-               } else {
+               else
                        rcar_i2c_next_msg(priv);
-                       return;
-               }
        }
 
-       rcar_i2c_write(priv, ICMSR, RCAR_IRQ_ACK_SEND);
+       rcar_i2c_clear_irq(priv, irqs_to_clear);
 }
 
 static void rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
 {
        struct i2c_msg *msg = priv->msg;
+       bool recv_len_init = priv->pos == 0 && msg->flags & I2C_M_RECV_LEN;
+       u32 irqs_to_clear = MDR;
 
        /* FIXME: sometimes, unknown interrupt happened. Do nothing */
        if (!(msr & MDR))
                return;
 
        if (msr & MAT) {
+               irqs_to_clear |= MAT;
                /*
                 * Address transfer phase finished, but no data at this point.
                 * Try to use DMA to receive data.
@@ -535,24 +547,41 @@ static void rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
                rcar_i2c_dma(priv);
        } else if (priv->pos < msg->len) {
                /* get received data */
-               msg->buf[priv->pos] = rcar_i2c_read(priv, ICRXTX);
+               u8 data = rcar_i2c_read(priv, ICRXTX);
+
+               msg->buf[priv->pos] = data;
+               if (recv_len_init) {
+                       if (data == 0 || data > I2C_SMBUS_BLOCK_MAX) {
+                               priv->flags |= ID_DONE | ID_EPROTO;
+                               return;
+                       }
+                       msg->len += msg->buf[0];
+                       /* Enough data for DMA? */
+                       if (rcar_i2c_dma(priv))
+                               return;
+                       /* new length after RECV_LEN now properly initialized */
+                       recv_len_init = false;
+               }
                priv->pos++;
        }
 
-       /* If next received data is the _LAST_, go to new phase. */
-       if (priv->pos + 1 == msg->len) {
+       /*
+        * If next received data is the _LAST_ and we are not waiting for a new
+        * length because of RECV_LEN, then go to a new phase.
+        */
+       if (priv->pos + 1 == msg->len && !recv_len_init) {
                if (priv->flags & ID_LAST_MSG) {
                        rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP);
                } else {
                        rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
-                       priv->flags |= ID_P_REP_AFTER_RD;
+                       priv->flags |= ID_REP_AFTER_RD;
                }
        }
 
        if (priv->pos == msg->len && !(priv->flags & ID_LAST_MSG))
                rcar_i2c_next_msg(priv);
-       else
-               rcar_i2c_write(priv, ICMSR, RCAR_IRQ_ACK_RECV);
+
+       rcar_i2c_clear_irq(priv, irqs_to_clear);
 }
 
 static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
@@ -641,7 +670,7 @@ static irqreturn_t rcar_i2c_irq(int irq, struct rcar_i2c_priv *priv, u32 msr)
        /* Nack */
        if (msr & MNR) {
                /* HW automatically sends STOP after received NACK */
-               if (!priv->atomic_xfer)
+               if (priv->flags & ID_P_NOT_ATOMIC)
                        rcar_i2c_write(priv, ICMIER, RCAR_IRQ_STOP);
                priv->flags |= ID_NACK;
                goto out;
@@ -663,7 +692,7 @@ out:
        if (priv->flags & ID_DONE) {
                rcar_i2c_write(priv, ICMIER, 0);
                rcar_i2c_write(priv, ICMSR, 0);
-               if (!priv->atomic_xfer)
+               if (priv->flags & ID_P_NOT_ATOMIC)
                        wake_up(&priv->wait);
        }
 
@@ -676,12 +705,12 @@ static irqreturn_t rcar_i2c_gen2_irq(int irq, void *ptr)
        u32 msr;
 
        /* Clear START or STOP immediately, except for REPSTART after read */
-       if (likely(!(priv->flags & ID_P_REP_AFTER_RD)))
+       if (likely(!(priv->flags & ID_REP_AFTER_RD)))
                rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
 
        /* Only handle interrupts that are currently enabled */
        msr = rcar_i2c_read(priv, ICMSR);
-       if (!priv->atomic_xfer)
+       if (priv->flags & ID_P_NOT_ATOMIC)
                msr &= rcar_i2c_read(priv, ICMIER);
 
        return rcar_i2c_irq(irq, priv, msr);
@@ -694,14 +723,14 @@ static irqreturn_t rcar_i2c_gen3_irq(int irq, void *ptr)
 
        /* Only handle interrupts that are currently enabled */
        msr = rcar_i2c_read(priv, ICMSR);
-       if (!priv->atomic_xfer)
+       if (priv->flags & ID_P_NOT_ATOMIC)
                msr &= rcar_i2c_read(priv, ICMIER);
 
        /*
         * Clear START or STOP immediately, except for REPSTART after read or
         * if a spurious interrupt was detected.
         */
-       if (likely(!(priv->flags & ID_P_REP_AFTER_RD) && msr))
+       if (likely(!(priv->flags & ID_REP_AFTER_RD) && msr))
                rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
 
        return rcar_i2c_irq(irq, priv, msr);
@@ -803,7 +832,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
        int i, ret;
        long time_left;
 
-       priv->atomic_xfer = false;
+       priv->flags |= ID_P_NOT_ATOMIC;
 
        pm_runtime_get_sync(dev);
 
@@ -827,11 +856,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
        for (i = 0; i < num; i++)
                rcar_i2c_request_dma(priv, msgs + i);
 
-       /* init first message */
-       priv->msg = msgs;
-       priv->msgs_left = num;
-       priv->flags = (priv->flags & ID_P_MASK) | ID_FIRST_MSG;
-       rcar_i2c_prepare_msg(priv);
+       rcar_i2c_first_msg(priv, msgs, num);
 
        time_left = wait_event_timeout(priv->wait, priv->flags & ID_DONE,
                                     num * adap->timeout);
@@ -847,6 +872,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
                ret = -ENXIO;
        } else if (priv->flags & ID_ARBLOST) {
                ret = -EAGAIN;
+       } else if (priv->flags & ID_EPROTO) {
+               ret = -EPROTO;
        } else {
                ret = num - priv->msgs_left; /* The number of transfer */
        }
@@ -869,7 +896,7 @@ static int rcar_i2c_master_xfer_atomic(struct i2c_adapter *adap,
        bool time_left;
        int ret;
 
-       priv->atomic_xfer = true;
+       priv->flags &= ~ID_P_NOT_ATOMIC;
 
        pm_runtime_get_sync(dev);
 
@@ -879,12 +906,7 @@ static int rcar_i2c_master_xfer_atomic(struct i2c_adapter *adap,
                goto out;
 
        rcar_i2c_init(priv);
-
-       /* init first message */
-       priv->msg = msgs;
-       priv->msgs_left = num;
-       priv->flags = (priv->flags & ID_P_MASK) | ID_FIRST_MSG;
-       rcar_i2c_prepare_msg(priv);
+       rcar_i2c_first_msg(priv, msgs, num);
 
        j = jiffies + num * adap->timeout;
        do {
@@ -909,6 +931,8 @@ static int rcar_i2c_master_xfer_atomic(struct i2c_adapter *adap,
                ret = -ENXIO;
        } else if (priv->flags & ID_ARBLOST) {
                ret = -EAGAIN;
+       } else if (priv->flags & ID_EPROTO) {
+               ret = -EPROTO;
        } else {
                ret = num - priv->msgs_left; /* The number of transfer */
        }
@@ -975,7 +999,7 @@ static u32 rcar_i2c_func(struct i2c_adapter *adap)
         * I2C_M_IGNORE_NAK (automatically sends STOP after NAK)
         */
        u32 func = I2C_FUNC_I2C | I2C_FUNC_SLAVE |
-                  (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+                  (I2C_FUNC_SMBUS_EMUL_ALL & ~I2C_FUNC_SMBUS_QUICK);
 
        if (priv->flags & ID_P_HOST_NOTIFY)
                func |= I2C_FUNC_SMBUS_HOST_NOTIFY;
@@ -1063,8 +1087,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
        pm_runtime_enable(dev);
        pm_runtime_get_sync(dev);
        ret = rcar_i2c_clock_calculate(priv);
-       if (ret < 0)
-               goto out_pm_put;
+       if (ret < 0) {
+               pm_runtime_put(dev);
+               goto out_pm_disable;
+       }
 
        rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
 
@@ -1093,19 +1119,19 @@ static int rcar_i2c_probe(struct platform_device *pdev)
 
        ret = platform_get_irq(pdev, 0);
        if (ret < 0)
-               goto out_pm_disable;
+               goto out_pm_put;
        priv->irq = ret;
        ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
        if (ret < 0) {
                dev_err(dev, "cannot get irq %d\n", priv->irq);
-               goto out_pm_disable;
+               goto out_pm_put;
        }
 
        platform_set_drvdata(pdev, priv);
 
        ret = i2c_add_numbered_adapter(adap);
        if (ret < 0)
-               goto out_pm_disable;
+               goto out_pm_put;
 
        if (priv->flags & ID_P_HOST_NOTIFY) {
                priv->host_notify_client = i2c_new_slave_host_notify_device(adap);
@@ -1122,7 +1148,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
  out_del_device:
        i2c_del_adapter(&priv->adap);
  out_pm_put:
-       pm_runtime_put(dev);
+       if (priv->flags & ID_P_PM_BLOCKED)
+               pm_runtime_put(dev);
  out_pm_disable:
        pm_runtime_disable(dev);
        return ret;
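
The hunks above fold the duplicated "init first message" block from both transfer paths into a new rcar_i2c_first_msg() helper whose definition is not part of this excerpt. A plausible shape for it, inferred directly from the removed lines (a sketch, not necessarily the exact upstream helper):

static void rcar_i2c_first_msg(struct rcar_i2c_priv *priv,
                               struct i2c_msg *msgs, int num)
{
        /* Mirrors the open-coded block removed from both xfer paths above. */
        priv->msg = msgs;
        priv->msgs_left = num;
        priv->flags = (priv->flags & ID_P_MASK) | ID_FIRST_MSG;
        rcar_i2c_prepare_msg(priv);
}

Factoring this out keeps the blocking and atomic transfer paths in sync whenever the first-message setup changes.
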
index ffefe3c..9a1c3f8 100644 (file)
@@ -78,24 +78,23 @@ struct xiic_i2c {
        bool singlemaster;
 };
 
-
 #define XIIC_MSB_OFFSET 0
-#define XIIC_REG_OFFSET (0x100+XIIC_MSB_OFFSET)
+#define XIIC_REG_OFFSET (0x100 + XIIC_MSB_OFFSET)
 
 /*
  * Register offsets in bytes from RegisterBase. Three is added to the
  * base offset to access LSB (IBM style) of the word
  */
-#define XIIC_CR_REG_OFFSET   (0x00+XIIC_REG_OFFSET)    /* Control Register   */
-#define XIIC_SR_REG_OFFSET   (0x04+XIIC_REG_OFFSET)    /* Status Register    */
-#define XIIC_DTR_REG_OFFSET  (0x08+XIIC_REG_OFFSET)    /* Data Tx Register   */
-#define XIIC_DRR_REG_OFFSET  (0x0C+XIIC_REG_OFFSET)    /* Data Rx Register   */
-#define XIIC_ADR_REG_OFFSET  (0x10+XIIC_REG_OFFSET)    /* Address Register   */
-#define XIIC_TFO_REG_OFFSET  (0x14+XIIC_REG_OFFSET)    /* Tx FIFO Occupancy  */
-#define XIIC_RFO_REG_OFFSET  (0x18+XIIC_REG_OFFSET)    /* Rx FIFO Occupancy  */
-#define XIIC_TBA_REG_OFFSET  (0x1C+XIIC_REG_OFFSET)    /* 10 Bit Address reg */
-#define XIIC_RFD_REG_OFFSET  (0x20+XIIC_REG_OFFSET)    /* Rx FIFO Depth reg  */
-#define XIIC_GPO_REG_OFFSET  (0x24+XIIC_REG_OFFSET)    /* Output Register    */
+#define XIIC_CR_REG_OFFSET   (0x00 + XIIC_REG_OFFSET)  /* Control Register   */
+#define XIIC_SR_REG_OFFSET   (0x04 + XIIC_REG_OFFSET)  /* Status Register    */
+#define XIIC_DTR_REG_OFFSET  (0x08 + XIIC_REG_OFFSET)  /* Data Tx Register   */
+#define XIIC_DRR_REG_OFFSET  (0x0C + XIIC_REG_OFFSET)  /* Data Rx Register   */
+#define XIIC_ADR_REG_OFFSET  (0x10 + XIIC_REG_OFFSET)  /* Address Register   */
+#define XIIC_TFO_REG_OFFSET  (0x14 + XIIC_REG_OFFSET)  /* Tx FIFO Occupancy  */
+#define XIIC_RFO_REG_OFFSET  (0x18 + XIIC_REG_OFFSET)  /* Rx FIFO Occupancy  */
+#define XIIC_TBA_REG_OFFSET  (0x1C + XIIC_REG_OFFSET)  /* 10 Bit Address reg */
+#define XIIC_RFD_REG_OFFSET  (0x20 + XIIC_REG_OFFSET)  /* Rx FIFO Depth reg  */
+#define XIIC_GPO_REG_OFFSET  (0x24 + XIIC_REG_OFFSET)  /* Output Register    */
 
 /* Control Register masks */
 #define XIIC_CR_ENABLE_DEVICE_MASK        0x01 /* Device enable = 1      */
@@ -233,18 +232,21 @@ static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg)
 static inline void xiic_irq_dis(struct xiic_i2c *i2c, u32 mask)
 {
        u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
+
        xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier & ~mask);
 }
 
 static inline void xiic_irq_en(struct xiic_i2c *i2c, u32 mask)
 {
        u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
+
        xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier | mask);
 }
 
 static inline void xiic_irq_clr(struct xiic_i2c *i2c, u32 mask)
 {
        u32 isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
+
        xiic_setreg32(i2c, XIIC_IISR_OFFSET, isr & mask);
 }
 
@@ -355,7 +357,8 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
 
        while (len--) {
                u16 data = i2c->tx_msg->buf[i2c->tx_pos++];
-               if ((xiic_tx_space(i2c) == 0) && (i2c->nmsgs == 1)) {
+
+               if (!xiic_tx_space(i2c) && i2c->nmsgs == 1) {
                        /* last message in transfer -> STOP */
                        data |= XIIC_TX_DYN_STOP_MASK;
                        dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
@@ -381,6 +384,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
        int xfer_more = 0;
        int wakeup_req = 0;
        int wakeup_code = 0;
+       int ret;
 
        /* Get the interrupt Status from the IPIF. There is no clearing of
         * interrupts in the IPIF. Interrupts must be cleared at the source.
@@ -401,8 +405,8 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
 
        /* Service requesting interrupt */
        if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
-               ((pend & XIIC_INTR_TX_ERROR_MASK) &&
-               !(pend & XIIC_INTR_RX_FULL_MASK))) {
+           ((pend & XIIC_INTR_TX_ERROR_MASK) &&
+           !(pend & XIIC_INTR_RX_FULL_MASK))) {
                /* bus arbitration lost, or...
                 * Transmit error _OR_ RX completed
                 * if this happens when RX_FULL is not set
@@ -415,7 +419,9 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
                 * fifos and the next message is a TX with len 0 (only addr)
                 * reset the IP instead of just flush fifos
                 */
-               xiic_reinit(i2c);
+               ret = xiic_reinit(i2c);
+               if (ret < 0)
+                       dev_dbg(i2c->adap.dev.parent, "reinit failed\n");
 
                if (i2c->rx_msg) {
                        wakeup_req = 1;
@@ -462,24 +468,6 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
                        }
                }
        }
-       if (pend & XIIC_INTR_BNB_MASK) {
-               /* IIC bus has transitioned to not busy */
-               clr |= XIIC_INTR_BNB_MASK;
-
-               /* The bus is not busy, disable BusNotBusy interrupt */
-               xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);
-
-               if (!i2c->tx_msg)
-                       goto out;
-
-               wakeup_req = 1;
-
-               if (i2c->nmsgs == 1 && !i2c->rx_msg &&
-                   xiic_tx_space(i2c) == 0)
-                       wakeup_code = STATE_DONE;
-               else
-                       wakeup_code = STATE_ERROR;
-       }
        if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
                /* Transmit register/FIFO is empty or ½ empty */
 
@@ -516,6 +504,26 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
                         */
                        xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
        }
+
+       if (pend & XIIC_INTR_BNB_MASK) {
+               /* IIC bus has transitioned to not busy */
+               clr |= XIIC_INTR_BNB_MASK;
+
+               /* The bus is not busy, disable BusNotBusy interrupt */
+               xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);
+
+               if (!i2c->tx_msg)
+                       goto out;
+
+               wakeup_req = 1;
+
+               if (i2c->nmsgs == 1 && !i2c->rx_msg &&
+                   xiic_tx_space(i2c) == 0)
+                       wakeup_code = STATE_DONE;
+               else
+                       wakeup_code = STATE_ERROR;
+       }
+
 out:
        dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr);
 
@@ -570,7 +578,7 @@ static int xiic_busy(struct xiic_i2c *i2c)
 
 static void xiic_start_recv(struct xiic_i2c *i2c)
 {
-       u8 rx_watermark;
+       u16 rx_watermark;
        struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
 
        /* Clear and enable Rx full interrupt. */
@@ -585,7 +593,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
        rx_watermark = msg->len;
        if (rx_watermark > IIC_RX_FIFO_DEPTH)
                rx_watermark = IIC_RX_FIFO_DEPTH;
-       xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
+       xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, (u8)(rx_watermark - 1));
 
        if (!(msg->flags & I2C_M_NOSTART))
                /* write the address */
@@ -638,6 +646,7 @@ static void xiic_start_send(struct xiic_i2c *i2c)
 static void __xiic_start_xfer(struct xiic_i2c *i2c)
 {
        int fifo_space = xiic_tx_fifo_space(i2c);
+
        dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifos space: %d\n",
                __func__, i2c->tx_msg, fifo_space);
 
@@ -739,7 +748,6 @@ static const struct i2c_adapter xiic_adapter = {
        .quirks = &xiic_quirks,
 };
 
-
 static int xiic_i2c_probe(struct platform_device *pdev)
 {
        struct xiic_i2c *i2c;
@@ -899,6 +907,7 @@ static const struct dev_pm_ops xiic_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(xiic_i2c_runtime_suspend,
                           xiic_i2c_runtime_resume, NULL)
 };
+
 static struct platform_driver xiic_i2c_driver = {
        .probe   = xiic_i2c_probe,
        .remove  = xiic_i2c_remove,
@@ -914,4 +923,3 @@ module_platform_driver(xiic_i2c_driver);
 MODULE_AUTHOR("info@mocean-labs.com");
 MODULE_DESCRIPTION("Xilinx I2C bus driver");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:"DRIVER_NAME);
index 8c01123..6aef5ce 100644 (file)
@@ -768,13 +768,8 @@ static int i3c_hci_probe(struct platform_device *pdev)
 static int i3c_hci_remove(struct platform_device *pdev)
 {
        struct i3c_hci *hci = platform_get_drvdata(pdev);
-       int ret;
 
-       ret = i3c_master_unregister(&hci->master);
-       if (ret)
-               return ret;
-
-       return 0;
+       return i3c_master_unregister(&hci->master);
 }
 
 static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
index 7550dad..d6e9ed7 100644 (file)
@@ -1597,12 +1597,11 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
 static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
 {
        struct svc_i3c_master *master = dev_get_drvdata(dev);
-       int ret = 0;
 
        pinctrl_pm_select_default_state(dev);
        svc_i3c_master_prepare_clks(master);
 
-       return ret;
+       return 0;
 }
 
 static const struct dev_pm_ops svc_i3c_pm_ops = {
index eac3f02..b53f010 100644 (file)
@@ -290,7 +290,6 @@ config DA311
 
 config DMARD06
        tristate "Domintech DMARD06 Digital Accelerometer Driver"
-       depends on OF || COMPILE_TEST
        depends on I2C
        help
          Say yes here to build support for the Domintech low-g tri-axial
index e9c10c8..7561399 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/math64.h>
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
-#include <linux/of_irq.h>
+#include <linux/property.h>
 #include <linux/regmap.h>
 #include <linux/units.h>
 
@@ -745,10 +745,7 @@ int adxl355_core_probe(struct device *dev, struct regmap *regmap,
                return ret;
        }
 
-       /*
-        * TODO: Would be good to move it to the generic version.
-        */
-       irq = of_irq_get_byname(dev->of_node, "DRDY");
+       irq = fwnode_irq_get_byname(dev_fwnode(dev), "DRDY");
        if (irq > 0) {
                ret = adxl355_probe_trigger(indio_dev, irq);
                if (ret)
index 6296013..0289ed8 100644 (file)
@@ -1567,7 +1567,6 @@ int adxl367_probe(struct device *dev, const struct adxl367_ops *ops,
                return ret;
 
        ret = devm_iio_kfifo_buffer_setup_ext(st->dev, indio_dev,
-                                             INDIO_BUFFER_SOFTWARE,
                                              &adxl367_buffer_ops,
                                              adxl367_fifo_attributes);
        if (ret)
index 7516d7d..57e8a83 100644 (file)
@@ -1525,7 +1525,7 @@ static int bmc150_accel_buffer_postenable(struct iio_dev *indio_dev)
        struct bmc150_accel_data *data = iio_priv(indio_dev);
        int ret = 0;
 
-       if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
+       if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
                return 0;
 
        mutex_lock(&data->mutex);
@@ -1557,7 +1557,7 @@ static int bmc150_accel_buffer_predisable(struct iio_dev *indio_dev)
 {
        struct bmc150_accel_data *data = iio_priv(indio_dev);
 
-       if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
+       if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
                return 0;
 
        mutex_lock(&data->mutex);
index 53ab607..cb0246c 100644 (file)
@@ -24,7 +24,7 @@
 #define DMARD09_AXIS_Y 1
 #define DMARD09_AXIS_Z 2
 #define DMARD09_AXIS_X_OFFSET ((DMARD09_AXIS_X + 1) * 2)
-#define DMARD09_AXIS_Y_OFFSET ((DMARD09_AXIS_Y + 1 )* 2)
+#define DMARD09_AXIS_Y_OFFSET ((DMARD09_AXIS_Y + 1) * 2)
 #define DMARD09_AXIS_Z_OFFSET ((DMARD09_AXIS_Z + 1) * 2)
 
 struct dmard09_data {
index a9d2f10..8874d6d 100644 (file)
@@ -1217,7 +1217,6 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq)
                        return ret;
 
                ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
-                                                 INDIO_BUFFER_SOFTWARE,
                                                  &fxls8962af_buffer_ops);
                if (ret)
                        return ret;
index ec17e35..b7b5af4 100644 (file)
@@ -44,8 +44,8 @@ static const struct spi_device_id kxsd9_spi_id[] = {
 MODULE_DEVICE_TABLE(spi, kxsd9_spi_id);
 
 static const struct of_device_id kxsd9_of_match[] = {
-        { .compatible = "kionix,kxsd9" },
-        { },
+       { .compatible = "kionix,kxsd9" },
+       { }
 };
 MODULE_DEVICE_TABLE(of, kxsd9_of_match);
 
index 9c02c68..912a447 100644 (file)
@@ -166,6 +166,7 @@ static const struct mma8452_event_regs trans_ev_regs = {
 
 /**
  * struct mma_chip_info - chip specific data
+ * @name:                      part number of device reported via 'name' attr
  * @chip_id:                   WHO_AM_I register's value
  * @channels:                  struct iio_chan_spec matching the device's
  *                             capabilities
index 83c8107..29a68a7 100644 (file)
@@ -1474,7 +1474,6 @@ static int sca3000_probe(struct spi_device *spi)
        indio_dev->modes = INDIO_DIRECT_MODE;
 
        ret = devm_iio_kfifo_buffer_setup(&spi->dev, indio_dev,
-                                         INDIO_BUFFER_SOFTWARE,
                                          &sca3000_ring_setup_ops);
        if (ret)
                return ret;
index a1164b4..7ca9d0d 100644 (file)
@@ -113,7 +113,6 @@ static int ssp_accel_probe(struct platform_device *pdev)
        indio_dev->available_scan_masks = ssp_accel_scan_mask;
 
        ret = devm_iio_kfifo_buffer_setup(&pdev->dev, indio_dev,
-                                         INDIO_BUFFER_SOFTWARE,
                                          &ssp_accel_buffer_ops);
        if (ret)
                return ret;
index 00e056c..5b0f54e 100644 (file)
 #include <linux/types.h>
 #include <linux/iio/common/st_sensors.h>
 
-enum st_accel_type {
-       LSM303DLH,
-       LSM303DLHC,
-       LIS3DH,
-       LSM330D,
-       LSM330DL,
-       LSM330DLC,
-       LIS331DLH,
-       LSM303DL,
-       LSM303DLM,
-       LSM330,
-       LSM303AGR,
-       LIS2DH12,
-       LIS3L02DQ,
-       LNG2DM,
-       H3LIS331DL,
-       LIS331DL,
-       LIS3LV02DL,
-       LIS2DW12,
-       LIS3DHH,
-       LIS2DE12,
-       LIS2HH12,
-       SC7A20,
-       ST_ACCEL_MAX,
-};
-
 #define H3LIS331DL_ACCEL_DEV_NAME      "h3lis331dl_accel"
 #define LIS3LV02DL_ACCEL_DEV_NAME      "lis3lv02dl_accel"
 #define LSM303DLHC_ACCEL_DEV_NAME      "lsm303dlhc_accel"
@@ -62,8 +36,10 @@ enum st_accel_type {
 #define LIS3DE_ACCEL_DEV_NAME          "lis3de"
 #define LIS2DE12_ACCEL_DEV_NAME                "lis2de12"
 #define LIS2HH12_ACCEL_DEV_NAME                "lis2hh12"
+#define LIS302DL_ACCEL_DEV_NAME                "lis302dl"
 #define SC7A20_ACCEL_DEV_NAME          "sc7a20"
 
+
 #ifdef CONFIG_IIO_BUFFER
 int st_accel_allocate_ring(struct iio_dev *indio_dev);
 int st_accel_trig_set_state(struct iio_trigger *trig, bool state);
index 5c5da6f..c8c8eb1 100644 (file)
@@ -444,6 +444,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
                .sensors_supported = {
                        [0] = LIS331DL_ACCEL_DEV_NAME,
+                       [1] = LIS302DL_ACCEL_DEV_NAME,
                },
                .ch = (struct iio_chan_spec *)st_accel_8bit_channels,
                .odr = {
@@ -1209,28 +1210,21 @@ read_error:
 static int st_accel_write_raw(struct iio_dev *indio_dev,
                struct iio_chan_spec const *chan, int val, int val2, long mask)
 {
-       int err;
-
        switch (mask) {
        case IIO_CHAN_INFO_SCALE: {
                int gain;
 
                gain = val * 1000000 + val2;
-               err = st_sensors_set_fullscale_by_gain(indio_dev, gain);
-               break;
+               return st_sensors_set_fullscale_by_gain(indio_dev, gain);
        }
        case IIO_CHAN_INFO_SAMP_FREQ:
                if (val2)
                        return -EINVAL;
-               mutex_lock(&indio_dev->mlock);
-               err = st_sensors_set_odr(indio_dev, val);
-               mutex_unlock(&indio_dev->mlock);
-               return err;
+
+               return st_sensors_set_odr(indio_dev, val);
        default:
                return -EINVAL;
        }
-
-       return err;
 }
 
 static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
index 96adc43..45ee0dd 100644 (file)
@@ -108,6 +108,10 @@ static const struct of_device_id st_accel_of_match[] = {
                .data = LIS2HH12_ACCEL_DEV_NAME,
        },
        {
+               .compatible = "st,lis302dl",
+               .data = LIS302DL_ACCEL_DEV_NAME,
+       },
+       {
                .compatible = "silan,sc7a20",
                .data = SC7A20_ACCEL_DEV_NAME,
        },
@@ -146,6 +150,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
        { LIS3DE_ACCEL_DEV_NAME },
        { LIS2DE12_ACCEL_DEV_NAME },
        { LIS2HH12_ACCEL_DEV_NAME },
+       { LIS302DL_ACCEL_DEV_NAME },
        { SC7A20_ACCEL_DEV_NAME },
        {},
 };
index 108b63d..6c09177 100644 (file)
@@ -92,6 +92,10 @@ static const struct of_device_id st_accel_of_match[] = {
                .compatible = "st,lis3de",
                .data = LIS3DE_ACCEL_DEV_NAME,
        },
+       {
+               .compatible = "st,lis302dl",
+               .data = LIS302DL_ACCEL_DEV_NAME,
+       },
        {}
 };
 MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -147,6 +151,7 @@ static const struct spi_device_id st_accel_id_table[] = {
        { LIS2DW12_ACCEL_DEV_NAME },
        { LIS3DHH_ACCEL_DEV_NAME },
        { LIS3DE_ACCEL_DEV_NAME },
+       { LIS302DL_ACCEL_DEV_NAME },
        {},
 };
 MODULE_DEVICE_TABLE(spi, st_accel_id_table);
index 71ab0a0..48ace74 100644 (file)
@@ -910,7 +910,7 @@ config ROCKCHIP_SARADC
 
 config RZG2L_ADC
        tristate "Renesas RZ/G2L ADC driver"
-       depends on ARCH_R9A07G044 || COMPILE_TEST
+       depends on ARCH_RZG2L || COMPILE_TEST
        help
          Say yes here to build support for the ADC found in Renesas
          RZ/G2L family.
index c47ead1..c5b785d 100644 (file)
@@ -43,6 +43,8 @@
 #define AD7124_STATUS_POR_FLAG_MSK     BIT(4)
 
 /* AD7124_ADC_CONTROL */
+#define AD7124_ADC_STATUS_EN_MSK       BIT(10)
+#define AD7124_ADC_STATUS_EN(x)                FIELD_PREP(AD7124_ADC_STATUS_EN_MSK, x)
 #define AD7124_ADC_CTRL_REF_EN_MSK     BIT(8)
 #define AD7124_ADC_CTRL_REF_EN(x)      FIELD_PREP(AD7124_ADC_CTRL_REF_EN_MSK, x)
 #define AD7124_ADC_CTRL_PWR_MSK        GENMASK(7, 6)
@@ -188,7 +190,6 @@ static const struct iio_chan_spec ad7124_channel_template = {
                .sign = 'u',
                .realbits = 24,
                .storagebits = 32,
-               .shift = 8,
                .endianness = IIO_BE,
        },
 };
@@ -501,26 +502,70 @@ static int ad7124_prepare_read(struct ad7124_state *st, int address)
        return ad7124_enable_channel(st, &st->channels[address]);
 }
 
+static int __ad7124_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
+{
+       struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
+
+       return ad7124_prepare_read(st, channel);
+}
+
 static int ad7124_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
 {
        struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
        int ret;
 
        mutex_lock(&st->cfgs_lock);
-       ret = ad7124_prepare_read(st, channel);
+       ret = __ad7124_set_channel(sd, channel);
        mutex_unlock(&st->cfgs_lock);
 
        return ret;
 }
 
+static int ad7124_append_status(struct ad_sigma_delta *sd, bool append)
+{
+       struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
+       unsigned int adc_control = st->adc_control;
+       int ret;
+
+       adc_control &= ~AD7124_ADC_STATUS_EN_MSK;
+       adc_control |= AD7124_ADC_STATUS_EN(append);
+
+       ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, adc_control);
+       if (ret < 0)
+               return ret;
+
+       st->adc_control = adc_control;
+
+       return 0;
+}
+
+static int ad7124_disable_all(struct ad_sigma_delta *sd)
+{
+       struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
+       int ret;
+       int i;
+
+       for (i = 0; i < st->num_channels; i++) {
+               ret = ad7124_spi_write_mask(st, AD7124_CHANNEL(i), AD7124_CHANNEL_EN_MSK, 0, 2);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static const struct ad_sigma_delta_info ad7124_sigma_delta_info = {
        .set_channel = ad7124_set_channel,
+       .append_status = ad7124_append_status,
+       .disable_all = ad7124_disable_all,
        .set_mode = ad7124_set_mode,
        .has_registers = true,
        .addr_shift = 0,
        .read_mask = BIT(6),
+       .status_ch_mask = GENMASK(3, 0),
        .data_reg = AD7124_DATA,
-       .irq_flags = IRQF_TRIGGER_FALLING
+       .num_slots = 8,
+       .irq_flags = IRQF_TRIGGER_FALLING,
 };
 
 static int ad7124_read_raw(struct iio_dev *indio_dev,
@@ -670,11 +715,40 @@ static const struct attribute_group ad7124_attrs_group = {
        .attrs = ad7124_attributes,
 };
 
+static int ad7124_update_scan_mode(struct iio_dev *indio_dev,
+                                  const unsigned long *scan_mask)
+{
+       struct ad7124_state *st = iio_priv(indio_dev);
+       bool bit_set;
+       int ret;
+       int i;
+
+       mutex_lock(&st->cfgs_lock);
+       for (i = 0; i < st->num_channels; i++) {
+               bit_set = test_bit(i, scan_mask);
+               if (bit_set)
+                       ret = __ad7124_set_channel(&st->sd, i);
+               else
+                       ret = ad7124_spi_write_mask(st, AD7124_CHANNEL(i), AD7124_CHANNEL_EN_MSK,
+                                                   0, 2);
+               if (ret < 0) {
+                       mutex_unlock(&st->cfgs_lock);
+
+                       return ret;
+               }
+       }
+
+       mutex_unlock(&st->cfgs_lock);
+
+       return 0;
+}
+
 static const struct iio_info ad7124_info = {
        .read_raw = ad7124_read_raw,
        .write_raw = ad7124_write_raw,
        .debugfs_reg_access = &ad7124_reg_access,
        .validate_trigger = ad_sd_validate_trigger,
+       .update_scan_mode = ad7124_update_scan_mode,
        .attrs = &ad7124_attrs_group,
 };
 
@@ -886,12 +960,14 @@ static int ad7124_probe(struct spi_device *spi)
 
        st->chip_info = info;
 
-       ad_sd_init(&st->sd, indio_dev, spi, &ad7124_sigma_delta_info);
-
        indio_dev->name = st->chip_info->name;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->info = &ad7124_info;
 
+       ret = ad_sd_init(&st->sd, indio_dev, spi, &ad7124_sigma_delta_info);
+       if (ret < 0)
+               return ret;
+
        ret = ad7124_of_parse_channel_config(indio_dev, spi->dev.of_node);
        if (ret < 0)
                return ret;
index 770b4e5..d71977b 100644 (file)
@@ -58,7 +58,8 @@
 /* Mode Register Bit Designations (AD7192_REG_MODE) */
 #define AD7192_MODE_SEL(x)     (((x) & 0x7) << 21) /* Operation Mode Select */
 #define AD7192_MODE_SEL_MASK   (0x7 << 21) /* Operation Mode Select Mask */
-#define AD7192_MODE_DAT_STA    BIT(20) /* Status Register transmission */
+#define AD7192_MODE_STA(x)     (((x) & 0x1) << 20) /* Status Register transmission */
+#define AD7192_MODE_STA_MASK   BIT(20) /* Status Register transmission Mask */
 #define AD7192_MODE_CLKSRC(x)  (((x) & 0x3) << 18) /* Clock Source Select */
 #define AD7192_MODE_SINC3      BIT(15) /* SINC3 Filter Select */
 #define AD7192_MODE_ACX                BIT(14) /* AC excitation enable(AD7195 only)*/
@@ -225,7 +226,7 @@ static ssize_t ad7192_write_syscalib(struct iio_dev *indio_dev,
        bool sys_calib;
        int ret, temp;
 
-       ret = strtobool(buf, &sys_calib);
+       ret = kstrtobool(buf, &sys_calib);
        if (ret)
                return ret;
 
@@ -288,12 +289,51 @@ static int ad7192_set_mode(struct ad_sigma_delta *sd,
        return ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
 }
 
+static int ad7192_append_status(struct ad_sigma_delta *sd, bool append)
+{
+       struct ad7192_state *st = ad_sigma_delta_to_ad7192(sd);
+       unsigned int mode = st->mode;
+       int ret;
+
+       mode &= ~AD7192_MODE_STA_MASK;
+       mode |= AD7192_MODE_STA(append);
+
+       ret = ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, mode);
+       if (ret < 0)
+               return ret;
+
+       st->mode = mode;
+
+       return 0;
+}
+
+static int ad7192_disable_all(struct ad_sigma_delta *sd)
+{
+       struct ad7192_state *st = ad_sigma_delta_to_ad7192(sd);
+       u32 conf = st->conf;
+       int ret;
+
+       conf &= ~AD7192_CONF_CHAN_MASK;
+
+       ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf);
+       if (ret < 0)
+               return ret;
+
+       st->conf = conf;
+
+       return 0;
+}
+
 static const struct ad_sigma_delta_info ad7192_sigma_delta_info = {
        .set_channel = ad7192_set_channel,
+       .append_status = ad7192_append_status,
+       .disable_all = ad7192_disable_all,
        .set_mode = ad7192_set_mode,
        .has_registers = true,
        .addr_shift = 3,
        .read_mask = BIT(6),
+       .status_ch_mask = GENMASK(3, 0),
+       .num_slots = 4,
        .irq_flags = IRQF_TRIGGER_FALLING,
 };
 
@@ -457,7 +497,7 @@ static ssize_t ad7192_set(struct device *dev,
        int ret;
        bool val;
 
-       ret = strtobool(buf, &val);
+       ret = kstrtobool(buf, &val);
        if (ret < 0)
                return ret;
 
@@ -783,6 +823,26 @@ static int ad7192_read_avail(struct iio_dev *indio_dev,
        return -EINVAL;
 }
 
+static int ad7192_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask)
+{
+       struct ad7192_state *st = iio_priv(indio_dev);
+       u32 conf = st->conf;
+       int ret;
+       int i;
+
+       conf &= ~AD7192_CONF_CHAN_MASK;
+       for_each_set_bit(i, scan_mask, 8)
+               conf |= AD7192_CONF_CHAN(i);
+
+       ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf);
+       if (ret < 0)
+               return ret;
+
+       st->conf = conf;
+
+       return 0;
+}
+
 static const struct iio_info ad7192_info = {
        .read_raw = ad7192_read_raw,
        .write_raw = ad7192_write_raw,
@@ -790,6 +850,7 @@ static const struct iio_info ad7192_info = {
        .read_avail = ad7192_read_avail,
        .attrs = &ad7192_attribute_group,
        .validate_trigger = ad_sd_validate_trigger,
+       .update_scan_mode = ad7192_update_scan_mode,
 };
 
 static const struct iio_info ad7195_info = {
@@ -799,6 +860,7 @@ static const struct iio_info ad7195_info = {
        .read_avail = ad7192_read_avail,
        .attrs = &ad7195_attribute_group,
        .validate_trigger = ad_sd_validate_trigger,
+       .update_scan_mode = ad7192_update_scan_mode,
 };
 
 #define __AD719x_CHANNEL(_si, _channel1, _channel2, _address, _extend_name, \
index c17d9b5..f20d39f 100644 (file)
@@ -378,6 +378,11 @@ static const char * const ad7266_gpio_labels[] = {
        "ad0", "ad1", "ad2",
 };
 
+static void ad7266_reg_disable(void *reg)
+{
+       regulator_disable(reg);
+}
+
 static int ad7266_probe(struct spi_device *spi)
 {
        struct ad7266_platform_data *pdata = spi->dev.platform_data;
@@ -398,9 +403,13 @@ static int ad7266_probe(struct spi_device *spi)
                if (ret)
                        return ret;
 
+               ret = devm_add_action_or_reset(&spi->dev, ad7266_reg_disable, st->reg);
+               if (ret)
+                       return ret;
+
                ret = regulator_get_voltage(st->reg);
                if (ret < 0)
-                       goto error_disable_reg;
+                       return ret;
 
                st->vref_mv = ret / 1000;
        } else {
@@ -423,7 +432,7 @@ static int ad7266_probe(struct spi_device *spi)
                                                      GPIOD_OUT_LOW);
                                if (IS_ERR(st->gpios[i])) {
                                        ret = PTR_ERR(st->gpios[i]);
-                                       goto error_disable_reg;
+                                       return ret;
                                }
                        }
                }
@@ -433,7 +442,6 @@ static int ad7266_probe(struct spi_device *spi)
                st->mode = AD7266_MODE_DIFF;
        }
 
-       spi_set_drvdata(spi, indio_dev);
        st->spi = spi;
 
        indio_dev->name = spi_get_device_id(spi)->name;
@@ -459,35 +467,12 @@ static int ad7266_probe(struct spi_device *spi)
        spi_message_add_tail(&st->single_xfer[1], &st->single_msg);
        spi_message_add_tail(&st->single_xfer[2], &st->single_msg);
 
-       ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+       ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, &iio_pollfunc_store_time,
                &ad7266_trigger_handler, &iio_triggered_buffer_setup_ops);
        if (ret)
-               goto error_disable_reg;
-
-       ret = iio_device_register(indio_dev);
-       if (ret)
-               goto error_buffer_cleanup;
-
-       return 0;
-
-error_buffer_cleanup:
-       iio_triggered_buffer_cleanup(indio_dev);
-error_disable_reg:
-       if (!IS_ERR(st->reg))
-               regulator_disable(st->reg);
-
-       return ret;
-}
-
-static void ad7266_remove(struct spi_device *spi)
-{
-       struct iio_dev *indio_dev = spi_get_drvdata(spi);
-       struct ad7266_state *st = iio_priv(indio_dev);
+               return ret;
 
-       iio_device_unregister(indio_dev);
-       iio_triggered_buffer_cleanup(indio_dev);
-       if (!IS_ERR(st->reg))
-               regulator_disable(st->reg);
+       return devm_iio_device_register(&spi->dev, indio_dev);
 }
 
 static const struct spi_device_id ad7266_id[] = {
@@ -502,7 +487,6 @@ static struct spi_driver ad7266_driver = {
                .name   = "ad7266",
        },
        .probe          = ad7266_probe,
-       .remove         = ad7266_remove,
        .id_table       = ad7266_id,
 };
 module_spi_driver(ad7266_driver);
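
This ad7266 conversion drops the remove() callback entirely by making every resource device-managed; the regulator in particular is now released through a devm action, as the new ad7266_reg_disable() shows. A minimal standalone sketch of that pattern (the "vref" supply name and the example_* identifiers are illustrative, not taken from the driver):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static void example_reg_disable(void *reg)
{
        regulator_disable(reg);
}

static int example_enable_vref(struct device *dev)
{
        struct regulator *reg;
        int ret;

        reg = devm_regulator_get_optional(dev, "vref");
        if (IS_ERR(reg))
                return PTR_ERR(reg);

        ret = regulator_enable(reg);
        if (ret)
                return ret;

        /* regulator_disable() now runs automatically on probe failure or removal. */
        return devm_add_action_or_reset(dev, example_reg_disable, reg);
}

Once the disable is tied to the device's lifetime, the error labels and the remove() callback that the hunks above delete are no longer needed.
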
index ec9acbf..3bdf3d9 100644 (file)
@@ -488,7 +488,7 @@ static ssize_t ad7280_store_balance_sw(struct iio_dev *indio_dev,
        bool readin;
        int ret;
 
-       ret = strtobool(buf, &readin);
+       ret = kstrtobool(buf, &readin);
        if (ret)
                return ret;
 
index ebcd525..261a9a6 100644 (file)
@@ -6,6 +6,7 @@
  *  Author: Lars-Peter Clausen <lars@metafoo.de>
  */
 
+#include <linux/align.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
@@ -342,15 +343,49 @@ EXPORT_SYMBOL_NS_GPL(ad_sigma_delta_single_conversion, IIO_AD_SIGMA_DELTA);
 static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
 {
        struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+       unsigned int i, slot, samples_buf_size;
        unsigned int channel;
+       uint8_t *samples_buf;
        int ret;
 
-       channel = find_first_bit(indio_dev->active_scan_mask,
-                                indio_dev->masklength);
-       ret = ad_sigma_delta_set_channel(sigma_delta,
-               indio_dev->channels[channel].address);
-       if (ret)
-               return ret;
+       if (sigma_delta->num_slots == 1) {
+               channel = find_first_bit(indio_dev->active_scan_mask,
+                                        indio_dev->masklength);
+               ret = ad_sigma_delta_set_channel(sigma_delta,
+                                                indio_dev->channels[channel].address);
+               if (ret)
+                       return ret;
+               slot = 1;
+       } else {
+               /*
+                * At this point update_scan_mode already enabled the required channels.
+                * For sigma-delta sequencer drivers with multiple slots, an update_scan_mode
+                * implementation is mandatory.
+                */
+               slot = 0;
+               for_each_set_bit(i, indio_dev->active_scan_mask, indio_dev->masklength) {
+                       sigma_delta->slots[slot] = indio_dev->channels[i].address;
+                       slot++;
+               }
+       }
+
+       sigma_delta->active_slots = slot;
+       sigma_delta->current_slot = 0;
+
+       if (sigma_delta->active_slots > 1) {
+               ret = ad_sigma_delta_append_status(sigma_delta, true);
+               if (ret)
+                       return ret;
+       }
+
+       samples_buf_size = ALIGN(slot * indio_dev->channels[0].scan_type.storagebits, 8);
+       samples_buf_size += sizeof(int64_t);
+       samples_buf = devm_krealloc(&sigma_delta->spi->dev, sigma_delta->samples_buf,
+                                   samples_buf_size, GFP_KERNEL);
+       if (!samples_buf)
+               return -ENOMEM;
+
+       sigma_delta->samples_buf = samples_buf;
 
        spi_bus_lock(sigma_delta->spi->master);
        sigma_delta->bus_locked = true;
@@ -386,6 +421,10 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
        sigma_delta->keep_cs_asserted = false;
        ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
 
+       if (sigma_delta->status_appended)
+               ad_sigma_delta_append_status(sigma_delta, false);
+
+       ad_sigma_delta_disable_all(sigma_delta);
        sigma_delta->bus_locked = false;
        return spi_bus_unlock(sigma_delta->spi->master);
 }
@@ -396,6 +435,10 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
        struct iio_dev *indio_dev = pf->indio_dev;
        struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
        uint8_t *data = sigma_delta->rx_buf;
+       unsigned int transfer_size;
+       unsigned int sample_size;
+       unsigned int sample_pos;
+       unsigned int status_pos;
        unsigned int reg_size;
        unsigned int data_reg;
 
@@ -408,21 +451,69 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
        else
                data_reg = AD_SD_REG_DATA;
 
+       /* Status word will be appended to the sample during transfer */
+       if (sigma_delta->status_appended)
+               transfer_size = reg_size + 1;
+       else
+               transfer_size = reg_size;
+
        switch (reg_size) {
        case 4:
        case 2:
        case 1:
-               ad_sd_read_reg_raw(sigma_delta, data_reg, reg_size, &data[0]);
+               status_pos = reg_size;
+               ad_sd_read_reg_raw(sigma_delta, data_reg, transfer_size, &data[0]);
                break;
        case 3:
+               /*
+                * Data array after transfer will look like (if status is appended):
+                * data[] = { [0][sample][sample][sample][status] }
+                * Keeping the first byte 0 shifts the status position by 1 byte to the right.
+                */
+               status_pos = reg_size + 1;
+
                /* We store 24 bit samples in a 32 bit word. Keep the upper
                 * byte set to zero. */
-               ad_sd_read_reg_raw(sigma_delta, data_reg, reg_size, &data[1]);
+               ad_sd_read_reg_raw(sigma_delta, data_reg, transfer_size, &data[1]);
                break;
        }
 
-       iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
+       /*
+        * For devices sampling only one channel at a time,
+        * there is no need for sample number tracking.
+        */
+       if (sigma_delta->active_slots == 1) {
+               iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
+               goto irq_handled;
+       }
+
+       if (sigma_delta->status_appended) {
+               u8 converted_channel;
+
+               converted_channel = data[status_pos] & sigma_delta->info->status_ch_mask;
+               if (converted_channel != sigma_delta->slots[sigma_delta->current_slot]) {
+                       /*
+                        * Desync occurred during continuous sampling of multiple channels.
+                        * Drop this incomplete sample and start from the first channel again.
+                        */
+
+                       sigma_delta->current_slot = 0;
+                       goto irq_handled;
+               }
+       }
+
+       sample_size = indio_dev->channels[0].scan_type.storagebits / 8;
+       sample_pos = sample_size * sigma_delta->current_slot;
+       memcpy(&sigma_delta->samples_buf[sample_pos], data, sample_size);
+       sigma_delta->current_slot++;
 
+       if (sigma_delta->current_slot == sigma_delta->active_slots) {
+               sigma_delta->current_slot = 0;
+               iio_push_to_buffers_with_timestamp(indio_dev, sigma_delta->samples_buf,
+                                                  pf->timestamp);
+       }
+
+irq_handled:
        iio_trigger_notify_done(indio_dev->trig);
        sigma_delta->irq_dis = false;
        enable_irq(sigma_delta->spi->irq);
@@ -430,10 +521,17 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
        return IRQ_HANDLED;
 }
 
+static bool ad_sd_validate_scan_mask(struct iio_dev *indio_dev, const unsigned long *mask)
+{
+       struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+
+       return bitmap_weight(mask, indio_dev->masklength) <= sigma_delta->num_slots;
+}
+
 static const struct iio_buffer_setup_ops ad_sd_buffer_setup_ops = {
        .postenable = &ad_sd_buffer_postenable,
        .postdisable = &ad_sd_buffer_postdisable,
-       .validate_scan_mask = &iio_validate_scan_mask_onehot,
+       .validate_scan_mask = &ad_sd_validate_scan_mask,
 };
 
 static irqreturn_t ad_sd_data_rdy_trig_poll(int irq, void *private)
@@ -513,8 +611,14 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de
  */
 int devm_ad_sd_setup_buffer_and_trigger(struct device *dev, struct iio_dev *indio_dev)
 {
+       struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
        int ret;
 
+       sigma_delta->slots = devm_kcalloc(dev, sigma_delta->num_slots,
+                                         sizeof(*sigma_delta->slots), GFP_KERNEL);
+       if (!sigma_delta->slots)
+               return -ENOMEM;
+
        ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
                                              &iio_pollfunc_store_time,
                                              &ad_sd_trigger_handler,
@@ -541,6 +645,25 @@ int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
 {
        sigma_delta->spi = spi;
        sigma_delta->info = info;
+
+       /* If the field is unset in ad_sigma_delta_info, assume there can only be 1 slot. */
+       if (!info->num_slots)
+               sigma_delta->num_slots = 1;
+       else
+               sigma_delta->num_slots = info->num_slots;
+
+       if (sigma_delta->num_slots > 1) {
+               if (!indio_dev->info->update_scan_mode) {
+                       dev_err(&spi->dev, "iio_dev lacks update_scan_mode().\n");
+                       return -EINVAL;
+               }
+
+               if (!info->disable_all) {
+                       dev_err(&spi->dev, "ad_sigma_delta_info lacks disable_all().\n");
+                       return -EINVAL;
+               }
+       }
+
        iio_device_set_drvdata(indio_dev, sigma_delta);
 
        return 0;
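
With these core changes, a sigma-delta driver opts into the channel sequencer by declaring how many slots the device can sequence, letting the core append the status byte (so the converted channel can be identified and desyncs dropped), and providing a disable_all() callback. A condensed sketch of that opt-in, modelled on the ad7124/ad7192 hunks earlier in this series (the example_* names are placeholders, not an existing driver):

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/iio/adc/ad_sigma_delta.h>

static int example_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
{
        return 0;       /* select @channel in the device's channel registers */
}

static int example_append_status(struct ad_sigma_delta *sd, bool append)
{
        return 0;       /* toggle the "transmit status with each sample" mode bit */
}

static int example_disable_all(struct ad_sigma_delta *sd)
{
        return 0;       /* disable every channel; update_scan_mode() re-enables the scanned ones */
}

static const struct ad_sigma_delta_info example_sd_info = {
        .set_channel    = example_set_channel,
        .append_status  = example_append_status,        /* used when more than one channel is sampled */
        .disable_all    = example_disable_all,          /* checked by ad_sd_init() */
        .status_ch_mask = GENMASK(3, 0),        /* converted-channel bits in the status byte */
        .num_slots      = 4,                    /* sequencer depth */
        .irq_flags      = IRQF_TRIGGER_FALLING,
};

The driver must also wire an update_scan_mode() callback into its iio_info, as ad7124 and ad7192 do above; ad_sd_init() rejects num_slots > 1 without it.
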
index 854b1f8..b764823 100644 (file)
@@ -1117,7 +1117,7 @@ static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
                return at91_adc_configure_touch(st, true);
 
        /* if we are not in triggered mode, we cannot enable the buffer. */
-       if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
+       if (!(iio_device_get_current_mode(indio_dev) & INDIO_ALL_TRIGGERED_MODES))
                return -EINVAL;
 
        /* we continue with the triggered buffer */
@@ -1159,7 +1159,7 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
                return at91_adc_configure_touch(st, false);
 
        /* if we are not in triggered mode, nothing to do here */
-       if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
+       if (!(iio_device_get_current_mode(indio_dev) & INDIO_ALL_TRIGGERED_MODES))
                return -EINVAL;
 
        /*
index 8d902a3..abad168 100644 (file)
@@ -550,7 +550,7 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev,
        bool val;
        int ret;
 
-       ret = strtobool(buf, &val);
+       ret = kstrtobool(buf, &val);
        if (ret)
                return ret;
 
@@ -1027,7 +1027,6 @@ static int ina2xx_probe(struct i2c_client *client,
        indio_dev->name = id->name;
 
        ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
-                                         INDIO_BUFFER_SOFTWARE,
                                          &ina2xx_setup_ops);
        if (ret)
                return ret;
index 61e80bf..fd00034 100644 (file)
@@ -376,7 +376,8 @@ static int palmas_gpadc_get_calibrated_code(struct palmas_gpadc *adc,
                                        adc->adc_info[adc_chan].gain_error;
 
        if (val < 0) {
-               dev_err(adc->dev, "Mismatch with calibration\n");
+               if (val < -10)
+                       dev_err(adc->dev, "Mismatch with calibration var = %d\n", val);
                return 0;
        }
 
index 00098ca..e9ff2d6 100644 (file)
@@ -9,12 +9,16 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 
 /* PMIC global registers definition */
-#define SC27XX_MODULE_EN               0xc08
+#define SC2730_MODULE_EN               0x1808
+#define SC2731_MODULE_EN               0xc08
 #define SC27XX_MODULE_ADC_EN           BIT(5)
-#define SC27XX_ARM_CLK_EN              0xc10
+#define SC2721_ARM_CLK_EN              0xc0c
+#define SC2730_ARM_CLK_EN              0x180c
+#define SC2731_ARM_CLK_EN              0xc10
 #define SC27XX_CLK_ADC_EN              BIT(5)
 #define SC27XX_CLK_ADC_CLK_EN          BIT(6)
 
 
 /* Bits and mask definition for SC27XX_ADC_CH_CFG register */
 #define SC27XX_ADC_CHN_ID_MASK         GENMASK(4, 0)
-#define SC27XX_ADC_SCALE_MASK          GENMASK(10, 8)
-#define SC27XX_ADC_SCALE_SHIFT         8
+#define SC27XX_ADC_SCALE_MASK          GENMASK(10, 9)
+#define SC2721_ADC_SCALE_MASK          BIT(5)
+#define SC27XX_ADC_SCALE_SHIFT         9
+#define SC2721_ADC_SCALE_SHIFT         5
 
 /* Bits definitions for SC27XX_ADC_INT_EN registers */
 #define SC27XX_ADC_IRQ_EN              BIT(0)
 #define SC27XX_RATIO_NUMERATOR_OFFSET  16
 #define SC27XX_RATIO_DENOMINATOR_MASK  GENMASK(15, 0)
 
+/* ADC specific channel reference voltage 3.5V */
+#define SC27XX_ADC_REFVOL_VDD35                3500000
+
+/* ADC default channel reference voltage is 2.8V */
+#define SC27XX_ADC_REFVOL_VDD28                2800000
+
 struct sc27xx_adc_data {
        struct device *dev;
+       struct regulator *volref;
        struct regmap *regmap;
        /*
         * One hardware spinlock to synchronize between the multiple
@@ -78,6 +91,24 @@ struct sc27xx_adc_data {
        int channel_scale[SC27XX_ADC_CHANNEL_MAX];
        u32 base;
        int irq;
+       const struct sc27xx_adc_variant_data *var_data;
+};
+
+/*
+ * Since different PMICs of the SC27xx series can have different
+ * register addresses and ratios, save the ratio config and base
+ * address in the device data structure.
+ */
+struct sc27xx_adc_variant_data {
+       u32 module_en;
+       u32 clk_en;
+       u32 scale_shift;
+       u32 scale_mask;
+       const struct sc27xx_adc_linear_graph *bscale_cal;
+       const struct sc27xx_adc_linear_graph *sscale_cal;
+       void (*init_scale)(struct sc27xx_adc_data *data);
+       int (*get_ratio)(int channel, int scale);
+       bool set_volref;
 };
 
 struct sc27xx_adc_linear_graph {
@@ -103,6 +134,16 @@ static struct sc27xx_adc_linear_graph small_scale_graph = {
        100, 341,
 };
 
+static const struct sc27xx_adc_linear_graph sc2731_big_scale_graph_calib = {
+       4200, 850,
+       3600, 728,
+};
+
+static const struct sc27xx_adc_linear_graph sc2731_small_scale_graph_calib = {
+       1000, 838,
+       100, 84,
+};
+
 static const struct sc27xx_adc_linear_graph big_scale_graph_calib = {
        4200, 856,
        3600, 733,
@@ -118,49 +159,225 @@ static int sc27xx_adc_get_calib_data(u32 calib_data, int calib_adc)
        return ((calib_data & 0xff) + calib_adc - 128) * 4;
 }
 
+/* get the adc nvmem cell calibration data */
+static int adc_nvmem_cell_calib_data(struct sc27xx_adc_data *data, const char *cell_name)
+{
+       struct nvmem_cell *cell;
+       void *buf;
+       u32 origin_calib_data = 0;
+       size_t len;
+
+       if (!data)
+               return -EINVAL;
+
+       cell = nvmem_cell_get(data->dev, cell_name);
+       if (IS_ERR(cell))
+               return PTR_ERR(cell);
+
+       buf = nvmem_cell_read(cell, &len);
+       if (IS_ERR(buf)) {
+               nvmem_cell_put(cell);
+               return PTR_ERR(buf);
+       }
+
+       memcpy(&origin_calib_data, buf, min(len, sizeof(u32)));
+
+       kfree(buf);
+       nvmem_cell_put(cell);
+       return origin_calib_data;
+}
+
 static int sc27xx_adc_scale_calibration(struct sc27xx_adc_data *data,
                                        bool big_scale)
 {
        const struct sc27xx_adc_linear_graph *calib_graph;
        struct sc27xx_adc_linear_graph *graph;
-       struct nvmem_cell *cell;
        const char *cell_name;
        u32 calib_data = 0;
-       void *buf;
-       size_t len;
 
        if (big_scale) {
-               calib_graph = &big_scale_graph_calib;
+               calib_graph = data->var_data->bscale_cal;
                graph = &big_scale_graph;
                cell_name = "big_scale_calib";
        } else {
-               calib_graph = &small_scale_graph_calib;
+               calib_graph = data->var_data->sscale_cal;
                graph = &small_scale_graph;
                cell_name = "small_scale_calib";
        }
 
-       cell = nvmem_cell_get(data->dev, cell_name);
-       if (IS_ERR(cell))
-               return PTR_ERR(cell);
-
-       buf = nvmem_cell_read(cell, &len);
-       nvmem_cell_put(cell);
-
-       if (IS_ERR(buf))
-               return PTR_ERR(buf);
-
-       memcpy(&calib_data, buf, min(len, sizeof(u32)));
+       calib_data = adc_nvmem_cell_calib_data(data, cell_name);
 
        /* Only need to calibrate the adc values in the linear graph. */
        graph->adc0 = sc27xx_adc_get_calib_data(calib_data, calib_graph->adc0);
        graph->adc1 = sc27xx_adc_get_calib_data(calib_data >> 8,
                                                calib_graph->adc1);
 
-       kfree(buf);
        return 0;
 }
 
-static int sc27xx_adc_get_ratio(int channel, int scale)
+static int sc2720_adc_get_ratio(int channel, int scale)
+{
+       switch (channel) {
+       case 14:
+               switch (scale) {
+               case 0:
+                       return SC27XX_VOLT_RATIO(68, 900);
+               case 1:
+                       return SC27XX_VOLT_RATIO(68, 1760);
+               case 2:
+                       return SC27XX_VOLT_RATIO(68, 2327);
+               case 3:
+                       return SC27XX_VOLT_RATIO(68, 3654);
+               default:
+                       return SC27XX_VOLT_RATIO(1, 1);
+               }
+       case 16:
+               switch (scale) {
+               case 0:
+                       return SC27XX_VOLT_RATIO(48, 100);
+               case 1:
+                       return SC27XX_VOLT_RATIO(480, 1955);
+               case 2:
+                       return SC27XX_VOLT_RATIO(480, 2586);
+               case 3:
+                       return SC27XX_VOLT_RATIO(48, 406);
+               default:
+                       return SC27XX_VOLT_RATIO(1, 1);
+               }
+       case 21:
+       case 22:
+       case 23:
+               switch (scale) {
+               case 0:
+                       return SC27XX_VOLT_RATIO(3, 8);
+               case 1:
+                       return SC27XX_VOLT_RATIO(375, 1955);
+               case 2:
+                       return SC27XX_VOLT_RATIO(375, 2586);
+               case 3:
+                       return SC27XX_VOLT_RATIO(300, 3248);
+               default:
+                       return SC27XX_VOLT_RATIO(1, 1);
+               }
+       default:
+               switch (scale) {
+               case 0:
+                       return SC27XX_VOLT_RATIO(1, 1);
+               case 1:
+                       return SC27XX_VOLT_RATIO(1000, 1955);
+               case 2:
+                       return SC27XX_VOLT_RATIO(1000, 2586);
+               case 3:
+                       return SC27XX_VOLT_RATIO(100, 406);
+               default:
+                       return SC27XX_VOLT_RATIO(1, 1);
+               }
+       }
+       return SC27XX_VOLT_RATIO(1, 1);
+}
+
+static int sc2721_adc_get_ratio(int channel, int scale)
+{
+       switch (channel) {
+       case 1:
+       case 2:
+       case 3:
+       case 4:
+               return scale ? SC27XX_VOLT_RATIO(400, 1025) :
+                       SC27XX_VOLT_RATIO(1, 1);
+       case 5:
+               return SC27XX_VOLT_RATIO(7, 29);
+       case 7:
+       case 9:
+               return scale ? SC27XX_VOLT_RATIO(100, 125) :
+                       SC27XX_VOLT_RATIO(1, 1);
+       case 14:
+               return SC27XX_VOLT_RATIO(68, 900);
+       case 16:
+               return SC27XX_VOLT_RATIO(48, 100);
+       case 19:
+               return SC27XX_VOLT_RATIO(1, 3);
+       default:
+               return SC27XX_VOLT_RATIO(1, 1);
+       }
+       return SC27XX_VOLT_RATIO(1, 1);
+}
+
+static int sc2730_adc_get_ratio(int channel, int scale)
+{
+       switch (channel) {
+       case 14:
+               switch (scale) {
+               case 0:
+                       return SC27XX_VOLT_RATIO(68, 900);
+               case 1:
+                       return SC27XX_VOLT_RATIO(68, 1760);
+               case 2:
+                       return SC27XX_VOLT_RATIO(68, 2327);
+               case 3:
+                       return SC27XX_VOLT_RATIO(68, 3654);
+               default:
+                       return SC27XX_VOLT_RATIO(1, 1);
+               }
+       case 15:
+               switch (scale) {
+               case 0:
+                       return SC27XX_VOLT_RATIO(1, 3);
+               case 1:
+                       return SC27XX_VOLT_RATIO(1000, 5865);
+               case 2:
+                       return SC27XX_VOLT_RATIO(500, 3879);
+               case 3:
+                       return SC27XX_VOLT_RATIO(500, 6090);
+               default:
+                       return SC27XX_VOLT_RATIO(1, 1);
+               }
+       case 16:
+               switch (scale) {
+               case 0:
+                       return SC27XX_VOLT_RATIO(48, 100);
+               case 1:
+                       return SC27XX_VOLT_RATIO(480, 1955);
+               case 2:
+                       return SC27XX_VOLT_RATIO(480, 2586);
+               case 3:
+                       return SC27XX_VOLT_RATIO(48, 406);
+               default:
+                       return SC27XX_VOLT_RATIO(1, 1);
+               }
+       case 21:
+       case 22:
+       case 23:
+               switch (scale) {
+               case 0:
+                       return SC27XX_VOLT_RATIO(3, 8);
+               case 1:
+                       return SC27XX_VOLT_RATIO(375, 1955);
+               case 2:
+                       return SC27XX_VOLT_RATIO(375, 2586);
+               case 3:
+                       return SC27XX_VOLT_RATIO(300, 3248);
+               default:
+                       return SC27XX_VOLT_RATIO(1, 1);
+               }
+       default:
+               switch (scale) {
+               case 0:
+                       return SC27XX_VOLT_RATIO(1, 1);
+               case 1:
+                       return SC27XX_VOLT_RATIO(1000, 1955);
+               case 2:
+                       return SC27XX_VOLT_RATIO(1000, 2586);
+               case 3:
+                       return SC27XX_VOLT_RATIO(1000, 4060);
+               default:
+                       return SC27XX_VOLT_RATIO(1, 1);
+               }
+       }
+       return SC27XX_VOLT_RATIO(1, 1);
+}
+
+static int sc2731_adc_get_ratio(int channel, int scale)
 {
        switch (channel) {
        case 1:
@@ -185,10 +402,87 @@ static int sc27xx_adc_get_ratio(int channel, int scale)
        return SC27XX_VOLT_RATIO(1, 1);
 }
 
+/*
+ * According to the datasheet, set specific scale values on some channels.
+ */
+static void sc2720_adc_scale_init(struct sc27xx_adc_data *data)
+{
+       int i;
+
+       for (i = 0; i < SC27XX_ADC_CHANNEL_MAX; i++) {
+               switch (i) {
+               case 5:
+                       data->channel_scale[i] = 3;
+                       break;
+               case 7:
+               case 9:
+                       data->channel_scale[i] = 2;
+                       break;
+               case 13:
+                       data->channel_scale[i] = 1;
+                       break;
+               case 19:
+               case 30:
+               case 31:
+                       data->channel_scale[i] = 3;
+                       break;
+               default:
+                       data->channel_scale[i] = 0;
+                       break;
+               }
+       }
+}
+
+static void sc2730_adc_scale_init(struct sc27xx_adc_data *data)
+{
+       int i;
+
+       for (i = 0; i < SC27XX_ADC_CHANNEL_MAX; i++) {
+               switch (i) {
+               case 5:
+               case 10:
+               case 19:
+               case 30:
+               case 31:
+                       data->channel_scale[i] = 3;
+                       break;
+               case 7:
+               case 9:
+                       data->channel_scale[i] = 2;
+                       break;
+               case 13:
+                       data->channel_scale[i] = 1;
+                       break;
+               default:
+                       data->channel_scale[i] = 0;
+                       break;
+               }
+       }
+}
+
+static void sc2731_adc_scale_init(struct sc27xx_adc_data *data)
+{
+       int i;
+       /*
+        * In the current software design, SC2731 supports 2 scales:
+        * channel 5 uses the big scale, the others use the small one.
+        */
+       for (i = 0; i < SC27XX_ADC_CHANNEL_MAX; i++) {
+               switch (i) {
+               case 5:
+                       data->channel_scale[i] = 1;
+                       break;
+               default:
+                       data->channel_scale[i] = 0;
+                       break;
+               }
+       }
+}
+
 static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
                           int scale, int *val)
 {
-       int ret;
+       int ret, ret_volref;
        u32 tmp, value, status;
 
        ret = hwspin_lock_timeout_raw(data->hwlock, SC27XX_ADC_HWLOCK_TIMEOUT);
@@ -197,10 +491,25 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
                return ret;
        }
 
+       /*
+        * According to the sc2721 chip data sheet, the reference voltage of
+        * According to the sc2721 chip data sheet, the reference voltage of
+        * ADC channels 30 and 31 needs to be raised from the default
+        * 2.8V to 3.5V.
+       if ((data->var_data->set_volref) && (channel == 30 || channel == 31)) {
+               ret = regulator_set_voltage(data->volref,
+                                       SC27XX_ADC_REFVOL_VDD35,
+                                       SC27XX_ADC_REFVOL_VDD35);
+               if (ret) {
+                       dev_err(data->dev, "failed to set the volref 3.5v\n");
+                       goto unlock_adc;
+               }
+       }
+
        ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
                                 SC27XX_ADC_EN, SC27XX_ADC_EN);
        if (ret)
-               goto unlock_adc;
+               goto regulator_restore;
 
        ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_INT_CLR,
                                 SC27XX_ADC_IRQ_CLR, SC27XX_ADC_IRQ_CLR);
@@ -208,10 +517,11 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
                goto disable_adc;
 
        /* Configure the channel id and scale */
-       tmp = (scale << SC27XX_ADC_SCALE_SHIFT) & SC27XX_ADC_SCALE_MASK;
+       tmp = (scale << data->var_data->scale_shift) & data->var_data->scale_mask;
        tmp |= channel & SC27XX_ADC_CHN_ID_MASK;
        ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CH_CFG,
-                                SC27XX_ADC_CHN_ID_MASK | SC27XX_ADC_SCALE_MASK,
+                                SC27XX_ADC_CHN_ID_MASK |
+                                data->var_data->scale_mask,
                                 tmp);
        if (ret)
                goto disable_adc;
@@ -249,6 +559,17 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
 disable_adc:
        regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
                           SC27XX_ADC_EN, 0);
+regulator_restore:
+       if ((data->var_data->set_volref) && (channel == 30 || channel == 31)) {
+               ret_volref = regulator_set_voltage(data->volref,
+                                           SC27XX_ADC_REFVOL_VDD28,
+                                           SC27XX_ADC_REFVOL_VDD28);
+               if (ret_volref) {
+                       dev_err(data->dev, "failed to set the volref 2.8v, ret_volref = 0x%x\n",
+                                        ret_volref);
+                       ret = ret ?: ret_volref;
+               }
+       }
 unlock_adc:
        hwspin_unlock_raw(data->hwlock);
 
@@ -262,13 +583,14 @@ static void sc27xx_adc_volt_ratio(struct sc27xx_adc_data *data,
                                  int channel, int scale,
                                  u32 *div_numerator, u32 *div_denominator)
 {
-       u32 ratio = sc27xx_adc_get_ratio(channel, scale);
+       u32 ratio;
 
+       ratio = data->var_data->get_ratio(channel, scale);
        *div_numerator = ratio >> SC27XX_RATIO_NUMERATOR_OFFSET;
        *div_denominator = ratio & SC27XX_RATIO_DENOMINATOR_MASK;
 }
 
-static int sc27xx_adc_to_volt(struct sc27xx_adc_linear_graph *graph,
+static int adc_to_volt(struct sc27xx_adc_linear_graph *graph,
                              int raw_adc)
 {
        int tmp;
@@ -277,6 +599,16 @@ static int sc27xx_adc_to_volt(struct sc27xx_adc_linear_graph *graph,
        tmp /= (graph->adc0 - graph->adc1);
        tmp += graph->volt1;
 
+       return tmp;
+}
+
+static int sc27xx_adc_to_volt(struct sc27xx_adc_linear_graph *graph,
+                             int raw_adc)
+{
+       int tmp;
+
+       tmp = adc_to_volt(graph, raw_adc);
+
        return tmp < 0 ? 0 : tmp;
 }
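
As a worked illustration of the two-point linear graph conversion above
(the calibration numbers here are hypothetical, not the driver's data): with
(adc0, volt0) = (4096, 3600 mV) and (adc1, volt1) = (2048, 1800 mV),

        volt = volt1 + (raw_adc - adc1) * (volt0 - volt1) / (adc0 - adc1)
             = 1800 + (3072 - 2048) * (3600 - 1800) / (4096 - 2048)
             = 2700 mV for raw_adc = 3072,

and sc27xx_adc_to_volt() additionally clamps negative results to 0.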
 
@@ -432,13 +764,13 @@ static int sc27xx_adc_enable(struct sc27xx_adc_data *data)
 {
        int ret;
 
-       ret = regmap_update_bits(data->regmap, SC27XX_MODULE_EN,
+       ret = regmap_update_bits(data->regmap, data->var_data->module_en,
                                 SC27XX_MODULE_ADC_EN, SC27XX_MODULE_ADC_EN);
        if (ret)
                return ret;
 
        /* Enable ADC work clock and controller clock */
-       ret = regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
+       ret = regmap_update_bits(data->regmap, data->var_data->clk_en,
                                 SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN,
                                 SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN);
        if (ret)
@@ -456,10 +788,10 @@ static int sc27xx_adc_enable(struct sc27xx_adc_data *data)
        return 0;
 
 disable_clk:
-       regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
+       regmap_update_bits(data->regmap, data->var_data->clk_en,
                           SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, 0);
 disable_adc:
-       regmap_update_bits(data->regmap, SC27XX_MODULE_EN,
+       regmap_update_bits(data->regmap, data->var_data->module_en,
                           SC27XX_MODULE_ADC_EN, 0);
 
        return ret;
@@ -470,21 +802,76 @@ static void sc27xx_adc_disable(void *_data)
        struct sc27xx_adc_data *data = _data;
 
        /* Disable ADC work clock and controller clock */
-       regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
+       regmap_update_bits(data->regmap, data->var_data->clk_en,
                           SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, 0);
 
-       regmap_update_bits(data->regmap, SC27XX_MODULE_EN,
+       regmap_update_bits(data->regmap, data->var_data->module_en,
                           SC27XX_MODULE_ADC_EN, 0);
 }
 
+static const struct sc27xx_adc_variant_data sc2731_data = {
+       .module_en = SC2731_MODULE_EN,
+       .clk_en = SC2731_ARM_CLK_EN,
+       .scale_shift = SC27XX_ADC_SCALE_SHIFT,
+       .scale_mask = SC27XX_ADC_SCALE_MASK,
+       .bscale_cal = &sc2731_big_scale_graph_calib,
+       .sscale_cal = &sc2731_small_scale_graph_calib,
+       .init_scale = sc2731_adc_scale_init,
+       .get_ratio = sc2731_adc_get_ratio,
+       .set_volref = false,
+};
+
+static const struct sc27xx_adc_variant_data sc2730_data = {
+       .module_en = SC2730_MODULE_EN,
+       .clk_en = SC2730_ARM_CLK_EN,
+       .scale_shift = SC27XX_ADC_SCALE_SHIFT,
+       .scale_mask = SC27XX_ADC_SCALE_MASK,
+       .bscale_cal = &big_scale_graph_calib,
+       .sscale_cal = &small_scale_graph_calib,
+       .init_scale = sc2730_adc_scale_init,
+       .get_ratio = sc2730_adc_get_ratio,
+       .set_volref = false,
+};
+
+static const struct sc27xx_adc_variant_data sc2721_data = {
+       .module_en = SC2731_MODULE_EN,
+       .clk_en = SC2721_ARM_CLK_EN,
+       .scale_shift = SC2721_ADC_SCALE_SHIFT,
+       .scale_mask = SC2721_ADC_SCALE_MASK,
+       .bscale_cal = &sc2731_big_scale_graph_calib,
+       .sscale_cal = &sc2731_small_scale_graph_calib,
+       .init_scale = sc2731_adc_scale_init,
+       .get_ratio = sc2721_adc_get_ratio,
+       .set_volref = true,
+};
+
+static const struct sc27xx_adc_variant_data sc2720_data = {
+       .module_en = SC2731_MODULE_EN,
+       .clk_en = SC2721_ARM_CLK_EN,
+       .scale_shift = SC27XX_ADC_SCALE_SHIFT,
+       .scale_mask = SC27XX_ADC_SCALE_MASK,
+       .bscale_cal = &big_scale_graph_calib,
+       .sscale_cal = &small_scale_graph_calib,
+       .init_scale = sc2720_adc_scale_init,
+       .get_ratio = sc2720_adc_get_ratio,
+       .set_volref = false,
+};
+
 static int sc27xx_adc_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        struct sc27xx_adc_data *sc27xx_data;
+       const struct sc27xx_adc_variant_data *pdata;
        struct iio_dev *indio_dev;
        int ret;
 
+       pdata = of_device_get_match_data(dev);
+       if (!pdata) {
+               dev_err(dev, "No matching driver data found\n");
+               return -EINVAL;
+       }
+
        indio_dev = devm_iio_device_alloc(dev, sizeof(*sc27xx_data));
        if (!indio_dev)
                return -ENOMEM;
@@ -520,6 +907,16 @@ static int sc27xx_adc_probe(struct platform_device *pdev)
        }
 
        sc27xx_data->dev = dev;
+       if (pdata->set_volref) {
+               sc27xx_data->volref = devm_regulator_get(dev, "vref");
+               if (IS_ERR(sc27xx_data->volref)) {
+                       ret = PTR_ERR(sc27xx_data->volref);
+                       return dev_err_probe(dev, ret, "failed to get ADC volref\n");
+               }
+       }
+
+       sc27xx_data->var_data = pdata;
+       sc27xx_data->var_data->init_scale(sc27xx_data);
 
        ret = sc27xx_adc_enable(sc27xx_data);
        if (ret) {
@@ -546,7 +943,10 @@ static int sc27xx_adc_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id sc27xx_adc_of_match[] = {
-       { .compatible = "sprd,sc2731-adc", },
+       { .compatible = "sprd,sc2731-adc", .data = &sc2731_data},
+       { .compatible = "sprd,sc2730-adc", .data = &sc2730_data},
+       { .compatible = "sprd,sc2721-adc", .data = &sc2721_data},
+       { .compatible = "sprd,sc2720-adc", .data = &sc2720_data},
        { }
 };
 MODULE_DEVICE_TABLE(of, sc27xx_adc_of_match);
index 9704cf0..6d21ea8 100644 (file)
@@ -466,8 +466,7 @@ static int stm32_dfsdm_channels_configure(struct iio_dev *indio_dev,
         * In continuous mode, use fast mode configuration,
         * if it provides a better resolution.
         */
-       if (adc->nconv == 1 && !trig &&
-           (indio_dev->currentmode & INDIO_BUFFER_SOFTWARE)) {
+       if (adc->nconv == 1 && !trig && iio_buffer_enabled(indio_dev)) {
                if (fl->flo[1].res >= fl->flo[0].res) {
                        fl->fast = 1;
                        flo = &fl->flo[1];
@@ -562,7 +561,7 @@ static int stm32_dfsdm_filter_configure(struct iio_dev *indio_dev,
                cr1 = DFSDM_CR1_RCH(chan->channel);
 
                /* Continuous conversions triggered by SPI clk in buffer mode */
-               if (indio_dev->currentmode & INDIO_BUFFER_SOFTWARE)
+               if (iio_buffer_enabled(indio_dev))
                        cr1 |= DFSDM_CR1_RCONT(1);
 
                cr1 |= DFSDM_CR1_RSYNC(fl->sync_mode);
index d2d4053..000e5cf 100644 (file)
@@ -61,7 +61,7 @@ struct stmpe_adc {
 static int stmpe_read_voltage(struct stmpe_adc *info,
                struct iio_chan_spec const *chan, int *val)
 {
-       long ret;
+       unsigned long ret;
 
        mutex_lock(&info->lock);
 
@@ -79,7 +79,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info,
 
        ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
 
-       if (ret <= 0) {
+       if (ret == 0) {
                stmpe_reg_write(info->stmpe, STMPE_REG_ADC_INT_STA,
                                STMPE_ADC_CH(info->channel));
                mutex_unlock(&info->lock);
@@ -96,7 +96,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info,
 static int stmpe_read_temp(struct stmpe_adc *info,
                struct iio_chan_spec const *chan, int *val)
 {
-       long ret;
+       unsigned long ret;
 
        mutex_lock(&info->lock);
 
@@ -114,7 +114,7 @@ static int stmpe_read_temp(struct stmpe_adc *info,
 
        ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
 
-       if (ret <= 0) {
+       if (ret == 0) {
                mutex_unlock(&info->lock);
                return -ETIMEDOUT;
        }
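
The two hunks above follow from the return type of wait_for_completion_timeout():
it returns an unsigned long that is 0 on timeout and the remaining jiffies
otherwise, so a negative value can never be observed. A minimal sketch of the
resulting idiom (the completion and timeout values are placeholders):

        unsigned long time_left;

        time_left = wait_for_completion_timeout(&done, msecs_to_jiffies(100));
        if (!time_left)
                return -ETIMEDOUT;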
@@ -345,21 +345,22 @@ static int __maybe_unused stmpe_adc_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(stmpe_adc_pm_ops, NULL, stmpe_adc_resume);
 
+static const struct of_device_id stmpe_adc_ids[] = {
+       { .compatible = "st,stmpe-adc", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, stmpe_adc_ids);
+
 static struct platform_driver stmpe_adc_driver = {
        .probe          = stmpe_adc_probe,
        .driver         = {
                .name   = "stmpe-adc",
                .pm     = &stmpe_adc_pm_ops,
+               .of_match_table = stmpe_adc_ids,
        },
 };
 module_platform_driver(stmpe_adc_driver);
 
-static const struct of_device_id stmpe_adc_ids[] = {
-       { .compatible = "st,stmpe-adc", },
-       { },
-};
-MODULE_DEVICE_TABLE(of, stmpe_adc_ids);
-
 MODULE_AUTHOR("Stefan Agner <stefan.agner@toradex.com>");
 MODULE_DESCRIPTION("STMPEXXX ADC driver");
 MODULE_LICENSE("GPL v2");
index 068efbc..5544da8 100644 (file)
 #define ADS1015_DEFAULT_DATA_RATE      4
 #define ADS1015_DEFAULT_CHAN           0
 
-enum chip_ids {
-       ADSXXXX = 0,
-       ADS1015,
-       ADS1115,
+struct ads1015_chip_data {
+       struct iio_chan_spec const      *channels;
+       int                             num_channels;
+       const struct iio_info           *info;
+       const int                       *data_rate;
+       const int                       data_rate_len;
+       const int                       *scale;
+       const int                       scale_len;
+       bool                            has_comparator;
 };
 
 enum ads1015_channels {
@@ -94,11 +99,11 @@ enum ads1015_channels {
        ADS1015_TIMESTAMP,
 };
 
-static const unsigned int ads1015_data_rate[] = {
+static const int ads1015_data_rate[] = {
        128, 250, 490, 920, 1600, 2400, 3300, 3300
 };
 
-static const unsigned int ads1115_data_rate[] = {
+static const int ads1115_data_rate[] = {
        8, 16, 32, 64, 128, 250, 475, 860
 };
 
@@ -106,10 +111,28 @@ static const unsigned int ads1115_data_rate[] = {
  * Translation from PGA bits to full-scale positive and negative input voltage
  * range in mV
  */
-static int ads1015_fullscale_range[] = {
+static const int ads1015_fullscale_range[] = {
        6144, 4096, 2048, 1024, 512, 256, 256, 256
 };
 
+static const int ads1015_scale[] = {   /* 12bit ADC */
+       256, 11,
+       512, 11,
+       1024, 11,
+       2048, 11,
+       4096, 11,
+       6144, 11
+};
+
+static const int ads1115_scale[] = {   /* 16bit ADC */
+       256, 15,
+       512, 15,
+       1024, 15,
+       2048, 15,
+       4096, 15,
+       6144, 15
+};
+
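
The scale tables above are (value, exponent) pairs reported as
IIO_VAL_FRACTIONAL_LOG2 by the new read_avail() callback later in this patch,
i.e. each available scale is value / 2^exponent. For example, 2048 / 2^11 = 1
for the 12-bit parts and 256 / 2^15 = 0.0078125 for the 16-bit ADS1115, which
matches the "3 2 1 0.5 0.25 0.125" and "0.1875 ... 0.007813" strings of the
sysfs attributes removed further down.
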
 /*
  * Translation from COMP_QUE field value to the number of successive readings
  * that must exceed the threshold values before an interrupt is generated
@@ -134,71 +157,53 @@ static const struct iio_event_spec ads1015_events[] = {
        },
 };
 
-#define ADS1015_V_CHAN(_chan, _addr) {                         \
-       .type = IIO_VOLTAGE,                                    \
-       .indexed = 1,                                           \
-       .address = _addr,                                       \
-       .channel = _chan,                                       \
-       .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |          \
-                               BIT(IIO_CHAN_INFO_SCALE) |      \
-                               BIT(IIO_CHAN_INFO_SAMP_FREQ),   \
-       .scan_index = _addr,                                    \
-       .scan_type = {                                          \
-               .sign = 's',                                    \
-               .realbits = 12,                                 \
-               .storagebits = 16,                              \
-               .shift = 4,                                     \
-               .endianness = IIO_CPU,                          \
-       },                                                      \
-       .event_spec = ads1015_events,                           \
-       .num_event_specs = ARRAY_SIZE(ads1015_events),          \
-       .datasheet_name = "AIN"#_chan,                          \
-}
-
-#define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr) {            \
+/*
+ * Compile-time check whether _fitbits can accommodate up to _testbits
+ * bits. Returns _fitbits on success, fails to compile otherwise.
+ *
+ * The test works such that it multiplies constant _fitbits by constant
+ * double-negation of size of a non-empty structure, i.e. it multiplies
+ * constant _fitbits by constant 1 in each successful compilation case.
+ * The non-empty structure may contain C11 _Static_assert(), make use of
+ * this and place the kernel variant of static assert in there, so that
+ * it performs the compile-time check for _testbits <= _fitbits. Note
+ * that it is not possible to directly use static_assert in compound
+ * statements, hence this convoluted construct.
+ */
+#define FIT_CHECK(_testbits, _fitbits)                                 \
+       (                                                               \
+               (_fitbits) *                                            \
+               !!sizeof(struct {                                       \
+                       static_assert((_testbits) <= (_fitbits));       \
+                       int pad;                                        \
+               })                                                      \
+       )
+
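
A minimal sketch of what FIT_CHECK() does (the values are illustrative only):

        FIT_CHECK(12 + 4, 16)   /* 16 data+shift bits fit: expands to 16     */
        FIT_CHECK(13 + 4, 16)   /* 17 > 16: static_assert() breaks the build */

so the .storagebits initializers below cannot silently truncate realbits + shift.
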
+#define ADS1015_V_CHAN(_chan, _addr, _realbits, _shift, _event_spec, _num_event_specs) { \
        .type = IIO_VOLTAGE,                                    \
-       .differential = 1,                                      \
        .indexed = 1,                                           \
        .address = _addr,                                       \
        .channel = _chan,                                       \
-       .channel2 = _chan2,                                     \
        .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |          \
                                BIT(IIO_CHAN_INFO_SCALE) |      \
                                BIT(IIO_CHAN_INFO_SAMP_FREQ),   \
-       .scan_index = _addr,                                    \
-       .scan_type = {                                          \
-               .sign = 's',                                    \
-               .realbits = 12,                                 \
-               .storagebits = 16,                              \
-               .shift = 4,                                     \
-               .endianness = IIO_CPU,                          \
-       },                                                      \
-       .event_spec = ads1015_events,                           \
-       .num_event_specs = ARRAY_SIZE(ads1015_events),          \
-       .datasheet_name = "AIN"#_chan"-AIN"#_chan2,             \
-}
-
-#define ADS1115_V_CHAN(_chan, _addr) {                         \
-       .type = IIO_VOLTAGE,                                    \
-       .indexed = 1,                                           \
-       .address = _addr,                                       \
-       .channel = _chan,                                       \
-       .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |          \
+       .info_mask_shared_by_all_available =                    \
                                BIT(IIO_CHAN_INFO_SCALE) |      \
                                BIT(IIO_CHAN_INFO_SAMP_FREQ),   \
        .scan_index = _addr,                                    \
        .scan_type = {                                          \
                .sign = 's',                                    \
-               .realbits = 16,                                 \
-               .storagebits = 16,                              \
+               .realbits = (_realbits),                        \
+               .storagebits = FIT_CHECK((_realbits) + (_shift), 16),   \
+               .shift = (_shift),                              \
                .endianness = IIO_CPU,                          \
        },                                                      \
-       .event_spec = ads1015_events,                           \
-       .num_event_specs = ARRAY_SIZE(ads1015_events),          \
+       .event_spec = (_event_spec),                            \
+       .num_event_specs = (_num_event_specs),                  \
        .datasheet_name = "AIN"#_chan,                          \
 }
 
-#define ADS1115_V_DIFF_CHAN(_chan, _chan2, _addr) {            \
+#define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr, _realbits, _shift, _event_spec, _num_event_specs) { \
        .type = IIO_VOLTAGE,                                    \
        .differential = 1,                                      \
        .indexed = 1,                                           \
@@ -208,15 +213,19 @@ static const struct iio_event_spec ads1015_events[] = {
        .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |          \
                                BIT(IIO_CHAN_INFO_SCALE) |      \
                                BIT(IIO_CHAN_INFO_SAMP_FREQ),   \
+       .info_mask_shared_by_all_available =                    \
+                               BIT(IIO_CHAN_INFO_SCALE) |      \
+                               BIT(IIO_CHAN_INFO_SAMP_FREQ),   \
        .scan_index = _addr,                                    \
        .scan_type = {                                          \
                .sign = 's',                                    \
-               .realbits = 16,                                 \
-               .storagebits = 16,                              \
+               .realbits = (_realbits),                        \
+               .storagebits = FIT_CHECK((_realbits) + (_shift), 16),   \
+               .shift = (_shift),                              \
                .endianness = IIO_CPU,                          \
        },                                                      \
-       .event_spec = ads1015_events,                           \
-       .num_event_specs = ARRAY_SIZE(ads1015_events),          \
+       .event_spec = (_event_spec),                            \
+       .num_event_specs = (_num_event_specs),                  \
        .datasheet_name = "AIN"#_chan"-AIN"#_chan2,             \
 }
 
@@ -245,7 +254,7 @@ struct ads1015_data {
        unsigned int comp_mode;
        struct ads1015_thresh_data thresh_data[ADS1015_CHANNELS];
 
-       unsigned int *data_rate;
+       const struct ads1015_chip_data *chip;
        /*
         * Set to true when the ADC is switched to the continuous-conversion
         * mode and exits from a power-down state.  This flag is used to avoid
@@ -273,49 +282,91 @@ static void ads1015_event_channel_disable(struct ads1015_data *data, int chan)
        data->event_channel = ADS1015_CHANNELS;
 }
 
-static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg)
-{
-       switch (reg) {
-       case ADS1015_CFG_REG:
-       case ADS1015_LO_THRESH_REG:
-       case ADS1015_HI_THRESH_REG:
-               return true;
-       default:
-               return false;
-       }
-}
+static const struct regmap_range ads1015_writeable_ranges[] = {
+       regmap_reg_range(ADS1015_CFG_REG, ADS1015_HI_THRESH_REG),
+};
+
+static const struct regmap_access_table ads1015_writeable_table = {
+       .yes_ranges = ads1015_writeable_ranges,
+       .n_yes_ranges = ARRAY_SIZE(ads1015_writeable_ranges),
+};
 
 static const struct regmap_config ads1015_regmap_config = {
        .reg_bits = 8,
        .val_bits = 16,
        .max_register = ADS1015_HI_THRESH_REG,
-       .writeable_reg = ads1015_is_writeable_reg,
+       .wr_table = &ads1015_writeable_table,
+};
+
+static const struct regmap_range tla2024_writeable_ranges[] = {
+       regmap_reg_range(ADS1015_CFG_REG, ADS1015_CFG_REG),
+};
+
+static const struct regmap_access_table tla2024_writeable_table = {
+       .yes_ranges = tla2024_writeable_ranges,
+       .n_yes_ranges = ARRAY_SIZE(tla2024_writeable_ranges),
+};
+
+static const struct regmap_config tla2024_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 16,
+       .max_register = ADS1015_CFG_REG,
+       .wr_table = &tla2024_writeable_table,
 };
 
 static const struct iio_chan_spec ads1015_channels[] = {
-       ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1),
-       ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3),
-       ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3),
-       ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3),
-       ADS1015_V_CHAN(0, ADS1015_AIN0),
-       ADS1015_V_CHAN(1, ADS1015_AIN1),
-       ADS1015_V_CHAN(2, ADS1015_AIN2),
-       ADS1015_V_CHAN(3, ADS1015_AIN3),
+       ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1, 12, 4,
+                           ads1015_events, ARRAY_SIZE(ads1015_events)),
+       ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3, 12, 4,
+                           ads1015_events, ARRAY_SIZE(ads1015_events)),
+       ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3, 12, 4,
+                           ads1015_events, ARRAY_SIZE(ads1015_events)),
+       ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3, 12, 4,
+                           ads1015_events, ARRAY_SIZE(ads1015_events)),
+       ADS1015_V_CHAN(0, ADS1015_AIN0, 12, 4,
+                      ads1015_events, ARRAY_SIZE(ads1015_events)),
+       ADS1015_V_CHAN(1, ADS1015_AIN1, 12, 4,
+                      ads1015_events, ARRAY_SIZE(ads1015_events)),
+       ADS1015_V_CHAN(2, ADS1015_AIN2, 12, 4,
+                      ads1015_events, ARRAY_SIZE(ads1015_events)),
+       ADS1015_V_CHAN(3, ADS1015_AIN3, 12, 4,
+                      ads1015_events, ARRAY_SIZE(ads1015_events)),
        IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
 };
 
 static const struct iio_chan_spec ads1115_channels[] = {
-       ADS1115_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1),
-       ADS1115_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3),
-       ADS1115_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3),
-       ADS1115_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3),
-       ADS1115_V_CHAN(0, ADS1015_AIN0),
-       ADS1115_V_CHAN(1, ADS1015_AIN1),
-       ADS1115_V_CHAN(2, ADS1015_AIN2),
-       ADS1115_V_CHAN(3, ADS1015_AIN3),
+       ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1, 16, 0,
+                           ads1015_events, ARRAY_SIZE(ads1015_events)),
+       ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3, 16, 0,
+                           ads1015_events, ARRAY_SIZE(ads1015_events)),
+       ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3, 16, 0,
+                           ads1015_events, ARRAY_SIZE(ads1015_events)),
+       ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3, 16, 0,
+                           ads1015_events, ARRAY_SIZE(ads1015_events)),
+       ADS1015_V_CHAN(0, ADS1015_AIN0, 16, 0,
+                      ads1015_events, ARRAY_SIZE(ads1015_events)),
+       ADS1015_V_CHAN(1, ADS1015_AIN1, 16, 0,
+                      ads1015_events, ARRAY_SIZE(ads1015_events)),
+       ADS1015_V_CHAN(2, ADS1015_AIN2, 16, 0,
+                      ads1015_events, ARRAY_SIZE(ads1015_events)),
+       ADS1015_V_CHAN(3, ADS1015_AIN3, 16, 0,
+                      ads1015_events, ARRAY_SIZE(ads1015_events)),
        IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
 };
 
+static const struct iio_chan_spec tla2024_channels[] = {
+       ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1, 12, 4, NULL, 0),
+       ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3, 12, 4, NULL, 0),
+       ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3, 12, 4, NULL, 0),
+       ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3, 12, 4, NULL, 0),
+       ADS1015_V_CHAN(0, ADS1015_AIN0, 12, 4, NULL, 0),
+       ADS1015_V_CHAN(1, ADS1015_AIN1, 12, 4, NULL, 0),
+       ADS1015_V_CHAN(2, ADS1015_AIN2, 12, 4, NULL, 0),
+       ADS1015_V_CHAN(3, ADS1015_AIN3, 12, 4, NULL, 0),
+       IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
+};
+
+
 #ifdef CONFIG_PM
 static int ads1015_set_power_state(struct ads1015_data *data, bool on)
 {
@@ -344,6 +395,7 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on)
 static
 int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
 {
+       const int *data_rate = data->chip->data_rate;
        int ret, pga, dr, dr_old, conv_time;
        unsigned int old, mask, cfg;
 
@@ -378,8 +430,8 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
        }
        if (data->conv_invalid) {
                dr_old = (old & ADS1015_CFG_DR_MASK) >> ADS1015_CFG_DR_SHIFT;
-               conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]);
-               conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
+               conv_time = DIV_ROUND_UP(USEC_PER_SEC, data_rate[dr_old]);
+               conv_time += DIV_ROUND_UP(USEC_PER_SEC, data_rate[dr]);
                conv_time += conv_time / 10; /* 10% internal clock inaccuracy */
                usleep_range(conv_time, conv_time + 1);
                data->conv_invalid = false;
@@ -445,8 +497,8 @@ static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++) {
-               if (data->data_rate[i] == rate) {
+       for (i = 0; i < data->chip->data_rate_len; i++) {
+               if (data->chip->data_rate[i] == rate) {
                        data->channel_data[chan].data_rate = i;
                        return 0;
                }
@@ -455,6 +507,32 @@ static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
        return -EINVAL;
 }
 
+static int ads1015_read_avail(struct iio_dev *indio_dev,
+                             struct iio_chan_spec const *chan,
+                             const int **vals, int *type, int *length,
+                             long mask)
+{
+       struct ads1015_data *data = iio_priv(indio_dev);
+
+       if (chan->type != IIO_VOLTAGE)
+               return -EINVAL;
+
+       switch (mask) {
+       case IIO_CHAN_INFO_SCALE:
+               *type = IIO_VAL_FRACTIONAL_LOG2;
+               *vals =  data->chip->scale;
+               *length = data->chip->scale_len;
+               return IIO_AVAIL_LIST;
+       case IIO_CHAN_INFO_SAMP_FREQ:
+               *type = IIO_VAL_INT;
+               *vals = data->chip->data_rate;
+               *length = data->chip->data_rate_len;
+               return IIO_AVAIL_LIST;
+       default:
+               return -EINVAL;
+       }
+}
+
 static int ads1015_read_raw(struct iio_dev *indio_dev,
                            struct iio_chan_spec const *chan, int *val,
                            int *val2, long mask)
@@ -504,7 +582,7 @@ release_direct:
                break;
        case IIO_CHAN_INFO_SAMP_FREQ:
                idx = data->channel_data[chan->address].data_rate;
-               *val = data->data_rate[idx];
+               *val = data->chip->data_rate[idx];
                ret = IIO_VAL_INT;
                break;
        default:
@@ -564,7 +642,7 @@ static int ads1015_read_event(struct iio_dev *indio_dev,
                dr = data->channel_data[chan->address].data_rate;
                comp_queue = data->thresh_data[chan->address].comp_queue;
                period = ads1015_comp_queue[comp_queue] *
-                       USEC_PER_SEC / data->data_rate[dr];
+                       USEC_PER_SEC / data->chip->data_rate[dr];
 
                *val = period / USEC_PER_SEC;
                *val2 = period % USEC_PER_SEC;
@@ -586,6 +664,7 @@ static int ads1015_write_event(struct iio_dev *indio_dev,
        int val2)
 {
        struct ads1015_data *data = iio_priv(indio_dev);
+       const int *data_rate = data->chip->data_rate;
        int realbits = chan->scan_type.realbits;
        int ret = 0;
        long long period;
@@ -611,7 +690,7 @@ static int ads1015_write_event(struct iio_dev *indio_dev,
 
                for (i = 0; i < ARRAY_SIZE(ads1015_comp_queue) - 1; i++) {
                        if (period <= ads1015_comp_queue[i] *
-                                       USEC_PER_SEC / data->data_rate[dr])
+                                       USEC_PER_SEC / data_rate[dr])
                                break;
                }
                data->thresh_data[chan->address].comp_queue = i;
@@ -802,54 +881,20 @@ static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = {
        .validate_scan_mask = &iio_validate_scan_mask_onehot,
 };
 
-static IIO_CONST_ATTR_NAMED(ads1015_scale_available, scale_available,
-       "3 2 1 0.5 0.25 0.125");
-static IIO_CONST_ATTR_NAMED(ads1115_scale_available, scale_available,
-       "0.1875 0.125 0.0625 0.03125 0.015625 0.007813");
-
-static IIO_CONST_ATTR_NAMED(ads1015_sampling_frequency_available,
-       sampling_frequency_available, "128 250 490 920 1600 2400 3300");
-static IIO_CONST_ATTR_NAMED(ads1115_sampling_frequency_available,
-       sampling_frequency_available, "8 16 32 64 128 250 475 860");
-
-static struct attribute *ads1015_attributes[] = {
-       &iio_const_attr_ads1015_scale_available.dev_attr.attr,
-       &iio_const_attr_ads1015_sampling_frequency_available.dev_attr.attr,
-       NULL,
-};
-
-static const struct attribute_group ads1015_attribute_group = {
-       .attrs = ads1015_attributes,
-};
-
-static struct attribute *ads1115_attributes[] = {
-       &iio_const_attr_ads1115_scale_available.dev_attr.attr,
-       &iio_const_attr_ads1115_sampling_frequency_available.dev_attr.attr,
-       NULL,
-};
-
-static const struct attribute_group ads1115_attribute_group = {
-       .attrs = ads1115_attributes,
-};
-
 static const struct iio_info ads1015_info = {
+       .read_avail     = ads1015_read_avail,
        .read_raw       = ads1015_read_raw,
        .write_raw      = ads1015_write_raw,
        .read_event_value = ads1015_read_event,
        .write_event_value = ads1015_write_event,
        .read_event_config = ads1015_read_event_config,
        .write_event_config = ads1015_write_event_config,
-       .attrs          = &ads1015_attribute_group,
 };
 
-static const struct iio_info ads1115_info = {
+static const struct iio_info tla2024_info = {
+       .read_avail     = ads1015_read_avail,
        .read_raw       = ads1015_read_raw,
        .write_raw      = ads1015_write_raw,
-       .read_event_value = ads1015_read_event,
-       .write_event_value = ads1015_write_event,
-       .read_event_config = ads1015_read_event_config,
-       .write_event_config = ads1015_write_event_config,
-       .attrs          = &ads1115_attribute_group,
 };
 
 static int ads1015_client_get_channels_config(struct i2c_client *client)
@@ -932,12 +977,18 @@ static int ads1015_set_conv_mode(struct ads1015_data *data, int mode)
 static int ads1015_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
+       const struct ads1015_chip_data *chip;
        struct iio_dev *indio_dev;
        struct ads1015_data *data;
        int ret;
-       enum chip_ids chip;
        int i;
 
+       chip = device_get_match_data(&client->dev);
+       if (!chip)
+               chip = (const struct ads1015_chip_data *)id->driver_data;
+       if (!chip)
+               return dev_err_probe(&client->dev, -EINVAL, "Unknown chip\n");
+
        indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
        if (!indio_dev)
                return -ENOMEM;
@@ -950,28 +1001,12 @@ static int ads1015_probe(struct i2c_client *client,
        indio_dev->name = ADS1015_DRV_NAME;
        indio_dev->modes = INDIO_DIRECT_MODE;
 
-       chip = (uintptr_t)device_get_match_data(&client->dev);
-       if (chip == ADSXXXX)
-               chip = id->driver_data;
-       switch (chip) {
-       case ADS1015:
-               indio_dev->channels = ads1015_channels;
-               indio_dev->num_channels = ARRAY_SIZE(ads1015_channels);
-               indio_dev->info = &ads1015_info;
-               data->data_rate = (unsigned int *) &ads1015_data_rate;
-               break;
-       case ADS1115:
-               indio_dev->channels = ads1115_channels;
-               indio_dev->num_channels = ARRAY_SIZE(ads1115_channels);
-               indio_dev->info = &ads1115_info;
-               data->data_rate = (unsigned int *) &ads1115_data_rate;
-               break;
-       default:
-               dev_err(&client->dev, "Unknown chip %d\n", chip);
-               return -EINVAL;
-       }
-
+       indio_dev->channels = chip->channels;
+       indio_dev->num_channels = chip->num_channels;
+       indio_dev->info = chip->info;
+       data->chip = chip;
        data->event_channel = ADS1015_CHANNELS;
+
        /*
         * Set default lower and upper threshold to min and max value
         * respectively.
@@ -986,7 +1021,9 @@ static int ads1015_probe(struct i2c_client *client,
        /* we need to keep this ABI the same as used by hwmon ADS1015 driver */
        ads1015_get_channels_config(client);
 
-       data->regmap = devm_regmap_init_i2c(client, &ads1015_regmap_config);
+       data->regmap = devm_regmap_init_i2c(client, chip->has_comparator ?
+                                           &ads1015_regmap_config :
+                                           &tla2024_regmap_config);
        if (IS_ERR(data->regmap)) {
                dev_err(&client->dev, "Failed to allocate register map\n");
                return PTR_ERR(data->regmap);
@@ -1000,7 +1037,7 @@ static int ads1015_probe(struct i2c_client *client,
                return ret;
        }
 
-       if (client->irq) {
+       if (client->irq && chip->has_comparator) {
                unsigned long irq_trig =
                        irqd_get_trigger_type(irq_get_irq_data(client->irq));
                unsigned int cfg_comp_mask = ADS1015_CFG_COMP_QUE_MASK |
@@ -1099,22 +1136,51 @@ static const struct dev_pm_ops ads1015_pm_ops = {
                           ads1015_runtime_resume, NULL)
 };
 
+static const struct ads1015_chip_data ads1015_data = {
+       .channels       = ads1015_channels,
+       .num_channels   = ARRAY_SIZE(ads1015_channels),
+       .info           = &ads1015_info,
+       .data_rate      = ads1015_data_rate,
+       .data_rate_len  = ARRAY_SIZE(ads1015_data_rate),
+       .scale          = ads1015_scale,
+       .scale_len      = ARRAY_SIZE(ads1015_scale),
+       .has_comparator = true,
+};
+
+static const struct ads1015_chip_data ads1115_data = {
+       .channels       = ads1115_channels,
+       .num_channels   = ARRAY_SIZE(ads1115_channels),
+       .info           = &ads1015_info,
+       .data_rate      = ads1115_data_rate,
+       .data_rate_len  = ARRAY_SIZE(ads1115_data_rate),
+       .scale          = ads1115_scale,
+       .scale_len      = ARRAY_SIZE(ads1115_scale),
+       .has_comparator = true,
+};
+
+static const struct ads1015_chip_data tla2024_data = {
+       .channels       = tla2024_channels,
+       .num_channels   = ARRAY_SIZE(tla2024_channels),
+       .info           = &tla2024_info,
+       .data_rate      = ads1015_data_rate,
+       .data_rate_len  = ARRAY_SIZE(ads1015_data_rate),
+       .scale          = ads1015_scale,
+       .scale_len      = ARRAY_SIZE(ads1015_scale),
+       .has_comparator = false,
+};
+
 static const struct i2c_device_id ads1015_id[] = {
-       {"ads1015", ADS1015},
-       {"ads1115", ADS1115},
+       { "ads1015", (kernel_ulong_t)&ads1015_data },
+       { "ads1115", (kernel_ulong_t)&ads1115_data },
+       { "tla2024", (kernel_ulong_t)&tla2024_data },
        {}
 };
 MODULE_DEVICE_TABLE(i2c, ads1015_id);
 
 static const struct of_device_id ads1015_of_match[] = {
-       {
-               .compatible = "ti,ads1015",
-               .data = (void *)ADS1015
-       },
-       {
-               .compatible = "ti,ads1115",
-               .data = (void *)ADS1115
-       },
+       { .compatible = "ti,ads1015", .data = &ads1015_data },
+       { .compatible = "ti,ads1115", .data = &ads1115_data },
+       { .compatible = "ti,tla2024", .data = &tla2024_data },
        {}
 };
 MODULE_DEVICE_TABLE(of, ads1015_of_match);
index 22c2583..708cca0 100644 (file)
@@ -508,6 +508,7 @@ MODULE_DEVICE_TABLE(of, ads8688_of_match);
 static struct spi_driver ads8688_driver = {
        .driver = {
                .name   = "ads8688",
+               .of_match_table = ads8688_of_match,
        },
        .probe          = ads8688_probe,
        .remove         = ads8688_remove,
index dbdc1ef..567d43a 100644 (file)
@@ -376,9 +376,7 @@ static int tiadc_iio_buffered_hardware_setup(struct device *dev,
 {
        int ret;
 
-       ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
-                                         INDIO_BUFFER_SOFTWARE,
-                                         setup_ops);
+       ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops);
        if (ret)
                return ret;
 
index 4fa3978..9a1d95c 100644 (file)
@@ -8,7 +8,6 @@ menu "Analog Front Ends"
 
 config IIO_RESCALE
        tristate "IIO rescale"
-       depends on OF || COMPILE_TEST
        help
          Say yes here to build support for the IIO rescaling
          that handles voltage dividers, current sense shunts and
index 7e51129..c6cf709 100644 (file)
@@ -10,9 +10,8 @@
 
 #include <linux/err.h>
 #include <linux/gcd.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/property.h>
 
@@ -536,7 +535,7 @@ static int rescale_probe(struct platform_device *pdev)
 
        rescale = iio_priv(indio_dev);
 
-       rescale->cfg = of_device_get_match_data(dev);
+       rescale->cfg = device_get_match_data(dev);
        rescale->numerator = 1;
        rescale->denominator = 1;
        rescale->offset = 0;
index 416d35a..35d8b40 100644 (file)
@@ -259,8 +259,6 @@ static struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
  * devm_iio_kfifo_buffer_setup_ext - Allocate a kfifo buffer & attach it to an IIO device
  * @dev: Device object to which to attach the life-time of this kfifo buffer
  * @indio_dev: The device the buffer should be attached to
- * @mode_flags: The mode flags for this buffer (INDIO_BUFFER_SOFTWARE and/or
- *             INDIO_BUFFER_TRIGGERED).
  * @setup_ops: The setup_ops required to configure the HW part of the buffer (optional)
  * @buffer_attrs: Extra sysfs buffer attributes for this IIO buffer
  *
@@ -271,22 +269,16 @@ static struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
  */
 int devm_iio_kfifo_buffer_setup_ext(struct device *dev,
                                    struct iio_dev *indio_dev,
-                                   int mode_flags,
                                    const struct iio_buffer_setup_ops *setup_ops,
                                    const struct attribute **buffer_attrs)
 {
        struct iio_buffer *buffer;
 
-       if (!mode_flags)
-               return -EINVAL;
-
        buffer = devm_iio_kfifo_allocate(dev);
        if (!buffer)
                return -ENOMEM;
 
-       mode_flags &= kfifo_access_funcs.modes;
-
-       indio_dev->modes |= mode_flags;
+       indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
        indio_dev->setup_ops = setup_ops;
 
        buffer->attrs = buffer_attrs;
index b2725c6..5976aca 100644 (file)
@@ -333,8 +333,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
                         * We can not use trigger here, as events are generated
                         * as soon as sample_frequency is set.
                         */
-                       ret = devm_iio_kfifo_buffer_setup_ext(dev, indio_dev,
-                                                             INDIO_BUFFER_SOFTWARE, NULL,
+                       ret = devm_iio_kfifo_buffer_setup_ext(dev, indio_dev, NULL,
                                                              cros_ec_sensor_fifo_attributes);
                        if (ret)
                                return ret;
@@ -413,7 +412,7 @@ static ssize_t cros_ec_sensors_calibrate(struct iio_dev *indio_dev,
        int ret, i;
        bool calibrate;
 
-       ret = strtobool(buf, &calibrate);
+       ret = kstrtobool(buf, &calibrate);
        if (ret < 0)
                return ret;
        if (!calibrate)
index d538bf3..793d628 100644 (file)
@@ -686,7 +686,6 @@ static int scmi_iio_dev_probe(struct scmi_device *sdev)
 
                err = devm_iio_kfifo_buffer_setup(&scmi_iio_dev->dev,
                                                  scmi_iio_dev,
-                                                 INDIO_BUFFER_SOFTWARE,
                                                  &scmi_iio_buffer_ops);
                if (err < 0) {
                        dev_err(dev,
index 769bd92..f32b04b 100644 (file)
@@ -331,12 +331,11 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
 /* threaded irq */
 int ssp_irq_msg(struct ssp_data *data)
 {
-       bool found = false;
        char *buffer;
        u8 msg_type;
        int ret;
        u16 length, msg_options;
-       struct ssp_msg *msg, *n;
+       struct ssp_msg *msg = NULL, *iter, *n;
 
        ret = spi_read(data->spi, data->header_buffer, SSP_HEADER_BUFFER_SIZE);
        if (ret < 0) {
@@ -362,15 +361,15 @@ int ssp_irq_msg(struct ssp_data *data)
                 * received with no order
                 */
                mutex_lock(&data->pending_lock);
-               list_for_each_entry_safe(msg, n, &data->pending_list, list) {
-                       if (msg->options == msg_options) {
-                               list_del(&msg->list);
-                               found = true;
+               list_for_each_entry_safe(iter, n, &data->pending_list, list) {
+                       if (iter->options == msg_options) {
+                               list_del(&iter->list);
+                               msg = iter;
                                break;
                        }
                }
 
-               if (!found) {
+               if (!msg) {
                        /*
                         * here can be implemented dead messages handling
                         * but the slave should not send such ones - it is to
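
A minimal sketch of the list-iterator pattern adopted above (struct item and
the wanted/head names are hypothetical):

        struct item *found = NULL, *iter, *n;

        list_for_each_entry_safe(iter, n, &head, list) {
                if (iter->key == wanted) {
                        list_del(&iter->list);
                        found = iter;
                        break;
                }
        }
        if (!found)
                return -EPROTO;         /* nothing matched */

Keeping the result in a separate pointer means the loop variable is never
dereferenced after the loop, where it would no longer point at a valid entry.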
index fa9bcdf..9910ba1 100644 (file)
@@ -71,16 +71,18 @@ st_sensors_match_odr_error:
 
 int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
 {
-       int err;
+       int err = 0;
        struct st_sensor_odr_avl odr_out = {0, 0};
        struct st_sensor_data *sdata = iio_priv(indio_dev);
 
+       mutex_lock(&sdata->odr_lock);
+
        if (!sdata->sensor_settings->odr.mask)
-               return 0;
+               goto unlock_mutex;
 
        err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
        if (err < 0)
-               goto st_sensors_match_odr_error;
+               goto unlock_mutex;
 
        if ((sdata->sensor_settings->odr.addr ==
                                        sdata->sensor_settings->pw.addr) &&
@@ -103,7 +105,9 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
        if (err >= 0)
                sdata->odr = odr_out.hz;
 
-st_sensors_match_odr_error:
+unlock_mutex:
+       mutex_unlock(&sdata->odr_lock);
+
        return err;
 }
 EXPORT_SYMBOL_NS(st_sensors_set_odr, IIO_ST_SENSORS);
@@ -361,6 +365,8 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
        struct st_sensors_platform_data *of_pdata;
        int err = 0;
 
+       mutex_init(&sdata->odr_lock);
+
        /* If OF/DT pdata exists, it will take precedence of anything else */
        of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata);
        if (IS_ERR(of_pdata))
@@ -549,26 +555,28 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
        int err;
        struct st_sensor_data *sdata = iio_priv(indio_dev);
 
-       mutex_lock(&indio_dev->mlock);
-       if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
-               err = -EBUSY;
+       err = iio_device_claim_direct_mode(indio_dev);
+       if (err)
+               return err;
+
+       mutex_lock(&sdata->odr_lock);
+
+       err = st_sensors_set_enable(indio_dev, true);
+       if (err < 0)
                goto out;
-       } else {
-               err = st_sensors_set_enable(indio_dev, true);
-               if (err < 0)
-                       goto out;
 
-               msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr);
-               err = st_sensors_read_axis_data(indio_dev, ch, val);
-               if (err < 0)
-                       goto out;
+       msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr);
+       err = st_sensors_read_axis_data(indio_dev, ch, val);
+       if (err < 0)
+               goto out;
 
-               *val = *val >> ch->scan_type.shift;
+       *val = *val >> ch->scan_type.shift;
+
+       err = st_sensors_set_enable(indio_dev, false);
 
-               err = st_sensors_set_enable(indio_dev, false);
-       }
 out:
-       mutex_unlock(&indio_dev->mlock);
+       mutex_unlock(&sdata->odr_lock);
+       iio_device_release_direct_mode(indio_dev);
 
        return err;
 }
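
A minimal sketch of the claim/release pairing used above (my_hw_read() is a
placeholder for the device-specific readout):

        ret = iio_device_claim_direct_mode(indio_dev);
        if (ret)
                return ret;                     /* -EBUSY while buffering */

        ret = my_hw_read(indio_dev, val);       /* placeholder */

        iio_device_release_direct_mode(indio_dev);
        return ret;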
@@ -641,7 +649,6 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct st_sensor_data *sdata = iio_priv(indio_dev);
 
-       mutex_lock(&indio_dev->mlock);
        for (i = 0; i < ST_SENSORS_ODR_LIST_MAX; i++) {
                if (sdata->sensor_settings->odr.odr_avl[i].hz == 0)
                        break;
@@ -649,7 +656,6 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
                len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
                                sdata->sensor_settings->odr.odr_avl[i].hz);
        }
-       mutex_unlock(&indio_dev->mlock);
        buf[len - 1] = '\n';
 
        return len;
@@ -663,7 +669,6 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct st_sensor_data *sdata = iio_priv(indio_dev);
 
-       mutex_lock(&indio_dev->mlock);
        for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
                if (sdata->sensor_settings->fs.fs_avl[i].num == 0)
                        break;
@@ -673,7 +678,6 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
 
                len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r);
        }
-       mutex_unlock(&indio_dev->mlock);
        buf[len - 1] = '\n';
 
        return len;
index c0bf0d8..d1c7bde 100644 (file)
@@ -285,7 +285,6 @@ config CIO_DAC
 
 config DPOT_DAC
        tristate "DAC emulation using a DPOT"
-       depends on OF
        help
          Say yes here to build support for DAC emulation using a digital
          potentiometer.
@@ -305,7 +304,7 @@ config DS4424
 config LPC18XX_DAC
        tristate "NXP LPC18xx DAC driver"
        depends on ARCH_LPC18XX || COMPILE_TEST
-       depends on OF && HAS_IOMEM
+       depends on HAS_IOMEM
        help
          Say yes here to build support for NXP LPC18XX DAC.
 
@@ -442,7 +441,6 @@ config TI_DAC7612
 
 config VF610_DAC
        tristate "Vybrid vf610 DAC driver"
-       depends on OF
        depends on HAS_IOMEM
        help
          Say yes here to support Vybrid board digital-to-analog converter.
index 27ee2c6..d87cf14 100644 (file)
@@ -288,7 +288,7 @@ static ssize_t ad5064_write_dac_powerdown(struct iio_dev *indio_dev,
        bool pwr_down;
        int ret;
 
-       ret = strtobool(buf, &pwr_down);
+       ret = kstrtobool(buf, &pwr_down);
        if (ret)
                return ret;
 
index ecbc6a5..22b000a 100644 (file)
@@ -284,7 +284,7 @@ static ssize_t ad5360_write_dac_powerdown(struct device *dev,
        bool pwr_down;
        int ret;
 
-       ret = strtobool(buf, &pwr_down);
+       ret = kstrtobool(buf, &pwr_down);
        if (ret)
                return ret;
 
index 82e1d9b..a44c832 100644 (file)
@@ -96,7 +96,7 @@ static ssize_t ad5380_write_dac_powerdown(struct iio_dev *indio_dev,
        bool pwr_down;
        int ret;
 
-       ret = strtobool(buf, &pwr_down);
+       ret = kstrtobool(buf, &pwr_down);
        if (ret)
                return ret;
 
index fdf8240..09e2429 100644 (file)
@@ -114,7 +114,7 @@ static ssize_t ad5446_write_dac_powerdown(struct iio_dev *indio_dev,
        bool powerdown;
        int ret;
 
-       ret = strtobool(buf, &powerdown);
+       ret = kstrtobool(buf, &powerdown);
        if (ret)
                return ret;
 
index 8507573..a0817e7 100644 (file)
@@ -182,7 +182,7 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
        int ret;
        struct ad5504_state *st = iio_priv(indio_dev);
 
-       ret = strtobool(buf, &pwr_down);
+       ret = kstrtobool(buf, &pwr_down);
        if (ret)
                return ret;
 
index 371e812..7e6f824 100644 (file)
@@ -129,7 +129,7 @@ static ssize_t ad5624r_write_dac_powerdown(struct iio_dev *indio_dev,
        int ret;
        struct ad5624r_state *st = iio_priv(indio_dev);
 
-       ret = strtobool(buf, &pwr_down);
+       ret = kstrtobool(buf, &pwr_down);
        if (ret)
                return ret;
 
index f78dd3f..15361d8 100644 (file)
@@ -73,7 +73,7 @@ static ssize_t ad5686_write_dac_powerdown(struct iio_dev *indio_dev,
        unsigned int val, ref_bit_msk;
        u8 shift, address = 0;
 
-       ret = strtobool(buf, &readin);
+       ret = kstrtobool(buf, &readin);
        if (ret)
                return ret;
 
index 7a62e6e..1a63b84 100644 (file)
@@ -502,7 +502,7 @@ static ssize_t ad5755_write_powerdown(struct iio_dev *indio_dev, uintptr_t priv,
        bool pwr_down;
        int ret;
 
-       ret = strtobool(buf, &pwr_down);
+       ret = kstrtobool(buf, &pwr_down);
        if (ret)
                return ret;
 
index 2b14914..339564f 100644 (file)
@@ -188,7 +188,7 @@ static ssize_t ad5791_write_dac_powerdown(struct iio_dev *indio_dev,
        int ret;
        struct ad5791_state *st = iio_priv(indio_dev);
 
-       ret = strtobool(buf, &pwr_down);
+       ret = kstrtobool(buf, &pwr_down);
        if (ret)
                return ret;
 
index 91eaaf7..03edf04 100644 (file)
@@ -77,7 +77,7 @@ static ssize_t ad7303_write_dac_powerdown(struct iio_dev *indio_dev,
        bool pwr_down;
        int ret;
 
-       ret = strtobool(buf, &pwr_down);
+       ret = kstrtobool(buf, &pwr_down);
        if (ret)
                return ret;
 
index aed46c8..3a3c4f4 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/spi/spi.h>
 #include <linux/module.h>
 #include <linux/iio/iio.h>
+#include <linux/property.h>
 #include <linux/regulator/consumer.h>
 
 #include <asm/unaligned.h>
@@ -149,7 +150,7 @@ static ssize_t ltc2632_write_dac_powerdown(struct iio_dev *indio_dev,
        int ret;
        struct ltc2632_state *st = iio_priv(indio_dev);
 
-       ret = strtobool(buf, &pwr_down);
+       ret = kstrtobool(buf, &pwr_down);
        if (ret)
                return ret;
 
@@ -362,8 +363,7 @@ static int ltc2632_probe(struct spi_device *spi)
                }
        }
 
-       indio_dev->name = dev_of_node(&spi->dev) ? dev_of_node(&spi->dev)->name
-                                                : spi_get_device_id(spi)->name;
+       indio_dev->name = fwnode_get_name(dev_fwnode(&spi->dev)) ?: spi_get_device_id(spi)->name;
        indio_dev->info = &ltc2632_info;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->channels = chip_info->channels;
@@ -469,7 +469,7 @@ MODULE_DEVICE_TABLE(of, ltc2632_of_match);
 static struct spi_driver ltc2632_driver = {
        .driver         = {
                .name   = "ltc2632",
-               .of_match_table = of_match_ptr(ltc2632_of_match),
+               .of_match_table = ltc2632_of_match,
        },
        .probe          = ltc2632_probe,
        .remove         = ltc2632_remove,
index 2f9c384..937b0d2 100644 (file)
@@ -703,21 +703,20 @@ static int ltc2688_tgp_clk_setup(struct ltc2688_state *st,
                                 struct ltc2688_chan *chan,
                                 struct fwnode_handle *node, int tgp)
 {
+       struct device *dev = &st->spi->dev;
        unsigned long rate;
        struct clk *clk;
        int ret, f;
 
-       clk = devm_get_clk_from_child(&st->spi->dev, to_of_node(node), NULL);
+       clk = devm_get_clk_from_child(dev, to_of_node(node), NULL);
        if (IS_ERR(clk))
-               return dev_err_probe(&st->spi->dev, PTR_ERR(clk),
-                                    "failed to get tgp clk.\n");
+               return dev_err_probe(dev, PTR_ERR(clk), "failed to get tgp clk.\n");
 
        ret = clk_prepare_enable(clk);
        if (ret)
-               return dev_err_probe(&st->spi->dev, ret,
-                                    "failed to enable tgp clk.\n");
+               return dev_err_probe(dev, ret, "failed to enable tgp clk.\n");
 
-       ret = devm_add_action_or_reset(&st->spi->dev, ltc2688_clk_disable, clk);
+       ret = devm_add_action_or_reset(dev, ltc2688_clk_disable, clk);
        if (ret)
                return ret;
 
@@ -858,6 +857,7 @@ static int ltc2688_channel_config(struct ltc2688_state *st)
 
 static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref)
 {
+       struct device *dev = &st->spi->dev;
        struct gpio_desc *gpio;
        int ret;
 
@@ -865,10 +865,9 @@ static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref)
         * If we have a reset pin, use that to reset the board, If not, use
         * the reset bit.
         */
-       gpio = devm_gpiod_get_optional(&st->spi->dev, "clr", GPIOD_OUT_HIGH);
+       gpio = devm_gpiod_get_optional(dev, "clr", GPIOD_OUT_HIGH);
        if (IS_ERR(gpio))
-               return dev_err_probe(&st->spi->dev, PTR_ERR(gpio),
-                                    "Failed to get reset gpio");
+               return dev_err_probe(dev, PTR_ERR(gpio), "Failed to get reset gpio");
        if (gpio) {
                usleep_range(1000, 1200);
                /* bring device out of reset */
@@ -887,7 +886,7 @@ static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref)
         * Duplicate the default channel configuration as it can change during
         * @ltc2688_channel_config()
         */
-       st->iio_chan = devm_kmemdup(&st->spi->dev, ltc2688_channels,
+       st->iio_chan = devm_kmemdup(dev, ltc2688_channels,
                                    sizeof(ltc2688_channels), GFP_KERNEL);
        if (!st->iio_chan)
                return -ENOMEM;
index fce640b..540f9ea 100644 (file)
@@ -116,7 +116,7 @@ static ssize_t max5821_write_dac_powerdown(struct iio_dev *indio_dev,
        bool powerdown;
        int ret;
 
-       ret = strtobool(buf, &powerdown);
+       ret = kstrtobool(buf, &powerdown);
        if (ret)
                return ret;
 
index 842bad5..7fcb862 100644 (file)
@@ -80,7 +80,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
        bool state;
        int ret;
 
-       ret = strtobool(buf, &state);
+       ret = kstrtobool(buf, &state);
        if (ret < 0)
                return ret;
 
@@ -178,7 +178,7 @@ static ssize_t mcp4725_write_powerdown(struct iio_dev *indio_dev,
        bool state;
        int ret;
 
-       ret = strtobool(buf, &state);
+       ret = kstrtobool(buf, &state);
        if (ret)
                return ret;
 
index b20192a..daa42bc 100644 (file)
@@ -220,7 +220,7 @@ static ssize_t stm32_dac_write_powerdown(struct iio_dev *indio_dev,
        bool powerdown;
        int ret;
 
-       ret = strtobool(buf, &powerdown);
+       ret = kstrtobool(buf, &powerdown);
        if (ret)
                return ret;
 
index 4e1156e..106ce35 100644 (file)
@@ -133,7 +133,7 @@ static ssize_t ti_dac_write_powerdown(struct iio_dev *indio_dev,
        bool powerdown;
        int ret;
 
-       ret = strtobool(buf, &powerdown);
+       ret = kstrtobool(buf, &powerdown);
        if (ret)
                return ret;
 
index 0b775f9..4b6b040 100644 (file)
@@ -179,7 +179,7 @@ static ssize_t dac5571_write_powerdown(struct iio_dev *indio_dev,
        bool powerdown;
        int ret;
 
-       ret = strtobool(buf, &powerdown);
+       ret = kstrtobool(buf, &powerdown);
        if (ret)
                return ret;
 
index e10d17e..4afc411 100644 (file)
@@ -123,7 +123,7 @@ static ssize_t ti_dac_write_powerdown(struct iio_dev *indio_dev,
        u8 power;
        int ret;
 
-       ret = strtobool(buf, &powerdown);
+       ret = kstrtobool(buf, &powerdown);
        if (ret)
                return ret;
 
index c0b7ef9..c24f609 100644 (file)
@@ -575,10 +575,9 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
         */
 
        swd = kzalloc(sizeof(*swd), GFP_KERNEL);
-       if (!swd) {
-               ret = -ENOMEM;
-               goto error_kzalloc;
-       }
+       if (!swd)
+               return ERR_PTR(-ENOMEM);
+
        /*
         * Allocate an IIO device.
         *
@@ -590,7 +589,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
        indio_dev = iio_device_alloc(parent, sizeof(*st));
        if (!indio_dev) {
                ret = -ENOMEM;
-               goto error_ret;
+               goto error_free_swd;
        }
 
        st = iio_priv(indio_dev);
@@ -616,6 +615,10 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
         *    indio_dev->name = spi_get_device_id(spi)->name;
         */
        indio_dev->name = kstrdup(name, GFP_KERNEL);
+       if (!indio_dev->name) {
+               ret = -ENOMEM;
+               goto error_free_device;
+       }
 
        /* Provide description of available channels */
        indio_dev->channels = iio_dummy_channels;
@@ -632,7 +635,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
 
        ret = iio_simple_dummy_events_register(indio_dev);
        if (ret < 0)
-               goto error_free_device;
+               goto error_free_name;
 
        ret = iio_simple_dummy_configure_buffer(indio_dev);
        if (ret < 0)
@@ -649,11 +652,12 @@ error_unconfigure_buffer:
        iio_simple_dummy_unconfigure_buffer(indio_dev);
 error_unregister_events:
        iio_simple_dummy_events_unregister(indio_dev);
+error_free_name:
+       kfree(indio_dev->name);
 error_free_device:
        iio_device_free(indio_dev);
-error_ret:
+error_free_swd:
        kfree(swd);
-error_kzalloc:
        return ERR_PTR(ret);
 }
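
The probe above now unwinds with one label per allocation, including the newly checked kstrdup() of the device name. Not part of the patch: the general goto-ladder shape, with made-up names.

/*
 * Illustrative only, not part of the patch. Each allocation adds a label
 * named for what it releases; error paths jump to the label that undoes
 * everything allocated so far.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_ctx {
	char *name;
	void *buf;
};

static struct example_ctx *example_alloc(const char *name)
{
	struct example_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->name = kstrdup(name, GFP_KERNEL);
	if (!ctx->name) {
		ret = -ENOMEM;
		goto error_free_ctx;
	}

	ctx->buf = kzalloc(64, GFP_KERNEL);
	if (!ctx->buf) {
		ret = -ENOMEM;
		goto error_free_name;
	}

	return ctx;

error_free_name:
	kfree(ctx->name);
error_free_ctx:
	kfree(ctx);
	return ERR_PTR(ret);
}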
 
index d81c2b2..9b2f994 100644 (file)
@@ -45,41 +45,31 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
 {
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
+       int i = 0, j;
        u16 *data;
 
        data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
        if (!data)
                goto done;
 
-       if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
-               /*
-                * Three common options here:
-                * hardware scans: certain combinations of channels make
-                *   up a fast read.  The capture will consist of all of them.
-                *   Hence we just call the grab data function and fill the
-                *   buffer without processing.
-                * software scans: can be considered to be random access
-                *   so efficient reading is just a case of minimal bus
-                *   transactions.
-                * software culled hardware scans:
-                *   occasionally a driver may process the nearest hardware
-                *   scan to avoid storing elements that are not desired. This
-                *   is the fiddliest option by far.
-                * Here let's pretend we have random access. And the values are
-                * in the constant table fakedata.
-                */
-               int i, j;
-
-               for (i = 0, j = 0;
-                    i < bitmap_weight(indio_dev->active_scan_mask,
-                                      indio_dev->masklength);
-                    i++, j++) {
-                       j = find_next_bit(indio_dev->active_scan_mask,
-                                         indio_dev->masklength, j);
-                       /* random access read from the 'device' */
-                       data[i] = fakedata[j];
-               }
-       }
+       /*
+        * Three common options here:
+        * hardware scans:
+        *   certain combinations of channels make up a fast read. The capture
+        *   will consist of all of them. Hence we just call the grab data
+        *   function and fill the buffer without processing.
+        * software scans:
+        *   can be considered to be random access so efficient reading is just
+        *   a case of minimal bus transactions.
+        * software culled hardware scans:
+        *   occasionally a driver may process the nearest hardware scan to avoid
+        *   storing elements that are not desired. This is the fiddliest option
+        *   by far.
+        * Here let's pretend we have random access. And the values are in the
+        * constant table fakedata.
+        */
+       for_each_set_bit(j, indio_dev->active_scan_mask, indio_dev->masklength)
+               data[i++] = fakedata[j];
 
        iio_push_to_buffers_with_timestamp(indio_dev, data,
                                           iio_get_time_ns(indio_dev));
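
The open-coded bitmap_weight()/find_next_bit() loop becomes a single for_each_set_bit() walk over the active scan mask. Not part of the patch: the same idiom on a stand-alone bitmap, with made-up names.

/*
 * Illustrative only, not part of the patch. for_each_set_bit() visits the
 * set bit positions of @mask in ascending order.
 */
#include <linux/bitops.h>
#include <linux/types.h>

static void example_collect(const unsigned long *mask, unsigned int nbits,
			    const u16 *src, u16 *dst)
{
	unsigned int i = 0, j;

	for_each_set_bit(j, mask, nbits)
		dst[i++] = src[j];
}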
index a0f92c3..9428705 100644 (file)
@@ -516,7 +516,7 @@ static ssize_t ad9523_store(struct device *dev,
        bool state;
        int ret;
 
-       ret = strtobool(buf, &state);
+       ret = kstrtobool(buf, &state);
        if (ret < 0)
                return ret;
 
index 410e5e9..0923fd7 100644 (file)
@@ -7,9 +7,9 @@
 
 #include <linux/interrupt.h>
 #include <linux/module.h>
-#include <linux/of_irq.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 
@@ -822,7 +822,6 @@ static int fxas21002c_trigger_probe(struct fxas21002c_data *data)
 {
        struct device *dev = regmap_get_device(data->regmap);
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
-       struct device_node *np = indio_dev->dev.of_node;
        unsigned long irq_trig;
        bool irq_open_drain;
        int irq1;
@@ -831,8 +830,7 @@ static int fxas21002c_trigger_probe(struct fxas21002c_data *data)
        if (!data->irq)
                return 0;
 
-       irq1 = of_irq_get_byname(np, "INT1");
-
+       irq1 = fwnode_irq_get_byname(dev_fwnode(dev), "INT1");
        if (irq1 == data->irq) {
                dev_info(dev, "using interrupt line INT1\n");
                ret = regmap_field_write(data->regmap_fields[F_INT_CFG_DRDY],
@@ -843,7 +841,7 @@ static int fxas21002c_trigger_probe(struct fxas21002c_data *data)
 
        dev_info(dev, "using interrupt line INT2\n");
 
-       irq_open_drain = of_property_read_bool(np, "drive-open-drain");
+       irq_open_drain = device_property_read_bool(dev, "drive-open-drain");
 
        data->dready_trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
                                                   indio_dev->name,
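
The trigger setup now goes through the firmware-agnostic property API (fwnode_irq_get_byname(), device_property_read_bool()), so it works for ACPI-described devices as well as DT. Not part of the patch: a sketch of that lookup; the function name is made up, the property names are the ones handled above.

/*
 * Illustrative only, not part of the patch. Looks up a named interrupt and
 * a boolean flag without caring whether the device came from DT or ACPI.
 */
#include <linux/errno.h>
#include <linux/property.h>

static int example_get_int1(struct device *dev, bool *open_drain)
{
	int irq;

	irq = fwnode_irq_get_byname(dev_fwnode(dev), "INT1");
	if (irq <= 0)
		return irq < 0 ? irq : -ENXIO;

	*open_drain = device_property_read_bool(dev, "drive-open-drain");

	return irq;
}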
index ea387ef..4f19dc7 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 #include <linux/random.h>
 #include <linux/slab.h>
 
@@ -1050,6 +1051,7 @@ static const struct iio_trigger_ops mpu3050_trigger_ops = {
 static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
 {
        struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+       struct device *dev = mpu3050->dev;
        unsigned long irq_trig;
        int ret;
 
@@ -1061,8 +1063,7 @@ static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
                return -ENOMEM;
 
        /* Check if IRQ is open drain */
-       if (of_property_read_bool(mpu3050->dev->of_node, "drive-open-drain"))
-               mpu3050->irq_opendrain = true;
+       mpu3050->irq_opendrain = device_property_read_bool(dev, "drive-open-drain");
 
        irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
        /*
@@ -1118,13 +1119,12 @@ static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
                                   mpu3050->trig->name,
                                   mpu3050->trig);
        if (ret) {
-               dev_err(mpu3050->dev,
-                       "can't get IRQ %d, error %d\n", irq, ret);
+               dev_err(dev, "can't get IRQ %d, error %d\n", irq, ret);
                return ret;
        }
 
        mpu3050->irq = irq;
-       mpu3050->trig->dev.parent = mpu3050->dev;
+       mpu3050->trig->dev.parent = dev;
        mpu3050->trig->ops = &mpu3050_trigger_ops;
        iio_trigger_set_drvdata(mpu3050->trig, indio_dev);
 
@@ -1263,7 +1263,7 @@ err_power_down:
 }
 EXPORT_SYMBOL(mpu3050_common_probe);
 
-int mpu3050_common_remove(struct device *dev)
+void mpu3050_common_remove(struct device *dev)
 {
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct mpu3050 *mpu3050 = iio_priv(indio_dev);
@@ -1276,8 +1276,6 @@ int mpu3050_common_remove(struct device *dev)
                free_irq(mpu3050->irq, mpu3050);
        iio_device_unregister(indio_dev);
        mpu3050_power_down(mpu3050);
-
-       return 0;
 }
 EXPORT_SYMBOL(mpu3050_common_remove);
 
index ef5bcbc..5b5f58b 100644 (file)
@@ -86,7 +86,9 @@ static int mpu3050_i2c_remove(struct i2c_client *client)
        if (mpu3050->i2cmux)
                i2c_mux_del_adapters(mpu3050->i2cmux);
 
-       return mpu3050_common_remove(&client->dev);
+       mpu3050_common_remove(&client->dev);
+
+       return 0;
 }
 
 /*
index 835b024..faf4168 100644 (file)
@@ -91,7 +91,7 @@ int mpu3050_common_probe(struct device *dev,
                         struct regmap *map,
                         int irq,
                         const char *name);
-int mpu3050_common_remove(struct device *dev);
+void mpu3050_common_remove(struct device *dev);
 
 /* PM ops */
 extern const struct dev_pm_ops mpu3050_dev_pm_ops;
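
mpu3050_common_remove() now returns void; the I2C glue above keeps returning the 0 that the i2c_driver remove callback still expects. Not part of the patch: the general shape of that split, with made-up names.

/*
 * Illustrative only, not part of the patch. The shared remove helper has
 * nothing that can usefully fail, so it returns void and the bus wrapper
 * supplies the return value.
 */
#include <linux/i2c.h>

static void example_common_remove(struct device *dev)
{
	/* unregister the IIO device, power the sensor down, ... */
}

static int example_i2c_remove(struct i2c_client *client)
{
	example_common_remove(&client->dev);

	return 0;
}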
index 5fd1bf9..d332474 100644 (file)
@@ -113,7 +113,6 @@ static int ssp_gyro_probe(struct platform_device *pdev)
        indio_dev->available_scan_masks = ssp_gyro_scan_mask;
 
        ret = devm_iio_kfifo_buffer_setup(&pdev->dev, indio_dev,
-                                         INDIO_BUFFER_SOFTWARE,
                                          &ssp_gyro_buffer_ops);
        if (ret)
                return ret;
index 62172e1..eaa35da 100644 (file)
@@ -406,24 +406,17 @@ read_error:
 static int st_gyro_write_raw(struct iio_dev *indio_dev,
                struct iio_chan_spec const *chan, int val, int val2, long mask)
 {
-       int err;
-
        switch (mask) {
        case IIO_CHAN_INFO_SCALE:
-               err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
-               break;
+               return st_sensors_set_fullscale_by_gain(indio_dev, val2);
        case IIO_CHAN_INFO_SAMP_FREQ:
                if (val2)
                        return -EINVAL;
-               mutex_lock(&indio_dev->mlock);
-               err = st_sensors_set_odr(indio_dev, val);
-               mutex_unlock(&indio_dev->mlock);
-               return err;
+
+               return st_sensors_set_odr(indio_dev, val);
        default:
-               err = -EINVAL;
+               return -EINVAL;
        }
-
-       return err;
 }
 
 static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
index 36ba761..ad57179 100644 (file)
@@ -433,7 +433,6 @@ static int max30100_probe(struct i2c_client *client,
        indio_dev->modes = INDIO_DIRECT_MODE;
 
        ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
-                                         INDIO_BUFFER_SOFTWARE,
                                          &max30100_buffer_setup_ops);
        if (ret)
                return ret;
index 2292876..abbcef5 100644 (file)
@@ -542,7 +542,6 @@ static int max30102_probe(struct i2c_client *client,
        }
 
        ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
-                                         INDIO_BUFFER_SOFTWARE,
                                          &max30102_buffer_setup_ops);
        if (ret)
                return ret;
index 44bbe3d..fe52019 100644 (file)
@@ -7,14 +7,16 @@
 
 #include <linux/clk.h>
 #include <linux/bitfield.h>
-#include <linux/of_irq.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/math.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/spi/spi.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/lcm.h>
+#include <linux/property.h>
 #include <linux/swab.h>
 #include <linux/crc32.h>
 
@@ -1119,6 +1121,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
        struct iio_dev *indio_dev = pf->indio_dev;
        struct adis16480 *st = iio_priv(indio_dev);
        struct adis *adis = &st->adis;
+       struct device *dev = &adis->spi->dev;
        int ret, bit, offset, i = 0;
        __be16 *buffer;
        u32 crc;
@@ -1130,7 +1133,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
                adis->tx[1] = 0;
                ret = spi_write(adis->spi, adis->tx, 2);
                if (ret) {
-                       dev_err(&adis->spi->dev, "Failed to change device page: %d\n", ret);
+                       dev_err(dev, "Failed to change device page: %d\n", ret);
                        adis_dev_unlock(adis);
                        goto irq_done;
                }
@@ -1140,7 +1143,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
 
        ret = spi_sync(adis->spi, &adis->msg);
        if (ret) {
-               dev_err(&adis->spi->dev, "Failed to read data: %d\n", ret);
+               dev_err(dev, "Failed to read data: %d\n", ret);
                adis_dev_unlock(adis);
                goto irq_done;
        }
@@ -1168,14 +1171,14 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
        }
 
        if (offset == 4) {
-               dev_err(&adis->spi->dev, "Invalid burst data\n");
+               dev_err(dev, "Invalid burst data\n");
                goto irq_done;
        }
 
        crc = be16_to_cpu(buffer[offset + 16]) << 16 | be16_to_cpu(buffer[offset + 15]);
        valid = adis16480_validate_crc((u16 *)&buffer[offset], 15, crc);
        if (!valid) {
-               dev_err(&adis->spi->dev, "Invalid crc\n");
+               dev_err(dev, "Invalid crc\n");
                goto irq_done;
        }
 
@@ -1214,12 +1217,12 @@ static const struct iio_info adis16480_info = {
 static int adis16480_stop_device(struct iio_dev *indio_dev)
 {
        struct adis16480 *st = iio_priv(indio_dev);
+       struct device *dev = &st->adis.spi->dev;
        int ret;
 
        ret = adis_write_reg_16(&st->adis, ADIS16480_REG_SLP_CNT, BIT(9));
        if (ret)
-               dev_err(&indio_dev->dev,
-                       "Could not power down device: %d\n", ret);
+               dev_err(dev, "Could not power down device: %d\n", ret);
 
        return ret;
 }
@@ -1239,9 +1242,10 @@ static int adis16480_enable_irq(struct adis *adis, bool enable)
        return __adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, val);
 }
 
-static int adis16480_config_irq_pin(struct device_node *of_node,
-                                   struct adis16480 *st)
+static int adis16480_config_irq_pin(struct adis16480 *st)
 {
+       struct device *dev = &st->adis.spi->dev;
+       struct fwnode_handle *fwnode = dev_fwnode(dev);
        struct irq_data *desc;
        enum adis16480_int_pin pin;
        unsigned int irq_type;
@@ -1250,7 +1254,7 @@ static int adis16480_config_irq_pin(struct device_node *of_node,
 
        desc = irq_get_irq_data(st->adis.spi->irq);
        if (!desc) {
-               dev_err(&st->adis.spi->dev, "Could not find IRQ %d\n", irq);
+               dev_err(dev, "Could not find IRQ %d\n", irq);
                return -EINVAL;
        }
 
@@ -1267,7 +1271,7 @@ static int adis16480_config_irq_pin(struct device_node *of_node,
         */
        pin = ADIS16480_PIN_DIO1;
        for (i = 0; i < ARRAY_SIZE(adis16480_int_pin_names); i++) {
-               irq = of_irq_get_byname(of_node, adis16480_int_pin_names[i]);
+               irq = fwnode_irq_get_byname(fwnode, adis16480_int_pin_names[i]);
                if (irq > 0) {
                        pin = i;
                        break;
@@ -1287,23 +1291,22 @@ static int adis16480_config_irq_pin(struct device_node *of_node,
        } else if (irq_type == IRQ_TYPE_EDGE_FALLING) {
                val |= ADIS16480_DRDY_POL(0);
        } else {
-               dev_err(&st->adis.spi->dev,
-                       "Invalid interrupt type 0x%x specified\n", irq_type);
+               dev_err(dev, "Invalid interrupt type 0x%x specified\n", irq_type);
                return -EINVAL;
        }
        /* Write the data ready configuration to the FNCTIO_CTRL register */
        return adis_write_reg_16(&st->adis, ADIS16480_REG_FNCTIO_CTRL, val);
 }
 
-static int adis16480_of_get_ext_clk_pin(struct adis16480 *st,
-                                       struct device_node *of_node)
+static int adis16480_fw_get_ext_clk_pin(struct adis16480 *st)
 {
+       struct device *dev = &st->adis.spi->dev;
        const char *ext_clk_pin;
        enum adis16480_int_pin pin;
        int i;
 
        pin = ADIS16480_PIN_DIO2;
-       if (of_property_read_string(of_node, "adi,ext-clk-pin", &ext_clk_pin))
+       if (device_property_read_string(dev, "adi,ext-clk-pin", &ext_clk_pin))
                goto clk_input_not_found;
 
        for (i = 0; i < ARRAY_SIZE(adis16480_int_pin_names); i++) {
@@ -1312,15 +1315,13 @@ static int adis16480_of_get_ext_clk_pin(struct adis16480 *st,
        }
 
 clk_input_not_found:
-       dev_info(&st->adis.spi->dev,
-               "clk input line not specified, using DIO2\n");
+       dev_info(dev, "clk input line not specified, using DIO2\n");
        return pin;
 }
 
-static int adis16480_ext_clk_config(struct adis16480 *st,
-                                   struct device_node *of_node,
-                                   bool enable)
+static int adis16480_ext_clk_config(struct adis16480 *st, bool enable)
 {
+       struct device *dev = &st->adis.spi->dev;
        unsigned int mode, mask;
        enum adis16480_int_pin pin;
        uint16_t val;
@@ -1330,16 +1331,14 @@ static int adis16480_ext_clk_config(struct adis16480 *st,
        if (ret)
                return ret;
 
-       pin = adis16480_of_get_ext_clk_pin(st, of_node);
+       pin = adis16480_fw_get_ext_clk_pin(st);
        /*
         * Each DIOx pin supports only one function at a time. When a single pin
         * has two assignments, the enable bit for a lower priority function
         * automatically resets to zero (disabling the lower priority function).
         */
        if (pin == ADIS16480_DRDY_SEL(val))
-               dev_warn(&st->adis.spi->dev,
-                       "DIO%x pin supports only one function at a time\n",
-                       pin + 1);
+               dev_warn(dev, "DIO%x pin supports only one function at a time\n", pin + 1);
 
        mode = ADIS16480_SYNC_EN(enable) | ADIS16480_SYNC_SEL(pin);
        mask = ADIS16480_SYNC_EN_MSK | ADIS16480_SYNC_SEL_MSK;
@@ -1361,31 +1360,27 @@ static int adis16480_ext_clk_config(struct adis16480 *st,
 
 static int adis16480_get_ext_clocks(struct adis16480 *st)
 {
-       st->clk_mode = ADIS16480_CLK_INT;
-       st->ext_clk = devm_clk_get(&st->adis.spi->dev, "sync");
-       if (!IS_ERR_OR_NULL(st->ext_clk)) {
+       struct device *dev = &st->adis.spi->dev;
+
+       st->ext_clk = devm_clk_get_optional(dev, "sync");
+       if (IS_ERR(st->ext_clk))
+               return dev_err_probe(dev, PTR_ERR(st->ext_clk), "failed to get ext clk\n");
+       if (st->ext_clk) {
                st->clk_mode = ADIS16480_CLK_SYNC;
                return 0;
        }
 
-       if (PTR_ERR(st->ext_clk) != -ENOENT) {
-               dev_err(&st->adis.spi->dev, "failed to get ext clk\n");
-               return PTR_ERR(st->ext_clk);
-       }
-
        if (st->chip_info->has_pps_clk_mode) {
-               st->ext_clk = devm_clk_get(&st->adis.spi->dev, "pps");
-               if (!IS_ERR_OR_NULL(st->ext_clk)) {
+               st->ext_clk = devm_clk_get_optional(dev, "pps");
+               if (IS_ERR(st->ext_clk))
+                       return dev_err_probe(dev, PTR_ERR(st->ext_clk), "failed to get ext clk\n");
+               if (st->ext_clk) {
                        st->clk_mode = ADIS16480_CLK_PPS;
                        return 0;
                }
-
-               if (PTR_ERR(st->ext_clk) != -ENOENT) {
-                       dev_err(&st->adis.spi->dev, "failed to get ext clk\n");
-                       return PTR_ERR(st->ext_clk);
-               }
        }
 
+       st->clk_mode = ADIS16480_CLK_INT;
        return 0;
 }
 
@@ -1404,11 +1399,12 @@ static int adis16480_probe(struct spi_device *spi)
        const struct spi_device_id *id = spi_get_device_id(spi);
        const struct adis_data *adis16480_data;
        irq_handler_t trigger_handler = NULL;
+       struct device *dev = &spi->dev;
        struct iio_dev *indio_dev;
        struct adis16480 *st;
        int ret;
 
-       indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+       indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
        if (indio_dev == NULL)
                return -ENOMEM;
 
@@ -1432,13 +1428,12 @@ static int adis16480_probe(struct spi_device *spi)
                return ret;
 
        if (st->chip_info->has_sleep_cnt) {
-               ret = devm_add_action_or_reset(&spi->dev, adis16480_stop,
-                                              indio_dev);
+               ret = devm_add_action_or_reset(dev, adis16480_stop, indio_dev);
                if (ret)
                        return ret;
        }
 
-       ret = adis16480_config_irq_pin(spi->dev.of_node, st);
+       ret = adis16480_config_irq_pin(st);
        if (ret)
                return ret;
 
@@ -1446,12 +1441,12 @@ static int adis16480_probe(struct spi_device *spi)
        if (ret)
                return ret;
 
-       if (!IS_ERR_OR_NULL(st->ext_clk)) {
-               ret = adis16480_ext_clk_config(st, spi->dev.of_node, true);
+       if (st->ext_clk) {
+               ret = adis16480_ext_clk_config(st, true);
                if (ret)
                        return ret;
 
-               ret = devm_add_action_or_reset(&spi->dev, adis16480_clk_disable, st->ext_clk);
+               ret = devm_add_action_or_reset(dev, adis16480_clk_disable, st->ext_clk);
                if (ret)
                        return ret;
 
@@ -1484,7 +1479,7 @@ static int adis16480_probe(struct spi_device *spi)
        if (ret)
                return ret;
 
-       ret = devm_iio_device_register(&spi->dev, indio_dev);
+       ret = devm_iio_device_register(dev, indio_dev);
        if (ret)
                return ret;
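
adis16480_get_ext_clocks() switches to devm_clk_get_optional(), which returns NULL rather than an error when the clock is simply not described, so only real failures take the dev_err_probe() path. Not part of the patch: a minimal sketch of that behaviour, with a made-up function name.

/*
 * Illustrative only, not part of the patch. A NULL return means "no such
 * clock in the firmware description", which is not an error here.
 */
#include <linux/clk.h>
#include <linux/device.h>

static int example_get_sync_clk(struct device *dev, struct clk **out)
{
	struct clk *clk;

	clk = devm_clk_get_optional(dev, "sync");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "failed to get ext clk\n");

	*out = clk;	/* NULL simply means: fall back to internal clocking */

	return 0;
}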
 
index 0133610..e7aec56 100644 (file)
  */
 #include <linux/module.h>
 #include <linux/regmap.h>
-#include <linux/acpi.h>
 #include <linux/delay.h>
 #include <linux/irq.h>
-#include <linux/of_irq.h>
+#include <linux/property.h>
 #include <linux/regulator/consumer.h>
 
 #include <linux/iio/iio.h>
@@ -525,17 +524,6 @@ static const struct iio_info bmi160_info = {
        .attrs = &bmi160_attrs_group,
 };
 
-static const char *bmi160_match_acpi_device(struct device *dev)
-{
-       const struct acpi_device_id *id;
-
-       id = acpi_match_device(dev->driver->acpi_match_table, dev);
-       if (!id)
-               return NULL;
-
-       return dev_name(dev);
-}
-
 static int bmi160_write_conf_reg(struct regmap *regmap, unsigned int reg,
                                 unsigned int mask, unsigned int bits,
                                 unsigned int write_usleep)
@@ -647,18 +635,18 @@ int bmi160_enable_irq(struct regmap *regmap, bool enable)
 }
 EXPORT_SYMBOL(bmi160_enable_irq);
 
-static int bmi160_get_irq(struct device_node *of_node, enum bmi160_int_pin *pin)
+static int bmi160_get_irq(struct fwnode_handle *fwnode, enum bmi160_int_pin *pin)
 {
        int irq;
 
        /* Use INT1 if possible, otherwise fall back to INT2. */
-       irq = of_irq_get_byname(of_node, "INT1");
+       irq = fwnode_irq_get_byname(fwnode, "INT1");
        if (irq > 0) {
                *pin = BMI160_PIN_INT1;
                return irq;
        }
 
-       irq = of_irq_get_byname(of_node, "INT2");
+       irq = fwnode_irq_get_byname(fwnode, "INT2");
        if (irq > 0)
                *pin = BMI160_PIN_INT2;
 
@@ -688,7 +676,7 @@ static int bmi160_config_device_irq(struct iio_dev *indio_dev, int irq_type,
                return -EINVAL;
        }
 
-       open_drain = of_property_read_bool(dev->of_node, "drive-open-drain");
+       open_drain = device_property_read_bool(dev, "drive-open-drain");
 
        return bmi160_config_pin(data->regmap, pin, open_drain, irq_mask,
                                 BMI160_NORMAL_WRITE_USLEEP);
@@ -872,9 +860,6 @@ int bmi160_core_probe(struct device *dev, struct regmap *regmap,
        if (ret)
                return ret;
 
-       if (!name && ACPI_HANDLE(dev))
-               name = bmi160_match_acpi_device(dev);
-
        indio_dev->channels = bmi160_channels;
        indio_dev->num_channels = ARRAY_SIZE(bmi160_channels);
        indio_dev->name = name;
@@ -887,7 +872,7 @@ int bmi160_core_probe(struct device *dev, struct regmap *regmap,
        if (ret)
                return ret;
 
-       irq = bmi160_get_irq(dev->of_node, &int_pin);
+       irq = bmi160_get_irq(dev_fwnode(dev), &int_pin);
        if (irq > 0) {
                ret = bmi160_setup_irq(indio_dev, irq, int_pin);
                if (ret)
index 2639861..02f149d 100644 (file)
@@ -8,10 +8,9 @@
  *      - 0x68 if SDO is pulled to GND
  *      - 0x69 if SDO is pulled to VDDIO
  */
-#include <linux/acpi.h>
 #include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of.h>
 #include <linux/regmap.h>
 
 #include "bmi160.h"
@@ -20,7 +19,7 @@ static int bmi160_i2c_probe(struct i2c_client *client,
                            const struct i2c_device_id *id)
 {
        struct regmap *regmap;
-       const char *name = NULL;
+       const char *name;
 
        regmap = devm_regmap_init_i2c(client, &bmi160_regmap_config);
        if (IS_ERR(regmap)) {
@@ -31,6 +30,8 @@ static int bmi160_i2c_probe(struct i2c_client *client,
 
        if (id)
                name = id->name;
+       else
+               name = dev_name(&client->dev);
 
        return bmi160_core_probe(&client->dev, regmap, name, false);
 }
@@ -47,19 +48,17 @@ static const struct acpi_device_id bmi160_acpi_match[] = {
 };
 MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
 
-#ifdef CONFIG_OF
 static const struct of_device_id bmi160_of_match[] = {
        { .compatible = "bosch,bmi160" },
        { },
 };
 MODULE_DEVICE_TABLE(of, bmi160_of_match);
-#endif
 
 static struct i2c_driver bmi160_i2c_driver = {
        .driver = {
                .name                   = "bmi160_i2c",
-               .acpi_match_table       = ACPI_PTR(bmi160_acpi_match),
-               .of_match_table         = of_match_ptr(bmi160_of_match),
+               .acpi_match_table       = bmi160_acpi_match,
+               .of_match_table         = bmi160_of_match,
        },
        .probe          = bmi160_i2c_probe,
        .id_table       = bmi160_i2c_id,
index 61389b4..24f7d75 100644 (file)
@@ -5,9 +5,8 @@
  * Copyright (c) 2016, Intel Corporation.
  *
  */
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of.h>
 #include <linux/regmap.h>
 #include <linux/spi/spi.h>
 
@@ -17,6 +16,7 @@ static int bmi160_spi_probe(struct spi_device *spi)
 {
        struct regmap *regmap;
        const struct spi_device_id *id = spi_get_device_id(spi);
+       const char *name;
 
        regmap = devm_regmap_init_spi(spi, &bmi160_regmap_config);
        if (IS_ERR(regmap)) {
@@ -24,7 +24,13 @@ static int bmi160_spi_probe(struct spi_device *spi)
                        regmap);
                return PTR_ERR(regmap);
        }
-       return bmi160_core_probe(&spi->dev, regmap, id->name, true);
+
+       if (id)
+               name = id->name;
+       else
+               name = dev_name(&spi->dev);
+
+       return bmi160_core_probe(&spi->dev, regmap, name, true);
 }
 
 static const struct spi_device_id bmi160_spi_id[] = {
@@ -39,20 +45,18 @@ static const struct acpi_device_id bmi160_acpi_match[] = {
 };
 MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
 
-#ifdef CONFIG_OF
 static const struct of_device_id bmi160_of_match[] = {
        { .compatible = "bosch,bmi160" },
        { },
 };
 MODULE_DEVICE_TABLE(of, bmi160_of_match);
-#endif
 
 static struct spi_driver bmi160_spi_driver = {
        .probe          = bmi160_spi_probe,
        .id_table       = bmi160_spi_id,
        .driver = {
-               .acpi_match_table       = ACPI_PTR(bmi160_acpi_match),
-               .of_match_table         = of_match_ptr(bmi160_of_match),
+               .acpi_match_table       = bmi160_acpi_match,
+               .of_match_table         = bmi160_of_match,
                .name                   = "bmi160_spi",
        },
 };
index 383cc32..c3f433a 100644 (file)
@@ -731,7 +731,6 @@ struct iio_dev *inv_icm42600_accel_init(struct inv_icm42600_state *st)
        indio_dev->available_scan_masks = inv_icm42600_accel_scan_masks;
 
        ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
-                                         INDIO_BUFFER_SOFTWARE,
                                          &inv_icm42600_buffer_ops);
        if (ret)
                return ERR_PTR(ret);
index cec1dd0..9d94a85 100644 (file)
@@ -743,7 +743,6 @@ struct iio_dev *inv_icm42600_gyro_init(struct inv_icm42600_state *st)
        indio_dev->setup_ops = &inv_icm42600_buffer_ops;
 
        ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
-                                         INDIO_BUFFER_SOFTWARE,
                                          &inv_icm42600_buffer_ops);
        if (ret)
                return ERR_PTR(ret);
index 9c62551..3636b1b 100644 (file)
@@ -16,7 +16,7 @@ config INV_MPU6050_I2C
        select REGMAP_I2C
        help
          This driver supports the Invensense MPU6050/9150,
-         MPU6500/6515/6880/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690
+         MPU6500/6515/6880/9250/9255, ICM20608(D)/20609/20689, ICM20602/ICM20690
          and IAM20680 motion tracking devices over I2C.
          This driver can be built as a module. The module will be called
          inv-mpu6050-i2c.
@@ -28,7 +28,7 @@ config INV_MPU6050_SPI
        select REGMAP_SPI
        help
          This driver supports the Invensense MPU6000,
-         MPU6500/6515/6880/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690
+         MPU6500/6515/6880/9250/9255, ICM20608(D)/20609/20689, ICM20602/ICM20690
          and IAM20680 motion tracking devices over SPI.
          This driver can be built as a module. The module will be called
          inv-mpu6050-spi.
index 597768c..86fbbe9 100644 (file)
@@ -218,6 +218,15 @@ static const struct inv_mpu6050_hw hw_info[] = {
                .startup_time = {INV_MPU6500_GYRO_STARTUP_TIME, INV_MPU6500_ACCEL_STARTUP_TIME},
        },
        {
+               .whoami = INV_ICM20608D_WHOAMI_VALUE,
+               .name = "ICM20608D",
+               .reg = &reg_set_6500,
+               .config = &chip_config_6500,
+               .fifo_size = 512,
+               .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
+               .startup_time = {INV_MPU6500_GYRO_STARTUP_TIME, INV_MPU6500_ACCEL_STARTUP_TIME},
+       },
+       {
                .whoami = INV_ICM20609_WHOAMI_VALUE,
                .name = "ICM20609",
                .reg = &reg_set_6500,
index 55cffb5..2aa6477 100644 (file)
@@ -29,6 +29,7 @@ static bool inv_mpu_i2c_aux_bus(struct device *dev)
 
        switch (st->chip_type) {
        case INV_ICM20608:
+       case INV_ICM20608D:
        case INV_ICM20609:
        case INV_ICM20689:
        case INV_ICM20602:
@@ -182,6 +183,7 @@ static const struct i2c_device_id inv_mpu_id[] = {
        {"mpu9250", INV_MPU9250},
        {"mpu9255", INV_MPU9255},
        {"icm20608", INV_ICM20608},
+       {"icm20608d", INV_ICM20608D},
        {"icm20609", INV_ICM20609},
        {"icm20689", INV_ICM20689},
        {"icm20602", INV_ICM20602},
@@ -226,6 +228,10 @@ static const struct of_device_id inv_of_match[] = {
                .data = (void *)INV_ICM20608
        },
        {
+               .compatible = "invensense,icm20608d",
+               .data = (void *)INV_ICM20608D
+       },
+       {
                .compatible = "invensense,icm20609",
                .data = (void *)INV_ICM20609
        },
index c6aa36e..8e14f20 100644 (file)
@@ -76,6 +76,7 @@ enum inv_devices {
        INV_MPU9250,
        INV_MPU9255,
        INV_ICM20608,
+       INV_ICM20608D,
        INV_ICM20609,
        INV_ICM20689,
        INV_ICM20602,
@@ -394,6 +395,7 @@ struct inv_mpu6050_state {
 #define INV_MPU9255_WHOAMI_VALUE               0x73
 #define INV_MPU6515_WHOAMI_VALUE               0x74
 #define INV_ICM20608_WHOAMI_VALUE              0xAF
+#define INV_ICM20608D_WHOAMI_VALUE             0xAE
 #define INV_ICM20609_WHOAMI_VALUE              0xA6
 #define INV_ICM20689_WHOAMI_VALUE              0x98
 #define INV_ICM20602_WHOAMI_VALUE              0x12
index 26a7c25..e6107b0 100644 (file)
@@ -73,6 +73,7 @@ static const struct spi_device_id inv_mpu_id[] = {
        {"mpu9250", INV_MPU9250},
        {"mpu9255", INV_MPU9255},
        {"icm20608", INV_ICM20608},
+       {"icm20608d", INV_ICM20608D},
        {"icm20609", INV_ICM20609},
        {"icm20689", INV_ICM20689},
        {"icm20602", INV_ICM20602},
@@ -113,6 +114,10 @@ static const struct of_device_id inv_of_match[] = {
                .data = (void *)INV_ICM20608
        },
        {
+               .compatible = "invensense,icm20608d",
+               .data = (void *)INV_ICM20608D
+       },
+       {
                .compatible = "invensense,icm20609",
                .data = (void *)INV_ICM20609
        },
index 8586021..fefd0b9 100644 (file)
@@ -11,9 +11,9 @@ config IIO_ST_LSM6DSX
        help
          Say yes here to build support for STMicroelectronics LSM6DSx imu
          sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm,
-         ism330dlc, lsm6dso, lsm6dsox, asm330lhh, lsm6dsr, lsm6ds3tr-c,
-         ism330dhcx, lsm6dsrx, lsm6ds0, lsm6dsop, the accelerometer/gyroscope
-         of lsm9ds1 and lsm6dst.
+         ism330dlc, lsm6dso, lsm6dsox, asm330lhh, asm330lhhx, lsm6dsr,
+         lsm6ds3tr-c, ism330dhcx, lsm6dsrx, lsm6ds0, lsm6dsop,
+         the accelerometer/gyroscope of lsm9ds1 and lsm6dst.
 
          To compile this driver as a module, choose M here: the module
          will be called st_lsm6dsx.
index 6ac4eac..a86dd29 100644 (file)
@@ -31,6 +31,7 @@
 #define ST_LSM6DSRX_DEV_NAME   "lsm6dsrx"
 #define ST_LSM6DST_DEV_NAME    "lsm6dst"
 #define ST_LSM6DSOP_DEV_NAME   "lsm6dsop"
+#define ST_ASM330LHHX_DEV_NAME "asm330lhhx"
 
 enum st_lsm6dsx_hw_id {
        ST_LSM6DS3_ID,
@@ -49,6 +50,7 @@ enum st_lsm6dsx_hw_id {
        ST_LSM6DSRX_ID,
        ST_LSM6DST_ID,
        ST_LSM6DSOP_ID,
+       ST_ASM330LHHX_ID,
        ST_LSM6DSX_MAX_ID,
 };
 
index 16730a7..c7d3730 100644 (file)
@@ -14,7 +14,8 @@
  * (e.g. Gx, Gy, Gz, Ax, Ay, Az), then data are repeated depending on the
  * value of the decimation factor and ODR set for each FIFO data set.
  *
- * LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/LSM6DSRX/ISM330DHCX/LSM6DST/LSM6DSOP:
+ * LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/LSM6DSRX/ISM330DHCX/
+ * LSM6DST/LSM6DSOP:
  * The FIFO buffer can be configured to store data from gyroscope and
  * accelerometer. Each sample is queued with a tag (1B) indicating data
  * source (gyroscope, accelerometer, hw timer).
@@ -746,7 +747,6 @@ int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw)
                        continue;
 
                ret = devm_iio_kfifo_buffer_setup(hw->dev, hw->iio_devs[i],
-                                                 INDIO_BUFFER_SOFTWARE,
                                                  &st_lsm6dsx_buffer_ops);
                if (ret)
                        return ret;
index b1d8d5a..9103977 100644 (file)
@@ -26,7 +26,7 @@
  *   - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
  *   - FIFO size: 4KB
  *
- * - LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP:
+ * - LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP:
  *   - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416,
  *     833
  *   - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
@@ -786,6 +786,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .hw_id = ST_LSM6DST_ID,
                                .name = ST_LSM6DST_DEV_NAME,
                                .wai = 0x6d,
+                       }, {
+                               .hw_id = ST_ASM330LHHX_ID,
+                               .name = ST_ASM330LHHX_DEV_NAME,
+                               .wai = 0x6b,
                        },
                },
                .channels = {
index 8b4fc2c..715fbdc 100644 (file)
@@ -101,6 +101,10 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
                .compatible = "st,lsm6dsop",
                .data = (void *)ST_LSM6DSOP_ID,
        },
+       {
+               .compatible = "st,asm330lhhx",
+               .data = (void *)ST_ASM330LHHX_ID,
+       },
        {},
 };
 MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match);
@@ -122,6 +126,7 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = {
        { ST_LSM6DSRX_DEV_NAME, ST_LSM6DSRX_ID },
        { ST_LSM6DST_DEV_NAME, ST_LSM6DST_ID },
        { ST_LSM6DSOP_DEV_NAME, ST_LSM6DSOP_ID },
+       { ST_ASM330LHHX_DEV_NAME, ST_ASM330LHHX_ID },
        {},
 };
 MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
index e80110b..f5767cf 100644 (file)
@@ -101,6 +101,10 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
                .compatible = "st,lsm6dsop",
                .data = (void *)ST_LSM6DSOP_ID,
        },
+       {
+               .compatible = "st,asm330lhhx",
+               .data = (void *)ST_ASM330LHHX_ID,
+       },
        {},
 };
 MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match);
@@ -122,6 +126,7 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = {
        { ST_LSM6DSRX_DEV_NAME, ST_LSM6DSRX_ID },
        { ST_LSM6DST_DEV_NAME, ST_LSM6DST_ID },
        { ST_LSM6DSOP_DEV_NAME, ST_LSM6DSOP_ID },
+       { ST_ASM330LHHX_DEV_NAME, ST_ASM330LHHX_ID },
        {},
 };
 MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
index b078eb2..06141ca 100644 (file)
@@ -510,7 +510,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        struct iio_buffer *buffer = this_attr->buffer;
 
-       ret = strtobool(buf, &state);
+       ret = kstrtobool(buf, &state);
        if (ret < 0)
                return ret;
        mutex_lock(&indio_dev->mlock);
@@ -557,7 +557,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
        struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
        bool state;
 
-       ret = strtobool(buf, &state);
+       ret = kstrtobool(buf, &state);
        if (ret < 0)
                return ret;
 
@@ -915,7 +915,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
                if (scan_mask == NULL)
                        return -EINVAL;
        } else {
-           scan_mask = compound_mask;
+               scan_mask = compound_mask;
        }
 
        config->scan_bytes = iio_compute_scan_bytes(indio_dev,
@@ -1059,13 +1059,13 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
        struct iio_device_config *config)
 {
        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
-       struct iio_buffer *buffer;
+       struct iio_buffer *buffer, *tmp = NULL;
        int ret;
 
        indio_dev->active_scan_mask = config->scan_mask;
        indio_dev->scan_timestamp = config->scan_timestamp;
        indio_dev->scan_bytes = config->scan_bytes;
-       indio_dev->currentmode = config->mode;
+       iio_dev_opaque->currentmode = config->mode;
 
        iio_update_demux(indio_dev);
 
@@ -1097,11 +1097,13 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
 
        list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
                ret = iio_buffer_enable(buffer, indio_dev);
-               if (ret)
+               if (ret) {
+                       tmp = buffer;
                        goto err_disable_buffers;
+               }
        }
 
-       if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+       if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
                ret = iio_trigger_attach_poll_func(indio_dev->trig,
                                                   indio_dev->pollfunc);
                if (ret)
@@ -1120,11 +1122,12 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
        return 0;
 
 err_detach_pollfunc:
-       if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+       if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
                iio_trigger_detach_poll_func(indio_dev->trig,
                                             indio_dev->pollfunc);
        }
 err_disable_buffers:
+       buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list);
        list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
                                             buffer_list)
                iio_buffer_disable(buffer, indio_dev);
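
The enable path above records the buffer that failed (tmp) and uses list_prepare_entry() plus list_for_each_entry_continue_reverse() to disable only the buffers that were actually enabled; when the jump comes from a later failure, tmp stays NULL and the reverse walk covers the whole list. Not part of the patch: the same idiom on a made-up list.

/*
 * Illustrative only, not part of the patch. Remember which entry failed,
 * then walk backwards from the entry just before it.
 */
#include <linux/list.h>

struct example_item {
	struct list_head node;
	bool enabled;
};

static int example_enable(struct example_item *item)
{
	item->enabled = true;	/* stand-in for real enable work */
	return 0;
}

static void example_disable(struct example_item *item)
{
	item->enabled = false;
}

static int example_enable_all(struct list_head *items)
{
	struct example_item *item, *failed = NULL;
	int ret;

	list_for_each_entry(item, items, node) {
		ret = example_enable(item);
		if (ret) {
			failed = item;
			goto err_unwind;
		}
	}

	return 0;

err_unwind:
	/* Start just before the entry that failed; it was never enabled. */
	item = list_prepare_entry(failed, items, node);
	list_for_each_entry_continue_reverse(item, items, node)
		example_disable(item);

	return ret;
}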
@@ -1132,7 +1135,7 @@ err_run_postdisable:
        if (indio_dev->setup_ops->postdisable)
                indio_dev->setup_ops->postdisable(indio_dev);
 err_undo_config:
-       indio_dev->currentmode = INDIO_DIRECT_MODE;
+       iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
        indio_dev->active_scan_mask = NULL;
 
        return ret;
@@ -1162,7 +1165,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
                        ret = ret2;
        }
 
-       if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+       if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
                iio_trigger_detach_poll_func(indio_dev->trig,
                                             indio_dev->pollfunc);
        }
@@ -1181,7 +1184,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
 
        iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
        indio_dev->active_scan_mask = NULL;
-       indio_dev->currentmode = INDIO_DIRECT_MODE;
+       iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
 
        return ret;
 }
@@ -1300,7 +1303,7 @@ static ssize_t iio_buffer_store_enable(struct device *dev,
        struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
        bool inlist;
 
-       ret = strtobool(buf, &requested_state);
+       ret = kstrtobool(buf, &requested_state);
        if (ret < 0)
                return ret;
 
@@ -1629,6 +1632,19 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
                        if (channels[i].scan_index < 0)
                                continue;
 
+                       /* Verify that sample bits fit into storage */
+                       if (channels[i].scan_type.storagebits <
+                           channels[i].scan_type.realbits +
+                           channels[i].scan_type.shift) {
+                               dev_err(&indio_dev->dev,
+                                       "Channel %d storagebits (%d) < shifted realbits (%d + %d)\n",
+                                       i, channels[i].scan_type.storagebits,
+                                       channels[i].scan_type.realbits,
+                                       channels[i].scan_type.shift);
+                               ret = -EINVAL;
+                               goto error_cleanup_dynamic;
+                       }
+
                        ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
                                                         &channels[i]);
                        if (ret < 0)
@@ -1649,7 +1665,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
        }
 
        attrn = buffer_attrcount + scan_el_attrcount + ARRAY_SIZE(iio_buffer_attrs);
-       attr = kcalloc(attrn + 1, sizeof(* attr), GFP_KERNEL);
+       attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
        if (!attr) {
                ret = -ENOMEM;
                goto error_free_scan_mask;
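
The new check above refuses channels whose realbits plus shift do not fit in storagebits. Not part of the patch: a made-up scan_type that satisfies it (12 significant bits, shifted by 4, stored in a 16-bit word).

/*
 * Illustrative only, not part of the patch: 12 + 4 <= 16, so this channel
 * passes the sanity check added above.
 */
#include <linux/iio/iio.h>

static const struct iio_chan_spec example_channel = {
	.type = IIO_VOLTAGE,
	.indexed = 1,
	.channel = 0,
	.scan_index = 0,
	.scan_type = {
		.sign = 'u',
		.realbits = 12,
		.storagebits = 16,
		.shift = 4,
		.endianness = IIO_BE,
	},
};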
index e1ed44d..adf054c 100644 (file)
@@ -185,6 +185,20 @@ int iio_device_id(struct iio_dev *indio_dev)
 EXPORT_SYMBOL_GPL(iio_device_id);
 
 /**
+ * iio_buffer_enabled() - helper function to test if the buffer is enabled
+ * @indio_dev:         IIO device structure for device
+ */
+bool iio_buffer_enabled(struct iio_dev *indio_dev)
+{
+       struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
+       return iio_dev_opaque->currentmode
+               & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
+                  INDIO_BUFFER_SOFTWARE);
+}
+EXPORT_SYMBOL_GPL(iio_buffer_enabled);
+
+/**
  * iio_sysfs_match_string_with_gaps - matches given string in an array with gaps
  * @array: array of strings
  * @n: number of strings in the array
@@ -892,8 +906,7 @@ static int __iio_str_to_fixpoint(const char *str, int fract_mult,
                } else if (*str == '\n') {
                        if (*(str + 1) == '\0')
                                break;
-                       else
-                               return -EINVAL;
+                       return -EINVAL;
                } else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) {
                        /* Ignore the dB suffix */
                        str += sizeof(" dB") - 1;
@@ -1894,20 +1907,22 @@ static const struct iio_buffer_setup_ops noop_ring_setup_ops;
 int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
 {
        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
-       const char *label;
+       struct fwnode_handle *fwnode;
        int ret;
 
        if (!indio_dev->info)
                return -EINVAL;
 
        iio_dev_opaque->driver_module = this_mod;
-       /* If the calling driver did not initialize of_node, do it here */
-       if (!indio_dev->dev.of_node && indio_dev->dev.parent)
-               indio_dev->dev.of_node = indio_dev->dev.parent->of_node;
 
-       label = of_get_property(indio_dev->dev.of_node, "label", NULL);
-       if (label)
-               indio_dev->label = label;
+       /* If the calling driver did not initialize firmware node, do it here */
+       if (dev_fwnode(&indio_dev->dev))
+               fwnode = dev_fwnode(&indio_dev->dev);
+       else
+               fwnode = dev_fwnode(indio_dev->dev.parent);
+       device_set_node(&indio_dev->dev, fwnode);
+
+       fwnode_property_read_string(fwnode, "label", &indio_dev->label);
 
        ret = iio_check_unique_scan_index(indio_dev);
        if (ret < 0)
@@ -2059,6 +2074,19 @@ void iio_device_release_direct_mode(struct iio_dev *indio_dev)
 }
 EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);
 
+/**
+ * iio_device_get_current_mode() - helper function providing read-only access to
+ *                                the opaque @currentmode variable
+ * @indio_dev:                    IIO device structure for device
+ */
+int iio_device_get_current_mode(struct iio_dev *indio_dev)
+{
+       struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
+       return iio_dev_opaque->currentmode;
+}
+EXPORT_SYMBOL_GPL(iio_device_get_current_mode);
+
 subsys_initcall(iio_init);
 module_exit(iio_exit);
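
With currentmode moved into struct iio_dev_opaque, drivers test buffering through the exported iio_buffer_enabled() (or iio_device_get_current_mode()) instead of reading indio_dev->currentmode. Not part of the patch: a sketch of an interrupt handler using the helper, with made-up names.

/*
 * Illustrative only, not part of the patch. The buffered/direct decision
 * goes through the helper rather than the now-opaque currentmode field.
 */
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/interrupt.h>

static irqreturn_t example_drdy_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;

	if (iio_buffer_enabled(indio_dev))
		iio_trigger_poll(indio_dev->trig);	/* buffered path */

	return IRQ_HANDLED;
}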
 
index ce8b102..b5e059e 100644 (file)
@@ -274,7 +274,7 @@ static ssize_t iio_ev_state_store(struct device *dev,
        int ret;
        bool val;
 
-       ret = strtobool(buf, &val);
+       ret = kstrtobool(buf, &val);
        if (ret < 0)
                return ret;
 
index f504ed3..585b6ce 100644 (file)
@@ -444,7 +444,7 @@ static ssize_t iio_trigger_write_current(struct device *dev,
        int ret;
 
        mutex_lock(&indio_dev->mlock);
-       if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+       if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
                mutex_unlock(&indio_dev->mlock);
                return -EBUSY;
        }
index a62c7b4..8537e88 100644 (file)
@@ -155,7 +155,6 @@ config CM3323
 
 config CM3605
        tristate "Capella CM3605 ambient light and proximity sensor"
-       depends on OF
        help
          Say Y here if you want to build a driver for Capella CM3605
          ambient light and short range proximity sensor.
index 4141c0f..09b831f 100644 (file)
@@ -1003,7 +1003,6 @@ static int apds9960_probe(struct i2c_client *client,
        indio_dev->modes = INDIO_DIRECT_MODE;
 
        ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
-                                         INDIO_BUFFER_SOFTWARE,
                                          &apds9960_buffer_setup_ops);
        if (ret)
                return ret;
index 1d02dfb..b578b46 100644 (file)
@@ -106,6 +106,7 @@ struct stk3310_data {
        struct mutex lock;
        bool als_enabled;
        bool ps_enabled;
+       uint32_t ps_near_level;
        u64 timestamp;
        struct regmap *regmap;
        struct regmap_field *reg_state;
@@ -135,6 +136,25 @@ static const struct iio_event_spec stk3310_events[] = {
        },
 };
 
+static ssize_t stk3310_read_near_level(struct iio_dev *indio_dev,
+                                      uintptr_t priv,
+                                      const struct iio_chan_spec *chan,
+                                      char *buf)
+{
+       struct stk3310_data *data = iio_priv(indio_dev);
+
+       return sprintf(buf, "%u\n", data->ps_near_level);
+}
+
+static const struct iio_chan_spec_ext_info stk3310_ext_info[] = {
+       {
+               .name = "nearlevel",
+               .shared = IIO_SEPARATE,
+               .read = stk3310_read_near_level,
+       },
+       { /* sentinel */ }
+};
+
 static const struct iio_chan_spec stk3310_channels[] = {
        {
                .type = IIO_LIGHT,
@@ -151,6 +171,7 @@ static const struct iio_chan_spec stk3310_channels[] = {
                        BIT(IIO_CHAN_INFO_INT_TIME),
                .event_spec = stk3310_events,
                .num_event_specs = ARRAY_SIZE(stk3310_events),
+               .ext_info = stk3310_ext_info,
        }
 };
 
@@ -581,6 +602,10 @@ static int stk3310_probe(struct i2c_client *client,
        data = iio_priv(indio_dev);
        data->client = client;
        i2c_set_clientdata(client, indio_dev);
+
+       device_property_read_u32(&client->dev, "proximity-near-level",
+                                &data->ps_near_level);
+
        mutex_init(&data->lock);
 
        ret = stk3310_regmap_init(data);
index 729f14d..dd9051f 100644 (file)
@@ -15,7 +15,9 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/property.h>
 #include <linux/slab.h>
+
 #include <linux/iio/events.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
@@ -549,10 +551,10 @@ prox_poll_err:
 
 static int tsl2772_read_prox_led_current(struct tsl2772_chip *chip)
 {
-       struct device_node *of_node = chip->client->dev.of_node;
+       struct device *dev = &chip->client->dev;
        int ret, tmp, i;
 
-       ret = of_property_read_u32(of_node, "led-max-microamp", &tmp);
+       ret = device_property_read_u32(dev, "led-max-microamp", &tmp);
        if (ret < 0)
                return ret;
 
@@ -563,20 +565,18 @@ static int tsl2772_read_prox_led_current(struct tsl2772_chip *chip)
                }
        }
 
-       dev_err(&chip->client->dev, "Invalid value %d for led-max-microamp\n",
-               tmp);
+       dev_err(dev, "Invalid value %d for led-max-microamp\n", tmp);
 
        return -EINVAL;
-
 }
 
 static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
 {
-       struct device_node *of_node = chip->client->dev.of_node;
+       struct device *dev = &chip->client->dev;
        int i, ret, num_leds, prox_diode_mask;
        u32 leds[TSL2772_MAX_PROX_LEDS];
 
-       ret = of_property_count_u32_elems(of_node, "amstaos,proximity-diodes");
+       ret = device_property_count_u32(dev, "amstaos,proximity-diodes");
        if (ret < 0)
                return ret;
 
@@ -584,12 +584,9 @@ static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
        if (num_leds > TSL2772_MAX_PROX_LEDS)
                num_leds = TSL2772_MAX_PROX_LEDS;
 
-       ret = of_property_read_u32_array(of_node, "amstaos,proximity-diodes",
-                                        leds, num_leds);
+       ret = device_property_read_u32_array(dev, "amstaos,proximity-diodes", leds, num_leds);
        if (ret < 0) {
-               dev_err(&chip->client->dev,
-                       "Invalid value for amstaos,proximity-diodes: %d.\n",
-                       ret);
+               dev_err(dev, "Invalid value for amstaos,proximity-diodes: %d.\n", ret);
                return ret;
        }
 
@@ -600,9 +597,7 @@ static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
                else if (leds[i] == 1)
                        prox_diode_mask |= TSL2772_DIODE1;
                else {
-                       dev_err(&chip->client->dev,
-                               "Invalid value %d in amstaos,proximity-diodes.\n",
-                               leds[i]);
+                       dev_err(dev, "Invalid value %d in amstaos,proximity-diodes.\n", leds[i]);
                        return -EINVAL;
                }
        }
index 5444536..07eb619 100644 (file)
@@ -9,7 +9,6 @@ menu "Magnetometer sensors"
 config AK8974
        tristate "Asahi Kasei AK8974 3-Axis Magnetometer"
        depends on I2C
-       depends on OF
        select REGMAP_I2C
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
index 2619573..707ba25 100644 (file)
@@ -141,18 +141,10 @@ static irqreturn_t rm3100_irq_handler(int irq, void *d)
        struct iio_dev *indio_dev = d;
        struct rm3100_data *data = iio_priv(indio_dev);
 
-       switch (indio_dev->currentmode) {
-       case INDIO_DIRECT_MODE:
+       if (!iio_buffer_enabled(indio_dev))
                complete(&data->measuring_done);
-               break;
-       case INDIO_BUFFER_TRIGGERED:
+       else
                iio_trigger_poll(data->drdy_trig);
-               break;
-       default:
-               dev_err(indio_dev->dev.parent,
-                       "device mode out of control, current mode: %d",
-                       indio_dev->currentmode);
-       }
 
        return IRQ_WAKE_THREAD;
 }
@@ -377,7 +369,7 @@ static int rm3100_set_samp_freq(struct iio_dev *indio_dev, int val, int val2)
                        goto unlock_return;
        }
 
-       if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+       if (iio_buffer_enabled(indio_dev)) {
                /* Writing TMRC registers requires CMM reset. */
                ret = regmap_write(regmap, RM3100_REG_CMM, 0);
                if (ret < 0)
@@ -553,7 +545,6 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
        indio_dev->channels = rm3100_channels;
        indio_dev->num_channels = ARRAY_SIZE(rm3100_channels);
        indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_TRIGGERED;
-       indio_dev->currentmode = INDIO_DIRECT_MODE;
 
        if (!irq)
                data->use_interrupt = false;
index 74435f4..e2fd233 100644 (file)
@@ -540,24 +540,17 @@ read_error:
 static int st_magn_write_raw(struct iio_dev *indio_dev,
                struct iio_chan_spec const *chan, int val, int val2, long mask)
 {
-       int err;
-
        switch (mask) {
        case IIO_CHAN_INFO_SCALE:
-               err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
-               break;
+               return st_sensors_set_fullscale_by_gain(indio_dev, val2);
        case IIO_CHAN_INFO_SAMP_FREQ:
                if (val2)
                        return -EINVAL;
-               mutex_lock(&indio_dev->mlock);
-               err = st_sensors_set_odr(indio_dev, val);
-               mutex_unlock(&indio_dev->mlock);
-               return err;
+
+               return st_sensors_set_odr(indio_dev, val);
        default:
-               err = -EINVAL;
+               return -EINVAL;
        }
-
-       return err;
 }
 
 static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
index a1e1332..928f424 100644 (file)
@@ -9,7 +9,6 @@ menu "Multiplexers"
 config IIO_MUX
        tristate "IIO multiplexer driver"
        select MULTIPLEXER
-       depends on OF || COMPILE_TEST
        help
          Say yes here to build support for the IIO multiplexer.
 
index f422d44..93558fd 100644 (file)
 #include <linux/err.h>
 #include <linux/iio/consumer.h>
 #include <linux/iio/iio.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/mux/consumer.h>
-#include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 
 struct mux_ext_info_cache {
        char *data;
@@ -324,37 +325,21 @@ static int mux_configure_channel(struct device *dev, struct mux *mux,
        return 0;
 }
 
-/*
- * Same as of_property_for_each_string(), but also keeps track of the
- * index of each string.
- */
-#define of_property_for_each_string_index(np, propname, prop, s, i)    \
-       for (prop = of_find_property(np, propname, NULL),               \
-            s = of_prop_next_string(prop, NULL),                       \
-            i = 0;                                                     \
-            s;                                                         \
-            s = of_prop_next_string(prop, s),                          \
-            i++)
-
 static int mux_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct device_node *np = pdev->dev.of_node;
        struct iio_dev *indio_dev;
        struct iio_channel *parent;
        struct mux *mux;
-       struct property *prop;
-       const char *label;
+       const char **labels;
+       int all_children;
+       int children;
        u32 state;
        int sizeof_ext_info;
-       int children;
        int sizeof_priv;
        int i;
        int ret;
 
-       if (!np)
-               return -ENODEV;
-
        parent = devm_iio_channel_get(dev, "parent");
        if (IS_ERR(parent))
                return dev_err_probe(dev, PTR_ERR(parent),
@@ -366,9 +351,21 @@ static int mux_probe(struct platform_device *pdev)
                sizeof_ext_info *= sizeof(*mux->ext_info);
        }
 
+       all_children = device_property_string_array_count(dev, "channels");
+       if (all_children < 0)
+               return all_children;
+
+       labels = devm_kmalloc_array(dev, all_children, sizeof(*labels), GFP_KERNEL);
+       if (!labels)
+               return -ENOMEM;
+
+       ret = device_property_read_string_array(dev, "channels", labels, all_children);
+       if (ret < 0)
+               return ret;
+
        children = 0;
-       of_property_for_each_string(np, "channels", prop, label) {
-               if (*label)
+       for (state = 0; state < all_children; state++) {
+               if (*labels[state])
                        children++;
        }
        if (children <= 0) {
@@ -395,7 +392,7 @@ static int mux_probe(struct platform_device *pdev)
        mux->cached_state = -1;
 
        mux->delay_us = 0;
-       of_property_read_u32(np, "settle-time-us", &mux->delay_us);
+       device_property_read_u32(dev, "settle-time-us", &mux->delay_us);
 
        indio_dev->name = dev_name(dev);
        indio_dev->info = &mux_info;
@@ -426,11 +423,11 @@ static int mux_probe(struct platform_device *pdev)
        }
 
        i = 0;
-       of_property_for_each_string_index(np, "channels", prop, label, state) {
-               if (!*label)
+       for (state = 0; state < all_children; state++) {
+               if (!*labels[state])
                        continue;
 
-               ret = mux_configure_channel(dev, mux, state, label, i++);
+               ret = mux_configure_channel(dev, mux, state, labels[state], i++);
                if (ret < 0)
                        return ret;
        }
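
The iio-mux conversion above replaces the OF string iterator with device_property_string_array_count() plus device_property_read_string_array(). A small sketch of reading such a string list, assuming a hypothetical "channels" property; the helper name and devm allocation are illustrative:

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/property.h>
    #include <linux/slab.h>

    /* Count, allocate and read a string-array property in one helper. */
    static int example_read_labels(struct device *dev, const char ***out)
    {
            const char **labels;
            int count, ret;

            count = device_property_string_array_count(dev, "channels");
            if (count < 0)
                    return count;

            labels = devm_kmalloc_array(dev, count, sizeof(*labels), GFP_KERNEL);
            if (!labels)
                    return -ENOMEM;

            ret = device_property_read_string_array(dev, "channels", labels, count);
            if (ret < 0)
                    return ret;

            *out = labels;
            return count;                   /* number of strings read */
    }
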
index 5b93933..76913a2 100644 (file)
@@ -560,16 +560,12 @@ static int st_press_write_raw(struct iio_dev *indio_dev,
                              int val2,
                              long mask)
 {
-       int err;
-
        switch (mask) {
        case IIO_CHAN_INFO_SAMP_FREQ:
                if (val2)
                        return -EINVAL;
-               mutex_lock(&indio_dev->mlock);
-               err = st_sensors_set_odr(indio_dev, val);
-               mutex_unlock(&indio_dev->mlock);
-               return err;
+
+               return st_sensors_set_odr(indio_dev, val);
        default:
                return -EINVAL;
        }
index ad4b1fb..0bca5f7 100644 (file)
  * https://www.maxbotix.com/documents/I2CXL-MaxSonar-EZ_Datasheet.pdf
  */
 
+#include <linux/bitops.h>
 #include <linux/err.h>
 #include <linux/i2c.h>
-#include <linux/of_irq.h>
 #include <linux/delay.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/bitops.h>
+#include <linux/property.h>
+
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
 #include <linux/iio/buffer.h>
@@ -209,7 +211,7 @@ static int mb1232_probe(struct i2c_client *client,
 
        init_completion(&data->ranging);
 
-       data->irqnr = irq_of_parse_and_map(dev->of_node, 0);
+       data->irqnr = fwnode_irq_get(dev_fwnode(&client->dev), 0);
        if (data->irqnr <= 0) {
                /* usage of interrupt is optional */
                data->irqnr = -1;
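
Above, irq_of_parse_and_map() gives way to fwnode_irq_get(), which resolves the interrupt from any firmware node. A sketch of the optional-interrupt lookup, with placeholder names:

    #include <linux/i2c.h>
    #include <linux/property.h>

    /* The interrupt is optional: report -1 so the caller can fall back to polling. */
    static int example_get_optional_irq(struct i2c_client *client)
    {
            int irq = fwnode_irq_get(dev_fwnode(&client->dev), 0);

            return irq > 0 ? irq : -1;
    }
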
index 24a97d4..d56e037 100644 (file)
@@ -29,9 +29,8 @@
 #include <linux/err.h>
 #include <linux/gpio/consumer.h>
 #include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/property.h>
 #include <linux/sched.h>
@@ -288,7 +287,7 @@ static int ping_probe(struct platform_device *pdev)
 
        data = iio_priv(indio_dev);
        data->dev = dev;
-       data->cfg = of_device_get_match_data(dev);
+       data->cfg = device_get_match_data(dev);
 
        mutex_init(&data->lock);
        init_completion(&data->rising);
index 661a79e..a284b20 100644 (file)
@@ -104,6 +104,7 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data,
        u16 tries = 20;
        u8 buffer[12];
        int ret;
+       unsigned long time_left;
 
        ret = i2c_smbus_write_byte_data(client, VL_REG_SYSRANGE_START, 1);
        if (ret < 0)
@@ -112,10 +113,8 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data,
        if (data->client->irq) {
                reinit_completion(&data->completion);
 
-               ret = wait_for_completion_timeout(&data->completion, HZ/10);
-               if (ret < 0)
-                       return ret;
-               else if (ret == 0)
+               time_left = wait_for_completion_timeout(&data->completion, HZ/10);
+               if (time_left == 0)
                        return -ETIMEDOUT;
 
                vl53l0x_clear_irq(data);
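
The vl53l0x fix above matters because wait_for_completion_timeout() returns an unsigned long (remaining jiffies, or 0 on timeout), so the removed "ret < 0" branch could never fire. A minimal sketch of the correct pattern, with illustrative names:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static int example_wait_for_measurement(struct completion *done)
    {
            unsigned long time_left;

            /* Returns the remaining jiffies, or 0 when the timeout expired. */
            time_left = wait_for_completion_timeout(done, HZ / 10);
            if (!time_left)
                    return -ETIMEDOUT;

            return 0;
    }
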
index 301c3f1..4fc6542 100644 (file)
 #include <linux/iio/iio.h>
 #include <linux/interrupt.h>
 #include <linux/list.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/property.h>
 #include <linux/regmap.h>
 #include <linux/spi/spi.h>
 
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
 /* register map */
 #define LTC2983_STATUS_REG                     0x0000
 #define LTC2983_TEMP_RES_START_REG             0x0010
@@ -219,7 +223,7 @@ struct ltc2983_sensor {
 
 struct ltc2983_custom_sensor {
        /* raw table sensor data */
-       u8 *table;
+       void *table;
        size_t size;
        /* address offset */
        s8 offset;
@@ -377,25 +381,25 @@ static int __ltc2983_chan_custom_sensor_assign(struct ltc2983_data *st,
        return regmap_bulk_write(st->regmap, reg, custom->table, custom->size);
 }
 
-static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
-                                               struct ltc2983_data *st,
-                                               const struct device_node *np,
-                                               const char *propname,
-                                               const bool is_steinhart,
-                                               const u32 resolution,
-                                               const bool has_signed)
+static struct ltc2983_custom_sensor *
+__ltc2983_custom_sensor_new(struct ltc2983_data *st, const struct fwnode_handle *fn,
+                           const char *propname, const bool is_steinhart,
+                           const u32 resolution, const bool has_signed)
 {
        struct ltc2983_custom_sensor *new_custom;
-       u8 index, n_entries, tbl = 0;
        struct device *dev = &st->spi->dev;
        /*
         * For custom steinhart, the full u32 is taken. For all the others
         * the MSB is discarded.
         */
        const u8 n_size = is_steinhart ? 4 : 3;
-       const u8 e_size = is_steinhart ? sizeof(u32) : sizeof(u64);
+       u8 index, n_entries;
+       int ret;
 
-       n_entries = of_property_count_elems_of_size(np, propname, e_size);
+       if (is_steinhart)
+               n_entries = fwnode_property_count_u32(fn, propname);
+       else
+               n_entries = fwnode_property_count_u64(fn, propname);
        /* n_entries must be an even number */
        if (!n_entries || (n_entries % 2) != 0) {
                dev_err(dev, "Number of entries either 0 or not even\n");
@@ -409,8 +413,8 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
        new_custom->size = n_entries * n_size;
        /* check Steinhart size */
        if (is_steinhart && new_custom->size != LTC2983_CUSTOM_STEINHART_SIZE) {
-               dev_err(dev, "Steinhart sensors size(%zu) must be 24",
-                                                       new_custom->size);
+               dev_err(dev, "Steinhart sensors size(%zu) must be %u\n", new_custom->size,
+                       LTC2983_CUSTOM_STEINHART_SIZE);
                return ERR_PTR(-EINVAL);
        }
        /* Check space on the table. */
@@ -423,21 +427,33 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
        }
 
        /* allocate the table */
-       new_custom->table = devm_kzalloc(dev, new_custom->size, GFP_KERNEL);
+       if (is_steinhart)
+               new_custom->table = devm_kcalloc(dev, n_entries, sizeof(u32), GFP_KERNEL);
+       else
+               new_custom->table = devm_kcalloc(dev, n_entries, sizeof(u64), GFP_KERNEL);
        if (!new_custom->table)
                return ERR_PTR(-ENOMEM);
 
-       for (index = 0; index < n_entries; index++) {
-               u64 temp = 0, j;
-               /*
-                * Steinhart sensors are configured with raw values in the
-                * devicetree. For the other sensors we must convert the
-                * value to raw. The odd index's correspond to temperarures
-                * and always have 1/1024 of resolution. Temperatures also
-                * come in kelvin, so signed values is not possible
-                */
-               if (!is_steinhart) {
-                       of_property_read_u64_index(np, propname, index, &temp);
+       /*
+        * Steinhart sensors are configured with raw values in the firmware
+        * node. For the other sensors we must convert the value to raw.
+        * The odd indices correspond to temperatures and always have 1/1024
+        * The odd indices correspond to temperatures and always have 1/1024
+        * of resolution. Temperatures also come in Kelvin, so signed values
+        * are not possible.
+        */
+       if (is_steinhart) {
+               ret = fwnode_property_read_u32_array(fn, propname, new_custom->table, n_entries);
+               if (ret < 0)
+                       return ERR_PTR(ret);
+
+               cpu_to_be32_array(new_custom->table, new_custom->table, n_entries);
+       } else {
+               ret = fwnode_property_read_u64_array(fn, propname, new_custom->table, n_entries);
+               if (ret < 0)
+                       return ERR_PTR(ret);
+
+               for (index = 0; index < n_entries; index++) {
+                       u64 temp = ((u64 *)new_custom->table)[index];
 
                        if ((index % 2) != 0)
                                temp = __convert_to_raw(temp, 1024);
@@ -445,16 +461,9 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
                                temp = __convert_to_raw_sign(temp, resolution);
                        else
                                temp = __convert_to_raw(temp, resolution);
-               } else {
-                       u32 t32;
 
-                       of_property_read_u32_index(np, propname, index, &t32);
-                       temp = t32;
+                       put_unaligned_be24(temp, new_custom->table + index * 3);
                }
-
-               for (j = 0; j < n_size; j++)
-                       new_custom->table[tbl++] =
-                               temp >> (8 * (n_size - j - 1));
        }
 
        new_custom->is_steinhart = is_steinhart;
@@ -597,13 +606,12 @@ static int ltc2983_adc_assign_chan(struct ltc2983_data *st,
        return __ltc2983_chan_assign_common(st, sensor, chan_val);
 }
 
-static struct ltc2983_sensor *ltc2983_thermocouple_new(
-                                       const struct device_node *child,
-                                       struct ltc2983_data *st,
-                                       const struct ltc2983_sensor *sensor)
+static struct ltc2983_sensor *
+ltc2983_thermocouple_new(const struct fwnode_handle *child, struct ltc2983_data *st,
+                        const struct ltc2983_sensor *sensor)
 {
        struct ltc2983_thermocouple *thermo;
-       struct device_node *phandle;
+       struct fwnode_handle *ref;
        u32 oc_current;
        int ret;
 
@@ -611,11 +619,10 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new(
        if (!thermo)
                return ERR_PTR(-ENOMEM);
 
-       if (of_property_read_bool(child, "adi,single-ended"))
+       if (fwnode_property_read_bool(child, "adi,single-ended"))
                thermo->sensor_config = LTC2983_THERMOCOUPLE_SGL(1);
 
-       ret = of_property_read_u32(child, "adi,sensor-oc-current-microamp",
-                                  &oc_current);
+       ret = fwnode_property_read_u32(child, "adi,sensor-oc-current-microamp", &oc_current);
        if (!ret) {
                switch (oc_current) {
                case 10:
@@ -651,20 +658,18 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new(
                return ERR_PTR(-EINVAL);
        }
 
-       phandle = of_parse_phandle(child, "adi,cold-junction-handle", 0);
-       if (phandle) {
-               int ret;
-
-               ret = of_property_read_u32(phandle, "reg",
-                                          &thermo->cold_junction_chan);
+       ref = fwnode_find_reference(child, "adi,cold-junction-handle", 0);
+       if (IS_ERR(ref)) {
+               ref = NULL;
+       } else {
+               ret = fwnode_property_read_u32(ref, "reg", &thermo->cold_junction_chan);
                if (ret) {
                        /*
                         * This would be caught later but we can just return
                         * the error right away.
                         */
                        dev_err(&st->spi->dev, "Property reg must be given\n");
-                       of_node_put(phandle);
-                       return ERR_PTR(-EINVAL);
+                       goto fail;
                }
        }
 
@@ -676,8 +681,8 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new(
                                                             propname, false,
                                                             16384, true);
                if (IS_ERR(thermo->custom)) {
-                       of_node_put(phandle);
-                       return ERR_CAST(thermo->custom);
+                       ret = PTR_ERR(thermo->custom);
+                       goto fail;
                }
        }
 
@@ -685,37 +690,41 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new(
        thermo->sensor.fault_handler = ltc2983_thermocouple_fault_handler;
        thermo->sensor.assign_chan = ltc2983_thermocouple_assign_chan;
 
-       of_node_put(phandle);
+       fwnode_handle_put(ref);
        return &thermo->sensor;
+
+fail:
+       fwnode_handle_put(ref);
+       return ERR_PTR(ret);
 }
 
-static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
-                                         struct ltc2983_data *st,
-                                         const struct ltc2983_sensor *sensor)
+static struct ltc2983_sensor *
+ltc2983_rtd_new(const struct fwnode_handle *child, struct ltc2983_data *st,
+               const struct ltc2983_sensor *sensor)
 {
        struct ltc2983_rtd *rtd;
        int ret = 0;
        struct device *dev = &st->spi->dev;
-       struct device_node *phandle;
+       struct fwnode_handle *ref;
        u32 excitation_current = 0, n_wires = 0;
 
        rtd = devm_kzalloc(dev, sizeof(*rtd), GFP_KERNEL);
        if (!rtd)
                return ERR_PTR(-ENOMEM);
 
-       phandle = of_parse_phandle(child, "adi,rsense-handle", 0);
-       if (!phandle) {
+       ref = fwnode_find_reference(child, "adi,rsense-handle", 0);
+       if (IS_ERR(ref)) {
                dev_err(dev, "Property adi,rsense-handle missing or invalid");
-               return ERR_PTR(-EINVAL);
+               return ERR_CAST(ref);
        }
 
-       ret = of_property_read_u32(phandle, "reg", &rtd->r_sense_chan);
+       ret = fwnode_property_read_u32(ref, "reg", &rtd->r_sense_chan);
        if (ret) {
                dev_err(dev, "Property reg must be given\n");
                goto fail;
        }
 
-       ret = of_property_read_u32(child, "adi,number-of-wires", &n_wires);
+       ret = fwnode_property_read_u32(child, "adi,number-of-wires", &n_wires);
        if (!ret) {
                switch (n_wires) {
                case 2:
@@ -738,9 +747,9 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
                }
        }
 
-       if (of_property_read_bool(child, "adi,rsense-share")) {
+       if (fwnode_property_read_bool(child, "adi,rsense-share")) {
                /* Current rotation is only available with rsense sharing */
-               if (of_property_read_bool(child, "adi,current-rotate")) {
+               if (fwnode_property_read_bool(child, "adi,current-rotate")) {
                        if (n_wires == 2 || n_wires == 3) {
                                dev_err(dev,
                                        "Rotation not allowed for 2/3 Wire RTDs");
@@ -803,8 +812,8 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
                                                          "adi,custom-rtd",
                                                          false, 2048, false);
                if (IS_ERR(rtd->custom)) {
-                       of_node_put(phandle);
-                       return ERR_CAST(rtd->custom);
+                       ret = PTR_ERR(rtd->custom);
+                       goto fail;
                }
        }
 
@@ -812,8 +821,8 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
        rtd->sensor.fault_handler = ltc2983_common_fault_handler;
        rtd->sensor.assign_chan = ltc2983_rtd_assign_chan;
 
-       ret = of_property_read_u32(child, "adi,excitation-current-microamp",
-                                  &excitation_current);
+       ret = fwnode_property_read_u32(child, "adi,excitation-current-microamp",
+                                      &excitation_current);
        if (ret) {
                /* default to 5uA */
                rtd->excitation_current = 1;
@@ -852,23 +861,22 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
                }
        }
 
-       of_property_read_u32(child, "adi,rtd-curve", &rtd->rtd_curve);
+       fwnode_property_read_u32(child, "adi,rtd-curve", &rtd->rtd_curve);
 
-       of_node_put(phandle);
+       fwnode_handle_put(ref);
        return &rtd->sensor;
 fail:
-       of_node_put(phandle);
+       fwnode_handle_put(ref);
        return ERR_PTR(ret);
 }
 
-static struct ltc2983_sensor *ltc2983_thermistor_new(
-                                       const struct device_node *child,
-                                       struct ltc2983_data *st,
-                                       const struct ltc2983_sensor *sensor)
+static struct ltc2983_sensor *
+ltc2983_thermistor_new(const struct fwnode_handle *child, struct ltc2983_data *st,
+                      const struct ltc2983_sensor *sensor)
 {
        struct ltc2983_thermistor *thermistor;
        struct device *dev = &st->spi->dev;
-       struct device_node *phandle;
+       struct fwnode_handle *ref;
        u32 excitation_current = 0;
        int ret = 0;
 
@@ -876,23 +884,23 @@ static struct ltc2983_sensor *ltc2983_thermistor_new(
        if (!thermistor)
                return ERR_PTR(-ENOMEM);
 
-       phandle = of_parse_phandle(child, "adi,rsense-handle", 0);
-       if (!phandle) {
+       ref = fwnode_find_reference(child, "adi,rsense-handle", 0);
+       if (IS_ERR(ref)) {
                dev_err(dev, "Property adi,rsense-handle missing or invalid");
-               return ERR_PTR(-EINVAL);
+               return ERR_CAST(ref);
        }
 
-       ret = of_property_read_u32(phandle, "reg", &thermistor->r_sense_chan);
+       ret = fwnode_property_read_u32(ref, "reg", &thermistor->r_sense_chan);
        if (ret) {
                dev_err(dev, "rsense channel must be configured...\n");
                goto fail;
        }
 
-       if (of_property_read_bool(child, "adi,single-ended")) {
+       if (fwnode_property_read_bool(child, "adi,single-ended")) {
                thermistor->sensor_config = LTC2983_THERMISTOR_SGL(1);
-       } else if (of_property_read_bool(child, "adi,rsense-share")) {
+       } else if (fwnode_property_read_bool(child, "adi,rsense-share")) {
                /* rotation is only possible if sharing rsense */
-               if (of_property_read_bool(child, "adi,current-rotate"))
+               if (fwnode_property_read_bool(child, "adi,current-rotate"))
                        thermistor->sensor_config =
                                                LTC2983_THERMISTOR_C_ROTATE(1);
                else
@@ -926,16 +934,16 @@ static struct ltc2983_sensor *ltc2983_thermistor_new(
                                                                 steinhart,
                                                                 64, false);
                if (IS_ERR(thermistor->custom)) {
-                       of_node_put(phandle);
-                       return ERR_CAST(thermistor->custom);
+                       ret = PTR_ERR(thermistor->custom);
+                       goto fail;
                }
        }
        /* set common parameters */
        thermistor->sensor.fault_handler = ltc2983_common_fault_handler;
        thermistor->sensor.assign_chan = ltc2983_thermistor_assign_chan;
 
-       ret = of_property_read_u32(child, "adi,excitation-current-nanoamp",
-                                  &excitation_current);
+       ret = fwnode_property_read_u32(child, "adi,excitation-current-nanoamp",
+                                      &excitation_current);
        if (ret) {
                /* Auto range is not allowed for custom sensors */
                if (sensor->type >= LTC2983_SENSOR_THERMISTOR_STEINHART)
@@ -999,17 +1007,16 @@ static struct ltc2983_sensor *ltc2983_thermistor_new(
                }
        }
 
-       of_node_put(phandle);
+       fwnode_handle_put(ref);
        return &thermistor->sensor;
 fail:
-       of_node_put(phandle);
+       fwnode_handle_put(ref);
        return ERR_PTR(ret);
 }
 
-static struct ltc2983_sensor *ltc2983_diode_new(
-                                       const struct device_node *child,
-                                       const struct ltc2983_data *st,
-                                       const struct ltc2983_sensor *sensor)
+static struct ltc2983_sensor *
+ltc2983_diode_new(const struct fwnode_handle *child, const struct ltc2983_data *st,
+                 const struct ltc2983_sensor *sensor)
 {
        struct ltc2983_diode *diode;
        u32 temp = 0, excitation_current = 0;
@@ -1019,13 +1026,13 @@ static struct ltc2983_sensor *ltc2983_diode_new(
        if (!diode)
                return ERR_PTR(-ENOMEM);
 
-       if (of_property_read_bool(child, "adi,single-ended"))
+       if (fwnode_property_read_bool(child, "adi,single-ended"))
                diode->sensor_config = LTC2983_DIODE_SGL(1);
 
-       if (of_property_read_bool(child, "adi,three-conversion-cycles"))
+       if (fwnode_property_read_bool(child, "adi,three-conversion-cycles"))
                diode->sensor_config |= LTC2983_DIODE_3_CONV_CYCLE(1);
 
-       if (of_property_read_bool(child, "adi,average-on"))
+       if (fwnode_property_read_bool(child, "adi,average-on"))
                diode->sensor_config |= LTC2983_DIODE_AVERAGE_ON(1);
 
        /* validate channel index */
@@ -1040,8 +1047,8 @@ static struct ltc2983_sensor *ltc2983_diode_new(
        diode->sensor.fault_handler = ltc2983_common_fault_handler;
        diode->sensor.assign_chan = ltc2983_diode_assign_chan;
 
-       ret = of_property_read_u32(child, "adi,excitation-current-microamp",
-                                  &excitation_current);
+       ret = fwnode_property_read_u32(child, "adi,excitation-current-microamp",
+                                      &excitation_current);
        if (!ret) {
                switch (excitation_current) {
                case 10:
@@ -1064,7 +1071,7 @@ static struct ltc2983_sensor *ltc2983_diode_new(
                }
        }
 
-       of_property_read_u32(child, "adi,ideal-factor-value", &temp);
+       fwnode_property_read_u32(child, "adi,ideal-factor-value", &temp);
 
        /* 2^20 resolution */
        diode->ideal_factor_value = __convert_to_raw(temp, 1048576);
@@ -1072,7 +1079,7 @@ static struct ltc2983_sensor *ltc2983_diode_new(
        return &diode->sensor;
 }
 
-static struct ltc2983_sensor *ltc2983_r_sense_new(struct device_node *child,
+static struct ltc2983_sensor *ltc2983_r_sense_new(struct fwnode_handle *child,
                                        struct ltc2983_data *st,
                                        const struct ltc2983_sensor *sensor)
 {
@@ -1091,7 +1098,7 @@ static struct ltc2983_sensor *ltc2983_r_sense_new(struct device_node *child,
                return ERR_PTR(-EINVAL);
        }
 
-       ret = of_property_read_u32(child, "adi,rsense-val-milli-ohms", &temp);
+       ret = fwnode_property_read_u32(child, "adi,rsense-val-milli-ohms", &temp);
        if (ret) {
                dev_err(&st->spi->dev, "Property adi,rsense-val-milli-ohms missing\n");
                return ERR_PTR(-EINVAL);
@@ -1110,7 +1117,7 @@ static struct ltc2983_sensor *ltc2983_r_sense_new(struct device_node *child,
        return &rsense->sensor;
 }
 
-static struct ltc2983_sensor *ltc2983_adc_new(struct device_node *child,
+static struct ltc2983_sensor *ltc2983_adc_new(struct fwnode_handle *child,
                                         struct ltc2983_data *st,
                                         const struct ltc2983_sensor *sensor)
 {
@@ -1120,7 +1127,7 @@ static struct ltc2983_sensor *ltc2983_adc_new(struct device_node *child,
        if (!adc)
                return ERR_PTR(-ENOMEM);
 
-       if (of_property_read_bool(child, "adi,single-ended"))
+       if (fwnode_property_read_bool(child, "adi,single-ended"))
                adc->single_ended = true;
 
        if (!adc->single_ended &&
@@ -1264,17 +1271,15 @@ static irqreturn_t ltc2983_irq_handler(int irq, void *data)
 
 static int ltc2983_parse_dt(struct ltc2983_data *st)
 {
-       struct device_node *child;
        struct device *dev = &st->spi->dev;
+       struct fwnode_handle *child;
        int ret = 0, chan = 0, channel_avail_mask = 0;
 
-       of_property_read_u32(dev->of_node, "adi,mux-delay-config-us",
-                            &st->mux_delay_config);
+       device_property_read_u32(dev, "adi,mux-delay-config-us", &st->mux_delay_config);
 
-       of_property_read_u32(dev->of_node, "adi,filter-notch-freq",
-                            &st->filter_notch_freq);
+       device_property_read_u32(dev, "adi,filter-notch-freq", &st->filter_notch_freq);
 
-       st->num_channels = of_get_available_child_count(dev->of_node);
+       st->num_channels = device_get_child_node_count(dev);
        if (!st->num_channels) {
                dev_err(&st->spi->dev, "At least one channel must be given!");
                return -EINVAL;
@@ -1286,10 +1291,10 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
                return -ENOMEM;
 
        st->iio_channels = st->num_channels;
-       for_each_available_child_of_node(dev->of_node, child) {
+       device_for_each_child_node(dev, child) {
                struct ltc2983_sensor sensor;
 
-               ret = of_property_read_u32(child, "reg", &sensor.chan);
+               ret = fwnode_property_read_u32(child, "reg", &sensor.chan);
                if (ret) {
                        dev_err(dev, "reg property must given for child nodes\n");
                        goto put_child;
@@ -1299,8 +1304,8 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
                if (sensor.chan < LTC2983_MIN_CHANNELS_NR ||
                    sensor.chan > LTC2983_MAX_CHANNELS_NR) {
                        ret = -EINVAL;
-                       dev_err(dev,
-                               "chan:%d must be from 1 to 20\n", sensor.chan);
+                       dev_err(dev, "chan:%d must be from %u to %u\n", sensor.chan,
+                               LTC2983_MIN_CHANNELS_NR, LTC2983_MAX_CHANNELS_NR);
                        goto put_child;
                } else if (channel_avail_mask & BIT(sensor.chan)) {
                        ret = -EINVAL;
@@ -1308,8 +1313,7 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
                        goto put_child;
                }
 
-               ret = of_property_read_u32(child, "adi,sensor-type",
-                                              &sensor.type);
+               ret = fwnode_property_read_u32(child, "adi,sensor-type", &sensor.type);
                if (ret) {
                        dev_err(dev,
                                "adi,sensor-type property must given for child nodes\n");
@@ -1363,7 +1367,7 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
 
        return 0;
 put_child:
-       of_node_put(child);
+       fwnode_handle_put(child);
        return ret;
 }
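
The ltc2983 rework above iterates child firmware nodes with device_for_each_child_node() and has to drop the node reference on every early exit. A rough sketch of that loop shape, with placeholder parsing instead of the real ltc2983 channel setup:

    #include <linux/device.h>
    #include <linux/property.h>

    static int example_parse_children(struct device *dev)
    {
            struct fwnode_handle *child;
            u32 reg;
            int ret;

            device_for_each_child_node(dev, child) {
                    ret = fwnode_property_read_u32(child, "reg", &reg);
                    if (ret) {
                            /* The iterator holds a reference; drop it before bailing out. */
                            fwnode_handle_put(child);
                            return ret;
                    }
                    /* ... configure the channel described by "reg" ... */
            }

            return 0;
    }
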
 
index 5484088..8307aae 100644 (file)
@@ -7,9 +7,11 @@
  */
 
 #include <linux/ctype.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/err.h>
+#include <linux/property.h>
 #include <linux/spi/spi.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
@@ -422,9 +424,7 @@ static int max31856_probe(struct spi_device *spi)
        indio_dev->channels = max31856_channels;
        indio_dev->num_channels = ARRAY_SIZE(max31856_channels);
 
-       ret = of_property_read_u32(spi->dev.of_node, "thermocouple-type",
-                                  &data->thermocouple_type);
-
+       ret = device_property_read_u32(&spi->dev, "thermocouple-type", &data->thermocouple_type);
        if (ret) {
                dev_info(&spi->dev,
                         "Could not read thermocouple type DT property, configuring as a K-Type\n");
index 86c3f35..e3bb781 100644 (file)
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/init.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
+#include <linux/property.h>
 #include <linux/spi/spi.h>
 #include <asm/unaligned.h>
 
@@ -305,7 +307,7 @@ static int max31865_probe(struct spi_device *spi)
        indio_dev->channels = max31865_channels;
        indio_dev->num_channels = ARRAY_SIZE(max31865_channels);
 
-       if (of_property_read_bool(spi->dev.of_node, "maxim,3-wire")) {
+       if (device_property_read_bool(&spi->dev, "maxim,3-wire")) {
                /* select 3 wire */
                data->three_wire = 1;
        } else {
index 2a4b758..f1a8704 100644 (file)
@@ -176,16 +176,15 @@ out1:
 
 static int iio_sysfs_trigger_remove(int id)
 {
-       bool foundit = false;
-       struct iio_sysfs_trig *t;
+       struct iio_sysfs_trig *t = NULL, *iter;
 
        mutex_lock(&iio_sysfs_trig_list_mut);
-       list_for_each_entry(t, &iio_sysfs_trig_list, l)
-               if (id == t->id) {
-                       foundit = true;
+       list_for_each_entry(iter, &iio_sysfs_trig_list, l)
+               if (id == iter->id) {
+                       t = iter;
                        break;
                }
-       if (!foundit) {
+       if (!t) {
                mutex_unlock(&iio_sysfs_trig_list_mut);
                return -EINVAL;
        }
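
The sysfs-trigger change above stops dereferencing the list_for_each_entry() cursor after the loop and records the match in a separate pointer instead. A minimal sketch of the pattern with made-up types:

    #include <linux/list.h>

    struct example_trig {
            int id;
            struct list_head l;
    };

    static struct example_trig *example_find(struct list_head *head, int id)
    {
            struct example_trig *found = NULL, *iter;

            list_for_each_entry(iter, head, l) {
                    if (iter->id == id) {
                            found = iter;   /* remember the match, not the cursor */
                            break;
                    }
            }

            return found;                   /* NULL when nothing matched */
    }
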
index 8a36d78..946bf75 100644 (file)
@@ -639,7 +639,7 @@ static int ati_remote2_urb_init(struct ati_remote2 *ar2)
                        return -ENOMEM;
 
                pipe = usb_rcvintpipe(udev, ar2->ep[i]->bEndpointAddress);
-               maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+               maxp = usb_maxpacket(udev, pipe);
                maxp = maxp > 4 ? 4 : maxp;
 
                usb_fill_int_urb(ar2->urb[i], udev, pipe, ar2->buf[i], maxp,
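
This hunk, and several of the ones below, make the same mechanical change: usb_maxpacket() lost its third argument because the transfer direction is already encoded in the pipe. A one-function illustration with placeholder variables:

    #include <linux/usb.h>

    /* The pipe already encodes the direction, so two arguments are enough. */
    static int example_int_in_maxpacket(struct usb_device *udev, u8 ep_addr)
    {
            unsigned int pipe = usb_rcvintpipe(udev, ep_addr);

            return usb_maxpacket(udev, pipe);
    }
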
index f515fae..728325a 100644 (file)
@@ -745,7 +745,7 @@ static int cm109_usb_probe(struct usb_interface *intf,
 
        /* get a handle to the interrupt data pipe */
        pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
-       ret = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+       ret = usb_maxpacket(udev, pipe);
        if (ret != USB_PKT_LEN)
                dev_err(&intf->dev, "invalid payload size %d, expected %d\n",
                        ret, USB_PKT_LEN);
index c4e0e18..c1c733a 100644 (file)
@@ -374,7 +374,7 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
 
        /* get a handle to the interrupt data pipe */
        pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
-       maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+       maxp = usb_maxpacket(udev, pipe);
 
        if (maxp < POWERMATE_PAYLOAD_SIZE_MIN || maxp > POWERMATE_PAYLOAD_SIZE_MAX) {
                printk(KERN_WARNING "powermate: Expected payload of %d--%d bytes, found %d bytes!\n",
index 1fc9b3e..8d8ebdc 100644 (file)
@@ -481,7 +481,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
  error_evtchan:
        xenbus_free_evtchn(dev, evtchn);
  error_grant:
-       gnttab_end_foreign_access(info->gref, 0UL);
+       gnttab_end_foreign_access(info->gref, NULL);
        info->gref = -1;
        return ret;
 }
@@ -492,7 +492,7 @@ static void xenkbd_disconnect_backend(struct xenkbd_info *info)
                unbind_from_irqhandler(info->irq, info);
        info->irq = -1;
        if (info->gref >= 0)
-               gnttab_end_foreign_access(info->gref, 0UL);
+               gnttab_end_foreign_access(info->gref, NULL);
        info->gref = -1;
 }
 
index 8ab01c7..6942078 100644 (file)
@@ -905,7 +905,7 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 
        /* get a handle to the interrupt data pipe */
        pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
-       ret = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+       ret = usb_maxpacket(udev, pipe);
        if (ret != USB_PKT_LEN)
                dev_err(&intf->dev, "invalid payload size %d, expected %zd\n",
                        ret, USB_PKT_LEN);
index 3332b77..f04ba12 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 
-#include <mach/hardware.h>
 #include <linux/platform_data/mouse-pxa930_trkball.h>
 
 /* Trackball Controller Register Definitions */
index a38d1fe..56c7e47 100644 (file)
@@ -130,7 +130,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
                return -ENODEV;
 
        pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
-       maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+       maxp = usb_maxpacket(dev, pipe);
 
        acecad = kzalloc(sizeof(struct usb_acecad), GFP_KERNEL);
        input_dev = input_allocate_device();
index 749edbd..c608ac5 100644 (file)
@@ -296,7 +296,7 @@ static int pegasus_probe(struct usb_interface *intf,
        pegasus->intf = intf;
 
        pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
-       pegasus->data_len = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+       pegasus->data_len = usb_maxpacket(dev, pipe);
 
        pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL,
                                           &pegasus->data_dma);
index 43c7d6e..2d70c94 100644 (file)
@@ -902,6 +902,7 @@ config TOUCHSCREEN_WM9713
 config TOUCHSCREEN_WM97XX_MAINSTONE
        tristate "WM97xx Mainstone/Palm accelerated touch"
        depends on TOUCHSCREEN_WM97XX && ARCH_PXA
+       depends on SND_PXA2XX_LIB_AC97
        help
          Say Y here for support for streaming mode with WM97xx touchscreens
          on Mainstone, Palm Tungsten T5, TX and LifeDrive systems.
@@ -914,6 +915,7 @@ config TOUCHSCREEN_WM97XX_MAINSTONE
 config TOUCHSCREEN_WM97XX_ZYLONITE
        tristate "Zylonite accelerated touch"
        depends on TOUCHSCREEN_WM97XX && MACH_ZYLONITE
+       depends on SND_PXA2XX_LIB_AC97
        select TOUCHSCREEN_WM9713
        help
          Say Y here for support for streaming mode with the touchscreen
index f8564b3..c39f497 100644 (file)
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
+#include <linux/gpio/consumer.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
-#include <linux/wm97xx.h>
 #include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/wm97xx.h>
 
-#include <mach/regs-ac97.h>
+#include <sound/pxa2xx-lib.h>
 
 #include <asm/mach-types.h>
 
@@ -41,24 +42,23 @@ struct continuous {
 #define WM_READS(sp) ((sp / HZ) + 1)
 
 static const struct continuous cinfo[] = {
-       {WM9705_ID2, 0, WM_READS(94), 94},
-       {WM9705_ID2, 1, WM_READS(188), 188},
-       {WM9705_ID2, 2, WM_READS(375), 375},
-       {WM9705_ID2, 3, WM_READS(750), 750},
-       {WM9712_ID2, 0, WM_READS(94), 94},
-       {WM9712_ID2, 1, WM_READS(188), 188},
-       {WM9712_ID2, 2, WM_READS(375), 375},
-       {WM9712_ID2, 3, WM_READS(750), 750},
-       {WM9713_ID2, 0, WM_READS(94), 94},
-       {WM9713_ID2, 1, WM_READS(120), 120},
-       {WM9713_ID2, 2, WM_READS(154), 154},
-       {WM9713_ID2, 3, WM_READS(188), 188},
+       { WM9705_ID2, 0, WM_READS(94),  94  },
+       { WM9705_ID2, 1, WM_READS(188), 188 },
+       { WM9705_ID2, 2, WM_READS(375), 375 },
+       { WM9705_ID2, 3, WM_READS(750), 750 },
+       { WM9712_ID2, 0, WM_READS(94),  94  },
+       { WM9712_ID2, 1, WM_READS(188), 188 },
+       { WM9712_ID2, 2, WM_READS(375), 375 },
+       { WM9712_ID2, 3, WM_READS(750), 750 },
+       { WM9713_ID2, 0, WM_READS(94),  94  },
+       { WM9713_ID2, 1, WM_READS(120), 120 },
+       { WM9713_ID2, 2, WM_READS(154), 154 },
+       { WM9713_ID2, 3, WM_READS(188), 188 },
 };
 
 /* continuous speed index */
 static int sp_idx;
-static u16 last, tries;
-static int irq;
+static struct gpio_desc *gpiod_irq;
 
 /*
  * Pen sampling frequency (Hz) in continuous mode.
@@ -97,44 +97,40 @@ MODULE_PARM_DESC(ac97_touch_slot, "Touch screen data slot AC97 number");
 
 
 /* flush AC97 slot 5 FIFO on pxa machines */
-#ifdef CONFIG_PXA27x
-static void wm97xx_acc_pen_up(struct wm97xx *wm)
-{
-       schedule_timeout_uninterruptible(1);
-
-       while (MISR & (1 << 2))
-               MODR;
-}
-#else
 static void wm97xx_acc_pen_up(struct wm97xx *wm)
 {
        unsigned int count;
 
-       schedule_timeout_uninterruptible(1);
+       msleep(1);
 
-       for (count = 0; count < 16; count++)
-               MODR;
+       if (cpu_is_pxa27x()) {
+               while (pxa2xx_ac97_read_misr() & (1 << 2))
+                       pxa2xx_ac97_read_modr();
+       } else if (cpu_is_pxa3xx()) {
+               for (count = 0; count < 16; count++)
+                       pxa2xx_ac97_read_modr();
+       }
 }
-#endif
 
 static int wm97xx_acc_pen_down(struct wm97xx *wm)
 {
        u16 x, y, p = 0x100 | WM97XX_ADCSEL_PRES;
        int reads = 0;
+       static u16 last, tries;
 
        /* When the AC97 queue has been drained we need to allow time
         * to buffer up samples otherwise we end up spinning polling
         * for samples.  The controller can't have a suitably low
         * threshold set to use the notifications it gives.
         */
-       schedule_timeout_uninterruptible(1);
+       msleep(1);
 
        if (tries > 5) {
                tries = 0;
                return RC_PENUP;
        }
 
-       x = MODR;
+       x = pxa2xx_ac97_read_modr();
        if (x == last) {
                tries++;
                return RC_AGAIN;
@@ -142,10 +138,10 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
        last = x;
        do {
                if (reads)
-                       x = MODR;
-               y = MODR;
+                       x = pxa2xx_ac97_read_modr();
+               y = pxa2xx_ac97_read_modr();
                if (pressure)
-                       p = MODR;
+                       p = pxa2xx_ac97_read_modr();
 
                dev_dbg(wm->dev, "Raw coordinates: x=%x, y=%x, p=%x\n",
                        x, y, p);
@@ -194,28 +190,23 @@ static int wm97xx_acc_startup(struct wm97xx *wm)
        /* IRQ driven touchscreen is used on Palm hardware */
        if (machine_is_palmt5() || machine_is_palmtx() || machine_is_palmld()) {
                pen_int = 1;
-               irq = 27;
                /* There is some obscure mutant of WM9712 interbred with WM9713
                 * used on Palm HW */
                wm->variant = WM97xx_WM1613;
-       } else if (machine_is_mainstone() && pen_int)
-               irq = 4;
-
-       if (irq) {
-               ret = gpio_request(irq, "Touchscreen IRQ");
-               if (ret)
-                       goto out;
-
-               ret = gpio_direction_input(irq);
-               if (ret) {
-                       gpio_free(irq);
-                       goto out;
-               }
+       } else if (machine_is_zylonite()) {
+               pen_int = 1;
+       }
 
-               wm->pen_irq = gpio_to_irq(irq);
+       if (pen_int) {
+               gpiod_irq = gpiod_get(wm->dev, "touch", GPIOD_IN);
+               if (IS_ERR(gpiod_irq))
+                       pen_int = 0;
+       }
+
+       if (pen_int) {
+               wm->pen_irq = gpiod_to_irq(gpiod_irq);
                irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH);
-       } else /* pen irq not supported */
-               pen_int = 0;
+       }
 
        /* codec specific irq config */
        if (pen_int) {
@@ -242,7 +233,6 @@ static int wm97xx_acc_startup(struct wm97xx *wm)
                }
        }
 
-out:
        return ret;
 }
 
@@ -250,28 +240,19 @@ static void wm97xx_acc_shutdown(struct wm97xx *wm)
 {
        /* codec specific deconfig */
        if (pen_int) {
-               if (irq)
-                       gpio_free(irq);
+               if (gpiod_irq)
+                       gpiod_put(gpiod_irq);
                wm->pen_irq = 0;
        }
 }
 
-static void wm97xx_irq_enable(struct wm97xx *wm, int enable)
-{
-       if (enable)
-               enable_irq(wm->pen_irq);
-       else
-               disable_irq_nosync(wm->pen_irq);
-}
-
 static struct wm97xx_mach_ops mainstone_mach_ops = {
-       .acc_enabled = 1,
-       .acc_pen_up = wm97xx_acc_pen_up,
-       .acc_pen_down = wm97xx_acc_pen_down,
-       .acc_startup = wm97xx_acc_startup,
-       .acc_shutdown = wm97xx_acc_shutdown,
-       .irq_enable = wm97xx_irq_enable,
-       .irq_gpio = WM97XX_GPIO_2,
+       .acc_enabled    = 1,
+       .acc_pen_up     = wm97xx_acc_pen_up,
+       .acc_pen_down   = wm97xx_acc_pen_down,
+       .acc_startup    = wm97xx_acc_startup,
+       .acc_shutdown   = wm97xx_acc_shutdown,
+       .irq_gpio       = WM97XX_GPIO_2,
 };
 
 static int mainstone_wm97xx_probe(struct platform_device *pdev)
@@ -286,14 +267,15 @@ static int mainstone_wm97xx_remove(struct platform_device *pdev)
        struct wm97xx *wm = platform_get_drvdata(pdev);
 
        wm97xx_unregister_mach_ops(wm);
+
        return 0;
 }
 
 static struct platform_driver mainstone_wm97xx_driver = {
-       .probe = mainstone_wm97xx_probe,
-       .remove = mainstone_wm97xx_remove,
-       .driver = {
-               .name = "wm97xx-touch",
+       .probe  = mainstone_wm97xx_probe,
+       .remove = mainstone_wm97xx_remove,
+       .driver = {
+               .name   = "wm97xx-touch",
        },
 };
 module_platform_driver(mainstone_wm97xx_driver);
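
The Mainstone conversion above moves from hard-coded GPIO numbers to GPIO descriptors looked up by function name. A sketch of requesting the pen-down IRQ that way; the "touch" con_id mirrors the hunk, while the surrounding error handling is illustrative:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>
    #include <linux/irq.h>

    static int example_get_pen_irq(struct device *dev)
    {
            struct gpio_desc *gpiod;
            int irq;

            gpiod = devm_gpiod_get(dev, "touch", GPIOD_IN);
            if (IS_ERR(gpiod))
                    return PTR_ERR(gpiod);

            irq = gpiod_to_irq(gpiod);
            if (irq < 0)
                    return irq;

            irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
            return irq;
    }
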
index 1b58611..2757c77 100644 (file)
@@ -285,11 +285,12 @@ void wm97xx_set_suspend_mode(struct wm97xx *wm, u16 mode)
 EXPORT_SYMBOL_GPL(wm97xx_set_suspend_mode);
 
 /*
- * Handle a pen down interrupt.
+ * Codec PENDOWN irq handler
+ *
  */
-static void wm97xx_pen_irq_worker(struct work_struct *work)
+static irqreturn_t wm97xx_pen_interrupt(int irq, void *dev_id)
 {
-       struct wm97xx *wm = container_of(work, struct wm97xx, pen_event_work);
+       struct wm97xx *wm = dev_id;
        int pen_was_down = wm->pen_is_down;
 
        /* do we need to enable the touch panel reader */
@@ -343,27 +344,6 @@ static void wm97xx_pen_irq_worker(struct work_struct *work)
        if (!wm->pen_is_down && wm->mach_ops->acc_enabled)
                wm->mach_ops->acc_pen_up(wm);
 
-       wm->mach_ops->irq_enable(wm, 1);
-}
-
-/*
- * Codec PENDOWN irq handler
- *
- * We have to disable the codec interrupt in the handler because it
- * can take up to 1ms to clear the interrupt source. We schedule a task
- * in a work queue to do the actual interaction with the chip.  The
- * interrupt is then enabled again in the slow handler when the source
- * has been cleared.
- */
-static irqreturn_t wm97xx_pen_interrupt(int irq, void *dev_id)
-{
-       struct wm97xx *wm = dev_id;
-
-       if (!work_pending(&wm->pen_event_work)) {
-               wm->mach_ops->irq_enable(wm, 0);
-               queue_work(wm->ts_workq, &wm->pen_event_work);
-       }
-
        return IRQ_HANDLED;
 }
 
@@ -374,12 +354,9 @@ static int wm97xx_init_pen_irq(struct wm97xx *wm)
 {
        u16 reg;
 
-       /* If an interrupt is supplied an IRQ enable operation must also be
-        * provided. */
-       BUG_ON(!wm->mach_ops->irq_enable);
-
-       if (request_irq(wm->pen_irq, wm97xx_pen_interrupt, IRQF_SHARED,
-                       "wm97xx-pen", wm)) {
+       if (request_threaded_irq(wm->pen_irq, NULL, wm97xx_pen_interrupt,
+                                IRQF_SHARED | IRQF_ONESHOT,
+                                "wm97xx-pen", wm)) {
                dev_err(wm->dev,
                        "Failed to register pen down interrupt, polling");
                wm->pen_irq = 0;
@@ -509,7 +486,6 @@ static int wm97xx_ts_input_open(struct input_dev *idev)
        wm->codec->dig_enable(wm, 1);
 
        INIT_DELAYED_WORK(&wm->ts_reader, wm97xx_ts_reader);
-       INIT_WORK(&wm->pen_event_work, wm97xx_pen_irq_worker);
 
        wm->ts_reader_min_interval = HZ >= 100 ? HZ / 100 : 1;
        if (wm->ts_reader_min_interval < 1)
@@ -560,10 +536,6 @@ static void wm97xx_ts_input_close(struct input_dev *idev)
 
        wm->pen_is_down = 0;
 
-       /* Balance out interrupt disables/enables */
-       if (cancel_work_sync(&wm->pen_event_work))
-               wm->mach_ops->irq_enable(wm, 1);
-
        /* ts_reader rearms itself so we need to explicitly stop it
         * before we destroy the workqueue.
         */
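
The wm97xx core change above replaces the workqueue bottom half with a threaded interrupt: clearing the codec interrupt source can take up to a millisecond, so the slow work now runs in the IRQ thread while IRQF_ONESHOT keeps the line masked until it returns. A rough sketch of the registration, with placeholder handler names:

    #include <linux/interrupt.h>

    static irqreturn_t example_pen_thread(int irq, void *dev_id)
    {
            /* Slow work that may sleep: poke the codec and clear the source. */
            return IRQ_HANDLED;
    }

    static int example_register_pen_irq(int irq, void *dev_id)
    {
            /*
             * No hard handler: IRQF_ONESHOT keeps the line masked until the
             * thread returns, so no manual enable/disable pairing is needed.
             */
            return request_threaded_irq(irq, NULL, example_pen_thread,
                                        IRQF_SHARED | IRQF_ONESHOT,
                                        "example-pen", dev_id);
    }
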
index 0f4ac7f..a70fe4a 100644 (file)
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/soc/pxa/cpu.h>
 #include <linux/wm97xx.h>
 
-#include <mach/hardware.h>
-#include <mach/mfp.h>
-#include <mach/regs-ac97.h>
+#include <sound/pxa2xx-lib.h>
 
 struct continuous {
        u16 id;    /* codec id */
@@ -80,7 +79,7 @@ static void wm97xx_acc_pen_up(struct wm97xx *wm)
        msleep(1);
 
        for (i = 0; i < 16; i++)
-               MODR;
+               pxa2xx_ac97_read_modr();
 }
 
 static int wm97xx_acc_pen_down(struct wm97xx *wm)
@@ -101,7 +100,7 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
                return RC_PENUP;
        }
 
-       x = MODR;
+       x = pxa2xx_ac97_read_modr();
        if (x == last) {
                tries++;
                return RC_AGAIN;
@@ -109,10 +108,10 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
        last = x;
        do {
                if (reads)
-                       x = MODR;
-               y = MODR;
+                       x = pxa2xx_ac97_read_modr();
+               y = pxa2xx_ac97_read_modr();
                if (pressure)
-                       p = MODR;
+                       p = pxa2xx_ac97_read_modr();
 
                dev_dbg(wm->dev, "Raw coordinates: x=%x, y=%x, p=%x\n",
                        x, y, p);
@@ -161,34 +160,28 @@ static int wm97xx_acc_startup(struct wm97xx *wm)
        return 0;
 }
 
-static void wm97xx_irq_enable(struct wm97xx *wm, int enable)
-{
-       if (enable)
-               enable_irq(wm->pen_irq);
-       else
-               disable_irq_nosync(wm->pen_irq);
-}
-
 static struct wm97xx_mach_ops zylonite_mach_ops = {
        .acc_enabled    = 1,
        .acc_pen_up     = wm97xx_acc_pen_up,
        .acc_pen_down   = wm97xx_acc_pen_down,
        .acc_startup    = wm97xx_acc_startup,
-       .irq_enable     = wm97xx_irq_enable,
        .irq_gpio       = WM97XX_GPIO_2,
 };
 
 static int zylonite_wm97xx_probe(struct platform_device *pdev)
 {
        struct wm97xx *wm = platform_get_drvdata(pdev);
-       int gpio_touch_irq;
-
-       if (cpu_is_pxa320())
-               gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO15);
-       else
-               gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26);
+       struct gpio_desc *gpio_touch_irq;
+       int err;
+
+       gpio_touch_irq = devm_gpiod_get(&pdev->dev, "touch", GPIOD_IN);
+       err = PTR_ERR_OR_ZERO(gpio_touch_irq);
+       if (err) {
+               dev_err(&pdev->dev, "Cannot get irq gpio: %d\n", err);
+               return err;
+       }
 
-       wm->pen_irq = gpio_to_irq(gpio_touch_irq);
+       wm->pen_irq = gpiod_to_irq(gpio_touch_irq);
        irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH);
 
        wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
index 91353e6..22adff5 100644 (file)
@@ -110,6 +110,15 @@ config INTERCONNECT_QCOM_SC8180X
          This is a driver for the Qualcomm Network-on-Chip on sc8180x-based
          platforms.
 
+config INTERCONNECT_QCOM_SC8280XP
+       tristate "Qualcomm SC8280XP interconnect driver"
+       depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+       select INTERCONNECT_QCOM_RPMH
+       select INTERCONNECT_QCOM_BCM_VOTER
+       help
+         This is a driver for the Qualcomm Network-on-Chip on SC8280XP-based
+         platforms.
+
 config INTERCONNECT_QCOM_SDM660
        tristate "Qualcomm SDM660 interconnect driver"
        depends on INTERCONNECT_QCOM
@@ -137,6 +146,15 @@ config INTERCONNECT_QCOM_SDX55
          This is a driver for the Qualcomm Network-on-Chip on sdx55-based
          platforms.
 
+config INTERCONNECT_QCOM_SDX65
+       tristate "Qualcomm SDX65 interconnect driver"
+       depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+       select INTERCONNECT_QCOM_RPMH
+       select INTERCONNECT_QCOM_BCM_VOTER
+       help
+         This is a driver for the Qualcomm Network-on-Chip on sdx65-based
+         platforms.
+
 config INTERCONNECT_QCOM_SM8150
        tristate "Qualcomm SM8150 interconnect driver"
        depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
index ceae9bb..8d1fe9d 100644 (file)
@@ -12,9 +12,11 @@ icc-rpmh-obj                         := icc-rpmh.o
 qnoc-sc7180-objs                       := sc7180.o
 qnoc-sc7280-objs                        := sc7280.o
 qnoc-sc8180x-objs                      := sc8180x.o
+qnoc-sc8280xp-objs                     := sc8280xp.o
 qnoc-sdm660-objs                       := sdm660.o
 qnoc-sdm845-objs                       := sdm845.o
 qnoc-sdx55-objs                                := sdx55.o
+qnoc-sdx65-objs                                := sdx65.o
 qnoc-sm8150-objs                       := sm8150.o
 qnoc-sm8250-objs                       := sm8250.o
 qnoc-sm8350-objs                       := sm8350.o
@@ -33,9 +35,11 @@ obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SC7280) += qnoc-sc7280.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SC8180X) += qnoc-sc8180x.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SC8280XP) += qnoc-sc8280xp.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SDM660) += qnoc-sdm660.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SDX65) += qnoc-sdx65.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o
index 34125e8..fb01319 100644 (file)
@@ -274,20 +274,19 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
        do_div(rate, qn->buswidth);
        rate = min_t(u64, rate, LONG_MAX);
 
-       if (qn->rate == rate)
-               return 0;
-
        for (i = 0; i < qp->num_clks; i++) {
+               if (qp->bus_clk_rate[i] == rate)
+                       continue;
+
                ret = clk_set_rate(qp->bus_clks[i].clk, rate);
                if (ret) {
                        pr_err("%s clk_set_rate error: %d\n",
                               qp->bus_clks[i].id, ret);
                        return ret;
                }
+               qp->bus_clk_rate[i] = rate;
        }
 
-       qn->rate = rate;
-
        return 0;
 }
 
@@ -301,7 +300,7 @@ int qnoc_probe(struct platform_device *pdev)
        const struct qcom_icc_desc *desc;
        struct icc_onecell_data *data;
        struct icc_provider *provider;
-       struct qcom_icc_node **qnodes;
+       struct qcom_icc_node * const *qnodes;
        struct qcom_icc_provider *qp;
        struct icc_node *node;
        size_t num_nodes, i;
@@ -332,6 +331,11 @@ int qnoc_probe(struct platform_device *pdev)
        if (!qp)
                return -ENOMEM;
 
+       qp->bus_clk_rate = devm_kcalloc(dev, cd_num, sizeof(*qp->bus_clk_rate),
+                                       GFP_KERNEL);
+       if (!qp->bus_clk_rate)
+               return -ENOMEM;
+
        data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
                            GFP_KERNEL);
        if (!data)
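
The icc-rpm hunks above cache the rate last programmed into each bus clock so that repeated aggregation does not issue redundant clk_set_rate() calls. A condensed sketch of that per-clock cache, using simplified structures rather than the real qcom_icc_provider:

    #include <linux/clk.h>
    #include <linux/types.h>

    struct example_provider {
            int num_clks;
            struct clk_bulk_data *bus_clks;
            u64 *bus_clk_rate;              /* last rate requested per clock, in Hz */
    };

    static int example_set_bus_rate(struct example_provider *qp, u64 rate)
    {
            int i, ret;

            for (i = 0; i < qp->num_clks; i++) {
                    if (qp->bus_clk_rate[i] == rate)
                            continue;       /* already running at this rate */

                    ret = clk_set_rate(qp->bus_clks[i].clk, rate);
                    if (ret)
                            return ret;

                    qp->bus_clk_rate[i] = rate;
            }

            return 0;
    }
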
index 26dad00..ebee900 100644 (file)
@@ -26,6 +26,7 @@ enum qcom_icc_type {
  * @type: the ICC provider type
  * @qos_offset: offset to QoS registers
  * @regmap: regmap for QoS registers read/write access
+ * @bus_clk_rate: bus clock rate in Hz
  */
 struct qcom_icc_provider {
        struct icc_provider provider;
@@ -33,6 +34,7 @@ struct qcom_icc_provider {
        enum qcom_icc_type type;
        struct regmap *regmap;
        unsigned int qos_offset;
+       u64 *bus_clk_rate;
        struct clk_bulk_data bus_clks[];
 };
 
@@ -66,7 +68,6 @@ struct qcom_icc_qos {
  * @mas_rpm_id:        RPM id for devices that are bus masters
  * @slv_rpm_id:        RPM id for devices that are bus slaves
  * @qos: NoC QoS setting parameters
- * @rate: current bus clock rate in Hz
  */
 struct qcom_icc_node {
        unsigned char *name;
@@ -77,11 +78,10 @@ struct qcom_icc_node {
        int mas_rpm_id;
        int slv_rpm_id;
        struct qcom_icc_qos qos;
-       u64 rate;
 };
 
 struct qcom_icc_desc {
-       struct qcom_icc_node **nodes;
+       struct qcom_icc_node * const *nodes;
        size_t num_nodes;
        const char * const *clocks;
        size_t num_clocks;
index 2c8e125..3c40076 100644 (file)
@@ -189,7 +189,7 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct icc_onecell_data *data;
        struct icc_provider *provider;
-       struct qcom_icc_node **qnodes, *qn;
+       struct qcom_icc_node * const *qnodes, *qn;
        struct qcom_icc_provider *qp;
        struct icc_node *node;
        size_t num_nodes, i, j;
index 4bfc060..d299294 100644 (file)
@@ -22,7 +22,7 @@
 struct qcom_icc_provider {
        struct icc_provider provider;
        struct device *dev;
-       struct qcom_icc_bcm **bcms;
+       struct qcom_icc_bcm * const *bcms;
        size_t num_bcms;
        struct bcm_voter *voter;
 };
@@ -112,9 +112,9 @@ struct qcom_icc_fabric {
 };
 
 struct qcom_icc_desc {
-       struct qcom_icc_node **nodes;
+       struct qcom_icc_node * const *nodes;
        size_t num_nodes;
-       struct qcom_icc_bcm **bcms;
+       struct qcom_icc_bcm * const *bcms;
        size_t num_bcms;
 };
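
Throughout these headers and the per-SoC tables that follow, `struct qcom_icc_node **` and `struct qcom_icc_bcm **` become `* const *`: the lookup tables hold const pointers, so the tables and the descriptors that reference them can themselves be declared const and placed in read-only data, while the pointed-to node structures stay writable. A small standalone C illustration of the pointer-to-const-pointer type follows; the struct and identifiers are invented for the example and are not part of the driver.

#include <stdio.h>

struct node { int id; };

static struct node a = { .id = 1 };
static struct node b = { .id = 2 };

/* Table of const pointers: the array itself can live in read-only data. */
static struct node * const nodes[] = { &a, &b };

int main(void)
{
	/* Same shape as the new 'qnodes' locals in the probe functions. */
	struct node * const *qnodes = nodes;

	qnodes[0]->id = 42;	/* allowed: the pointed-to node is writable */
	/* qnodes[0] = &b; */	/* would not compile: the pointer is const  */

	printf("%d %d\n", qnodes[0]->id, qnodes[1]->id);
	return 0;
}
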
 
index 2f397a7..5c4ba2f 100644 (file)
@@ -1191,7 +1191,7 @@ static struct qcom_icc_node snoc_pcnoc_slv = {
        .links = snoc_pcnoc_slv_links,
 };
 
-static struct qcom_icc_node *msm8916_snoc_nodes[] = {
+static struct qcom_icc_node * const msm8916_snoc_nodes[] = {
        [BIMC_SNOC_SLV] = &bimc_snoc_slv,
        [MASTER_JPEG] = &mas_jpeg,
        [MASTER_MDP_PORT0] = &mas_mdp,
@@ -1228,7 +1228,7 @@ static const struct regmap_config msm8916_snoc_regmap_config = {
        .fast_io        = true,
 };
 
-static struct qcom_icc_desc msm8916_snoc = {
+static const struct qcom_icc_desc msm8916_snoc = {
        .type = QCOM_ICC_NOC,
        .nodes = msm8916_snoc_nodes,
        .num_nodes = ARRAY_SIZE(msm8916_snoc_nodes),
@@ -1236,7 +1236,7 @@ static struct qcom_icc_desc msm8916_snoc = {
        .qos_offset = 0x7000,
 };
 
-static struct qcom_icc_node *msm8916_bimc_nodes[] = {
+static struct qcom_icc_node * const msm8916_bimc_nodes[] = {
        [BIMC_SNOC_MAS] = &bimc_snoc_mas,
        [MASTER_AMPSS_M0] = &mas_apss,
        [MASTER_GRAPHICS_3D] = &mas_gfx,
@@ -1256,7 +1256,7 @@ static const struct regmap_config msm8916_bimc_regmap_config = {
        .fast_io        = true,
 };
 
-static struct qcom_icc_desc msm8916_bimc = {
+static const struct qcom_icc_desc msm8916_bimc = {
        .type = QCOM_ICC_BIMC,
        .nodes = msm8916_bimc_nodes,
        .num_nodes = ARRAY_SIZE(msm8916_bimc_nodes),
@@ -1264,7 +1264,7 @@ static struct qcom_icc_desc msm8916_bimc = {
        .qos_offset = 0x8000,
 };
 
-static struct qcom_icc_node *msm8916_pcnoc_nodes[] = {
+static struct qcom_icc_node * const msm8916_pcnoc_nodes[] = {
        [MASTER_BLSP_1] = &mas_blsp_1,
        [MASTER_DEHR] = &mas_dehr,
        [MASTER_LPASS] = &mas_audio,
@@ -1325,7 +1325,7 @@ static const struct regmap_config msm8916_pcnoc_regmap_config = {
        .fast_io        = true,
 };
 
-static struct qcom_icc_desc msm8916_pcnoc = {
+static const struct qcom_icc_desc msm8916_pcnoc = {
        .type = QCOM_ICC_NOC,
        .nodes = msm8916_pcnoc_nodes,
        .num_nodes = ARRAY_SIZE(msm8916_pcnoc_nodes),
index f9c2d7d..63b31de 100644 (file)
@@ -1251,7 +1251,7 @@ static struct qcom_icc_node snoc_pcnoc_slv = {
        .links = snoc_pcnoc_slv_links,
 };
 
-static struct qcom_icc_node *msm8939_snoc_nodes[] = {
+static struct qcom_icc_node * const msm8939_snoc_nodes[] = {
        [BIMC_SNOC_SLV] = &bimc_snoc_slv,
        [MASTER_QDSS_BAM] = &mas_qdss_bam,
        [MASTER_QDSS_ETR] = &mas_qdss_etr,
@@ -1281,7 +1281,7 @@ static const struct regmap_config msm8939_snoc_regmap_config = {
        .fast_io        = true,
 };
 
-static struct qcom_icc_desc msm8939_snoc = {
+static const struct qcom_icc_desc msm8939_snoc = {
        .type = QCOM_ICC_NOC,
        .nodes = msm8939_snoc_nodes,
        .num_nodes = ARRAY_SIZE(msm8939_snoc_nodes),
@@ -1289,7 +1289,7 @@ static struct qcom_icc_desc msm8939_snoc = {
        .qos_offset = 0x7000,
 };
 
-static struct qcom_icc_node *msm8939_snoc_mm_nodes[] = {
+static struct qcom_icc_node * const msm8939_snoc_mm_nodes[] = {
        [MASTER_VIDEO_P0] = &mas_video,
        [MASTER_JPEG] = &mas_jpeg,
        [MASTER_VFE] = &mas_vfe,
@@ -1301,7 +1301,7 @@ static struct qcom_icc_node *msm8939_snoc_mm_nodes[] = {
        [SNOC_MM_INT_2] = &mm_int_2,
 };
 
-static struct qcom_icc_desc msm8939_snoc_mm = {
+static const struct qcom_icc_desc msm8939_snoc_mm = {
        .type = QCOM_ICC_NOC,
        .nodes = msm8939_snoc_mm_nodes,
        .num_nodes = ARRAY_SIZE(msm8939_snoc_mm_nodes),
@@ -1309,7 +1309,7 @@ static struct qcom_icc_desc msm8939_snoc_mm = {
        .qos_offset = 0x7000,
 };
 
-static struct qcom_icc_node *msm8939_bimc_nodes[] = {
+static struct qcom_icc_node * const msm8939_bimc_nodes[] = {
        [BIMC_SNOC_MAS] = &bimc_snoc_mas,
        [MASTER_AMPSS_M0] = &mas_apss,
        [MASTER_GRAPHICS_3D] = &mas_gfx,
@@ -1329,7 +1329,7 @@ static const struct regmap_config msm8939_bimc_regmap_config = {
        .fast_io        = true,
 };
 
-static struct qcom_icc_desc msm8939_bimc = {
+static const struct qcom_icc_desc msm8939_bimc = {
        .type = QCOM_ICC_BIMC,
        .nodes = msm8939_bimc_nodes,
        .num_nodes = ARRAY_SIZE(msm8939_bimc_nodes),
@@ -1337,7 +1337,7 @@ static struct qcom_icc_desc msm8939_bimc = {
        .qos_offset = 0x8000,
 };
 
-static struct qcom_icc_node *msm8939_pcnoc_nodes[] = {
+static struct qcom_icc_node * const msm8939_pcnoc_nodes[] = {
        [MASTER_BLSP_1] = &mas_blsp_1,
        [MASTER_DEHR] = &mas_dehr,
        [MASTER_LPASS] = &mas_audio,
@@ -1400,7 +1400,7 @@ static const struct regmap_config msm8939_pcnoc_regmap_config = {
        .fast_io        = true,
 };
 
-static struct qcom_icc_desc msm8939_pcnoc = {
+static const struct qcom_icc_desc msm8939_pcnoc = {
        .type = QCOM_ICC_NOC,
        .nodes = msm8939_pcnoc_nodes,
        .num_nodes = ARRAY_SIZE(msm8939_pcnoc_nodes),
index da68ce3..6fa0ad9 100644 (file)
@@ -220,7 +220,7 @@ struct msm8974_icc_node {
 };
 
 struct msm8974_icc_desc {
-       struct msm8974_icc_node **nodes;
+       struct msm8974_icc_node * const *nodes;
        size_t num_nodes;
 };
 
@@ -244,7 +244,7 @@ DEFINE_QNODE(bimc_to_snoc, MSM8974_BIMC_TO_SNOC, 8, 3, 2, MSM8974_SNOC_TO_BIMC,
 DEFINE_QNODE(slv_ebi_ch0, MSM8974_BIMC_SLV_EBI_CH0, 8, -1, 0);
 DEFINE_QNODE(slv_ampss_l2, MSM8974_BIMC_SLV_AMPSS_L2, 8, -1, 1);
 
-static struct msm8974_icc_node *msm8974_bimc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_bimc_nodes[] = {
        [BIMC_MAS_AMPSS_M0] = &mas_ampss_m0,
        [BIMC_MAS_AMPSS_M1] = &mas_ampss_m1,
        [BIMC_MAS_MSS_PROC] = &mas_mss_proc,
@@ -254,7 +254,7 @@ static struct msm8974_icc_node *msm8974_bimc_nodes[] = {
        [BIMC_SLV_AMPSS_L2] = &slv_ampss_l2,
 };
 
-static struct msm8974_icc_desc msm8974_bimc = {
+static const struct msm8974_icc_desc msm8974_bimc = {
        .nodes = msm8974_bimc_nodes,
        .num_nodes = ARRAY_SIZE(msm8974_bimc_nodes),
 };
@@ -297,7 +297,7 @@ DEFINE_QNODE(slv_ebi1_phy_cfg, MSM8974_CNOC_SLV_EBI1_PHY_CFG, 8, -1, 73);
 DEFINE_QNODE(slv_rpm, MSM8974_CNOC_SLV_RPM, 8, -1, 74);
 DEFINE_QNODE(slv_service_cnoc, MSM8974_CNOC_SLV_SERVICE_CNOC, 8, -1, 76);
 
-static struct msm8974_icc_node *msm8974_cnoc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_cnoc_nodes[] = {
        [CNOC_MAS_RPM_INST] = &mas_rpm_inst,
        [CNOC_MAS_RPM_DATA] = &mas_rpm_data,
        [CNOC_MAS_RPM_SYS] = &mas_rpm_sys,
@@ -337,7 +337,7 @@ static struct msm8974_icc_node *msm8974_cnoc_nodes[] = {
        [CNOC_SLV_SERVICE_CNOC] = &slv_service_cnoc,
 };
 
-static struct msm8974_icc_desc msm8974_cnoc = {
+static const struct msm8974_icc_desc msm8974_cnoc = {
        .nodes = msm8974_cnoc_nodes,
        .num_nodes = ARRAY_SIZE(msm8974_cnoc_nodes),
 };
@@ -365,7 +365,7 @@ DEFINE_QNODE(slv_mnoc_mpu_cfg, MSM8974_MNOC_SLV_MNOC_MPU_CFG, 16, -1, 14);
 DEFINE_QNODE(slv_onoc_mpu_cfg, MSM8974_MNOC_SLV_ONOC_MPU_CFG, 16, -1, 15);
 DEFINE_QNODE(slv_service_mnoc, MSM8974_MNOC_SLV_SERVICE_MNOC, 16, -1, 17);
 
-static struct msm8974_icc_node *msm8974_mnoc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_mnoc_nodes[] = {
        [MNOC_MAS_GRAPHICS_3D] = &mas_graphics_3d,
        [MNOC_MAS_JPEG] = &mas_jpeg,
        [MNOC_MAS_MDP_PORT0] = &mas_mdp_port0,
@@ -390,7 +390,7 @@ static struct msm8974_icc_node *msm8974_mnoc_nodes[] = {
        [MNOC_SLV_SERVICE_MNOC] = &slv_service_mnoc,
 };
 
-static struct msm8974_icc_desc msm8974_mnoc = {
+static const struct msm8974_icc_desc msm8974_mnoc = {
        .nodes = msm8974_mnoc_nodes,
        .num_nodes = ARRAY_SIZE(msm8974_mnoc_nodes),
 };
@@ -410,7 +410,7 @@ DEFINE_QNODE(ocmem_vnoc_to_onoc, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC, 16, 56, 79, MS
 DEFINE_QNODE(ocmem_vnoc_to_snoc, MSM8974_OCMEM_VNOC_TO_SNOC, 8, 57, 80);
 DEFINE_QNODE(mas_v_ocmem_gfx3d, MSM8974_OCMEM_VNOC_MAS_GFX3D, 8, 55, -1, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC);
 
-static struct msm8974_icc_node *msm8974_onoc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_onoc_nodes[] = {
        [OCMEM_NOC_TO_OCMEM_VNOC] = &ocmem_noc_to_ocmem_vnoc,
        [OCMEM_MAS_JPEG_OCMEM] = &mas_jpeg_ocmem,
        [OCMEM_MAS_MDP_OCMEM] = &mas_mdp_ocmem,
@@ -425,7 +425,7 @@ static struct msm8974_icc_node *msm8974_onoc_nodes[] = {
        [OCMEM_SLV_OCMEM] = &slv_ocmem,
 };
 
-static struct msm8974_icc_desc msm8974_onoc = {
+static const struct msm8974_icc_desc msm8974_onoc = {
        .nodes = msm8974_onoc_nodes,
        .num_nodes = ARRAY_SIZE(msm8974_onoc_nodes),
 };
@@ -458,7 +458,7 @@ DEFINE_QNODE(slv_pnoc_mpu_cfg, MSM8974_PNOC_SLV_PNOC_MPU_CFG, 8, -1, 43);
 DEFINE_QNODE(slv_prng, MSM8974_PNOC_SLV_PRNG, 8, -1, 44, MSM8974_PNOC_TO_SNOC);
 DEFINE_QNODE(slv_service_pnoc, MSM8974_PNOC_SLV_SERVICE_PNOC, 8, -1, 46);
 
-static struct msm8974_icc_node *msm8974_pnoc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_pnoc_nodes[] = {
        [PNOC_MAS_PNOC_CFG] = &mas_pnoc_cfg,
        [PNOC_MAS_SDCC_1] = &mas_sdcc_1,
        [PNOC_MAS_SDCC_3] = &mas_sdcc_3,
@@ -488,7 +488,7 @@ static struct msm8974_icc_node *msm8974_pnoc_nodes[] = {
        [PNOC_SLV_SERVICE_PNOC] = &slv_service_pnoc,
 };
 
-static struct msm8974_icc_desc msm8974_pnoc = {
+static const struct msm8974_icc_desc msm8974_pnoc = {
        .nodes = msm8974_pnoc_nodes,
        .num_nodes = ARRAY_SIZE(msm8974_pnoc_nodes),
 };
@@ -518,7 +518,7 @@ DEFINE_QNODE(slv_snoc_ocmem, MSM8974_SNOC_SLV_SNOC_OCMEM, 8, -1, 27);
 DEFINE_QNODE(slv_service_snoc, MSM8974_SNOC_SLV_SERVICE_SNOC, 8, -1, 29);
 DEFINE_QNODE(slv_qdss_stm, MSM8974_SNOC_SLV_QDSS_STM, 8, -1, 30);
 
-static struct msm8974_icc_node *msm8974_snoc_nodes[] = {
+static struct msm8974_icc_node * const msm8974_snoc_nodes[] = {
        [SNOC_MAS_LPASS_AHB] = &mas_lpass_ahb,
        [SNOC_MAS_QDSS_BAM] = &mas_qdss_bam,
        [SNOC_MAS_SNOC_CFG] = &mas_snoc_cfg,
@@ -545,7 +545,7 @@ static struct msm8974_icc_node *msm8974_snoc_nodes[] = {
        [SNOC_SLV_QDSS_STM] = &slv_qdss_stm,
 };
 
-static struct msm8974_icc_desc msm8974_snoc = {
+static const struct msm8974_icc_desc msm8974_snoc = {
        .nodes = msm8974_snoc_nodes,
        .num_nodes = ARRAY_SIZE(msm8974_snoc_nodes),
 };
@@ -648,7 +648,7 @@ static int msm8974_get_bw(struct icc_node *node, u32 *avg, u32 *peak)
 static int msm8974_icc_probe(struct platform_device *pdev)
 {
        const struct msm8974_icc_desc *desc;
-       struct msm8974_icc_node **qnodes;
+       struct msm8974_icc_node * const *qnodes;
        struct msm8974_icc_provider *qp;
        struct device *dev = &pdev->dev;
        struct icc_onecell_data *data;
index 499e11f..c2903ae 100644 (file)
@@ -1796,7 +1796,7 @@ static struct qcom_icc_node slv_srvc_snoc = {
        .qos.qos_mode = NOC_QOS_MODE_INVALID
 };
 
-static struct qcom_icc_node *a0noc_nodes[] = {
+static struct qcom_icc_node * const a0noc_nodes[] = {
        [MASTER_PCIE_0] = &mas_pcie_0,
        [MASTER_PCIE_1] = &mas_pcie_1,
        [MASTER_PCIE_2] = &mas_pcie_2
@@ -1820,7 +1820,7 @@ static const struct qcom_icc_desc msm8996_a0noc = {
        .regmap_cfg = &msm8996_a0noc_regmap_config
 };
 
-static struct qcom_icc_node *a1noc_nodes[] = {
+static struct qcom_icc_node * const a1noc_nodes[] = {
        [MASTER_CNOC_A1NOC] = &mas_cnoc_a1noc,
        [MASTER_CRYPTO_CORE0] = &mas_crypto_c0,
        [MASTER_PNOC_A1NOC] = &mas_pnoc_a1noc
@@ -1841,7 +1841,7 @@ static const struct qcom_icc_desc msm8996_a1noc = {
        .regmap_cfg = &msm8996_a1noc_regmap_config
 };
 
-static struct qcom_icc_node *a2noc_nodes[] = {
+static struct qcom_icc_node * const a2noc_nodes[] = {
        [MASTER_USB3] = &mas_usb3,
        [MASTER_IPA] = &mas_ipa,
        [MASTER_UFS] = &mas_ufs
@@ -1862,7 +1862,7 @@ static const struct qcom_icc_desc msm8996_a2noc = {
        .regmap_cfg = &msm8996_a2noc_regmap_config
 };
 
-static struct qcom_icc_node *bimc_nodes[] = {
+static struct qcom_icc_node * const bimc_nodes[] = {
        [MASTER_AMPSS_M0] = &mas_apps_proc,
        [MASTER_GRAPHICS_3D] = &mas_oxili,
        [MASTER_MNOC_BIMC] = &mas_mnoc_bimc,
@@ -1888,7 +1888,7 @@ static const struct qcom_icc_desc msm8996_bimc = {
        .regmap_cfg = &msm8996_bimc_regmap_config
 };
 
-static struct qcom_icc_node *cnoc_nodes[] = {
+static struct qcom_icc_node * const cnoc_nodes[] = {
        [MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
        [MASTER_QDSS_DAP] = &mas_qdss_dap,
        [SLAVE_CNOC_A1NOC] = &slv_cnoc_a1noc,
@@ -1946,7 +1946,7 @@ static const struct qcom_icc_desc msm8996_cnoc = {
        .regmap_cfg = &msm8996_cnoc_regmap_config
 };
 
-static struct qcom_icc_node *mnoc_nodes[] = {
+static struct qcom_icc_node * const mnoc_nodes[] = {
        [MASTER_CNOC_MNOC_CFG] = &mas_cnoc_mnoc_cfg,
        [MASTER_CPP] = &mas_cpp,
        [MASTER_JPEG] = &mas_jpeg,
@@ -2001,7 +2001,7 @@ static const struct qcom_icc_desc msm8996_mnoc = {
        .regmap_cfg = &msm8996_mnoc_regmap_config
 };
 
-static struct qcom_icc_node *pnoc_nodes[] = {
+static struct qcom_icc_node * const pnoc_nodes[] = {
        [MASTER_SNOC_PNOC] = &mas_snoc_pnoc,
        [MASTER_SDCC_1] = &mas_sdcc_1,
        [MASTER_SDCC_2] = &mas_sdcc_2,
@@ -2037,7 +2037,7 @@ static const struct qcom_icc_desc msm8996_pnoc = {
        .regmap_cfg = &msm8996_pnoc_regmap_config
 };
 
-static struct qcom_icc_node *snoc_nodes[] = {
+static struct qcom_icc_node * const snoc_nodes[] = {
        [MASTER_HMSS] = &mas_hmss,
        [MASTER_QDSS_BAM] = &mas_qdss_bam,
        [MASTER_SNOC_CFG] = &mas_snoc_cfg,
index eec1309..4198656 100644 (file)
@@ -67,7 +67,7 @@ struct qcom_osm_l3_node {
 };
 
 struct qcom_osm_l3_desc {
-       const struct qcom_osm_l3_node **nodes;
+       const struct qcom_osm_l3_node * const *nodes;
        size_t num_nodes;
        unsigned int lut_row_size;
        unsigned int reg_freq_lut;
@@ -86,7 +86,7 @@ struct qcom_osm_l3_desc {
 DEFINE_QNODE(sdm845_osm_apps_l3, SDM845_MASTER_OSM_L3_APPS, 16, SDM845_SLAVE_OSM_L3);
 DEFINE_QNODE(sdm845_osm_l3, SDM845_SLAVE_OSM_L3, 16);
 
-static const struct qcom_osm_l3_node *sdm845_osm_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sdm845_osm_l3_nodes[] = {
        [MASTER_OSM_L3_APPS] = &sdm845_osm_apps_l3,
        [SLAVE_OSM_L3] = &sdm845_osm_l3,
 };
@@ -102,7 +102,7 @@ static const struct qcom_osm_l3_desc sdm845_icc_osm_l3 = {
 DEFINE_QNODE(sc7180_osm_apps_l3, SC7180_MASTER_OSM_L3_APPS, 16, SC7180_SLAVE_OSM_L3);
 DEFINE_QNODE(sc7180_osm_l3, SC7180_SLAVE_OSM_L3, 16);
 
-static const struct qcom_osm_l3_node *sc7180_osm_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sc7180_osm_l3_nodes[] = {
        [MASTER_OSM_L3_APPS] = &sc7180_osm_apps_l3,
        [SLAVE_OSM_L3] = &sc7180_osm_l3,
 };
@@ -118,7 +118,7 @@ static const struct qcom_osm_l3_desc sc7180_icc_osm_l3 = {
 DEFINE_QNODE(sc7280_epss_apps_l3, SC7280_MASTER_EPSS_L3_APPS, 32, SC7280_SLAVE_EPSS_L3);
 DEFINE_QNODE(sc7280_epss_l3, SC7280_SLAVE_EPSS_L3, 32);
 
-static const struct qcom_osm_l3_node *sc7280_epss_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sc7280_epss_l3_nodes[] = {
        [MASTER_EPSS_L3_APPS] = &sc7280_epss_apps_l3,
        [SLAVE_EPSS_L3_SHARED] = &sc7280_epss_l3,
 };
@@ -134,7 +134,7 @@ static const struct qcom_osm_l3_desc sc7280_icc_epss_l3 = {
 DEFINE_QNODE(sc8180x_osm_apps_l3, SC8180X_MASTER_OSM_L3_APPS, 32, SC8180X_SLAVE_OSM_L3);
 DEFINE_QNODE(sc8180x_osm_l3, SC8180X_SLAVE_OSM_L3, 32);
 
-static const struct qcom_osm_l3_node *sc8180x_osm_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sc8180x_osm_l3_nodes[] = {
        [MASTER_OSM_L3_APPS] = &sc8180x_osm_apps_l3,
        [SLAVE_OSM_L3] = &sc8180x_osm_l3,
 };
@@ -150,7 +150,7 @@ static const struct qcom_osm_l3_desc sc8180x_icc_osm_l3 = {
 DEFINE_QNODE(sm8150_osm_apps_l3, SM8150_MASTER_OSM_L3_APPS, 32, SM8150_SLAVE_OSM_L3);
 DEFINE_QNODE(sm8150_osm_l3, SM8150_SLAVE_OSM_L3, 32);
 
-static const struct qcom_osm_l3_node *sm8150_osm_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sm8150_osm_l3_nodes[] = {
        [MASTER_OSM_L3_APPS] = &sm8150_osm_apps_l3,
        [SLAVE_OSM_L3] = &sm8150_osm_l3,
 };
@@ -166,7 +166,7 @@ static const struct qcom_osm_l3_desc sm8150_icc_osm_l3 = {
 DEFINE_QNODE(sm8250_epss_apps_l3, SM8250_MASTER_EPSS_L3_APPS, 32, SM8250_SLAVE_EPSS_L3);
 DEFINE_QNODE(sm8250_epss_l3, SM8250_SLAVE_EPSS_L3, 32);
 
-static const struct qcom_osm_l3_node *sm8250_epss_l3_nodes[] = {
+static const struct qcom_osm_l3_node * const sm8250_epss_l3_nodes[] = {
        [MASTER_EPSS_L3_APPS] = &sm8250_epss_apps_l3,
        [SLAVE_EPSS_L3_SHARED] = &sm8250_epss_l3,
 };
@@ -228,7 +228,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
        const struct qcom_osm_l3_desc *desc;
        struct icc_onecell_data *data;
        struct icc_provider *provider;
-       const struct qcom_osm_l3_node **qnodes;
+       const struct qcom_osm_l3_node * const *qnodes;
        struct icc_node *node;
        size_t num_nodes;
        struct clk *clk;
index 74404e0..0da612d 100644 (file)
@@ -1174,7 +1174,7 @@ static struct qcom_icc_node slv_anoc_snoc = {
 };
 
 /* NoC descriptors */
-static struct qcom_icc_node *qcm2290_bimc_nodes[] = {
+static struct qcom_icc_node * const qcm2290_bimc_nodes[] = {
        [MASTER_APPSS_PROC] = &mas_appss_proc,
        [MASTER_SNOC_BIMC_RT] = &mas_snoc_bimc_rt,
        [MASTER_SNOC_BIMC_NRT] = &mas_snoc_bimc_nrt,
@@ -1193,7 +1193,7 @@ static const struct regmap_config qcm2290_bimc_regmap_config = {
        .fast_io        = true,
 };
 
-static struct qcom_icc_desc qcm2290_bimc = {
+static const struct qcom_icc_desc qcm2290_bimc = {
        .type = QCOM_ICC_BIMC,
        .nodes = qcm2290_bimc_nodes,
        .num_nodes = ARRAY_SIZE(qcm2290_bimc_nodes),
@@ -1202,7 +1202,7 @@ static struct qcom_icc_desc qcm2290_bimc = {
        .qos_offset = 0x8000,
 };
 
-static struct qcom_icc_node *qcm2290_cnoc_nodes[] = {
+static struct qcom_icc_node * const qcm2290_cnoc_nodes[] = {
        [MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
        [MASTER_QDSS_DAP] = &mas_qdss_dap,
        [SLAVE_BIMC_CFG] = &slv_bimc_cfg,
@@ -1248,14 +1248,14 @@ static const struct regmap_config qcm2290_cnoc_regmap_config = {
        .fast_io        = true,
 };
 
-static struct qcom_icc_desc qcm2290_cnoc = {
+static const struct qcom_icc_desc qcm2290_cnoc = {
        .type = QCOM_ICC_NOC,
        .nodes = qcm2290_cnoc_nodes,
        .num_nodes = ARRAY_SIZE(qcm2290_cnoc_nodes),
        .regmap_cfg = &qcm2290_cnoc_regmap_config,
 };
 
-static struct qcom_icc_node *qcm2290_snoc_nodes[] = {
+static struct qcom_icc_node * const qcm2290_snoc_nodes[] = {
        [MASTER_CRYPTO_CORE0] = &mas_crypto_core0,
        [MASTER_SNOC_CFG] = &mas_snoc_cfg,
        [MASTER_TIC] = &mas_tic,
@@ -1289,7 +1289,7 @@ static const struct regmap_config qcm2290_snoc_regmap_config = {
        .fast_io        = true,
 };
 
-static struct qcom_icc_desc qcm2290_snoc = {
+static const struct qcom_icc_desc qcm2290_snoc = {
        .type = QCOM_ICC_QNOC,
        .nodes = qcm2290_snoc_nodes,
        .num_nodes = ARRAY_SIZE(qcm2290_snoc_nodes),
@@ -1298,25 +1298,25 @@ static struct qcom_icc_desc qcm2290_snoc = {
        .qos_offset = 0x15000,
 };
 
-static struct qcom_icc_node *qcm2290_qup_virt_nodes[] = {
+static struct qcom_icc_node * const qcm2290_qup_virt_nodes[] = {
        [MASTER_QUP_CORE_0] = &mas_qup_core_0,
        [SLAVE_QUP_CORE_0] = &slv_qup_core_0
 };
 
-static struct qcom_icc_desc qcm2290_qup_virt = {
+static const struct qcom_icc_desc qcm2290_qup_virt = {
        .type = QCOM_ICC_QNOC,
        .nodes = qcm2290_qup_virt_nodes,
        .num_nodes = ARRAY_SIZE(qcm2290_qup_virt_nodes),
 };
 
-static struct qcom_icc_node *qcm2290_mmnrt_virt_nodes[] = {
+static struct qcom_icc_node * const qcm2290_mmnrt_virt_nodes[] = {
        [MASTER_CAMNOC_SF] = &mas_camnoc_sf,
        [MASTER_VIDEO_P0] = &mas_video_p0,
        [MASTER_VIDEO_PROC] = &mas_video_proc,
        [SLAVE_SNOC_BIMC_NRT] = &slv_snoc_bimc_nrt,
 };
 
-static struct qcom_icc_desc qcm2290_mmnrt_virt = {
+static const struct qcom_icc_desc qcm2290_mmnrt_virt = {
        .type = QCOM_ICC_QNOC,
        .nodes = qcm2290_mmnrt_virt_nodes,
        .num_nodes = ARRAY_SIZE(qcm2290_mmnrt_virt_nodes),
@@ -1324,13 +1324,13 @@ static struct qcom_icc_desc qcm2290_mmnrt_virt = {
        .qos_offset = 0x15000,
 };
 
-static struct qcom_icc_node *qcm2290_mmrt_virt_nodes[] = {
+static struct qcom_icc_node * const qcm2290_mmrt_virt_nodes[] = {
        [MASTER_CAMNOC_HF] = &mas_camnoc_hf,
        [MASTER_MDP0] = &mas_mdp0,
        [SLAVE_SNOC_BIMC_RT] = &slv_snoc_bimc_rt,
 };
 
-static struct qcom_icc_desc qcm2290_mmrt_virt = {
+static const struct qcom_icc_desc qcm2290_mmrt_virt = {
        .type = QCOM_ICC_QNOC,
        .nodes = qcm2290_mmrt_virt_nodes,
        .num_nodes = ARRAY_SIZE(qcm2290_mmrt_virt_nodes),
index 416c8bf..fae1553 100644 (file)
@@ -974,7 +974,7 @@ static struct qcom_icc_node slv_lpass = {
        .slv_rpm_id = -1,
 };
 
-static struct qcom_icc_node *qcs404_bimc_nodes[] = {
+static struct qcom_icc_node * const qcs404_bimc_nodes[] = {
        [MASTER_AMPSS_M0] = &mas_apps_proc,
        [MASTER_OXILI] = &mas_oxili,
        [MASTER_MDP_PORT0] = &mas_mdp,
@@ -984,12 +984,12 @@ static struct qcom_icc_node *qcs404_bimc_nodes[] = {
        [SLAVE_BIMC_SNOC] = &slv_bimc_snoc,
 };
 
-static struct qcom_icc_desc qcs404_bimc = {
+static const struct qcom_icc_desc qcs404_bimc = {
        .nodes = qcs404_bimc_nodes,
        .num_nodes = ARRAY_SIZE(qcs404_bimc_nodes),
 };
 
-static struct qcom_icc_node *qcs404_pcnoc_nodes[] = {
+static struct qcom_icc_node * const qcs404_pcnoc_nodes[] = {
        [MASTER_SPDM] = &mas_spdm,
        [MASTER_BLSP_1] = &mas_blsp_1,
        [MASTER_BLSP_2] = &mas_blsp_2,
@@ -1038,12 +1038,12 @@ static struct qcom_icc_node *qcs404_pcnoc_nodes[] = {
        [SLAVE_PCNOC_SNOC] = &slv_pcnoc_snoc,
 };
 
-static struct qcom_icc_desc qcs404_pcnoc = {
+static const struct qcom_icc_desc qcs404_pcnoc = {
        .nodes = qcs404_pcnoc_nodes,
        .num_nodes = ARRAY_SIZE(qcs404_pcnoc_nodes),
 };
 
-static struct qcom_icc_node *qcs404_snoc_nodes[] = {
+static struct qcom_icc_node * const qcs404_snoc_nodes[] = {
        [MASTER_QDSS_BAM] = &mas_qdss_bam,
        [MASTER_BIMC_SNOC] = &mas_bimc_snoc,
        [MASTER_PCNOC_SNOC] = &mas_pcnoc_snoc,
@@ -1066,7 +1066,7 @@ static struct qcom_icc_node *qcs404_snoc_nodes[] = {
        [SLAVE_LPASS] = &slv_lpass,
 };
 
-static struct qcom_icc_desc qcs404_snoc = {
+static const struct qcom_icc_desc qcs404_snoc = {
        .nodes = qcs404_snoc_nodes,
        .num_nodes = ARRAY_SIZE(qcs404_snoc_nodes),
 };
index 5f7c0f8..35cd448 100644 (file)
@@ -178,11 +178,11 @@ DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
 DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_aggre2_noc);
 DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gemnoc);
 
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
        &bcm_cn1,
 };
 
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
        [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
        [MASTER_QSPI] = &qhm_qspi,
        [MASTER_QUP_0] = &qhm_qup_0,
@@ -193,18 +193,18 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
        [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
 };
 
-static struct qcom_icc_desc sc7180_aggre1_noc = {
+static const struct qcom_icc_desc sc7180_aggre1_noc = {
        .nodes = aggre1_noc_nodes,
        .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
        .bcms = aggre1_noc_bcms,
        .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
 };
 
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
        &bcm_ce0,
 };
 
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
        [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
        [MASTER_QDSS_BAM] = &qhm_qdss_bam,
        [MASTER_QUP_1] = &qhm_qup_1,
@@ -216,56 +216,56 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
        [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
 };
 
-static struct qcom_icc_desc sc7180_aggre2_noc = {
+static const struct qcom_icc_desc sc7180_aggre2_noc = {
        .nodes = aggre2_noc_nodes,
        .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
        .bcms = aggre2_noc_bcms,
        .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
 };
 
-static struct qcom_icc_bcm *camnoc_virt_bcms[] = {
+static struct qcom_icc_bcm * const camnoc_virt_bcms[] = {
        &bcm_mm1,
 };
 
-static struct qcom_icc_node *camnoc_virt_nodes[] = {
+static struct qcom_icc_node * const camnoc_virt_nodes[] = {
        [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
        [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
        [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
        [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
 };
 
-static struct qcom_icc_desc sc7180_camnoc_virt = {
+static const struct qcom_icc_desc sc7180_camnoc_virt = {
        .nodes = camnoc_virt_nodes,
        .num_nodes = ARRAY_SIZE(camnoc_virt_nodes),
        .bcms = camnoc_virt_bcms,
        .num_bcms = ARRAY_SIZE(camnoc_virt_bcms),
 };
 
-static struct qcom_icc_bcm *compute_noc_bcms[] = {
+static struct qcom_icc_bcm * const compute_noc_bcms[] = {
        &bcm_co0,
        &bcm_co2,
        &bcm_co3,
 };
 
-static struct qcom_icc_node *compute_noc_nodes[] = {
+static struct qcom_icc_node * const compute_noc_nodes[] = {
        [MASTER_NPU] = &qnm_npu,
        [MASTER_NPU_PROC] = &qxm_npu_dsp,
        [SLAVE_CDSP_GEM_NOC] = &qns_cdsp_gemnoc,
 };
 
-static struct qcom_icc_desc sc7180_compute_noc = {
+static const struct qcom_icc_desc sc7180_compute_noc = {
        .nodes = compute_noc_nodes,
        .num_nodes = ARRAY_SIZE(compute_noc_nodes),
        .bcms = compute_noc_bcms,
        .num_bcms = ARRAY_SIZE(compute_noc_bcms),
 };
 
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
        &bcm_cn0,
        &bcm_cn1,
 };
 
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
        [MASTER_SNOC_CNOC] = &qnm_snoc,
        [MASTER_QDSS_DAP] = &xm_qdss_dap,
        [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
@@ -321,32 +321,32 @@ static struct qcom_icc_node *config_noc_nodes[] = {
        [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
 };
 
-static struct qcom_icc_desc sc7180_config_noc = {
+static const struct qcom_icc_desc sc7180_config_noc = {
        .nodes = config_noc_nodes,
        .num_nodes = ARRAY_SIZE(config_noc_nodes),
        .bcms = config_noc_bcms,
        .num_bcms = ARRAY_SIZE(config_noc_bcms),
 };
 
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
        [MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
        [SLAVE_GEM_NOC_CFG] = &qhs_gemnoc,
        [SLAVE_LLCC_CFG] = &qhs_llcc,
 };
 
-static struct qcom_icc_desc sc7180_dc_noc = {
+static const struct qcom_icc_desc sc7180_dc_noc = {
        .nodes = dc_noc_nodes,
        .num_nodes = ARRAY_SIZE(dc_noc_nodes),
 };
 
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
        &bcm_sh0,
        &bcm_sh2,
        &bcm_sh3,
        &bcm_sh4,
 };
 
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
        [MASTER_APPSS_PROC] = &acm_apps0,
        [MASTER_SYS_TCU] = &acm_sys_tcu,
        [MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg,
@@ -362,7 +362,7 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
        [SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
 };
 
-static struct qcom_icc_desc sc7180_gem_noc = {
+static const struct qcom_icc_desc sc7180_gem_noc = {
        .nodes = gem_noc_nodes,
        .num_nodes = ARRAY_SIZE(gem_noc_nodes),
        .bcms = gem_noc_bcms,
@@ -374,25 +374,25 @@ static struct qcom_icc_bcm *mc_virt_bcms[] = {
        &bcm_mc0,
 };
 
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
        [MASTER_LLCC] = &llcc_mc,
        [SLAVE_EBI1] = &ebi,
 };
 
-static struct qcom_icc_desc sc7180_mc_virt = {
+static const struct qcom_icc_desc sc7180_mc_virt = {
        .nodes = mc_virt_nodes,
        .num_nodes = ARRAY_SIZE(mc_virt_nodes),
        .bcms = mc_virt_bcms,
        .num_bcms = ARRAY_SIZE(mc_virt_bcms),
 };
 
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
        &bcm_mm0,
        &bcm_mm1,
        &bcm_mm2,
 };
 
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
        [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
        [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
        [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
@@ -406,14 +406,14 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
        [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
 };
 
-static struct qcom_icc_desc sc7180_mmss_noc = {
+static const struct qcom_icc_desc sc7180_mmss_noc = {
        .nodes = mmss_noc_nodes,
        .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
        .bcms = mmss_noc_bcms,
        .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
 };
 
-static struct qcom_icc_node *npu_noc_nodes[] = {
+static struct qcom_icc_node * const npu_noc_nodes[] = {
        [MASTER_NPU_SYS] = &amm_npu_sys,
        [MASTER_NPU_NOC_CFG] = &qhm_npu_cfg,
        [SLAVE_NPU_CAL_DP0] = &qhs_cal_dp0,
@@ -427,30 +427,30 @@ static struct qcom_icc_node *npu_noc_nodes[] = {
        [SLAVE_SERVICE_NPU_NOC] = &srvc_noc,
 };
 
-static struct qcom_icc_desc sc7180_npu_noc = {
+static const struct qcom_icc_desc sc7180_npu_noc = {
        .nodes = npu_noc_nodes,
        .num_nodes = ARRAY_SIZE(npu_noc_nodes),
 };
 
-static struct qcom_icc_bcm *qup_virt_bcms[] = {
+static struct qcom_icc_bcm * const qup_virt_bcms[] = {
        &bcm_qup0,
 };
 
-static struct qcom_icc_node *qup_virt_nodes[] = {
+static struct qcom_icc_node * const qup_virt_nodes[] = {
        [MASTER_QUP_CORE_0] = &qup_core_master_1,
        [MASTER_QUP_CORE_1] = &qup_core_master_2,
        [SLAVE_QUP_CORE_0] = &qup_core_slave_1,
        [SLAVE_QUP_CORE_1] = &qup_core_slave_2,
 };
 
-static struct qcom_icc_desc sc7180_qup_virt = {
+static const struct qcom_icc_desc sc7180_qup_virt = {
        .nodes = qup_virt_nodes,
        .num_nodes = ARRAY_SIZE(qup_virt_nodes),
        .bcms = qup_virt_bcms,
        .num_bcms = ARRAY_SIZE(qup_virt_bcms),
 };
 
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
        &bcm_sn0,
        &bcm_sn1,
        &bcm_sn2,
@@ -461,7 +461,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
        &bcm_sn12,
 };
 
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
        [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
        [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
        [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
@@ -478,7 +478,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
        [SLAVE_TCU] = &xs_sys_tcu_cfg,
 };
 
-static struct qcom_icc_desc sc7180_system_noc = {
+static const struct qcom_icc_desc sc7180_system_noc = {
        .nodes = system_noc_nodes,
        .num_nodes = ARRAY_SIZE(system_noc_nodes),
        .bcms = system_noc_bcms,
index f8b34f6..971f538 100644 (file)
@@ -1476,13 +1476,13 @@ static struct qcom_icc_bcm bcm_sn14 = {
        .nodes = { &qns_pcie_mem_noc },
 };
 
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
        &bcm_sn5,
        &bcm_sn6,
        &bcm_sn14,
 };
 
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
        [MASTER_QSPI_0] = &qhm_qspi,
        [MASTER_QUP_0] = &qhm_qup0,
        [MASTER_QUP_1] = &qhm_qup1,
@@ -1500,18 +1500,18 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
        [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
 };
 
-static struct qcom_icc_desc sc7280_aggre1_noc = {
+static const struct qcom_icc_desc sc7280_aggre1_noc = {
        .nodes = aggre1_noc_nodes,
        .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
        .bcms = aggre1_noc_bcms,
        .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
 };
 
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
        &bcm_ce0,
 };
 
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
        [MASTER_QDSS_BAM] = &qhm_qdss_bam,
        [MASTER_A2NOC_CFG] = &qnm_a2noc_cfg,
        [MASTER_CNOC_A2NOC] = &qnm_cnoc_datapath,
@@ -1522,38 +1522,38 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
        [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
 };
 
-static struct qcom_icc_desc sc7280_aggre2_noc = {
+static const struct qcom_icc_desc sc7280_aggre2_noc = {
        .nodes = aggre2_noc_nodes,
        .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
        .bcms = aggre2_noc_bcms,
        .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
 };
 
-static struct qcom_icc_bcm *clk_virt_bcms[] = {
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
        &bcm_qup0,
        &bcm_qup1,
 };
 
-static struct qcom_icc_node *clk_virt_nodes[] = {
+static struct qcom_icc_node * const clk_virt_nodes[] = {
        [MASTER_QUP_CORE_0] = &qup0_core_master,
        [MASTER_QUP_CORE_1] = &qup1_core_master,
        [SLAVE_QUP_CORE_0] = &qup0_core_slave,
        [SLAVE_QUP_CORE_1] = &qup1_core_slave,
 };
 
-static struct qcom_icc_desc sc7280_clk_virt = {
+static const struct qcom_icc_desc sc7280_clk_virt = {
        .nodes = clk_virt_nodes,
        .num_nodes = ARRAY_SIZE(clk_virt_nodes),
        .bcms = clk_virt_bcms,
        .num_bcms = ARRAY_SIZE(clk_virt_bcms),
 };
 
-static struct qcom_icc_bcm *cnoc2_bcms[] = {
+static struct qcom_icc_bcm * const cnoc2_bcms[] = {
        &bcm_cn1,
        &bcm_cn2,
 };
 
-static struct qcom_icc_node *cnoc2_nodes[] = {
+static struct qcom_icc_node * const cnoc2_nodes[] = {
        [MASTER_CNOC3_CNOC2] = &qnm_cnoc3_cnoc2,
        [MASTER_QDSS_DAP] = &xm_qdss_dap,
        [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
@@ -1603,21 +1603,21 @@ static struct qcom_icc_node *cnoc2_nodes[] = {
        [SLAVE_SNOC_CFG] = &qns_snoc_cfg,
 };
 
-static struct qcom_icc_desc sc7280_cnoc2 = {
+static const struct qcom_icc_desc sc7280_cnoc2 = {
        .nodes = cnoc2_nodes,
        .num_nodes = ARRAY_SIZE(cnoc2_nodes),
        .bcms = cnoc2_bcms,
        .num_bcms = ARRAY_SIZE(cnoc2_bcms),
 };
 
-static struct qcom_icc_bcm *cnoc3_bcms[] = {
+static struct qcom_icc_bcm * const cnoc3_bcms[] = {
        &bcm_cn0,
        &bcm_cn1,
        &bcm_sn3,
        &bcm_sn4,
 };
 
-static struct qcom_icc_node *cnoc3_nodes[] = {
+static struct qcom_icc_node * const cnoc3_nodes[] = {
        [MASTER_CNOC2_CNOC3] = &qnm_cnoc2_cnoc3,
        [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
        [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
@@ -1635,37 +1635,37 @@ static struct qcom_icc_node *cnoc3_nodes[] = {
        [SLAVE_TCU] = &xs_sys_tcu_cfg,
 };
 
-static struct qcom_icc_desc sc7280_cnoc3 = {
+static const struct qcom_icc_desc sc7280_cnoc3 = {
        .nodes = cnoc3_nodes,
        .num_nodes = ARRAY_SIZE(cnoc3_nodes),
        .bcms = cnoc3_bcms,
        .num_bcms = ARRAY_SIZE(cnoc3_bcms),
 };
 
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
 };
 
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
        [MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
        [SLAVE_LLCC_CFG] = &qhs_llcc,
        [SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
 };
 
-static struct qcom_icc_desc sc7280_dc_noc = {
+static const struct qcom_icc_desc sc7280_dc_noc = {
        .nodes = dc_noc_nodes,
        .num_nodes = ARRAY_SIZE(dc_noc_nodes),
        .bcms = dc_noc_bcms,
        .num_bcms = ARRAY_SIZE(dc_noc_bcms),
 };
 
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
        &bcm_sh0,
        &bcm_sh2,
        &bcm_sh3,
        &bcm_sh4,
 };
 
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
        [MASTER_GPU_TCU] = &alm_gpu_tcu,
        [MASTER_SYS_TCU] = &alm_sys_tcu,
        [MASTER_APPSS_PROC] = &chm_apps,
@@ -1687,17 +1687,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
        [SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
 };
 
-static struct qcom_icc_desc sc7280_gem_noc = {
+static const struct qcom_icc_desc sc7280_gem_noc = {
        .nodes = gem_noc_nodes,
        .num_nodes = ARRAY_SIZE(gem_noc_nodes),
        .bcms = gem_noc_bcms,
        .num_bcms = ARRAY_SIZE(gem_noc_bcms),
 };
 
-static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
 };
 
-static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
        [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
        [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
        [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
@@ -1707,38 +1707,38 @@ static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
        [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
 };
 
-static struct qcom_icc_desc sc7280_lpass_ag_noc = {
+static const struct qcom_icc_desc sc7280_lpass_ag_noc = {
        .nodes = lpass_ag_noc_nodes,
        .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
        .bcms = lpass_ag_noc_bcms,
        .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
 };
 
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
        &bcm_acv,
        &bcm_mc0,
 };
 
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
        [MASTER_LLCC] = &llcc_mc,
        [SLAVE_EBI1] = &ebi,
 };
 
-static struct qcom_icc_desc sc7280_mc_virt = {
+static const struct qcom_icc_desc sc7280_mc_virt = {
        .nodes = mc_virt_nodes,
        .num_nodes = ARRAY_SIZE(mc_virt_nodes),
        .bcms = mc_virt_bcms,
        .num_bcms = ARRAY_SIZE(mc_virt_bcms),
 };
 
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
        &bcm_mm0,
        &bcm_mm1,
        &bcm_mm4,
        &bcm_mm5,
 };
 
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
        [MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg,
        [MASTER_VIDEO_P0] = &qnm_video0,
        [MASTER_VIDEO_PROC] = &qnm_video_cpu,
@@ -1751,40 +1751,40 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
        [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
 };
 
-static struct qcom_icc_desc sc7280_mmss_noc = {
+static const struct qcom_icc_desc sc7280_mmss_noc = {
        .nodes = mmss_noc_nodes,
        .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
        .bcms = mmss_noc_bcms,
        .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
 };
 
-static struct qcom_icc_bcm *nsp_noc_bcms[] = {
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
        &bcm_co0,
        &bcm_co3,
 };
 
-static struct qcom_icc_node *nsp_noc_nodes[] = {
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
        [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
        [MASTER_CDSP_PROC] = &qxm_nsp,
        [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
        [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
 };
 
-static struct qcom_icc_desc sc7280_nsp_noc = {
+static const struct qcom_icc_desc sc7280_nsp_noc = {
        .nodes = nsp_noc_nodes,
        .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
        .bcms = nsp_noc_bcms,
        .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
 };
 
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
        &bcm_sn0,
        &bcm_sn2,
        &bcm_sn7,
        &bcm_sn8,
 };
 
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
        [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
        [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
        [MASTER_SNOC_CFG] = &qnm_snoc_cfg,
@@ -1795,7 +1795,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
        [SLAVE_SERVICE_SNOC] = &srvc_snoc,
 };
 
-static struct qcom_icc_desc sc7280_system_noc = {
+static const struct qcom_icc_desc sc7280_system_noc = {
        .nodes = system_noc_nodes,
        .num_nodes = ARRAY_SIZE(system_noc_nodes),
        .bcms = system_noc_bcms,
index e9adf05..8e32ca9 100644 (file)
 #include "icc-rpmh.h"
 #include "sc8180x.h"
 
-DEFINE_QNODE(mas_qhm_a1noc_cfg, SC8180X_MASTER_A1NOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_A1NOC);
-DEFINE_QNODE(mas_xm_ufs_card, SC8180X_MASTER_UFS_CARD, 1, 8, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_ufs_g4, SC8180X_MASTER_UFS_GEN4, 1, 8, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_ufs_mem, SC8180X_MASTER_UFS_MEM, 1, 8, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_usb3_0, SC8180X_MASTER_USB3, 1, 8, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_usb3_1, SC8180X_MASTER_USB3_1, 1, 8, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_usb3_2, SC8180X_MASTER_USB3_2, 1, 16, SC8180X_A1NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_a2noc_cfg, SC8180X_MASTER_A2NOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_A2NOC);
-DEFINE_QNODE(mas_qhm_qdss_bam, SC8180X_MASTER_QDSS_BAM, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_qspi, SC8180X_MASTER_QSPI_0, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_qspi1, SC8180X_MASTER_QSPI_1, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_qup0, SC8180X_MASTER_QUP_0, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_qup1, SC8180X_MASTER_QUP_1, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_qup2, SC8180X_MASTER_QUP_2, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qhm_sensorss_ahb, SC8180X_MASTER_SENSORS_AHB, 1, 4, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qxm_crypto, SC8180X_MASTER_CRYPTO_CORE_0, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qxm_ipa, SC8180X_MASTER_IPA, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_emac, SC8180X_MASTER_EMAC, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_pcie3_0, SC8180X_MASTER_PCIE, 1, 8, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(mas_xm_pcie3_1, SC8180X_MASTER_PCIE_1, 1, 16, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(mas_xm_pcie3_2, SC8180X_MASTER_PCIE_2, 1, 8, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(mas_xm_pcie3_3, SC8180X_MASTER_PCIE_3, 1, 16, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(mas_xm_qdss_etr, SC8180X_MASTER_QDSS_ETR, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_sdc2, SC8180X_MASTER_SDCC_2, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_xm_sdc4, SC8180X_MASTER_SDCC_4, 1, 8, SC8180X_A2NOC_SNOC_SLV);
-DEFINE_QNODE(mas_qxm_camnoc_hf0_uncomp, SC8180X_MASTER_CAMNOC_HF0_UNCOMP, 1, 32, SC8180X_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(mas_qxm_camnoc_hf1_uncomp, SC8180X_MASTER_CAMNOC_HF1_UNCOMP, 1, 32, SC8180X_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(mas_qxm_camnoc_sf_uncomp, SC8180X_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SC8180X_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(mas_qnm_npu, SC8180X_MASTER_NPU, 1, 32, SC8180X_SLAVE_CDSP_MEM_NOC);
-DEFINE_QNODE(mas_qnm_snoc, SC8180X_SNOC_CNOC_MAS, 1, 8, SC8180X_SLAVE_TLMM_SOUTH, SC8180X_SLAVE_CDSP_CFG, SC8180X_SLAVE_SPSS_CFG, SC8180X_SLAVE_CAMERA_CFG, SC8180X_SLAVE_SDCC_4, SC8180X_SLAVE_AHB2PHY_CENTER, SC8180X_SLAVE_SDCC_2, SC8180X_SLAVE_PCIE_2_CFG, SC8180X_SLAVE_CNOC_MNOC_CFG, SC8180X_SLAVE_EMAC_CFG, SC8180X_SLAVE_QSPI_0, SC8180X_SLAVE_QSPI_1, SC8180X_SLAVE_TLMM_EAST, SC8180X_SLAVE_SNOC_CFG, SC8180X_SLAVE_AHB2PHY_EAST, SC8180X_SLAVE_GLM, SC8180X_SLAVE_PDM, SC8180X_SLAVE_PCIE_1_CFG, SC8180X_SLAVE_A2NOC_CFG, SC8180X_SLAVE_QDSS_CFG, SC8180X_SLAVE_DISPLAY_CFG, SC8180X_SLAVE_TCSR, SC8180X_SLAVE_UFS_MEM_0_CFG, SC8180X_SLAVE_CNOC_DDRSS, SC8180X_SLAVE_PCIE_0_CFG, SC8180X_SLAVE_QUP_1, SC8180X_SLAVE_QUP_2, SC8180X_SLAVE_NPU_CFG, SC8180X_SLAVE_CRYPTO_0_CFG, SC8180X_SLAVE_GRAPHICS_3D_CFG, SC8180X_SLAVE_VENUS_CFG, SC8180X_SLAVE_TSIF, SC8180X_SLAVE_IPA_CFG, SC8180X_SLAVE_CLK_CTL, SC8180X_SLAVE_SECURITY, SC8180X_SLAVE_AOP, SC8180X_SLAVE_AHB2PHY_WEST, SC8180X_SLAVE_AHB2PHY_SOUTH, SC8180X_SLAVE_SERVICE_CNOC, SC8180X_SLAVE_UFS_CARD_CFG, SC8180X_SLAVE_USB3_1, SC8180X_SLAVE_USB3_2, SC8180X_SLAVE_PCIE_3_CFG, SC8180X_SLAVE_RBCPR_CX_CFG, SC8180X_SLAVE_TLMM_WEST, SC8180X_SLAVE_A1NOC_CFG, SC8180X_SLAVE_AOSS, SC8180X_SLAVE_PRNG, SC8180X_SLAVE_VSENSE_CTRL_CFG, SC8180X_SLAVE_QUP_0, SC8180X_SLAVE_USB3, SC8180X_SLAVE_RBCPR_MMCX_CFG, SC8180X_SLAVE_PIMEM_CFG, SC8180X_SLAVE_UFS_MEM_1_CFG, SC8180X_SLAVE_RBCPR_MX_CFG, SC8180X_SLAVE_IMEM_CFG);
-DEFINE_QNODE(mas_qhm_cnoc_dc_noc, SC8180X_MASTER_CNOC_DC_NOC, 1, 4, SC8180X_SLAVE_LLCC_CFG, SC8180X_SLAVE_GEM_NOC_CFG);
-DEFINE_QNODE(mas_acm_apps, SC8180X_MASTER_AMPSS_M0, 4, 64, SC8180X_SLAVE_ECC, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_acm_gpu_tcu, SC8180X_MASTER_GPU_TCU, 1, 8, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_acm_sys_tcu, SC8180X_MASTER_SYS_TCU, 1, 8, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_qhm_gemnoc_cfg, SC8180X_MASTER_GEM_NOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_GEM_NOC_1, SC8180X_SLAVE_SERVICE_GEM_NOC, SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG);
-DEFINE_QNODE(mas_qnm_cmpnoc, SC8180X_MASTER_COMPUTE_NOC, 2, 32, SC8180X_SLAVE_ECC, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_qnm_gpu, SC8180X_MASTER_GRAPHICS_3D, 4, 32, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_qnm_mnoc_hf, SC8180X_MASTER_MNOC_HF_MEM_NOC, 2, 32, SC8180X_SLAVE_LLCC);
-DEFINE_QNODE(mas_qnm_mnoc_sf, SC8180X_MASTER_MNOC_SF_MEM_NOC, 1, 32, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_qnm_pcie, SC8180X_MASTER_GEM_NOC_PCIE_SNOC, 1, 32, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(mas_qnm_snoc_gc, SC8180X_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC8180X_SLAVE_LLCC);
-DEFINE_QNODE(mas_qnm_snoc_sf, SC8180X_MASTER_SNOC_SF_MEM_NOC, 1, 32, SC8180X_SLAVE_LLCC);
-DEFINE_QNODE(mas_qxm_ecc, SC8180X_MASTER_ECC, 2, 32, SC8180X_SLAVE_LLCC);
-DEFINE_QNODE(mas_ipa_core_master, SC8180X_MASTER_IPA_CORE, 1, 8, SC8180X_SLAVE_IPA_CORE);
-DEFINE_QNODE(mas_llcc_mc, SC8180X_MASTER_LLCC, 8, 4, SC8180X_SLAVE_EBI_CH0);
-DEFINE_QNODE(mas_qhm_mnoc_cfg, SC8180X_MASTER_CNOC_MNOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_MNOC);
-DEFINE_QNODE(mas_qxm_camnoc_hf0, SC8180X_MASTER_CAMNOC_HF0, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_camnoc_hf1, SC8180X_MASTER_CAMNOC_HF1, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_camnoc_sf, SC8180X_MASTER_CAMNOC_SF, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_mdp0, SC8180X_MASTER_MDP_PORT0, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_mdp1, SC8180X_MASTER_MDP_PORT1, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_rot, SC8180X_MASTER_ROTATOR, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_venus0, SC8180X_MASTER_VIDEO_P0, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_venus1, SC8180X_MASTER_VIDEO_P1, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(mas_qxm_venus_arm9, SC8180X_MASTER_VIDEO_PROC, 1, 8, SC8180X_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(mas_qhm_snoc_cfg, SC8180X_MASTER_SNOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_SNOC);
-DEFINE_QNODE(mas_qnm_aggre1_noc, SC8180X_A1NOC_SNOC_MAS, 1, 32, SC8180X_SLAVE_SNOC_GEM_NOC_SF, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_OCIMEM, SC8180X_SLAVE_APPSS, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_QDSS_STM);
-DEFINE_QNODE(mas_qnm_aggre2_noc, SC8180X_A2NOC_SNOC_MAS, 1, 16, SC8180X_SLAVE_SNOC_GEM_NOC_SF, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_PCIE_3, SC8180X_SLAVE_OCIMEM, SC8180X_SLAVE_APPSS, SC8180X_SLAVE_PCIE_2, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_PCIE_0, SC8180X_SLAVE_PCIE_1, SC8180X_SLAVE_TCU, SC8180X_SLAVE_QDSS_STM);
-DEFINE_QNODE(mas_qnm_gemnoc, SC8180X_MASTER_GEM_NOC_SNOC, 1, 8, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_OCIMEM, SC8180X_SLAVE_APPSS, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_TCU, SC8180X_SLAVE_QDSS_STM);
-DEFINE_QNODE(mas_qxm_pimem, SC8180X_MASTER_PIMEM, 1, 8, SC8180X_SLAVE_SNOC_GEM_NOC_GC, SC8180X_SLAVE_OCIMEM);
-DEFINE_QNODE(mas_xm_gic, SC8180X_MASTER_GIC, 1, 8, SC8180X_SLAVE_SNOC_GEM_NOC_GC, SC8180X_SLAVE_OCIMEM);
-DEFINE_QNODE(slv_qns_a1noc_snoc, SC8180X_A1NOC_SNOC_SLV, 1, 32, SC8180X_A1NOC_SNOC_MAS);
-DEFINE_QNODE(slv_srvc_aggre1_noc, SC8180X_SLAVE_SERVICE_A1NOC, 1, 4);
-DEFINE_QNODE(slv_qns_a2noc_snoc, SC8180X_A2NOC_SNOC_SLV, 1, 16, SC8180X_A2NOC_SNOC_MAS);
-DEFINE_QNODE(slv_qns_pcie_mem_noc, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC, 1, 32, SC8180X_MASTER_GEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(slv_srvc_aggre2_noc, SC8180X_SLAVE_SERVICE_A2NOC, 1, 4);
-DEFINE_QNODE(slv_qns_camnoc_uncomp, SC8180X_SLAVE_CAMNOC_UNCOMP, 1, 32);
-DEFINE_QNODE(slv_qns_cdsp_mem_noc, SC8180X_SLAVE_CDSP_MEM_NOC, 2, 32, SC8180X_MASTER_COMPUTE_NOC);
-DEFINE_QNODE(slv_qhs_a1_noc_cfg, SC8180X_SLAVE_A1NOC_CFG, 1, 4, SC8180X_MASTER_A1NOC_CFG);
-DEFINE_QNODE(slv_qhs_a2_noc_cfg, SC8180X_SLAVE_A2NOC_CFG, 1, 4, SC8180X_MASTER_A2NOC_CFG);
-DEFINE_QNODE(slv_qhs_ahb2phy_refgen_center, SC8180X_SLAVE_AHB2PHY_CENTER, 1, 4);
-DEFINE_QNODE(slv_qhs_ahb2phy_refgen_east, SC8180X_SLAVE_AHB2PHY_EAST, 1, 4);
-DEFINE_QNODE(slv_qhs_ahb2phy_refgen_west, SC8180X_SLAVE_AHB2PHY_WEST, 1, 4);
-DEFINE_QNODE(slv_qhs_ahb2phy_south, SC8180X_SLAVE_AHB2PHY_SOUTH, 1, 4);
-DEFINE_QNODE(slv_qhs_aop, SC8180X_SLAVE_AOP, 1, 4);
-DEFINE_QNODE(slv_qhs_aoss, SC8180X_SLAVE_AOSS, 1, 4);
-DEFINE_QNODE(slv_qhs_camera_cfg, SC8180X_SLAVE_CAMERA_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_clk_ctl, SC8180X_SLAVE_CLK_CTL, 1, 4);
-DEFINE_QNODE(slv_qhs_compute_dsp, SC8180X_SLAVE_CDSP_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_cpr_cx, SC8180X_SLAVE_RBCPR_CX_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_cpr_mmcx, SC8180X_SLAVE_RBCPR_MMCX_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_cpr_mx, SC8180X_SLAVE_RBCPR_MX_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_crypto0_cfg, SC8180X_SLAVE_CRYPTO_0_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_ddrss_cfg, SC8180X_SLAVE_CNOC_DDRSS, 1, 4, SC8180X_MASTER_CNOC_DC_NOC);
-DEFINE_QNODE(slv_qhs_display_cfg, SC8180X_SLAVE_DISPLAY_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_emac_cfg, SC8180X_SLAVE_EMAC_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_glm, SC8180X_SLAVE_GLM, 1, 4);
-DEFINE_QNODE(slv_qhs_gpuss_cfg, SC8180X_SLAVE_GRAPHICS_3D_CFG, 1, 8);
-DEFINE_QNODE(slv_qhs_imem_cfg, SC8180X_SLAVE_IMEM_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_ipa, SC8180X_SLAVE_IPA_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_mnoc_cfg, SC8180X_SLAVE_CNOC_MNOC_CFG, 1, 4, SC8180X_MASTER_CNOC_MNOC_CFG);
-DEFINE_QNODE(slv_qhs_npu_cfg, SC8180X_SLAVE_NPU_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_pcie0_cfg, SC8180X_SLAVE_PCIE_0_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_pcie1_cfg, SC8180X_SLAVE_PCIE_1_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_pcie2_cfg, SC8180X_SLAVE_PCIE_2_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_pcie3_cfg, SC8180X_SLAVE_PCIE_3_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_pdm, SC8180X_SLAVE_PDM, 1, 4);
-DEFINE_QNODE(slv_qhs_pimem_cfg, SC8180X_SLAVE_PIMEM_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_prng, SC8180X_SLAVE_PRNG, 1, 4);
-DEFINE_QNODE(slv_qhs_qdss_cfg, SC8180X_SLAVE_QDSS_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_qspi_0, SC8180X_SLAVE_QSPI_0, 1, 4);
-DEFINE_QNODE(slv_qhs_qspi_1, SC8180X_SLAVE_QSPI_1, 1, 4);
-DEFINE_QNODE(slv_qhs_qupv3_east0, SC8180X_SLAVE_QUP_1, 1, 4);
-DEFINE_QNODE(slv_qhs_qupv3_east1, SC8180X_SLAVE_QUP_2, 1, 4);
-DEFINE_QNODE(slv_qhs_qupv3_west, SC8180X_SLAVE_QUP_0, 1, 4);
-DEFINE_QNODE(slv_qhs_sdc2, SC8180X_SLAVE_SDCC_2, 1, 4);
-DEFINE_QNODE(slv_qhs_sdc4, SC8180X_SLAVE_SDCC_4, 1, 4);
-DEFINE_QNODE(slv_qhs_security, SC8180X_SLAVE_SECURITY, 1, 4);
-DEFINE_QNODE(slv_qhs_snoc_cfg, SC8180X_SLAVE_SNOC_CFG, 1, 4, SC8180X_MASTER_SNOC_CFG);
-DEFINE_QNODE(slv_qhs_spss_cfg, SC8180X_SLAVE_SPSS_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_tcsr, SC8180X_SLAVE_TCSR, 1, 4);
-DEFINE_QNODE(slv_qhs_tlmm_east, SC8180X_SLAVE_TLMM_EAST, 1, 4);
-DEFINE_QNODE(slv_qhs_tlmm_south, SC8180X_SLAVE_TLMM_SOUTH, 1, 4);
-DEFINE_QNODE(slv_qhs_tlmm_west, SC8180X_SLAVE_TLMM_WEST, 1, 4);
-DEFINE_QNODE(slv_qhs_tsif, SC8180X_SLAVE_TSIF, 1, 4);
-DEFINE_QNODE(slv_qhs_ufs_card_cfg, SC8180X_SLAVE_UFS_CARD_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_ufs_mem0_cfg, SC8180X_SLAVE_UFS_MEM_0_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_ufs_mem1_cfg, SC8180X_SLAVE_UFS_MEM_1_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_usb3_0, SC8180X_SLAVE_USB3, 1, 4);
-DEFINE_QNODE(slv_qhs_usb3_1, SC8180X_SLAVE_USB3_1, 1, 4);
-DEFINE_QNODE(slv_qhs_usb3_2, SC8180X_SLAVE_USB3_2, 1, 4);
-DEFINE_QNODE(slv_qhs_venus_cfg, SC8180X_SLAVE_VENUS_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_vsense_ctrl_cfg, SC8180X_SLAVE_VSENSE_CTRL_CFG, 1, 4);
-DEFINE_QNODE(slv_srvc_cnoc, SC8180X_SLAVE_SERVICE_CNOC, 1, 4);
-DEFINE_QNODE(slv_qhs_gemnoc, SC8180X_SLAVE_GEM_NOC_CFG, 1, 4, SC8180X_MASTER_GEM_NOC_CFG);
-DEFINE_QNODE(slv_qhs_llcc, SC8180X_SLAVE_LLCC_CFG, 1, 4);
-DEFINE_QNODE(slv_qhs_mdsp_ms_mpu_cfg, SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
-DEFINE_QNODE(slv_qns_ecc, SC8180X_SLAVE_ECC, 1, 32);
-DEFINE_QNODE(slv_qns_gem_noc_snoc, SC8180X_SLAVE_GEM_NOC_SNOC, 1, 8, SC8180X_MASTER_GEM_NOC_SNOC);
-DEFINE_QNODE(slv_qns_llcc, SC8180X_SLAVE_LLCC, 8, 16, SC8180X_MASTER_LLCC);
-DEFINE_QNODE(slv_srvc_gemnoc, SC8180X_SLAVE_SERVICE_GEM_NOC, 1, 4);
-DEFINE_QNODE(slv_srvc_gemnoc1, SC8180X_SLAVE_SERVICE_GEM_NOC_1, 1, 4);
-DEFINE_QNODE(slv_ipa_core_slave, SC8180X_SLAVE_IPA_CORE, 1, 8);
-DEFINE_QNODE(slv_ebi, SC8180X_SLAVE_EBI_CH0, 8, 4);
-DEFINE_QNODE(slv_qns2_mem_noc, SC8180X_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC8180X_MASTER_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(slv_qns_mem_noc_hf, SC8180X_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SC8180X_MASTER_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(slv_srvc_mnoc, SC8180X_SLAVE_SERVICE_MNOC, 1, 4);
-DEFINE_QNODE(slv_qhs_apss, SC8180X_SLAVE_APPSS, 1, 8);
-DEFINE_QNODE(slv_qns_cnoc, SC8180X_SNOC_CNOC_SLV, 1, 8, SC8180X_SNOC_CNOC_MAS);
-DEFINE_QNODE(slv_qns_gemnoc_gc, SC8180X_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SC8180X_MASTER_SNOC_GC_MEM_NOC);
-DEFINE_QNODE(slv_qns_gemnoc_sf, SC8180X_SLAVE_SNOC_GEM_NOC_SF, 1, 32, SC8180X_MASTER_SNOC_SF_MEM_NOC);
-DEFINE_QNODE(slv_qxs_imem, SC8180X_SLAVE_OCIMEM, 1, 8);
-DEFINE_QNODE(slv_qxs_pimem, SC8180X_SLAVE_PIMEM, 1, 8);
-DEFINE_QNODE(slv_srvc_snoc, SC8180X_SLAVE_SERVICE_SNOC, 1, 4);
-DEFINE_QNODE(slv_xs_pcie_0, SC8180X_SLAVE_PCIE_0, 1, 8);
-DEFINE_QNODE(slv_xs_pcie_1, SC8180X_SLAVE_PCIE_1, 1, 8);
-DEFINE_QNODE(slv_xs_pcie_2, SC8180X_SLAVE_PCIE_2, 1, 8);
-DEFINE_QNODE(slv_xs_pcie_3, SC8180X_SLAVE_PCIE_3, 1, 8);
-DEFINE_QNODE(slv_xs_qdss_stm, SC8180X_SLAVE_QDSS_STM, 1, 4);
-DEFINE_QNODE(slv_xs_sys_tcu_cfg, SC8180X_SLAVE_TCU, 1, 8);
-
-DEFINE_QBCM(bcm_acv, "ACV", false, &slv_ebi);
-DEFINE_QBCM(bcm_mc0, "MC0", false, &slv_ebi);
-DEFINE_QBCM(bcm_sh0, "SH0", false, &slv_qns_llcc);
-DEFINE_QBCM(bcm_mm0, "MM0", false, &slv_qns_mem_noc_hf);
-DEFINE_QBCM(bcm_co0, "CO0", false, &slv_qns_cdsp_mem_noc);
-DEFINE_QBCM(bcm_ce0, "CE0", false, &mas_qxm_crypto);
-DEFINE_QBCM(bcm_cn0, "CN0", false, &mas_qnm_snoc, &slv_qhs_a1_noc_cfg, &slv_qhs_a2_noc_cfg, &slv_qhs_ahb2phy_refgen_center, &slv_qhs_ahb2phy_refgen_east, &slv_qhs_ahb2phy_refgen_west, &slv_qhs_ahb2phy_south, &slv_qhs_aop, &slv_qhs_aoss, &slv_qhs_camera_cfg, &slv_qhs_clk_ctl, &slv_qhs_compute_dsp, &slv_qhs_cpr_cx, &slv_qhs_cpr_mmcx, &slv_qhs_cpr_mx, &slv_qhs_crypto0_cfg, &slv_qhs_ddrss_cfg, &slv_qhs_display_cfg, &slv_qhs_emac_cfg, &slv_qhs_glm, &slv_qhs_gpuss_cfg, &slv_qhs_imem_cfg, &slv_qhs_ipa, &slv_qhs_mnoc_cfg, &slv_qhs_npu_cfg, &slv_qhs_pcie0_cfg, &slv_qhs_pcie1_cfg, &slv_qhs_pcie2_cfg, &slv_qhs_pcie3_cfg, &slv_qhs_pdm, &slv_qhs_pimem_cfg, &slv_qhs_prng, &slv_qhs_qdss_cfg, &slv_qhs_qspi_0, &slv_qhs_qspi_1, &slv_qhs_qupv3_east0, &slv_qhs_qupv3_east1, &slv_qhs_qupv3_west, &slv_qhs_sdc2, &slv_qhs_sdc4, &slv_qhs_security, &slv_qhs_snoc_cfg, &slv_qhs_spss_cfg, &slv_qhs_tcsr, &slv_qhs_tlmm_east, &slv_qhs_tlmm_south, &slv_qhs_tlmm_west, &slv_qhs_tsif, &slv_qhs_ufs_card_cfg, &slv_qhs_ufs_mem0_cfg, &slv_qhs_ufs_mem1_cfg, &slv_qhs_usb3_0, &slv_qhs_usb3_1, &slv_qhs_usb3_2, &slv_qhs_venus_cfg, &slv_qhs_vsense_ctrl_cfg, &slv_srvc_cnoc);
-DEFINE_QBCM(bcm_mm1, "MM1", false, &mas_qxm_camnoc_hf0_uncomp, &mas_qxm_camnoc_hf1_uncomp, &mas_qxm_camnoc_sf_uncomp, &mas_qxm_camnoc_hf0, &mas_qxm_camnoc_hf1, &mas_qxm_mdp0, &mas_qxm_mdp1);
-DEFINE_QBCM(bcm_qup0, "QUP0", false, &mas_qhm_qup0, &mas_qhm_qup1, &mas_qhm_qup2);
-DEFINE_QBCM(bcm_sh2, "SH2", false, &slv_qns_gem_noc_snoc);
-DEFINE_QBCM(bcm_mm2, "MM2", false, &mas_qxm_camnoc_sf, &mas_qxm_rot, &mas_qxm_venus0, &mas_qxm_venus1, &mas_qxm_venus_arm9, &slv_qns2_mem_noc);
-DEFINE_QBCM(bcm_sh3, "SH3", false, &mas_acm_apps);
-DEFINE_QBCM(bcm_sn0, "SN0", false, &slv_qns_gemnoc_sf);
-DEFINE_QBCM(bcm_sn1, "SN1", false, &slv_qxs_imem);
-DEFINE_QBCM(bcm_sn2, "SN2", false, &slv_qns_gemnoc_gc);
-DEFINE_QBCM(bcm_co2, "CO2", false, &mas_qnm_npu);
-DEFINE_QBCM(bcm_ip0, "IP0", false, &slv_ipa_core_slave);
-DEFINE_QBCM(bcm_sn3, "SN3", false, &slv_srvc_aggre1_noc, &slv_qns_cnoc);
-DEFINE_QBCM(bcm_sn4, "SN4", false, &slv_qxs_pimem);
-DEFINE_QBCM(bcm_sn8, "SN8", false, &slv_xs_pcie_0, &slv_xs_pcie_1, &slv_xs_pcie_2, &slv_xs_pcie_3);
-DEFINE_QBCM(bcm_sn9, "SN9", false, &mas_qnm_aggre1_noc);
-DEFINE_QBCM(bcm_sn11, "SN11", false, &mas_qnm_aggre2_noc);
-DEFINE_QBCM(bcm_sn14, "SN14", false, &slv_qns_pcie_mem_noc);
-DEFINE_QBCM(bcm_sn15, "SN15", false, &mas_qnm_gemnoc);
-
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_node mas_qhm_a1noc_cfg = {
+       .name = "mas_qhm_a1noc_cfg",
+       .id = SC8180X_MASTER_A1NOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_SERVICE_A1NOC }
+};
+
+static struct qcom_icc_node mas_xm_ufs_card = {
+       .name = "mas_xm_ufs_card",
+       .id = SC8180X_MASTER_UFS_CARD,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_ufs_g4 = {
+       .name = "mas_xm_ufs_g4",
+       .id = SC8180X_MASTER_UFS_GEN4,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_ufs_mem = {
+       .name = "mas_xm_ufs_mem",
+       .id = SC8180X_MASTER_UFS_MEM,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_usb3_0 = {
+       .name = "mas_xm_usb3_0",
+       .id = SC8180X_MASTER_USB3,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_usb3_1 = {
+       .name = "mas_xm_usb3_1",
+       .id = SC8180X_MASTER_USB3_1,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_usb3_2 = {
+       .name = "mas_xm_usb3_2",
+       .id = SC8180X_MASTER_USB3_2,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8180X_A1NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_a2noc_cfg = {
+       .name = "mas_qhm_a2noc_cfg",
+       .id = SC8180X_MASTER_A2NOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_SERVICE_A2NOC }
+};
+
+static struct qcom_icc_node mas_qhm_qdss_bam = {
+       .name = "mas_qhm_qdss_bam",
+       .id = SC8180X_MASTER_QDSS_BAM,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_qspi = {
+       .name = "mas_qhm_qspi",
+       .id = SC8180X_MASTER_QSPI_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_qspi1 = {
+       .name = "mas_qhm_qspi1",
+       .id = SC8180X_MASTER_QSPI_1,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_qup0 = {
+       .name = "mas_qhm_qup0",
+       .id = SC8180X_MASTER_QUP_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_qup1 = {
+       .name = "mas_qhm_qup1",
+       .id = SC8180X_MASTER_QUP_1,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_qup2 = {
+       .name = "mas_qhm_qup2",
+       .id = SC8180X_MASTER_QUP_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qhm_sensorss_ahb = {
+       .name = "mas_qhm_sensorss_ahb",
+       .id = SC8180X_MASTER_SENSORS_AHB,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qxm_crypto = {
+       .name = "mas_qxm_crypto",
+       .id = SC8180X_MASTER_CRYPTO_CORE_0,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qxm_ipa = {
+       .name = "mas_qxm_ipa",
+       .id = SC8180X_MASTER_IPA,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_emac = {
+       .name = "mas_xm_emac",
+       .id = SC8180X_MASTER_EMAC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_pcie3_0 = {
+       .name = "mas_xm_pcie3_0",
+       .id = SC8180X_MASTER_PCIE,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+};
+
+static struct qcom_icc_node mas_xm_pcie3_1 = {
+       .name = "mas_xm_pcie3_1",
+       .id = SC8180X_MASTER_PCIE_1,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+};
+
+static struct qcom_icc_node mas_xm_pcie3_2 = {
+       .name = "mas_xm_pcie3_2",
+       .id = SC8180X_MASTER_PCIE_2,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+};
+
+static struct qcom_icc_node mas_xm_pcie3_3 = {
+       .name = "mas_xm_pcie3_3",
+       .id = SC8180X_MASTER_PCIE_3,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+};
+
+static struct qcom_icc_node mas_xm_qdss_etr = {
+       .name = "mas_xm_qdss_etr",
+       .id = SC8180X_MASTER_QDSS_ETR,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_sdc2 = {
+       .name = "mas_xm_sdc2",
+       .id = SC8180X_MASTER_SDCC_2,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_xm_sdc4 = {
+       .name = "mas_xm_sdc4",
+       .id = SC8180X_MASTER_SDCC_4,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_A2NOC_SNOC_SLV }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_hf0_uncomp = {
+       .name = "mas_qxm_camnoc_hf0_uncomp",
+       .id = SC8180X_MASTER_CAMNOC_HF0_UNCOMP,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_CAMNOC_UNCOMP }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_hf1_uncomp = {
+       .name = "mas_qxm_camnoc_hf1_uncomp",
+       .id = SC8180X_MASTER_CAMNOC_HF1_UNCOMP,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_CAMNOC_UNCOMP }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_sf_uncomp = {
+       .name = "mas_qxm_camnoc_sf_uncomp",
+       .id = SC8180X_MASTER_CAMNOC_SF_UNCOMP,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_CAMNOC_UNCOMP }
+};
+
+static struct qcom_icc_node mas_qnm_npu = {
+       .name = "mas_qnm_npu",
+       .id = SC8180X_MASTER_NPU,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_CDSP_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qnm_snoc = {
+       .name = "mas_qnm_snoc",
+       .id = SC8180X_SNOC_CNOC_MAS,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 56,
+       .links = { SC8180X_SLAVE_TLMM_SOUTH,
+                  SC8180X_SLAVE_CDSP_CFG,
+                  SC8180X_SLAVE_SPSS_CFG,
+                  SC8180X_SLAVE_CAMERA_CFG,
+                  SC8180X_SLAVE_SDCC_4,
+                  SC8180X_SLAVE_AHB2PHY_CENTER,
+                  SC8180X_SLAVE_SDCC_2,
+                  SC8180X_SLAVE_PCIE_2_CFG,
+                  SC8180X_SLAVE_CNOC_MNOC_CFG,
+                  SC8180X_SLAVE_EMAC_CFG,
+                  SC8180X_SLAVE_QSPI_0,
+                  SC8180X_SLAVE_QSPI_1,
+                  SC8180X_SLAVE_TLMM_EAST,
+                  SC8180X_SLAVE_SNOC_CFG,
+                  SC8180X_SLAVE_AHB2PHY_EAST,
+                  SC8180X_SLAVE_GLM,
+                  SC8180X_SLAVE_PDM,
+                  SC8180X_SLAVE_PCIE_1_CFG,
+                  SC8180X_SLAVE_A2NOC_CFG,
+                  SC8180X_SLAVE_QDSS_CFG,
+                  SC8180X_SLAVE_DISPLAY_CFG,
+                  SC8180X_SLAVE_TCSR,
+                  SC8180X_SLAVE_UFS_MEM_0_CFG,
+                  SC8180X_SLAVE_CNOC_DDRSS,
+                  SC8180X_SLAVE_PCIE_0_CFG,
+                  SC8180X_SLAVE_QUP_1,
+                  SC8180X_SLAVE_QUP_2,
+                  SC8180X_SLAVE_NPU_CFG,
+                  SC8180X_SLAVE_CRYPTO_0_CFG,
+                  SC8180X_SLAVE_GRAPHICS_3D_CFG,
+                  SC8180X_SLAVE_VENUS_CFG,
+                  SC8180X_SLAVE_TSIF,
+                  SC8180X_SLAVE_IPA_CFG,
+                  SC8180X_SLAVE_CLK_CTL,
+                  SC8180X_SLAVE_SECURITY,
+                  SC8180X_SLAVE_AOP,
+                  SC8180X_SLAVE_AHB2PHY_WEST,
+                  SC8180X_SLAVE_AHB2PHY_SOUTH,
+                  SC8180X_SLAVE_SERVICE_CNOC,
+                  SC8180X_SLAVE_UFS_CARD_CFG,
+                  SC8180X_SLAVE_USB3_1,
+                  SC8180X_SLAVE_USB3_2,
+                  SC8180X_SLAVE_PCIE_3_CFG,
+                  SC8180X_SLAVE_RBCPR_CX_CFG,
+                  SC8180X_SLAVE_TLMM_WEST,
+                  SC8180X_SLAVE_A1NOC_CFG,
+                  SC8180X_SLAVE_AOSS,
+                  SC8180X_SLAVE_PRNG,
+                  SC8180X_SLAVE_VSENSE_CTRL_CFG,
+                  SC8180X_SLAVE_QUP_0,
+                  SC8180X_SLAVE_USB3,
+                  SC8180X_SLAVE_RBCPR_MMCX_CFG,
+                  SC8180X_SLAVE_PIMEM_CFG,
+                  SC8180X_SLAVE_UFS_MEM_1_CFG,
+                  SC8180X_SLAVE_RBCPR_MX_CFG,
+                  SC8180X_SLAVE_IMEM_CFG }
+};
+
+static struct qcom_icc_node mas_qhm_cnoc_dc_noc = {
+       .name = "mas_qhm_cnoc_dc_noc",
+       .id = SC8180X_MASTER_CNOC_DC_NOC,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 2,
+       .links = { SC8180X_SLAVE_LLCC_CFG,
+                  SC8180X_SLAVE_GEM_NOC_CFG }
+};
+
+static struct qcom_icc_node mas_acm_apps = {
+       .name = "mas_acm_apps",
+       .id = SC8180X_MASTER_AMPSS_M0,
+       .channels = 4,
+       .buswidth = 64,
+       .num_links = 3,
+       .links = { SC8180X_SLAVE_ECC,
+                  SC8180X_SLAVE_LLCC,
+                  SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_acm_gpu_tcu = {
+       .name = "mas_acm_gpu_tcu",
+       .id = SC8180X_MASTER_GPU_TCU,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 2,
+       .links = { SC8180X_SLAVE_LLCC,
+                  SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_acm_sys_tcu = {
+       .name = "mas_acm_sys_tcu",
+       .id = SC8180X_MASTER_SYS_TCU,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 2,
+       .links = { SC8180X_SLAVE_LLCC,
+                  SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_qhm_gemnoc_cfg = {
+       .name = "mas_qhm_gemnoc_cfg",
+       .id = SC8180X_MASTER_GEM_NOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 3,
+       .links = { SC8180X_SLAVE_SERVICE_GEM_NOC_1,
+                  SC8180X_SLAVE_SERVICE_GEM_NOC,
+                  SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG }
+};
+
+static struct qcom_icc_node mas_qnm_cmpnoc = {
+       .name = "mas_qnm_cmpnoc",
+       .id = SC8180X_MASTER_COMPUTE_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 3,
+       .links = { SC8180X_SLAVE_ECC,
+                  SC8180X_SLAVE_LLCC,
+                  SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_qnm_gpu = {
+       .name = "mas_qnm_gpu",
+       .id = SC8180X_MASTER_GRAPHICS_3D,
+       .channels = 4,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { SC8180X_SLAVE_LLCC,
+                  SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_qnm_mnoc_hf = {
+       .name = "mas_qnm_mnoc_hf",
+       .id = SC8180X_MASTER_MNOC_HF_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_LLCC }
+};
+
+static struct qcom_icc_node mas_qnm_mnoc_sf = {
+       .name = "mas_qnm_mnoc_sf",
+       .id = SC8180X_MASTER_MNOC_SF_MEM_NOC,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { SC8180X_SLAVE_LLCC,
+                  SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_qnm_pcie = {
+       .name = "mas_qnm_pcie",
+       .id = SC8180X_MASTER_GEM_NOC_PCIE_SNOC,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { SC8180X_SLAVE_LLCC,
+                  SC8180X_SLAVE_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node mas_qnm_snoc_gc = {
+       .name = "mas_qnm_snoc_gc",
+       .id = SC8180X_MASTER_SNOC_GC_MEM_NOC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_LLCC }
+};
+
+static struct qcom_icc_node mas_qnm_snoc_sf = {
+       .name = "mas_qnm_snoc_sf",
+       .id = SC8180X_MASTER_SNOC_SF_MEM_NOC,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_LLCC }
+};
+
+static struct qcom_icc_node mas_qxm_ecc = {
+       .name = "mas_qxm_ecc",
+       .id = SC8180X_MASTER_ECC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_LLCC }
+};
+
+static struct qcom_icc_node mas_ipa_core_master = {
+       .name = "mas_ipa_core_master",
+       .id = SC8180X_MASTER_IPA_CORE,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_IPA_CORE }
+};
+
+static struct qcom_icc_node mas_llcc_mc = {
+       .name = "mas_llcc_mc",
+       .id = SC8180X_MASTER_LLCC,
+       .channels = 8,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_EBI_CH0 }
+};
+
+static struct qcom_icc_node mas_qhm_mnoc_cfg = {
+       .name = "mas_qhm_mnoc_cfg",
+       .id = SC8180X_MASTER_CNOC_MNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_SERVICE_MNOC }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_hf0 = {
+       .name = "mas_qxm_camnoc_hf0",
+       .id = SC8180X_MASTER_CAMNOC_HF0,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_hf1 = {
+       .name = "mas_qxm_camnoc_hf1",
+       .id = SC8180X_MASTER_CAMNOC_HF1,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_camnoc_sf = {
+       .name = "mas_qxm_camnoc_sf",
+       .id = SC8180X_MASTER_CAMNOC_SF,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_mdp0 = {
+       .name = "mas_qxm_mdp0",
+       .id = SC8180X_MASTER_MDP_PORT0,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_mdp1 = {
+       .name = "mas_qxm_mdp1",
+       .id = SC8180X_MASTER_MDP_PORT1,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_rot = {
+       .name = "mas_qxm_rot",
+       .id = SC8180X_MASTER_ROTATOR,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_venus0 = {
+       .name = "mas_qxm_venus0",
+       .id = SC8180X_MASTER_VIDEO_P0,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_venus1 = {
+       .name = "mas_qxm_venus1",
+       .id = SC8180X_MASTER_VIDEO_P1,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qxm_venus_arm9 = {
+       .name = "mas_qxm_venus_arm9",
+       .id = SC8180X_MASTER_VIDEO_PROC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node mas_qhm_snoc_cfg = {
+       .name = "mas_qhm_snoc_cfg",
+       .id = SC8180X_MASTER_SNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_SERVICE_SNOC }
+};
+
+static struct qcom_icc_node mas_qnm_aggre1_noc = {
+       .name = "mas_qnm_aggre1_noc",
+       .id = SC8180X_A1NOC_SNOC_MAS,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 6,
+       .links = { SC8180X_SLAVE_SNOC_GEM_NOC_SF,
+                  SC8180X_SLAVE_PIMEM,
+                  SC8180X_SLAVE_OCIMEM,
+                  SC8180X_SLAVE_APPSS,
+                  SC8180X_SNOC_CNOC_SLV,
+                  SC8180X_SLAVE_QDSS_STM }
+};
+
+static struct qcom_icc_node mas_qnm_aggre2_noc = {
+       .name = "mas_qnm_aggre2_noc",
+       .id = SC8180X_A2NOC_SNOC_MAS,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 11,
+       .links = { SC8180X_SLAVE_SNOC_GEM_NOC_SF,
+                  SC8180X_SLAVE_PIMEM,
+                  SC8180X_SLAVE_PCIE_3,
+                  SC8180X_SLAVE_OCIMEM,
+                  SC8180X_SLAVE_APPSS,
+                  SC8180X_SLAVE_PCIE_2,
+                  SC8180X_SNOC_CNOC_SLV,
+                  SC8180X_SLAVE_PCIE_0,
+                  SC8180X_SLAVE_PCIE_1,
+                  SC8180X_SLAVE_TCU,
+                  SC8180X_SLAVE_QDSS_STM }
+};
+
+static struct qcom_icc_node mas_qnm_gemnoc = {
+       .name = "mas_qnm_gemnoc",
+       .id = SC8180X_MASTER_GEM_NOC_SNOC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 6,
+       .links = { SC8180X_SLAVE_PIMEM,
+                  SC8180X_SLAVE_OCIMEM,
+                  SC8180X_SLAVE_APPSS,
+                  SC8180X_SNOC_CNOC_SLV,
+                  SC8180X_SLAVE_TCU,
+                  SC8180X_SLAVE_QDSS_STM }
+};
+
+static struct qcom_icc_node mas_qxm_pimem = {
+       .name = "mas_qxm_pimem",
+       .id = SC8180X_MASTER_PIMEM,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 2,
+       .links = { SC8180X_SLAVE_SNOC_GEM_NOC_GC,
+                  SC8180X_SLAVE_OCIMEM }
+};
+
+static struct qcom_icc_node mas_xm_gic = {
+       .name = "mas_xm_gic",
+       .id = SC8180X_MASTER_GIC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 2,
+       .links = { SC8180X_SLAVE_SNOC_GEM_NOC_GC,
+                  SC8180X_SLAVE_OCIMEM }
+};
+
+static struct qcom_icc_node mas_qup_core_0 = {
+       .name = "mas_qup_core_0",
+       .id = SC8180X_MASTER_QUP_CORE_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_QUP_CORE_0 }
+};
+
+static struct qcom_icc_node mas_qup_core_1 = {
+       .name = "mas_qup_core_1",
+       .id = SC8180X_MASTER_QUP_CORE_1,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_QUP_CORE_1 }
+};
+
+static struct qcom_icc_node mas_qup_core_2 = {
+       .name = "mas_qup_core_2",
+       .id = SC8180X_MASTER_QUP_CORE_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_SLAVE_QUP_CORE_2 }
+};
+
+static struct qcom_icc_node slv_qns_a1noc_snoc = {
+       .name = "slv_qns_a1noc_snoc",
+       .id = SC8180X_A1NOC_SNOC_SLV,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_A1NOC_SNOC_MAS }
+};
+
+static struct qcom_icc_node slv_srvc_aggre1_noc = {
+       .name = "slv_srvc_aggre1_noc",
+       .id = SC8180X_SLAVE_SERVICE_A1NOC,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qns_a2noc_snoc = {
+       .name = "slv_qns_a2noc_snoc",
+       .id = SC8180X_A2NOC_SNOC_SLV,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8180X_A2NOC_SNOC_MAS }
+};
+
+static struct qcom_icc_node slv_qns_pcie_mem_noc = {
+       .name = "slv_qns_pcie_mem_noc",
+       .id = SC8180X_SLAVE_ANOC_PCIE_GEM_NOC,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_MASTER_GEM_NOC_PCIE_SNOC }
+};
+
+static struct qcom_icc_node slv_srvc_aggre2_noc = {
+       .name = "slv_srvc_aggre2_noc",
+       .id = SC8180X_SLAVE_SERVICE_A2NOC,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qns_camnoc_uncomp = {
+       .name = "slv_qns_camnoc_uncomp",
+       .id = SC8180X_SLAVE_CAMNOC_UNCOMP,
+       .channels = 1,
+       .buswidth = 32
+};
+
+static struct qcom_icc_node slv_qns_cdsp_mem_noc = {
+       .name = "slv_qns_cdsp_mem_noc",
+       .id = SC8180X_SLAVE_CDSP_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_MASTER_COMPUTE_NOC }
+};
+
+static struct qcom_icc_node slv_qhs_a1_noc_cfg = {
+       .name = "slv_qhs_a1_noc_cfg",
+       .id = SC8180X_SLAVE_A1NOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_MASTER_A1NOC_CFG }
+};
+
+static struct qcom_icc_node slv_qhs_a2_noc_cfg = {
+       .name = "slv_qhs_a2_noc_cfg",
+       .id = SC8180X_SLAVE_A2NOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_MASTER_A2NOC_CFG }
+};
+
+static struct qcom_icc_node slv_qhs_ahb2phy_refgen_center = {
+       .name = "slv_qhs_ahb2phy_refgen_center",
+       .id = SC8180X_SLAVE_AHB2PHY_CENTER,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ahb2phy_refgen_east = {
+       .name = "slv_qhs_ahb2phy_refgen_east",
+       .id = SC8180X_SLAVE_AHB2PHY_EAST,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ahb2phy_refgen_west = {
+       .name = "slv_qhs_ahb2phy_refgen_west",
+       .id = SC8180X_SLAVE_AHB2PHY_WEST,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ahb2phy_south = {
+       .name = "slv_qhs_ahb2phy_south",
+       .id = SC8180X_SLAVE_AHB2PHY_SOUTH,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_aop = {
+       .name = "slv_qhs_aop",
+       .id = SC8180X_SLAVE_AOP,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_aoss = {
+       .name = "slv_qhs_aoss",
+       .id = SC8180X_SLAVE_AOSS,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_camera_cfg = {
+       .name = "slv_qhs_camera_cfg",
+       .id = SC8180X_SLAVE_CAMERA_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_clk_ctl = {
+       .name = "slv_qhs_clk_ctl",
+       .id = SC8180X_SLAVE_CLK_CTL,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_compute_dsp = {
+       .name = "slv_qhs_compute_dsp",
+       .id = SC8180X_SLAVE_CDSP_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_cpr_cx = {
+       .name = "slv_qhs_cpr_cx",
+       .id = SC8180X_SLAVE_RBCPR_CX_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_cpr_mmcx = {
+       .name = "slv_qhs_cpr_mmcx",
+       .id = SC8180X_SLAVE_RBCPR_MMCX_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_cpr_mx = {
+       .name = "slv_qhs_cpr_mx",
+       .id = SC8180X_SLAVE_RBCPR_MX_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_crypto0_cfg = {
+       .name = "slv_qhs_crypto0_cfg",
+       .id = SC8180X_SLAVE_CRYPTO_0_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ddrss_cfg = {
+       .name = "slv_qhs_ddrss_cfg",
+       .id = SC8180X_SLAVE_CNOC_DDRSS,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_MASTER_CNOC_DC_NOC }
+};
+
+static struct qcom_icc_node slv_qhs_display_cfg = {
+       .name = "slv_qhs_display_cfg",
+       .id = SC8180X_SLAVE_DISPLAY_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_emac_cfg = {
+       .name = "slv_qhs_emac_cfg",
+       .id = SC8180X_SLAVE_EMAC_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_glm = {
+       .name = "slv_qhs_glm",
+       .id = SC8180X_SLAVE_GLM,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_gpuss_cfg = {
+       .name = "slv_qhs_gpuss_cfg",
+       .id = SC8180X_SLAVE_GRAPHICS_3D_CFG,
+       .channels = 1,
+       .buswidth = 8
+};
+
+static struct qcom_icc_node slv_qhs_imem_cfg = {
+       .name = "slv_qhs_imem_cfg",
+       .id = SC8180X_SLAVE_IMEM_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ipa = {
+       .name = "slv_qhs_ipa",
+       .id = SC8180X_SLAVE_IPA_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_mnoc_cfg = {
+       .name = "slv_qhs_mnoc_cfg",
+       .id = SC8180X_SLAVE_CNOC_MNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_MASTER_CNOC_MNOC_CFG }
+};
+
+static struct qcom_icc_node slv_qhs_npu_cfg = {
+       .name = "slv_qhs_npu_cfg",
+       .id = SC8180X_SLAVE_NPU_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pcie0_cfg = {
+       .name = "slv_qhs_pcie0_cfg",
+       .id = SC8180X_SLAVE_PCIE_0_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pcie1_cfg = {
+       .name = "slv_qhs_pcie1_cfg",
+       .id = SC8180X_SLAVE_PCIE_1_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pcie2_cfg = {
+       .name = "slv_qhs_pcie2_cfg",
+       .id = SC8180X_SLAVE_PCIE_2_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pcie3_cfg = {
+       .name = "slv_qhs_pcie3_cfg",
+       .id = SC8180X_SLAVE_PCIE_3_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pdm = {
+       .name = "slv_qhs_pdm",
+       .id = SC8180X_SLAVE_PDM,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_pimem_cfg = {
+       .name = "slv_qhs_pimem_cfg",
+       .id = SC8180X_SLAVE_PIMEM_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_prng = {
+       .name = "slv_qhs_prng",
+       .id = SC8180X_SLAVE_PRNG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qdss_cfg = {
+       .name = "slv_qhs_qdss_cfg",
+       .id = SC8180X_SLAVE_QDSS_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qspi_0 = {
+       .name = "slv_qhs_qspi_0",
+       .id = SC8180X_SLAVE_QSPI_0,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qspi_1 = {
+       .name = "slv_qhs_qspi_1",
+       .id = SC8180X_SLAVE_QSPI_1,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qupv3_east0 = {
+       .name = "slv_qhs_qupv3_east0",
+       .id = SC8180X_SLAVE_QUP_1,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qupv3_east1 = {
+       .name = "slv_qhs_qupv3_east1",
+       .id = SC8180X_SLAVE_QUP_2,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_qupv3_west = {
+       .name = "slv_qhs_qupv3_west",
+       .id = SC8180X_SLAVE_QUP_0,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_sdc2 = {
+       .name = "slv_qhs_sdc2",
+       .id = SC8180X_SLAVE_SDCC_2,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_sdc4 = {
+       .name = "slv_qhs_sdc4",
+       .id = SC8180X_SLAVE_SDCC_4,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_security = {
+       .name = "slv_qhs_security",
+       .id = SC8180X_SLAVE_SECURITY,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_snoc_cfg = {
+       .name = "slv_qhs_snoc_cfg",
+       .id = SC8180X_SLAVE_SNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_MASTER_SNOC_CFG }
+};
+
+static struct qcom_icc_node slv_qhs_spss_cfg = {
+       .name = "slv_qhs_spss_cfg",
+       .id = SC8180X_SLAVE_SPSS_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_tcsr = {
+       .name = "slv_qhs_tcsr",
+       .id = SC8180X_SLAVE_TCSR,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_tlmm_east = {
+       .name = "slv_qhs_tlmm_east",
+       .id = SC8180X_SLAVE_TLMM_EAST,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_tlmm_south = {
+       .name = "slv_qhs_tlmm_south",
+       .id = SC8180X_SLAVE_TLMM_SOUTH,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_tlmm_west = {
+       .name = "slv_qhs_tlmm_west",
+       .id = SC8180X_SLAVE_TLMM_WEST,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_tsif = {
+       .name = "slv_qhs_tsif",
+       .id = SC8180X_SLAVE_TSIF,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ufs_card_cfg = {
+       .name = "slv_qhs_ufs_card_cfg",
+       .id = SC8180X_SLAVE_UFS_CARD_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ufs_mem0_cfg = {
+       .name = "slv_qhs_ufs_mem0_cfg",
+       .id = SC8180X_SLAVE_UFS_MEM_0_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_ufs_mem1_cfg = {
+       .name = "slv_qhs_ufs_mem1_cfg",
+       .id = SC8180X_SLAVE_UFS_MEM_1_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_usb3_0 = {
+       .name = "slv_qhs_usb3_0",
+       .id = SC8180X_SLAVE_USB3,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_usb3_1 = {
+       .name = "slv_qhs_usb3_1",
+       .id = SC8180X_SLAVE_USB3_1,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_usb3_2 = {
+       .name = "slv_qhs_usb3_2",
+       .id = SC8180X_SLAVE_USB3_2,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_venus_cfg = {
+       .name = "slv_qhs_venus_cfg",
+       .id = SC8180X_SLAVE_VENUS_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_vsense_ctrl_cfg = {
+       .name = "slv_qhs_vsense_ctrl_cfg",
+       .id = SC8180X_SLAVE_VSENSE_CTRL_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_srvc_cnoc = {
+       .name = "slv_srvc_cnoc",
+       .id = SC8180X_SLAVE_SERVICE_CNOC,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_gemnoc = {
+       .name = "slv_qhs_gemnoc",
+       .id = SC8180X_SLAVE_GEM_NOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8180X_MASTER_GEM_NOC_CFG }
+};
+
+static struct qcom_icc_node slv_qhs_llcc = {
+       .name = "slv_qhs_llcc",
+       .id = SC8180X_SLAVE_LLCC_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_mdsp_ms_mpu_cfg = {
+       .name = "slv_qhs_mdsp_ms_mpu_cfg",
+       .id = SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qns_ecc = {
+       .name = "slv_qns_ecc",
+       .id = SC8180X_SLAVE_ECC,
+       .channels = 1,
+       .buswidth = 32
+};
+
+static struct qcom_icc_node slv_qns_gem_noc_snoc = {
+       .name = "slv_qns_gem_noc_snoc",
+       .id = SC8180X_SLAVE_GEM_NOC_SNOC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_MASTER_GEM_NOC_SNOC }
+};
+
+static struct qcom_icc_node slv_qns_llcc = {
+       .name = "slv_qns_llcc",
+       .id = SC8180X_SLAVE_LLCC,
+       .channels = 8,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8180X_MASTER_LLCC }
+};
+
+static struct qcom_icc_node slv_srvc_gemnoc = {
+       .name = "slv_srvc_gemnoc",
+       .id = SC8180X_SLAVE_SERVICE_GEM_NOC,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_srvc_gemnoc1 = {
+       .name = "slv_srvc_gemnoc1",
+       .id = SC8180X_SLAVE_SERVICE_GEM_NOC_1,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_ipa_core_slave = {
+       .name = "slv_ipa_core_slave",
+       .id = SC8180X_SLAVE_IPA_CORE,
+       .channels = 1,
+       .buswidth = 8
+};
+
+static struct qcom_icc_node slv_ebi = {
+       .name = "slv_ebi",
+       .id = SC8180X_SLAVE_EBI_CH0,
+       .channels = 8,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qns2_mem_noc = {
+       .name = "slv_qns2_mem_noc",
+       .id = SC8180X_SLAVE_MNOC_SF_MEM_NOC,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_MASTER_MNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node slv_qns_mem_noc_hf = {
+       .name = "slv_qns_mem_noc_hf",
+       .id = SC8180X_SLAVE_MNOC_HF_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_MASTER_MNOC_HF_MEM_NOC }
+};
+
+static struct qcom_icc_node slv_srvc_mnoc = {
+       .name = "slv_srvc_mnoc",
+       .id = SC8180X_SLAVE_SERVICE_MNOC,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qhs_apss = {
+       .name = "slv_qhs_apss",
+       .id = SC8180X_SLAVE_APPSS,
+       .channels = 1,
+       .buswidth = 8
+};
+
+static struct qcom_icc_node slv_qns_cnoc = {
+       .name = "slv_qns_cnoc",
+       .id = SC8180X_SNOC_CNOC_SLV,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_SNOC_CNOC_MAS }
+};
+
+static struct qcom_icc_node slv_qns_gemnoc_gc = {
+       .name = "slv_qns_gemnoc_gc",
+       .id = SC8180X_SLAVE_SNOC_GEM_NOC_GC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8180X_MASTER_SNOC_GC_MEM_NOC }
+};
+
+static struct qcom_icc_node slv_qns_gemnoc_sf = {
+       .name = "slv_qns_gemnoc_sf",
+       .id = SC8180X_SLAVE_SNOC_GEM_NOC_SF,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8180X_MASTER_SNOC_SF_MEM_NOC }
+};
+
+static struct qcom_icc_node slv_qxs_imem = {
+       .name = "slv_qxs_imem",
+       .id = SC8180X_SLAVE_OCIMEM,
+       .channels = 1,
+       .buswidth = 8
+};
+
+static struct qcom_icc_node slv_qxs_pimem = {
+       .name = "slv_qxs_pimem",
+       .id = SC8180X_SLAVE_PIMEM,
+       .channels = 1,
+       .buswidth = 8
+};
+
+static struct qcom_icc_node slv_srvc_snoc = {
+       .name = "slv_srvc_snoc",
+       .id = SC8180X_SLAVE_SERVICE_SNOC,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_xs_pcie_0 = {
+       .name = "slv_xs_pcie_0",
+       .id = SC8180X_SLAVE_PCIE_0,
+       .channels = 1,
+       .buswidth = 8
+};
+
+static struct qcom_icc_node slv_xs_pcie_1 = {
+       .name = "slv_xs_pcie_1",
+       .id = SC8180X_SLAVE_PCIE_1,
+       .channels = 1,
+       .buswidth = 8
+};
+
+static struct qcom_icc_node slv_xs_pcie_2 = {
+       .name = "slv_xs_pcie_2",
+       .id = SC8180X_SLAVE_PCIE_2,
+       .channels = 1,
+       .buswidth = 8
+};
+
+static struct qcom_icc_node slv_xs_pcie_3 = {
+       .name = "slv_xs_pcie_3",
+       .id = SC8180X_SLAVE_PCIE_3,
+       .channels = 1,
+       .buswidth = 8
+};
+
+static struct qcom_icc_node slv_xs_qdss_stm = {
+       .name = "slv_xs_qdss_stm",
+       .id = SC8180X_SLAVE_QDSS_STM,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_xs_sys_tcu_cfg = {
+       .name = "slv_xs_sys_tcu_cfg",
+       .id = SC8180X_SLAVE_TCU,
+       .channels = 1,
+       .buswidth = 8
+};
+
+static struct qcom_icc_node slv_qup_core_0 = {
+       .name = "slv_qup_core_0",
+       .id = SC8180X_SLAVE_QUP_CORE_0,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qup_core_1 = {
+       .name = "slv_qup_core_1",
+       .id = SC8180X_SLAVE_QUP_CORE_1,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_node slv_qup_core_2 = {
+       .name = "slv_qup_core_2",
+       .id = SC8180X_SLAVE_QUP_CORE_2,
+       .channels = 1,
+       .buswidth = 4
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+       .name = "ACV",
+       .num_nodes = 1,
+       .nodes = { &slv_ebi }
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+       .name = "MC0",
+       .keepalive = true,
+       .num_nodes = 1,
+       .nodes = { &slv_ebi }
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+       .name = "SH0",
+       .keepalive = true,
+       .num_nodes = 1,
+       .nodes = { &slv_qns_llcc }
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+       .name = "MM0",
+       .num_nodes = 1,
+       .nodes = { &slv_qns_mem_noc_hf }
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+       .name = "CO0",
+       .num_nodes = 1,
+       .nodes = { &slv_qns_cdsp_mem_noc }
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+       .name = "CE0",
+       .num_nodes = 1,
+       .nodes = { &mas_qxm_crypto }
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+       .name = "CN0",
+       .keepalive = true,
+       .num_nodes = 57,
+       .nodes = { &mas_qnm_snoc,
+                  &slv_qhs_a1_noc_cfg,
+                  &slv_qhs_a2_noc_cfg,
+                  &slv_qhs_ahb2phy_refgen_center,
+                  &slv_qhs_ahb2phy_refgen_east,
+                  &slv_qhs_ahb2phy_refgen_west,
+                  &slv_qhs_ahb2phy_south,
+                  &slv_qhs_aop,
+                  &slv_qhs_aoss,
+                  &slv_qhs_camera_cfg,
+                  &slv_qhs_clk_ctl,
+                  &slv_qhs_compute_dsp,
+                  &slv_qhs_cpr_cx,
+                  &slv_qhs_cpr_mmcx,
+                  &slv_qhs_cpr_mx,
+                  &slv_qhs_crypto0_cfg,
+                  &slv_qhs_ddrss_cfg,
+                  &slv_qhs_display_cfg,
+                  &slv_qhs_emac_cfg,
+                  &slv_qhs_glm,
+                  &slv_qhs_gpuss_cfg,
+                  &slv_qhs_imem_cfg,
+                  &slv_qhs_ipa,
+                  &slv_qhs_mnoc_cfg,
+                  &slv_qhs_npu_cfg,
+                  &slv_qhs_pcie0_cfg,
+                  &slv_qhs_pcie1_cfg,
+                  &slv_qhs_pcie2_cfg,
+                  &slv_qhs_pcie3_cfg,
+                  &slv_qhs_pdm,
+                  &slv_qhs_pimem_cfg,
+                  &slv_qhs_prng,
+                  &slv_qhs_qdss_cfg,
+                  &slv_qhs_qspi_0,
+                  &slv_qhs_qspi_1,
+                  &slv_qhs_qupv3_east0,
+                  &slv_qhs_qupv3_east1,
+                  &slv_qhs_qupv3_west,
+                  &slv_qhs_sdc2,
+                  &slv_qhs_sdc4,
+                  &slv_qhs_security,
+                  &slv_qhs_snoc_cfg,
+                  &slv_qhs_spss_cfg,
+                  &slv_qhs_tcsr,
+                  &slv_qhs_tlmm_east,
+                  &slv_qhs_tlmm_south,
+                  &slv_qhs_tlmm_west,
+                  &slv_qhs_tsif,
+                  &slv_qhs_ufs_card_cfg,
+                  &slv_qhs_ufs_mem0_cfg,
+                  &slv_qhs_ufs_mem1_cfg,
+                  &slv_qhs_usb3_0,
+                  &slv_qhs_usb3_1,
+                  &slv_qhs_usb3_2,
+                  &slv_qhs_venus_cfg,
+                  &slv_qhs_vsense_ctrl_cfg,
+                  &slv_srvc_cnoc }
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+       .name = "MM1",
+       .num_nodes = 7,
+       .nodes = { &mas_qxm_camnoc_hf0_uncomp,
+                  &mas_qxm_camnoc_hf1_uncomp,
+                  &mas_qxm_camnoc_sf_uncomp,
+                  &mas_qxm_camnoc_hf0,
+                  &mas_qxm_camnoc_hf1,
+                  &mas_qxm_mdp0,
+                  &mas_qxm_mdp1 }
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+       .name = "QUP0",
+       .num_nodes = 3,
+       .nodes = { &mas_qup_core_0,
+                  &mas_qup_core_1,
+                  &mas_qup_core_2 }
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+       .name = "SH2",
+       .num_nodes = 1,
+       .nodes = { &slv_qns_gem_noc_snoc }
+};
+
+static struct qcom_icc_bcm bcm_mm2 = {
+       .name = "MM2",
+       .num_nodes = 6,
+       .nodes = { &mas_qxm_camnoc_sf,
+                  &mas_qxm_rot,
+                  &mas_qxm_venus0,
+                  &mas_qxm_venus1,
+                  &mas_qxm_venus_arm9,
+                  &slv_qns2_mem_noc }
+};
+
+static struct qcom_icc_bcm bcm_sh3 = {
+       .name = "SH3",
+       .keepalive = true,
+       .num_nodes = 1,
+       .nodes = { &mas_acm_apps }
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+       .name = "SN0",
+       .nodes = { &slv_qns_gemnoc_sf }
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+       .name = "SN1",
+       .nodes = { &slv_qxs_imem }
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+       .name = "SN2",
+       .keepalive = true,
+       .nodes = { &slv_qns_gemnoc_gc }
+};
+
+static struct qcom_icc_bcm bcm_co2 = {
+       .name = "CO2",
+       .nodes = { &mas_qnm_npu }
+};
+
+static struct qcom_icc_bcm bcm_ip0 = {
+       .name = "IP0",
+       .nodes = { &slv_ipa_core_slave }
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+       .name = "SN3",
+       .keepalive = true,
+       .nodes = { &slv_srvc_aggre1_noc,
+                 &slv_qns_cnoc }
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+       .name = "SN4",
+       .nodes = { &slv_qxs_pimem }
+};
+
+static struct qcom_icc_bcm bcm_sn8 = {
+       .name = "SN8",
+       .num_nodes = 4,
+       .nodes = { &slv_xs_pcie_0,
+                  &slv_xs_pcie_1,
+                  &slv_xs_pcie_2,
+                  &slv_xs_pcie_3 }
+};
+
+static struct qcom_icc_bcm bcm_sn9 = {
+       .name = "SN9",
+       .num_nodes = 1,
+       .nodes = { &mas_qnm_aggre1_noc }
+};
+
+static struct qcom_icc_bcm bcm_sn11 = {
+       .name = "SN11",
+       .num_nodes = 1,
+       .nodes = { &mas_qnm_aggre2_noc }
+};
+
+static struct qcom_icc_bcm bcm_sn14 = {
+       .name = "SN14",
+       .num_nodes = 1,
+       .nodes = { &slv_qns_pcie_mem_noc }
+};
+
+static struct qcom_icc_bcm bcm_sn15 = {
+       .name = "SN15",
+       .keepalive = true,
+       .num_nodes = 1,
+       .nodes = { &mas_qnm_gemnoc }
+};
+
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
        &bcm_sn3,
        &bcm_ce0,
-       &bcm_qup0,
 };
 
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
        &bcm_sn14,
        &bcm_ce0,
-       &bcm_qup0,
 };
 
-static struct qcom_icc_bcm *camnoc_virt_bcms[] = {
+static struct qcom_icc_bcm * const camnoc_virt_bcms[] = {
        &bcm_mm1,
 };
 
-static struct qcom_icc_bcm *compute_noc_bcms[] = {
+static struct qcom_icc_bcm * const compute_noc_bcms[] = {
        &bcm_co0,
        &bcm_co2,
 };
 
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
        &bcm_cn0,
 };
 
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
        &bcm_sh0,
        &bcm_sh2,
        &bcm_sh3,
 };
 
-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
+static struct qcom_icc_bcm * const ipa_virt_bcms[] = {
        &bcm_ip0,
 };
 
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
        &bcm_mc0,
        &bcm_acv,
 };
 
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
        &bcm_mm0,
        &bcm_mm1,
        &bcm_mm2,
 };
 
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
        &bcm_sn0,
        &bcm_sn1,
        &bcm_sn2,
@@ -249,7 +1631,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
        &bcm_sn15,
 };
 
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
        [MASTER_A1NOC_CFG] = &mas_qhm_a1noc_cfg,
        [MASTER_UFS_CARD] = &mas_xm_ufs_card,
        [MASTER_UFS_GEN4] = &mas_xm_ufs_g4,
@@ -261,7 +1643,7 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
        [SLAVE_SERVICE_A1NOC] = &slv_srvc_aggre1_noc,
 };
 
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
        [MASTER_A2NOC_CFG] = &mas_qhm_a2noc_cfg,
        [MASTER_QDSS_BAM] = &mas_qhm_qdss_bam,
        [MASTER_QSPI_0] = &mas_qhm_qspi,
@@ -285,19 +1667,19 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
        [SLAVE_SERVICE_A2NOC] = &slv_srvc_aggre2_noc,
 };
 
-static struct qcom_icc_node *camnoc_virt_nodes[] = {
+static struct qcom_icc_node * const camnoc_virt_nodes[] = {
        [MASTER_CAMNOC_HF0_UNCOMP] = &mas_qxm_camnoc_hf0_uncomp,
        [MASTER_CAMNOC_HF1_UNCOMP] = &mas_qxm_camnoc_hf1_uncomp,
        [MASTER_CAMNOC_SF_UNCOMP] = &mas_qxm_camnoc_sf_uncomp,
        [SLAVE_CAMNOC_UNCOMP] = &slv_qns_camnoc_uncomp,
 };
 
-static struct qcom_icc_node *compute_noc_nodes[] = {
+static struct qcom_icc_node * const compute_noc_nodes[] = {
        [MASTER_NPU] = &mas_qnm_npu,
        [SLAVE_CDSP_MEM_NOC] = &slv_qns_cdsp_mem_noc,
 };
 
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
        [SNOC_CNOC_MAS] = &mas_qnm_snoc,
        [SLAVE_A1NOC_CFG] = &slv_qhs_a1_noc_cfg,
        [SLAVE_A2NOC_CFG] = &slv_qhs_a2_noc_cfg,
@@ -357,13 +1739,13 @@ static struct qcom_icc_node *config_noc_nodes[] = {
        [SLAVE_SERVICE_CNOC] = &slv_srvc_cnoc,
 };
 
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
        [MASTER_CNOC_DC_NOC] = &mas_qhm_cnoc_dc_noc,
        [SLAVE_GEM_NOC_CFG] = &slv_qhs_gemnoc,
        [SLAVE_LLCC_CFG] = &slv_qhs_llcc,
 };
 
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
        [MASTER_AMPSS_M0] = &mas_acm_apps,
        [MASTER_GPU_TCU] = &mas_acm_gpu_tcu,
        [MASTER_SYS_TCU] = &mas_acm_sys_tcu,
@@ -384,17 +1766,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
        [SLAVE_SERVICE_GEM_NOC_1] = &slv_srvc_gemnoc1,
 };
 
-static struct qcom_icc_node *ipa_virt_nodes[] = {
+static struct qcom_icc_node * const ipa_virt_nodes[] = {
        [MASTER_IPA_CORE] = &mas_ipa_core_master,
        [SLAVE_IPA_CORE] = &slv_ipa_core_slave,
 };
 
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
        [MASTER_LLCC] = &mas_llcc_mc,
        [SLAVE_EBI_CH0] = &slv_ebi,
 };
 
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
        [MASTER_CNOC_MNOC_CFG] = &mas_qhm_mnoc_cfg,
        [MASTER_CAMNOC_HF0] = &mas_qxm_camnoc_hf0,
        [MASTER_CAMNOC_HF1] = &mas_qxm_camnoc_hf1,
@@ -410,7 +1792,7 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
        [SLAVE_SERVICE_MNOC] = &slv_srvc_mnoc,
 };
 
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
        [MASTER_SNOC_CFG] = &mas_qhm_snoc_cfg,
        [A1NOC_SNOC_MAS] = &mas_qnm_aggre1_noc,
        [A2NOC_SNOC_MAS] = &mas_qnm_aggre2_noc,
@@ -503,97 +1885,25 @@ static const struct qcom_icc_desc sc8180x_system_noc  = {
        .num_bcms = ARRAY_SIZE(system_noc_bcms),
 };
 
-static int qnoc_probe(struct platform_device *pdev)
-{
-       const struct qcom_icc_desc *desc;
-       struct icc_onecell_data *data;
-       struct icc_provider *provider;
-       struct qcom_icc_node **qnodes;
-       struct qcom_icc_provider *qp;
-       struct icc_node *node;
-       size_t num_nodes, i;
-       int ret;
-
-       desc = device_get_match_data(&pdev->dev);
-       if (!desc)
-               return -EINVAL;
-
-       qnodes = desc->nodes;
-       num_nodes = desc->num_nodes;
-
-       qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
-       if (!qp)
-               return -ENOMEM;
-
-       data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
-       if (!data)
-               return -ENOMEM;
-
-       provider = &qp->provider;
-       provider->dev = &pdev->dev;
-       provider->set = qcom_icc_set;
-       provider->pre_aggregate = qcom_icc_pre_aggregate;
-       provider->aggregate = qcom_icc_aggregate;
-       provider->xlate = of_icc_xlate_onecell;
-       INIT_LIST_HEAD(&provider->nodes);
-       provider->data = data;
-
-       qp->dev = &pdev->dev;
-       qp->bcms = desc->bcms;
-       qp->num_bcms = desc->num_bcms;
-
-       qp->voter = of_bcm_voter_get(qp->dev, NULL);
-       if (IS_ERR(qp->voter))
-               return PTR_ERR(qp->voter);
-
-       ret = icc_provider_add(provider);
-       if (ret) {
-               dev_err(&pdev->dev, "error adding interconnect provider\n");
-               return ret;
-       }
-
-       for (i = 0; i < qp->num_bcms; i++)
-               qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
-
-       for (i = 0; i < num_nodes; i++) {
-               size_t j;
-
-               if (!qnodes[i])
-                       continue;
-
-               node = icc_node_create(qnodes[i]->id);
-               if (IS_ERR(node)) {
-                       ret = PTR_ERR(node);
-                       goto err;
-               }
-
-               node->name = qnodes[i]->name;
-               node->data = qnodes[i];
-               icc_node_add(node, provider);
-
-               for (j = 0; j < qnodes[i]->num_links; j++)
-                       icc_link_create(node, qnodes[i]->links[j]);
-
-               data->nodes[i] = node;
-       }
-       data->num_nodes = num_nodes;
-
-       platform_set_drvdata(pdev, qp);
-
-       return 0;
-err:
-       icc_nodes_remove(provider);
-       icc_provider_del(provider);
-       return ret;
-}
-
-static int qnoc_remove(struct platform_device *pdev)
-{
-       struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
-
-       icc_nodes_remove(&qp->provider);
-       return icc_provider_del(&qp->provider);
-}
+static struct qcom_icc_bcm * const qup_virt_bcms[] = {
+       &bcm_qup0,
+};
+
+static struct qcom_icc_node *qup_virt_nodes[] = {
+       [MASTER_QUP_CORE_0] = &mas_qup_core_0,
+       [MASTER_QUP_CORE_1] = &mas_qup_core_1,
+       [MASTER_QUP_CORE_2] = &mas_qup_core_2,
+       [SLAVE_QUP_CORE_0] = &slv_qup_core_0,
+       [SLAVE_QUP_CORE_1] = &slv_qup_core_1,
+       [SLAVE_QUP_CORE_2] = &slv_qup_core_2,
+};
+
+static const struct qcom_icc_desc sc8180x_qup_virt = {
+       .nodes = qup_virt_nodes,
+       .num_nodes = ARRAY_SIZE(qup_virt_nodes),
+       .bcms = qup_virt_bcms,
+       .num_bcms = ARRAY_SIZE(qup_virt_bcms),
+};
 
 static const struct of_device_id qnoc_of_match[] = {
        { .compatible = "qcom,sc8180x-aggre1-noc", .data = &sc8180x_aggre1_noc },
@@ -606,14 +1916,15 @@ static const struct of_device_id qnoc_of_match[] = {
        { .compatible = "qcom,sc8180x-ipa-virt", .data = &sc8180x_ipa_virt },
        { .compatible = "qcom,sc8180x-mc-virt", .data = &sc8180x_mc_virt },
        { .compatible = "qcom,sc8180x-mmss-noc", .data = &sc8180x_mmss_noc },
+       { .compatible = "qcom,sc8180x-qup-virt", .data = &sc8180x_qup_virt },
        { .compatible = "qcom,sc8180x-system-noc", .data = &sc8180x_system_noc },
        { }
 };
 MODULE_DEVICE_TABLE(of, qnoc_of_match);
 
 static struct platform_driver qnoc_driver = {
-       .probe = qnoc_probe,
-       .remove = qnoc_remove,
+       .probe = qcom_icc_rpmh_probe,
+       .remove = qcom_icc_rpmh_remove,
        .driver = {
                .name = "qnoc-sc8180x",
                .of_match_table = qnoc_of_match,
index e70cf70..2eafd35 100644 (file)
 #define SC8180X_MASTER_OSM_L3_APPS             161
 #define SC8180X_SLAVE_OSM_L3                   162
 
+#define SC8180X_MASTER_QUP_CORE_0              163
+#define SC8180X_MASTER_QUP_CORE_1              164
+#define SC8180X_MASTER_QUP_CORE_2              165
+#define SC8180X_SLAVE_QUP_CORE_0               166
+#define SC8180X_SLAVE_QUP_CORE_1               167
+#define SC8180X_SLAVE_QUP_CORE_2               168
+
 #endif
diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c
new file mode 100644 (file)
index 0000000..507fe5f
--- /dev/null
@@ -0,0 +1,2438 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sc8280xp.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sc8280xp.h"
+
+static struct qcom_icc_node qhm_qspi = {
+       .name = "qhm_qspi",
+       .id = SC8280XP_MASTER_QSPI_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+       .name = "qhm_qup1",
+       .id = SC8280XP_MASTER_QUP_1,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+       .name = "qhm_qup2",
+       .id = SC8280XP_MASTER_QUP_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_a1noc_cfg = {
+       .name = "qnm_a1noc_cfg",
+       .id = SC8280XP_MASTER_A1NOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .links = { SC8280XP_SLAVE_SERVICE_A1NOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+       .name = "qxm_ipa",
+       .id = SC8280XP_MASTER_IPA,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_emac_1 = {
+       .name = "xm_emac_1",
+       .id = SC8280XP_MASTER_EMAC_1,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+       .name = "xm_sdc4",
+       .id = SC8280XP_MASTER_SDCC_4,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+       .name = "xm_ufs_mem",
+       .id = SC8280XP_MASTER_UFS_MEM,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+       .name = "xm_usb3_0",
+       .id = SC8280XP_MASTER_USB3_0,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_1 = {
+       .name = "xm_usb3_1",
+       .id = SC8280XP_MASTER_USB3_1,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_mp = {
+       .name = "xm_usb3_mp",
+       .id = SC8280XP_MASTER_USB3_MP,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb4_host0 = {
+       .name = "xm_usb4_host0",
+       .id = SC8280XP_MASTER_USB4_0,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb4_host1 = {
+       .name = "xm_usb4_host1",
+       .id = SC8280XP_MASTER_USB4_1,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+       .name = "qhm_qdss_bam",
+       .id = SC8280XP_MASTER_QDSS_BAM,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+       .name = "qhm_qup0",
+       .id = SC8280XP_MASTER_QUP_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_a2noc_cfg = {
+       .name = "qnm_a2noc_cfg",
+       .id = SC8280XP_MASTER_A2NOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_SERVICE_A2NOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+       .name = "qxm_crypto",
+       .id = SC8280XP_MASTER_CRYPTO,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sensorss_q6 = {
+       .name = "qxm_sensorss_q6",
+       .id = SC8280XP_MASTER_SENSORS_PROC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sp = {
+       .name = "qxm_sp",
+       .id = SC8280XP_MASTER_SP,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_emac_0 = {
+       .name = "xm_emac_0",
+       .id = SC8280XP_MASTER_EMAC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+       .name = "xm_pcie3_0",
+       .id = SC8280XP_MASTER_PCIE_0,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_1 = {
+       .name = "xm_pcie3_1",
+       .id = SC8280XP_MASTER_PCIE_1,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_2a = {
+       .name = "xm_pcie3_2a",
+       .id = SC8280XP_MASTER_PCIE_2A,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_2b = {
+       .name = "xm_pcie3_2b",
+       .id = SC8280XP_MASTER_PCIE_2B,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_3a = {
+       .name = "xm_pcie3_3a",
+       .id = SC8280XP_MASTER_PCIE_3A,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_3b = {
+       .name = "xm_pcie3_3b",
+       .id = SC8280XP_MASTER_PCIE_3B,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_4 = {
+       .name = "xm_pcie3_4",
+       .id = SC8280XP_MASTER_PCIE_4,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+       .name = "xm_qdss_etr",
+       .id = SC8280XP_MASTER_QDSS_ETR,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+       .name = "xm_sdc2",
+       .id = SC8280XP_MASTER_SDCC_2,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_card = {
+       .name = "xm_ufs_card",
+       .id = SC8280XP_MASTER_UFS_CARD,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node ipa_core_master = {
+       .name = "ipa_core_master",
+       .id = SC8280XP_MASTER_IPA_CORE,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_IPA_CORE },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+       .name = "qup0_core_master",
+       .id = SC8280XP_MASTER_QUP_CORE_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+       .name = "qup1_core_master",
+       .id = SC8280XP_MASTER_QUP_CORE_1,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+       .name = "qup2_core_master",
+       .id = SC8280XP_MASTER_QUP_CORE_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_QUP_CORE_2 },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+       .name = "qnm_gemnoc_cnoc",
+       .id = SC8280XP_MASTER_GEM_NOC_CNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 76,
+       .links = { SC8280XP_SLAVE_AHB2PHY_0,
+                  SC8280XP_SLAVE_AHB2PHY_1,
+                  SC8280XP_SLAVE_AHB2PHY_2,
+                  SC8280XP_SLAVE_AOSS,
+                  SC8280XP_SLAVE_APPSS,
+                  SC8280XP_SLAVE_CAMERA_CFG,
+                  SC8280XP_SLAVE_CLK_CTL,
+                  SC8280XP_SLAVE_CDSP_CFG,
+                  SC8280XP_SLAVE_CDSP1_CFG,
+                  SC8280XP_SLAVE_RBCPR_CX_CFG,
+                  SC8280XP_SLAVE_RBCPR_MMCX_CFG,
+                  SC8280XP_SLAVE_RBCPR_MX_CFG,
+                  SC8280XP_SLAVE_CPR_NSPCX,
+                  SC8280XP_SLAVE_CRYPTO_0_CFG,
+                  SC8280XP_SLAVE_CX_RDPM,
+                  SC8280XP_SLAVE_DCC_CFG,
+                  SC8280XP_SLAVE_DISPLAY_CFG,
+                  SC8280XP_SLAVE_DISPLAY1_CFG,
+                  SC8280XP_SLAVE_EMAC_CFG,
+                  SC8280XP_SLAVE_EMAC1_CFG,
+                  SC8280XP_SLAVE_GFX3D_CFG,
+                  SC8280XP_SLAVE_HWKM,
+                  SC8280XP_SLAVE_IMEM_CFG,
+                  SC8280XP_SLAVE_IPA_CFG,
+                  SC8280XP_SLAVE_IPC_ROUTER_CFG,
+                  SC8280XP_SLAVE_LPASS,
+                  SC8280XP_SLAVE_MX_RDPM,
+                  SC8280XP_SLAVE_MXC_RDPM,
+                  SC8280XP_SLAVE_PCIE_0_CFG,
+                  SC8280XP_SLAVE_PCIE_1_CFG,
+                  SC8280XP_SLAVE_PCIE_2A_CFG,
+                  SC8280XP_SLAVE_PCIE_2B_CFG,
+                  SC8280XP_SLAVE_PCIE_3A_CFG,
+                  SC8280XP_SLAVE_PCIE_3B_CFG,
+                  SC8280XP_SLAVE_PCIE_4_CFG,
+                  SC8280XP_SLAVE_PCIE_RSC_CFG,
+                  SC8280XP_SLAVE_PDM,
+                  SC8280XP_SLAVE_PIMEM_CFG,
+                  SC8280XP_SLAVE_PKA_WRAPPER_CFG,
+                  SC8280XP_SLAVE_PMU_WRAPPER_CFG,
+                  SC8280XP_SLAVE_QDSS_CFG,
+                  SC8280XP_SLAVE_QSPI_0,
+                  SC8280XP_SLAVE_QUP_0,
+                  SC8280XP_SLAVE_QUP_1,
+                  SC8280XP_SLAVE_QUP_2,
+                  SC8280XP_SLAVE_SDCC_2,
+                  SC8280XP_SLAVE_SDCC_4,
+                  SC8280XP_SLAVE_SECURITY,
+                  SC8280XP_SLAVE_SMMUV3_CFG,
+                  SC8280XP_SLAVE_SMSS_CFG,
+                  SC8280XP_SLAVE_SPSS_CFG,
+                  SC8280XP_SLAVE_TCSR,
+                  SC8280XP_SLAVE_TLMM,
+                  SC8280XP_SLAVE_UFS_CARD_CFG,
+                  SC8280XP_SLAVE_UFS_MEM_CFG,
+                  SC8280XP_SLAVE_USB3_0,
+                  SC8280XP_SLAVE_USB3_1,
+                  SC8280XP_SLAVE_USB3_MP,
+                  SC8280XP_SLAVE_USB4_0,
+                  SC8280XP_SLAVE_USB4_1,
+                  SC8280XP_SLAVE_VENUS_CFG,
+                  SC8280XP_SLAVE_VSENSE_CTRL_CFG,
+                  SC8280XP_SLAVE_VSENSE_CTRL_R_CFG,
+                  SC8280XP_SLAVE_A1NOC_CFG,
+                  SC8280XP_SLAVE_A2NOC_CFG,
+                  SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG,
+                  SC8280XP_SLAVE_DDRSS_CFG,
+                  SC8280XP_SLAVE_CNOC_MNOC_CFG,
+                  SC8280XP_SLAVE_SNOC_CFG,
+                  SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG,
+                  SC8280XP_SLAVE_IMEM,
+                  SC8280XP_SLAVE_PIMEM,
+                  SC8280XP_SLAVE_SERVICE_CNOC,
+                  SC8280XP_SLAVE_QDSS_STM,
+                  SC8280XP_SLAVE_SMSS,
+                  SC8280XP_SLAVE_TCU
+       },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+       .name = "qnm_gemnoc_pcie",
+       .id = SC8280XP_MASTER_GEM_NOC_PCIE_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 7,
+       .links = { SC8280XP_SLAVE_PCIE_0,
+                  SC8280XP_SLAVE_PCIE_1,
+                  SC8280XP_SLAVE_PCIE_2A,
+                  SC8280XP_SLAVE_PCIE_2B,
+                  SC8280XP_SLAVE_PCIE_3A,
+                  SC8280XP_SLAVE_PCIE_3B,
+                  SC8280XP_SLAVE_PCIE_4
+       },
+};
+
+static struct qcom_icc_node qnm_cnoc_dc_noc = {
+       .name = "qnm_cnoc_dc_noc",
+       .id = SC8280XP_MASTER_CNOC_DC_NOC,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 2,
+       .links = { SC8280XP_SLAVE_LLCC_CFG,
+                  SC8280XP_SLAVE_GEM_NOC_CFG
+       },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+       .name = "alm_gpu_tcu",
+       .id = SC8280XP_MASTER_GPU_TCU,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 2,
+       .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+                  SC8280XP_SLAVE_LLCC
+       },
+};
+
+static struct qcom_icc_node alm_pcie_tcu = {
+       .name = "alm_pcie_tcu",
+       .id = SC8280XP_MASTER_PCIE_TCU,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 2,
+       .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+                  SC8280XP_SLAVE_LLCC
+       },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+       .name = "alm_sys_tcu",
+       .id = SC8280XP_MASTER_SYS_TCU,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 2,
+       .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+                  SC8280XP_SLAVE_LLCC
+       },
+};
+
+static struct qcom_icc_node chm_apps = {
+       .name = "chm_apps",
+       .id = SC8280XP_MASTER_APPSS_PROC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 3,
+       .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+                  SC8280XP_SLAVE_LLCC,
+                  SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC
+       },
+};
+
+static struct qcom_icc_node qnm_cmpnoc0 = {
+       .name = "qnm_cmpnoc0",
+       .id = SC8280XP_MASTER_COMPUTE_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+                  SC8280XP_SLAVE_LLCC
+       },
+};
+
+static struct qcom_icc_node qnm_cmpnoc1 = {
+       .name = "qnm_cmpnoc1",
+       .id = SC8280XP_MASTER_COMPUTE_NOC_1,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+                  SC8280XP_SLAVE_LLCC
+       },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cfg = {
+       .name = "qnm_gemnoc_cfg",
+       .id = SC8280XP_MASTER_GEM_NOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 3,
+       .links = { SC8280XP_SLAVE_SERVICE_GEM_NOC_1,
+                  SC8280XP_SLAVE_SERVICE_GEM_NOC_2,
+                  SC8280XP_SLAVE_SERVICE_GEM_NOC
+       },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+       .name = "qnm_gpu",
+       .id = SC8280XP_MASTER_GFX3D,
+       .channels = 4,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+                  SC8280XP_SLAVE_LLCC
+       },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+       .name = "qnm_mnoc_hf",
+       .id = SC8280XP_MASTER_MNOC_HF_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { SC8280XP_SLAVE_LLCC,
+                  SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC
+       },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+       .name = "qnm_mnoc_sf",
+       .id = SC8280XP_MASTER_MNOC_SF_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+                  SC8280XP_SLAVE_LLCC
+       },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+       .name = "qnm_pcie",
+       .id = SC8280XP_MASTER_ANOC_PCIE_GEM_NOC,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+                  SC8280XP_SLAVE_LLCC
+       },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+       .name = "qnm_snoc_gc",
+       .id = SC8280XP_MASTER_SNOC_GC_MEM_NOC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+       .name = "qnm_snoc_sf",
+       .id = SC8280XP_MASTER_SNOC_SF_MEM_NOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 3,
+       .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
+                  SC8280XP_SLAVE_LLCC,
+                  SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC },
+};
+
+static struct qcom_icc_node qhm_config_noc = {
+       .name = "qhm_config_noc",
+       .id = SC8280XP_MASTER_CNOC_LPASS_AG_NOC,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 6,
+       .links = { SC8280XP_SLAVE_LPASS_CORE_CFG,
+                  SC8280XP_SLAVE_LPASS_LPI_CFG,
+                  SC8280XP_SLAVE_LPASS_MPU_CFG,
+                  SC8280XP_SLAVE_LPASS_TOP_CFG,
+                  SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC,
+                  SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC
+       },
+};
+
+static struct qcom_icc_node qxm_lpass_dsp = {
+       .name = "qxm_lpass_dsp",
+       .id = SC8280XP_MASTER_LPASS_PROC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 4,
+       .links = { SC8280XP_SLAVE_LPASS_TOP_CFG,
+                  SC8280XP_SLAVE_LPASS_SNOC,
+                  SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC,
+                  SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC
+       },
+};
+
+static struct qcom_icc_node llcc_mc = {
+       .name = "llcc_mc",
+       .id = SC8280XP_MASTER_LLCC,
+       .channels = 8,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+       .name = "qnm_camnoc_hf",
+       .id = SC8280XP_MASTER_CAMNOC_HF,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp0_0 = {
+       .name = "qnm_mdp0_0",
+       .id = SC8280XP_MASTER_MDP0,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp0_1 = {
+       .name = "qnm_mdp0_1",
+       .id = SC8280XP_MASTER_MDP1,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp1_0 = {
+       .name = "qnm_mdp1_0",
+       .id = SC8280XP_MASTER_MDP_CORE1_0,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp1_1 = {
+       .name = "qnm_mdp1_1",
+       .id = SC8280XP_MASTER_MDP_CORE1_1,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_cfg = {
+       .name = "qnm_mnoc_cfg",
+       .id = SC8280XP_MASTER_CNOC_MNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qnm_rot_0 = {
+       .name = "qnm_rot_0",
+       .id = SC8280XP_MASTER_ROTATOR,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_rot_1 = {
+       .name = "qnm_rot_1",
+       .id = SC8280XP_MASTER_ROTATOR_1,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video0 = {
+       .name = "qnm_video0",
+       .id = SC8280XP_MASTER_VIDEO_P0,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video1 = {
+       .name = "qnm_video1",
+       .id = SC8280XP_MASTER_VIDEO_P1,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cvp = {
+       .name = "qnm_video_cvp",
+       .id = SC8280XP_MASTER_VIDEO_PROC,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_icp = {
+       .name = "qxm_camnoc_icp",
+       .id = SC8280XP_MASTER_CAMNOC_ICP,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf = {
+       .name = "qxm_camnoc_sf",
+       .id = SC8280XP_MASTER_CAMNOC_SF,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qhm_nsp_noc_config = {
+       .name = "qhm_nsp_noc_config",
+       .id = SC8280XP_MASTER_CDSP_NOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_SERVICE_NSP_NOC },
+};
+
+static struct qcom_icc_node qxm_nsp = {
+       .name = "qxm_nsp",
+       .id = SC8280XP_MASTER_CDSP_PROC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { SC8280XP_SLAVE_CDSP_MEM_NOC,
+                  SC8280XP_SLAVE_NSP_XFR
+       },
+};
+
+static struct qcom_icc_node qhm_nspb_noc_config = {
+       .name = "qhm_nspb_noc_config",
+       .id = SC8280XP_MASTER_CDSPB_NOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_SERVICE_NSPB_NOC },
+};
+
+static struct qcom_icc_node qxm_nspb = {
+       .name = "qxm_nspb",
+       .id = SC8280XP_MASTER_CDSP_PROC_B,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { SC8280XP_SLAVE_CDSPB_MEM_NOC,
+                  SC8280XP_SLAVE_NSPB_XFR
+       },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+       .name = "qnm_aggre1_noc",
+       .id = SC8280XP_MASTER_A1NOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+       .name = "qnm_aggre2_noc",
+       .id = SC8280XP_MASTER_A2NOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre_usb_noc = {
+       .name = "qnm_aggre_usb_noc",
+       .id = SC8280XP_MASTER_USB_NOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_lpass_noc = {
+       .name = "qnm_lpass_noc",
+       .id = SC8280XP_MASTER_LPASS_ANOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_snoc_cfg = {
+       .name = "qnm_snoc_cfg",
+       .id = SC8280XP_MASTER_SNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+       .name = "qxm_pimem",
+       .id = SC8280XP_MASTER_PIMEM,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node xm_gic = {
+       .name = "xm_gic",
+       .id = SC8280XP_MASTER_GIC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+       .name = "qns_a1noc_snoc",
+       .id = SC8280XP_SLAVE_A1NOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_aggre_usb_snoc = {
+       .name = "qns_aggre_usb_snoc",
+       .id = SC8280XP_SLAVE_USB_NOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre1_noc = {
+       .name = "srvc_aggre1_noc",
+       .id = SC8280XP_SLAVE_SERVICE_A1NOC,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+       .name = "qns_a2noc_snoc",
+       .id = SC8280XP_SLAVE_A2NOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_pcie_gem_noc = {
+       .name = "qns_pcie_gem_noc",
+       .id = SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node srvc_aggre2_noc = {
+       .name = "srvc_aggre2_noc",
+       .id = SC8280XP_SLAVE_SERVICE_A2NOC,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node ipa_core_slave = {
+       .name = "ipa_core_slave",
+       .id = SC8280XP_SLAVE_IPA_CORE,
+       .channels = 1,
+       .buswidth = 8,
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+       .name = "qup0_core_slave",
+       .id = SC8280XP_SLAVE_QUP_CORE_0,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+       .name = "qup1_core_slave",
+       .id = SC8280XP_SLAVE_QUP_CORE_1,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+       .name = "qup2_core_slave",
+       .id = SC8280XP_SLAVE_QUP_CORE_2,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+       .name = "qhs_ahb2phy0",
+       .id = SC8280XP_SLAVE_AHB2PHY_0,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+       .name = "qhs_ahb2phy1",
+       .id = SC8280XP_SLAVE_AHB2PHY_1,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy2 = {
+       .name = "qhs_ahb2phy2",
+       .id = SC8280XP_SLAVE_AHB2PHY_2,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+       .name = "qhs_aoss",
+       .id = SC8280XP_SLAVE_AOSS,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_apss = {
+       .name = "qhs_apss",
+       .id = SC8280XP_SLAVE_APPSS,
+       .channels = 1,
+       .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+       .name = "qhs_camera_cfg",
+       .id = SC8280XP_SLAVE_CAMERA_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+       .name = "qhs_clk_ctl",
+       .id = SC8280XP_SLAVE_CLK_CTL,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_compute0_cfg = {
+       .name = "qhs_compute0_cfg",
+       .id = SC8280XP_SLAVE_CDSP_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_CDSP_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_compute1_cfg = {
+       .name = "qhs_compute1_cfg",
+       .id = SC8280XP_SLAVE_CDSP1_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_CDSPB_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+       .name = "qhs_cpr_cx",
+       .id = SC8280XP_SLAVE_RBCPR_CX_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mmcx = {
+       .name = "qhs_cpr_mmcx",
+       .id = SC8280XP_SLAVE_RBCPR_MMCX_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mx = {
+       .name = "qhs_cpr_mx",
+       .id = SC8280XP_SLAVE_RBCPR_MX_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_nspcx = {
+       .name = "qhs_cpr_nspcx",
+       .id = SC8280XP_SLAVE_CPR_NSPCX,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+       .name = "qhs_crypto0_cfg",
+       .id = SC8280XP_SLAVE_CRYPTO_0_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cx_rdpm = {
+       .name = "qhs_cx_rdpm",
+       .id = SC8280XP_SLAVE_CX_RDPM,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_dcc_cfg = {
+       .name = "qhs_dcc_cfg",
+       .id = SC8280XP_SLAVE_DCC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display0_cfg = {
+       .name = "qhs_display0_cfg",
+       .id = SC8280XP_SLAVE_DISPLAY_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display1_cfg = {
+       .name = "qhs_display1_cfg",
+       .id = SC8280XP_SLAVE_DISPLAY1_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_emac0_cfg = {
+       .name = "qhs_emac0_cfg",
+       .id = SC8280XP_SLAVE_EMAC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_emac1_cfg = {
+       .name = "qhs_emac1_cfg",
+       .id = SC8280XP_SLAVE_EMAC1_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+       .name = "qhs_gpuss_cfg",
+       .id = SC8280XP_SLAVE_GFX3D_CFG,
+       .channels = 1,
+       .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_hwkm = {
+       .name = "qhs_hwkm",
+       .id = SC8280XP_SLAVE_HWKM,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+       .name = "qhs_imem_cfg",
+       .id = SC8280XP_SLAVE_IMEM_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+       .name = "qhs_ipa",
+       .id = SC8280XP_SLAVE_IPA_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+       .name = "qhs_ipc_router",
+       .id = SC8280XP_SLAVE_IPC_ROUTER_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_cfg = {
+       .name = "qhs_lpass_cfg",
+       .id = SC8280XP_SLAVE_LPASS,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_CNOC_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qhs_mx_rdpm = {
+       .name = "qhs_mx_rdpm",
+       .id = SC8280XP_SLAVE_MX_RDPM,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mxc_rdpm = {
+       .name = "qhs_mxc_rdpm",
+       .id = SC8280XP_SLAVE_MXC_RDPM,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+       .name = "qhs_pcie0_cfg",
+       .id = SC8280XP_SLAVE_PCIE_0_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+       .name = "qhs_pcie1_cfg",
+       .id = SC8280XP_SLAVE_PCIE_1_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie2a_cfg = {
+       .name = "qhs_pcie2a_cfg",
+       .id = SC8280XP_SLAVE_PCIE_2A_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie2b_cfg = {
+       .name = "qhs_pcie2b_cfg",
+       .id = SC8280XP_SLAVE_PCIE_2B_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie3a_cfg = {
+       .name = "qhs_pcie3a_cfg",
+       .id = SC8280XP_SLAVE_PCIE_3A_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie3b_cfg = {
+       .name = "qhs_pcie3b_cfg",
+       .id = SC8280XP_SLAVE_PCIE_3B_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie4_cfg = {
+       .name = "qhs_pcie4_cfg",
+       .id = SC8280XP_SLAVE_PCIE_4_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie_rsc_cfg = {
+       .name = "qhs_pcie_rsc_cfg",
+       .id = SC8280XP_SLAVE_PCIE_RSC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+       .name = "qhs_pdm",
+       .id = SC8280XP_SLAVE_PDM,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+       .name = "qhs_pimem_cfg",
+       .id = SC8280XP_SLAVE_PIMEM_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pka_wrapper_cfg = {
+       .name = "qhs_pka_wrapper_cfg",
+       .id = SC8280XP_SLAVE_PKA_WRAPPER_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pmu_wrapper_cfg = {
+       .name = "qhs_pmu_wrapper_cfg",
+       .id = SC8280XP_SLAVE_PMU_WRAPPER_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+       .name = "qhs_qdss_cfg",
+       .id = SC8280XP_SLAVE_QDSS_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+       .name = "qhs_qspi",
+       .id = SC8280XP_SLAVE_QSPI_0,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+       .name = "qhs_qup0",
+       .id = SC8280XP_SLAVE_QUP_0,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+       .name = "qhs_qup1",
+       .id = SC8280XP_SLAVE_QUP_1,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+       .name = "qhs_qup2",
+       .id = SC8280XP_SLAVE_QUP_2,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+       .name = "qhs_sdc2",
+       .id = SC8280XP_SLAVE_SDCC_2,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+       .name = "qhs_sdc4",
+       .id = SC8280XP_SLAVE_SDCC_4,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_security = {
+       .name = "qhs_security",
+       .id = SC8280XP_SLAVE_SECURITY,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_smmuv3_cfg = {
+       .name = "qhs_smmuv3_cfg",
+       .id = SC8280XP_SLAVE_SMMUV3_CFG,
+       .channels = 1,
+       .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_smss_cfg = {
+       .name = "qhs_smss_cfg",
+       .id = SC8280XP_SLAVE_SMSS_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_spss_cfg = {
+       .name = "qhs_spss_cfg",
+       .id = SC8280XP_SLAVE_SPSS_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+       .name = "qhs_tcsr",
+       .id = SC8280XP_SLAVE_TCSR,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+       .name = "qhs_tlmm",
+       .id = SC8280XP_SLAVE_TLMM,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_card_cfg = {
+       .name = "qhs_ufs_card_cfg",
+       .id = SC8280XP_SLAVE_UFS_CARD_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+       .name = "qhs_ufs_mem_cfg",
+       .id = SC8280XP_SLAVE_UFS_MEM_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+       .name = "qhs_usb3_0",
+       .id = SC8280XP_SLAVE_USB3_0,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_1 = {
+       .name = "qhs_usb3_1",
+       .id = SC8280XP_SLAVE_USB3_1,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_mp = {
+       .name = "qhs_usb3_mp",
+       .id = SC8280XP_SLAVE_USB3_MP,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb4_host_0 = {
+       .name = "qhs_usb4_host_0",
+       .id = SC8280XP_SLAVE_USB4_0,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb4_host_1 = {
+       .name = "qhs_usb4_host_1",
+       .id = SC8280XP_SLAVE_USB4_1,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+       .name = "qhs_venus_cfg",
+       .id = SC8280XP_SLAVE_VENUS_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+       .name = "qhs_vsense_ctrl_cfg",
+       .id = SC8280XP_SLAVE_VSENSE_CTRL_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_r_cfg = {
+       .name = "qhs_vsense_ctrl_r_cfg",
+       .id = SC8280XP_SLAVE_VSENSE_CTRL_R_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a1_noc_cfg = {
+       .name = "qns_a1_noc_cfg",
+       .id = SC8280XP_SLAVE_A1NOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qns_a2_noc_cfg = {
+       .name = "qns_a2_noc_cfg",
+       .id = SC8280XP_SLAVE_A2NOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_A2NOC_CFG },
+};
+
+static struct qcom_icc_node qns_anoc_pcie_bridge_cfg = {
+       .name = "qns_anoc_pcie_bridge_cfg",
+       .id = SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_ddrss_cfg = {
+       .name = "qns_ddrss_cfg",
+       .id = SC8280XP_SLAVE_DDRSS_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_CNOC_DC_NOC },
+};
+
+static struct qcom_icc_node qns_mnoc_cfg = {
+       .name = "qns_mnoc_cfg",
+       .id = SC8280XP_SLAVE_CNOC_MNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qns_snoc_cfg = {
+       .name = "qns_snoc_cfg",
+       .id = SC8280XP_SLAVE_SNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qns_snoc_sf_bridge_cfg = {
+       .name = "qns_snoc_sf_bridge_cfg",
+       .id = SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qxs_imem = {
+       .name = "qxs_imem",
+       .id = SC8280XP_SLAVE_IMEM,
+       .channels = 1,
+       .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+       .name = "qxs_pimem",
+       .id = SC8280XP_SLAVE_PIMEM,
+       .channels = 1,
+       .buswidth = 8,
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+       .name = "srvc_cnoc",
+       .id = SC8280XP_SLAVE_SERVICE_CNOC,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+       .name = "xs_pcie_0",
+       .id = SC8280XP_SLAVE_PCIE_0,
+       .channels = 1,
+       .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+       .name = "xs_pcie_1",
+       .id = SC8280XP_SLAVE_PCIE_1,
+       .channels = 1,
+       .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_2a = {
+       .name = "xs_pcie_2a",
+       .id = SC8280XP_SLAVE_PCIE_2A,
+       .channels = 1,
+       .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_2b = {
+       .name = "xs_pcie_2b",
+       .id = SC8280XP_SLAVE_PCIE_2B,
+       .channels = 1,
+       .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_pcie_3a = {
+       .name = "xs_pcie_3a",
+       .id = SC8280XP_SLAVE_PCIE_3A,
+       .channels = 1,
+       .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_3b = {
+       .name = "xs_pcie_3b",
+       .id = SC8280XP_SLAVE_PCIE_3B,
+       .channels = 1,
+       .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_pcie_4 = {
+       .name = "xs_pcie_4",
+       .id = SC8280XP_SLAVE_PCIE_4,
+       .channels = 1,
+       .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+       .name = "xs_qdss_stm",
+       .id = SC8280XP_SLAVE_QDSS_STM,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_smss = {
+       .name = "xs_smss",
+       .id = SC8280XP_SLAVE_SMSS,
+       .channels = 1,
+       .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+       .name = "xs_sys_tcu_cfg",
+       .id = SC8280XP_SLAVE_TCU,
+       .channels = 1,
+       .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_llcc = {
+       .name = "qhs_llcc",
+       .id = SC8280XP_SLAVE_LLCC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_gemnoc = {
+       .name = "qns_gemnoc",
+       .id = SC8280XP_SLAVE_GEM_NOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_GEM_NOC_CFG },
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+       .name = "qns_gem_noc_cnoc",
+       .id = SC8280XP_SLAVE_GEM_NOC_CNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+       .name = "qns_llcc",
+       .id = SC8280XP_SLAVE_LLCC,
+       .channels = 8,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+       .name = "qns_pcie",
+       .id = SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node srvc_even_gemnoc = {
+       .name = "srvc_even_gemnoc",
+       .id = SC8280XP_SLAVE_SERVICE_GEM_NOC_1,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_odd_gemnoc = {
+       .name = "srvc_odd_gemnoc",
+       .id = SC8280XP_SLAVE_SERVICE_GEM_NOC_2,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_sys_gemnoc = {
+       .name = "srvc_sys_gemnoc",
+       .id = SC8280XP_SLAVE_SERVICE_GEM_NOC,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_core = {
+       .name = "qhs_lpass_core",
+       .id = SC8280XP_SLAVE_LPASS_CORE_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_lpi = {
+       .name = "qhs_lpass_lpi",
+       .id = SC8280XP_SLAVE_LPASS_LPI_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_mpu = {
+       .name = "qhs_lpass_mpu",
+       .id = SC8280XP_SLAVE_LPASS_MPU_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_top = {
+       .name = "qhs_lpass_top",
+       .id = SC8280XP_SLAVE_LPASS_TOP_CFG,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_sysnoc = {
+       .name = "qns_sysnoc",
+       .id = SC8280XP_SLAVE_LPASS_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_LPASS_ANOC },
+};
+
+static struct qcom_icc_node srvc_niu_aml_noc = {
+       .name = "srvc_niu_aml_noc",
+       .id = SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_niu_lpass_agnoc = {
+       .name = "srvc_niu_lpass_agnoc",
+       .id = SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node ebi = {
+       .name = "ebi",
+       .id = SC8280XP_SLAVE_EBI1,
+       .channels = 8,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+       .name = "qns_mem_noc_hf",
+       .id = SC8280XP_SLAVE_MNOC_HF_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+       .name = "qns_mem_noc_sf",
+       .id = SC8280XP_SLAVE_MNOC_SF_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+       .name = "srvc_mnoc",
+       .id = SC8280XP_SLAVE_SERVICE_MNOC,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+       .name = "qns_nsp_gemnoc",
+       .id = SC8280XP_SLAVE_CDSP_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qxs_nsp_xfr = {
+       .name = "qxs_nsp_xfr",
+       .id = SC8280XP_SLAVE_NSP_XFR,
+       .channels = 1,
+       .buswidth = 32,
+};
+
+static struct qcom_icc_node service_nsp_noc = {
+       .name = "service_nsp_noc",
+       .id = SC8280XP_SLAVE_SERVICE_NSP_NOC,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_nspb_gemnoc = {
+       .name = "qns_nspb_gemnoc",
+       .id = SC8280XP_SLAVE_CDSPB_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_COMPUTE_NOC_1 },
+};
+
+static struct qcom_icc_node qxs_nspb_xfr = {
+       .name = "qxs_nspb_xfr",
+       .id = SC8280XP_SLAVE_NSPB_XFR,
+       .channels = 1,
+       .buswidth = 32,
+};
+
+static struct qcom_icc_node service_nspb_noc = {
+       .name = "service_nspb_noc",
+       .id = SC8280XP_SLAVE_SERVICE_NSPB_NOC,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+       .name = "qns_gemnoc_gc",
+       .id = SC8280XP_SLAVE_SNOC_GEM_NOC_GC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+       .name = "qns_gemnoc_sf",
+       .id = SC8280XP_SLAVE_SNOC_GEM_NOC_SF,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SC8280XP_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_snoc = {
+       .name = "srvc_snoc",
+       .id = SC8280XP_SLAVE_SERVICE_SNOC,
+       .channels = 1,
+       .buswidth = 4,
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+       .name = "ACV",
+       .num_nodes = 1,
+       .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+       .name = "CE0",
+       .num_nodes = 1,
+       .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+       .name = "CN0",
+       .keepalive = true,
+       .num_nodes = 9,
+       .nodes = { &qnm_gemnoc_cnoc,
+                  &qnm_gemnoc_pcie,
+                  &xs_pcie_0,
+                  &xs_pcie_1,
+                  &xs_pcie_2a,
+                  &xs_pcie_2b,
+                  &xs_pcie_3a,
+                  &xs_pcie_3b,
+                  &xs_pcie_4
+       },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+       .name = "CN1",
+       .num_nodes = 67,
+       .nodes = { &qhs_ahb2phy0,
+                  &qhs_ahb2phy1,
+                  &qhs_ahb2phy2,
+                  &qhs_aoss,
+                  &qhs_apss,
+                  &qhs_camera_cfg,
+                  &qhs_clk_ctl,
+                  &qhs_compute0_cfg,
+                  &qhs_compute1_cfg,
+                  &qhs_cpr_cx,
+                  &qhs_cpr_mmcx,
+                  &qhs_cpr_mx,
+                  &qhs_cpr_nspcx,
+                  &qhs_crypto0_cfg,
+                  &qhs_cx_rdpm,
+                  &qhs_dcc_cfg,
+                  &qhs_display0_cfg,
+                  &qhs_display1_cfg,
+                  &qhs_emac0_cfg,
+                  &qhs_emac1_cfg,
+                  &qhs_gpuss_cfg,
+                  &qhs_hwkm,
+                  &qhs_imem_cfg,
+                  &qhs_ipa,
+                  &qhs_ipc_router,
+                  &qhs_lpass_cfg,
+                  &qhs_mx_rdpm,
+                  &qhs_mxc_rdpm,
+                  &qhs_pcie0_cfg,
+                  &qhs_pcie1_cfg,
+                  &qhs_pcie2a_cfg,
+                  &qhs_pcie2b_cfg,
+                  &qhs_pcie3a_cfg,
+                  &qhs_pcie3b_cfg,
+                  &qhs_pcie4_cfg,
+                  &qhs_pcie_rsc_cfg,
+                  &qhs_pdm,
+                  &qhs_pimem_cfg,
+                  &qhs_pka_wrapper_cfg,
+                  &qhs_pmu_wrapper_cfg,
+                  &qhs_qdss_cfg,
+                  &qhs_sdc2,
+                  &qhs_sdc4,
+                  &qhs_security,
+                  &qhs_smmuv3_cfg,
+                  &qhs_smss_cfg,
+                  &qhs_spss_cfg,
+                  &qhs_tcsr,
+                  &qhs_tlmm,
+                  &qhs_ufs_card_cfg,
+                  &qhs_ufs_mem_cfg,
+                  &qhs_usb3_0,
+                  &qhs_usb3_1,
+                  &qhs_usb3_mp,
+                  &qhs_usb4_host_0,
+                  &qhs_usb4_host_1,
+                  &qhs_venus_cfg,
+                  &qhs_vsense_ctrl_cfg,
+                  &qhs_vsense_ctrl_r_cfg,
+                  &qns_a1_noc_cfg,
+                  &qns_a2_noc_cfg,
+                  &qns_anoc_pcie_bridge_cfg,
+                  &qns_ddrss_cfg,
+                  &qns_mnoc_cfg,
+                  &qns_snoc_cfg,
+                  &qns_snoc_sf_bridge_cfg,
+                  &srvc_cnoc
+       },
+};
+
+static struct qcom_icc_bcm bcm_cn2 = {
+       .name = "CN2",
+       .num_nodes = 4,
+       .nodes = { &qhs_qspi,
+                  &qhs_qup0,
+                  &qhs_qup1,
+                  &qhs_qup2
+       },
+};
+
+static struct qcom_icc_bcm bcm_cn3 = {
+       .name = "CN3",
+       .num_nodes = 3,
+       .nodes = { &qxs_imem,
+                  &xs_smss,
+                  &xs_sys_tcu_cfg
+       },
+};
+
+static struct qcom_icc_bcm bcm_ip0 = {
+       .name = "IP0",
+       .num_nodes = 1,
+       .nodes = { &ipa_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+       .name = "MC0",
+       .keepalive = true,
+       .num_nodes = 1,
+       .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+       .name = "MM0",
+       .keepalive = true,
+       .num_nodes = 5,
+       .nodes = { &qnm_camnoc_hf,
+                  &qnm_mdp0_0,
+                  &qnm_mdp0_1,
+                  &qnm_mdp1_0,
+                  &qns_mem_noc_hf
+       },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+       .name = "MM1",
+       .num_nodes = 8,
+       .nodes = { &qnm_rot_0,
+                  &qnm_rot_1,
+                  &qnm_video0,
+                  &qnm_video1,
+                  &qnm_video_cvp,
+                  &qxm_camnoc_icp,
+                  &qxm_camnoc_sf,
+                  &qns_mem_noc_sf
+       },
+};
+
+static struct qcom_icc_bcm bcm_nsa0 = {
+       .name = "NSA0",
+       .num_nodes = 2,
+       .nodes = { &qns_nsp_gemnoc,
+                  &qxs_nsp_xfr
+       },
+};
+
+static struct qcom_icc_bcm bcm_nsa1 = {
+       .name = "NSA1",
+       .num_nodes = 1,
+       .nodes = { &qxm_nsp },
+};
+
+static struct qcom_icc_bcm bcm_nsb0 = {
+       .name = "NSB0",
+       .num_nodes = 2,
+       .nodes = { &qns_nspb_gemnoc,
+                  &qxs_nspb_xfr
+       },
+};
+
+static struct qcom_icc_bcm bcm_nsb1 = {
+       .name = "NSB1",
+       .num_nodes = 1,
+       .nodes = { &qxm_nspb },
+};
+
+static struct qcom_icc_bcm bcm_pci0 = {
+       .name = "PCI0",
+       .num_nodes = 1,
+       .nodes = { &qns_pcie_gem_noc },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+       .name = "QUP0",
+       .vote_scale = 1,
+       .num_nodes = 1,
+       .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+       .name = "QUP1",
+       .vote_scale = 1,
+       .num_nodes = 1,
+       .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+       .name = "QUP2",
+       .vote_scale = 1,
+       .num_nodes = 1,
+       .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+       .name = "SH0",
+       .keepalive = true,
+       .num_nodes = 1,
+       .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+       .name = "SH2",
+       .num_nodes = 1,
+       .nodes = { &chm_apps },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+       .name = "SN0",
+       .keepalive = true,
+       .num_nodes = 1,
+       .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+       .name = "SN1",
+       .num_nodes = 1,
+       .nodes = { &qns_gemnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+       .name = "SN2",
+       .num_nodes = 1,
+       .nodes = { &qxs_pimem },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+       .name = "SN3",
+       .num_nodes = 2,
+       .nodes = { &qns_a1noc_snoc,
+                  &qnm_aggre1_noc
+       },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+       .name = "SN4",
+       .num_nodes = 2,
+       .nodes = { &qns_a2noc_snoc,
+                  &qnm_aggre2_noc
+       },
+};
+
+static struct qcom_icc_bcm bcm_sn5 = {
+       .name = "SN5",
+       .num_nodes = 2,
+       .nodes = { &qns_aggre_usb_snoc,
+                  &qnm_aggre_usb_noc
+       },
+};
+
+static struct qcom_icc_bcm bcm_sn9 = {
+       .name = "SN9",
+       .num_nodes = 2,
+       .nodes = { &qns_sysnoc,
+                  &qnm_lpass_noc
+       },
+};
+
+static struct qcom_icc_bcm bcm_sn10 = {
+       .name = "SN10",
+       .num_nodes = 1,
+       .nodes = { &xs_qdss_stm },
+};
+
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+       &bcm_sn3,
+       &bcm_sn5,
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+       [MASTER_QSPI_0] = &qhm_qspi,
+       [MASTER_QUP_1] = &qhm_qup1,
+       [MASTER_QUP_2] = &qhm_qup2,
+       [MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
+       [MASTER_IPA] = &qxm_ipa,
+       [MASTER_EMAC_1] = &xm_emac_1,
+       [MASTER_SDCC_4] = &xm_sdc4,
+       [MASTER_UFS_MEM] = &xm_ufs_mem,
+       [MASTER_USB3_0] = &xm_usb3_0,
+       [MASTER_USB3_1] = &xm_usb3_1,
+       [MASTER_USB3_MP] = &xm_usb3_mp,
+       [MASTER_USB4_0] = &xm_usb4_host0,
+       [MASTER_USB4_1] = &xm_usb4_host1,
+       [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+       [SLAVE_USB_NOC_SNOC] = &qns_aggre_usb_snoc,
+       [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static const struct qcom_icc_desc sc8280xp_aggre1_noc = {
+       .nodes = aggre1_noc_nodes,
+       .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+       .bcms = aggre1_noc_bcms,
+       .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
+       &bcm_ce0,
+       &bcm_pci0,
+       &bcm_sn4,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+       [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+       [MASTER_QUP_0] = &qhm_qup0,
+       [MASTER_A2NOC_CFG] = &qnm_a2noc_cfg,
+       [MASTER_CRYPTO] = &qxm_crypto,
+       [MASTER_SENSORS_PROC] = &qxm_sensorss_q6,
+       [MASTER_SP] = &qxm_sp,
+       [MASTER_EMAC] = &xm_emac_0,
+       [MASTER_PCIE_0] = &xm_pcie3_0,
+       [MASTER_PCIE_1] = &xm_pcie3_1,
+       [MASTER_PCIE_2A] = &xm_pcie3_2a,
+       [MASTER_PCIE_2B] = &xm_pcie3_2b,
+       [MASTER_PCIE_3A] = &xm_pcie3_3a,
+       [MASTER_PCIE_3B] = &xm_pcie3_3b,
+       [MASTER_PCIE_4] = &xm_pcie3_4,
+       [MASTER_QDSS_ETR] = &xm_qdss_etr,
+       [MASTER_SDCC_2] = &xm_sdc2,
+       [MASTER_UFS_CARD] = &xm_ufs_card,
+       [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+       [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_gem_noc,
+       [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static const struct qcom_icc_desc sc8280xp_aggre2_noc = {
+       .nodes = aggre2_noc_nodes,
+       .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+       .bcms = aggre2_noc_bcms,
+       .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+       &bcm_ip0,
+       &bcm_qup0,
+       &bcm_qup1,
+       &bcm_qup2,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+       [MASTER_IPA_CORE] = &ipa_core_master,
+       [MASTER_QUP_CORE_0] = &qup0_core_master,
+       [MASTER_QUP_CORE_1] = &qup1_core_master,
+       [MASTER_QUP_CORE_2] = &qup2_core_master,
+       [SLAVE_IPA_CORE] = &ipa_core_slave,
+       [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+       [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+       [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static const struct qcom_icc_desc sc8280xp_clk_virt = {
+       .nodes = clk_virt_nodes,
+       .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+       .bcms = clk_virt_bcms,
+       .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
+       &bcm_cn0,
+       &bcm_cn1,
+       &bcm_cn2,
+       &bcm_cn3,
+       &bcm_sn2,
+       &bcm_sn10,
+};
+
+static struct qcom_icc_node * const config_noc_nodes[] = {
+       [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+       [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+       [SLAVE_AHB2PHY_0] = &qhs_ahb2phy0,
+       [SLAVE_AHB2PHY_1] = &qhs_ahb2phy1,
+       [SLAVE_AHB2PHY_2] = &qhs_ahb2phy2,
+       [SLAVE_AOSS] = &qhs_aoss,
+       [SLAVE_APPSS] = &qhs_apss,
+       [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+       [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+       [SLAVE_CDSP_CFG] = &qhs_compute0_cfg,
+       [SLAVE_CDSP1_CFG] = &qhs_compute1_cfg,
+       [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+       [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
+       [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+       [SLAVE_CPR_NSPCX] = &qhs_cpr_nspcx,
+       [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+       [SLAVE_CX_RDPM] = &qhs_cx_rdpm,
+       [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+       [SLAVE_DISPLAY_CFG] = &qhs_display0_cfg,
+       [SLAVE_DISPLAY1_CFG] = &qhs_display1_cfg,
+       [SLAVE_EMAC_CFG] = &qhs_emac0_cfg,
+       [SLAVE_EMAC1_CFG] = &qhs_emac1_cfg,
+       [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+       [SLAVE_HWKM] = &qhs_hwkm,
+       [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+       [SLAVE_IPA_CFG] = &qhs_ipa,
+       [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+       [SLAVE_LPASS] = &qhs_lpass_cfg,
+       [SLAVE_MX_RDPM] = &qhs_mx_rdpm,
+       [SLAVE_MXC_RDPM] = &qhs_mxc_rdpm,
+       [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+       [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+       [SLAVE_PCIE_2A_CFG] = &qhs_pcie2a_cfg,
+       [SLAVE_PCIE_2B_CFG] = &qhs_pcie2b_cfg,
+       [SLAVE_PCIE_3A_CFG] = &qhs_pcie3a_cfg,
+       [SLAVE_PCIE_3B_CFG] = &qhs_pcie3b_cfg,
+       [SLAVE_PCIE_4_CFG] = &qhs_pcie4_cfg,
+       [SLAVE_PCIE_RSC_CFG] = &qhs_pcie_rsc_cfg,
+       [SLAVE_PDM] = &qhs_pdm,
+       [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+       [SLAVE_PKA_WRAPPER_CFG] = &qhs_pka_wrapper_cfg,
+       [SLAVE_PMU_WRAPPER_CFG] = &qhs_pmu_wrapper_cfg,
+       [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+       [SLAVE_QSPI_0] = &qhs_qspi,
+       [SLAVE_QUP_0] = &qhs_qup0,
+       [SLAVE_QUP_1] = &qhs_qup1,
+       [SLAVE_QUP_2] = &qhs_qup2,
+       [SLAVE_SDCC_2] = &qhs_sdc2,
+       [SLAVE_SDCC_4] = &qhs_sdc4,
+       [SLAVE_SECURITY] = &qhs_security,
+       [SLAVE_SMMUV3_CFG] = &qhs_smmuv3_cfg,
+       [SLAVE_SMSS_CFG] = &qhs_smss_cfg,
+       [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+       [SLAVE_TCSR] = &qhs_tcsr,
+       [SLAVE_TLMM] = &qhs_tlmm,
+       [SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
+       [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+       [SLAVE_USB3_0] = &qhs_usb3_0,
+       [SLAVE_USB3_1] = &qhs_usb3_1,
+       [SLAVE_USB3_MP] = &qhs_usb3_mp,
+       [SLAVE_USB4_0] = &qhs_usb4_host_0,
+       [SLAVE_USB4_1] = &qhs_usb4_host_1,
+       [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+       [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+       [SLAVE_VSENSE_CTRL_R_CFG] = &qhs_vsense_ctrl_r_cfg,
+       [SLAVE_A1NOC_CFG] = &qns_a1_noc_cfg,
+       [SLAVE_A2NOC_CFG] = &qns_a2_noc_cfg,
+       [SLAVE_ANOC_PCIE_BRIDGE_CFG] = &qns_anoc_pcie_bridge_cfg,
+       [SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
+       [SLAVE_CNOC_MNOC_CFG] = &qns_mnoc_cfg,
+       [SLAVE_SNOC_CFG] = &qns_snoc_cfg,
+       [SLAVE_SNOC_SF_BRIDGE_CFG] = &qns_snoc_sf_bridge_cfg,
+       [SLAVE_IMEM] = &qxs_imem,
+       [SLAVE_PIMEM] = &qxs_pimem,
+       [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+       [SLAVE_PCIE_0] = &xs_pcie_0,
+       [SLAVE_PCIE_1] = &xs_pcie_1,
+       [SLAVE_PCIE_2A] = &xs_pcie_2a,
+       [SLAVE_PCIE_2B] = &xs_pcie_2b,
+       [SLAVE_PCIE_3A] = &xs_pcie_3a,
+       [SLAVE_PCIE_3B] = &xs_pcie_3b,
+       [SLAVE_PCIE_4] = &xs_pcie_4,
+       [SLAVE_QDSS_STM] = &xs_qdss_stm,
+       [SLAVE_SMSS] = &xs_smss,
+       [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sc8280xp_config_noc = {
+       .nodes = config_noc_nodes,
+       .num_nodes = ARRAY_SIZE(config_noc_nodes),
+       .bcms = config_noc_bcms,
+       .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const dc_noc_nodes[] = {
+       [MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
+       [SLAVE_LLCC_CFG] = &qhs_llcc,
+       [SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
+};
+
+static const struct qcom_icc_desc sc8280xp_dc_noc = {
+       .nodes = dc_noc_nodes,
+       .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+       .bcms = dc_noc_bcms,
+       .num_bcms = ARRAY_SIZE(dc_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+       &bcm_sh0,
+       &bcm_sh2,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+       [MASTER_GPU_TCU] = &alm_gpu_tcu,
+       [MASTER_PCIE_TCU] = &alm_pcie_tcu,
+       [MASTER_SYS_TCU] = &alm_sys_tcu,
+       [MASTER_APPSS_PROC] = &chm_apps,
+       [MASTER_COMPUTE_NOC] = &qnm_cmpnoc0,
+       [MASTER_COMPUTE_NOC_1] = &qnm_cmpnoc1,
+       [MASTER_GEM_NOC_CFG] = &qnm_gemnoc_cfg,
+       [MASTER_GFX3D] = &qnm_gpu,
+       [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+       [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+       [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+       [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+       [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+       [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+       [SLAVE_LLCC] = &qns_llcc,
+       [SLAVE_GEM_NOC_PCIE_CNOC] = &qns_pcie,
+       [SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc,
+       [SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc,
+       [SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
+};
+
+static const struct qcom_icc_desc sc8280xp_gem_noc = {
+       .nodes = gem_noc_nodes,
+       .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+       .bcms = gem_noc_bcms,
+       .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
+       &bcm_sn9,
+};
+
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
+       [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
+       [MASTER_LPASS_PROC] = &qxm_lpass_dsp,
+       [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
+       [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
+       [SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu,
+       [SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top,
+       [SLAVE_LPASS_SNOC] = &qns_sysnoc,
+       [SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc,
+       [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
+};
+
+static const struct qcom_icc_desc sc8280xp_lpass_ag_noc = {
+       .nodes = lpass_ag_noc_nodes,
+       .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+       .bcms = lpass_ag_noc_bcms,
+       .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+       &bcm_acv,
+       &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+       [MASTER_LLCC] = &llcc_mc,
+       [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc sc8280xp_mc_virt = {
+       .nodes = mc_virt_nodes,
+       .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+       .bcms = mc_virt_bcms,
+       .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+       &bcm_mm0,
+       &bcm_mm1,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+       [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+       [MASTER_MDP0] = &qnm_mdp0_0,
+       [MASTER_MDP1] = &qnm_mdp0_1,
+       [MASTER_MDP_CORE1_0] = &qnm_mdp1_0,
+       [MASTER_MDP_CORE1_1] = &qnm_mdp1_1,
+       [MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg,
+       [MASTER_ROTATOR] = &qnm_rot_0,
+       [MASTER_ROTATOR_1] = &qnm_rot_1,
+       [MASTER_VIDEO_P0] = &qnm_video0,
+       [MASTER_VIDEO_P1] = &qnm_video1,
+       [MASTER_VIDEO_PROC] = &qnm_video_cvp,
+       [MASTER_CAMNOC_ICP] = &qxm_camnoc_icp,
+       [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+       [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+       [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+       [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static const struct qcom_icc_desc sc8280xp_mmss_noc = {
+       .nodes = mmss_noc_nodes,
+       .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+       .bcms = mmss_noc_bcms,
+       .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const nspa_noc_bcms[] = {
+       &bcm_nsa0,
+       &bcm_nsa1,
+};
+
+static struct qcom_icc_node * const nspa_noc_nodes[] = {
+       [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
+       [MASTER_CDSP_PROC] = &qxm_nsp,
+       [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+       [SLAVE_NSP_XFR] = &qxs_nsp_xfr,
+       [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
+};
+
+static const struct qcom_icc_desc sc8280xp_nspa_noc = {
+       .nodes = nspa_noc_nodes,
+       .num_nodes = ARRAY_SIZE(nspa_noc_nodes),
+       .bcms = nspa_noc_bcms,
+       .num_bcms = ARRAY_SIZE(nspa_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const nspb_noc_bcms[] = {
+       &bcm_nsb0,
+       &bcm_nsb1,
+};
+
+static struct qcom_icc_node * const nspb_noc_nodes[] = {
+       [MASTER_CDSPB_NOC_CFG] = &qhm_nspb_noc_config,
+       [MASTER_CDSP_PROC_B] = &qxm_nspb,
+       [SLAVE_CDSPB_MEM_NOC] = &qns_nspb_gemnoc,
+       [SLAVE_NSPB_XFR] = &qxs_nspb_xfr,
+       [SLAVE_SERVICE_NSPB_NOC] = &service_nspb_noc,
+};
+
+static const struct qcom_icc_desc sc8280xp_nspb_noc = {
+       .nodes = nspb_noc_nodes,
+       .num_nodes = ARRAY_SIZE(nspb_noc_nodes),
+       .bcms = nspb_noc_bcms,
+       .num_bcms = ARRAY_SIZE(nspb_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_main_bcms[] = {
+       &bcm_sn0,
+       &bcm_sn1,
+       &bcm_sn3,
+       &bcm_sn4,
+       &bcm_sn5,
+       &bcm_sn9,
+};
+
+static struct qcom_icc_node * const system_noc_main_nodes[] = {
+       [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+       [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+       [MASTER_USB_NOC_SNOC] = &qnm_aggre_usb_noc,
+       [MASTER_LPASS_ANOC] = &qnm_lpass_noc,
+       [MASTER_SNOC_CFG] = &qnm_snoc_cfg,
+       [MASTER_PIMEM] = &qxm_pimem,
+       [MASTER_GIC] = &xm_gic,
+       [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+       [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+       [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+};
+
+static const struct qcom_icc_desc sc8280xp_system_noc_main = {
+       .nodes = system_noc_main_nodes,
+       .num_nodes = ARRAY_SIZE(system_noc_main_nodes),
+       .bcms = system_noc_main_bcms,
+       .num_bcms = ARRAY_SIZE(system_noc_main_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+       { .compatible = "qcom,sc8280xp-aggre1-noc", .data = &sc8280xp_aggre1_noc, },
+       { .compatible = "qcom,sc8280xp-aggre2-noc", .data = &sc8280xp_aggre2_noc, },
+       { .compatible = "qcom,sc8280xp-clk-virt", .data = &sc8280xp_clk_virt, },
+       { .compatible = "qcom,sc8280xp-config-noc", .data = &sc8280xp_config_noc, },
+       { .compatible = "qcom,sc8280xp-dc-noc", .data = &sc8280xp_dc_noc, },
+       { .compatible = "qcom,sc8280xp-gem-noc", .data = &sc8280xp_gem_noc, },
+       { .compatible = "qcom,sc8280xp-lpass-ag-noc", .data = &sc8280xp_lpass_ag_noc, },
+       { .compatible = "qcom,sc8280xp-mc-virt", .data = &sc8280xp_mc_virt, },
+       { .compatible = "qcom,sc8280xp-mmss-noc", .data = &sc8280xp_mmss_noc, },
+       { .compatible = "qcom,sc8280xp-nspa-noc", .data = &sc8280xp_nspa_noc, },
+       { .compatible = "qcom,sc8280xp-nspb-noc", .data = &sc8280xp_nspb_noc, },
+       { .compatible = "qcom,sc8280xp-system-noc", .data = &sc8280xp_system_noc_main, },
+       { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+       .probe = qcom_icc_rpmh_probe,
+       .remove = qcom_icc_rpmh_remove,
+       .driver = {
+               .name = "qnoc-sc8280xp",
+               .of_match_table = qnoc_of_match,
+               .sync_state = icc_sync_state,
+       },
+};
+
+static int __init qnoc_driver_init(void)
+{
+       return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+       platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("Qualcomm SC8280XP NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/sc8280xp.h b/drivers/interconnect/qcom/sc8280xp.h
new file mode 100644 (file)
index 0000000..74d8fa4
--- /dev/null
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SC8280XP_H
+#define __DRIVERS_INTERCONNECT_QCOM_SC8280XP_H
+
+#define SC8280XP_MASTER_GPU_TCU                                0
+#define SC8280XP_MASTER_PCIE_TCU                       1
+#define SC8280XP_MASTER_SYS_TCU                                2
+#define SC8280XP_MASTER_APPSS_PROC                     3
+#define SC8280XP_MASTER_IPA_CORE                       4
+#define SC8280XP_MASTER_LLCC                           5
+#define SC8280XP_MASTER_CNOC_LPASS_AG_NOC              6
+#define SC8280XP_MASTER_CDSP_NOC_CFG                   7
+#define SC8280XP_MASTER_CDSPB_NOC_CFG                  8
+#define SC8280XP_MASTER_QDSS_BAM                       9
+#define SC8280XP_MASTER_QSPI_0                         10
+#define SC8280XP_MASTER_QUP_0                          11
+#define SC8280XP_MASTER_QUP_1                          12
+#define SC8280XP_MASTER_QUP_2                          13
+#define SC8280XP_MASTER_A1NOC_CFG                      14
+#define SC8280XP_MASTER_A2NOC_CFG                      15
+#define SC8280XP_MASTER_A1NOC_SNOC                     16
+#define SC8280XP_MASTER_A2NOC_SNOC                     17
+#define SC8280XP_MASTER_USB_NOC_SNOC                   18
+#define SC8280XP_MASTER_CAMNOC_HF                      19
+#define SC8280XP_MASTER_COMPUTE_NOC                    20
+#define SC8280XP_MASTER_COMPUTE_NOC_1                  21
+#define SC8280XP_MASTER_CNOC_DC_NOC                    22
+#define SC8280XP_MASTER_GEM_NOC_CFG                    23
+#define SC8280XP_MASTER_GEM_NOC_CNOC                   24
+#define SC8280XP_MASTER_GEM_NOC_PCIE_SNOC              25
+#define SC8280XP_MASTER_GFX3D                          26
+#define SC8280XP_MASTER_LPASS_ANOC                     27
+#define SC8280XP_MASTER_MDP0                           28
+#define SC8280XP_MASTER_MDP1                           29
+#define SC8280XP_MASTER_MDP_CORE1_0                    30
+#define SC8280XP_MASTER_MDP_CORE1_1                    31
+#define SC8280XP_MASTER_CNOC_MNOC_CFG                  32
+#define SC8280XP_MASTER_MNOC_HF_MEM_NOC                        33
+#define SC8280XP_MASTER_MNOC_SF_MEM_NOC                        34
+#define SC8280XP_MASTER_ANOC_PCIE_GEM_NOC              35
+#define SC8280XP_MASTER_ROTATOR                                36
+#define SC8280XP_MASTER_ROTATOR_1                      37
+#define SC8280XP_MASTER_SNOC_CFG                       38
+#define SC8280XP_MASTER_SNOC_GC_MEM_NOC                        39
+#define SC8280XP_MASTER_SNOC_SF_MEM_NOC                        40
+#define SC8280XP_MASTER_VIDEO_P0                       41
+#define SC8280XP_MASTER_VIDEO_P1                       42
+#define SC8280XP_MASTER_VIDEO_PROC                     43
+#define SC8280XP_MASTER_QUP_CORE_0                     44
+#define SC8280XP_MASTER_QUP_CORE_1                     45
+#define SC8280XP_MASTER_QUP_CORE_2                     46
+#define SC8280XP_MASTER_CAMNOC_ICP                     47
+#define SC8280XP_MASTER_CAMNOC_SF                      48
+#define SC8280XP_MASTER_CRYPTO                         49
+#define SC8280XP_MASTER_IPA                            50
+#define SC8280XP_MASTER_LPASS_PROC                     51
+#define SC8280XP_MASTER_CDSP_PROC                      52
+#define SC8280XP_MASTER_CDSP_PROC_B                    53
+#define SC8280XP_MASTER_PIMEM                          54
+#define SC8280XP_MASTER_SENSORS_PROC                   55
+#define SC8280XP_MASTER_SP                             56
+#define SC8280XP_MASTER_EMAC                           57
+#define SC8280XP_MASTER_EMAC_1                         58
+#define SC8280XP_MASTER_GIC                            59
+#define SC8280XP_MASTER_PCIE_0                         60
+#define SC8280XP_MASTER_PCIE_1                         61
+#define SC8280XP_MASTER_PCIE_2A                                62
+#define SC8280XP_MASTER_PCIE_2B                                63
+#define SC8280XP_MASTER_PCIE_3A                                64
+#define SC8280XP_MASTER_PCIE_3B                                65
+#define SC8280XP_MASTER_PCIE_4                         66
+#define SC8280XP_MASTER_QDSS_ETR                       67
+#define SC8280XP_MASTER_SDCC_2                         68
+#define SC8280XP_MASTER_SDCC_4                         69
+#define SC8280XP_MASTER_UFS_CARD                       70
+#define SC8280XP_MASTER_UFS_MEM                                71
+#define SC8280XP_MASTER_USB3_0                         72
+#define SC8280XP_MASTER_USB3_1                         73
+#define SC8280XP_MASTER_USB3_MP                                74
+#define SC8280XP_MASTER_USB4_0                         75
+#define SC8280XP_MASTER_USB4_1                         76
+#define SC8280XP_SLAVE_EBI1                            512
+#define SC8280XP_SLAVE_IPA_CORE                                513
+#define SC8280XP_SLAVE_AHB2PHY_0                       514
+#define SC8280XP_SLAVE_AHB2PHY_1                       515
+#define SC8280XP_SLAVE_AHB2PHY_2                       516
+#define SC8280XP_SLAVE_AOSS                            517
+#define SC8280XP_SLAVE_APPSS                           518
+#define SC8280XP_SLAVE_CAMERA_CFG                      519
+#define SC8280XP_SLAVE_CLK_CTL                         520
+#define SC8280XP_SLAVE_CDSP_CFG                                521
+#define SC8280XP_SLAVE_CDSP1_CFG                       522
+#define SC8280XP_SLAVE_RBCPR_CX_CFG                    523
+#define SC8280XP_SLAVE_RBCPR_MMCX_CFG                  524
+#define SC8280XP_SLAVE_RBCPR_MX_CFG                    525
+#define SC8280XP_SLAVE_CPR_NSPCX                       526
+#define SC8280XP_SLAVE_CRYPTO_0_CFG                    527
+#define SC8280XP_SLAVE_CX_RDPM                         528
+#define SC8280XP_SLAVE_DCC_CFG                         529
+#define SC8280XP_SLAVE_DISPLAY_CFG                     530
+#define SC8280XP_SLAVE_DISPLAY1_CFG                    531
+#define SC8280XP_SLAVE_EMAC_CFG                                532
+#define SC8280XP_SLAVE_EMAC1_CFG                       533
+#define SC8280XP_SLAVE_GFX3D_CFG                       534
+#define SC8280XP_SLAVE_HWKM                            535
+#define SC8280XP_SLAVE_IMEM_CFG                                536
+#define SC8280XP_SLAVE_IPA_CFG                         537
+#define SC8280XP_SLAVE_IPC_ROUTER_CFG                  538
+#define SC8280XP_SLAVE_LLCC_CFG                                539
+#define SC8280XP_SLAVE_LPASS                           540
+#define SC8280XP_SLAVE_LPASS_CORE_CFG                  541
+#define SC8280XP_SLAVE_LPASS_LPI_CFG                   542
+#define SC8280XP_SLAVE_LPASS_MPU_CFG                   543
+#define SC8280XP_SLAVE_LPASS_TOP_CFG                   544
+#define SC8280XP_SLAVE_MX_RDPM                         545
+#define SC8280XP_SLAVE_MXC_RDPM                                546
+#define SC8280XP_SLAVE_PCIE_0_CFG                      547
+#define SC8280XP_SLAVE_PCIE_1_CFG                      548
+#define SC8280XP_SLAVE_PCIE_2A_CFG                     549
+#define SC8280XP_SLAVE_PCIE_2B_CFG                     550
+#define SC8280XP_SLAVE_PCIE_3A_CFG                     551
+#define SC8280XP_SLAVE_PCIE_3B_CFG                     552
+#define SC8280XP_SLAVE_PCIE_4_CFG                      553
+#define SC8280XP_SLAVE_PCIE_RSC_CFG                    554
+#define SC8280XP_SLAVE_PDM                             555
+#define SC8280XP_SLAVE_PIMEM_CFG                       556
+#define SC8280XP_SLAVE_PKA_WRAPPER_CFG                 557
+#define SC8280XP_SLAVE_PMU_WRAPPER_CFG                 558
+#define SC8280XP_SLAVE_QDSS_CFG                                559
+#define SC8280XP_SLAVE_QSPI_0                          560
+#define SC8280XP_SLAVE_QUP_0                           561
+#define SC8280XP_SLAVE_QUP_1                           562
+#define SC8280XP_SLAVE_QUP_2                           563
+#define SC8280XP_SLAVE_SDCC_2                          564
+#define SC8280XP_SLAVE_SDCC_4                          565
+#define SC8280XP_SLAVE_SECURITY                                566
+#define SC8280XP_SLAVE_SMMUV3_CFG                      567
+#define SC8280XP_SLAVE_SMSS_CFG                                568
+#define SC8280XP_SLAVE_SPSS_CFG                                569
+#define SC8280XP_SLAVE_TCSR                            570
+#define SC8280XP_SLAVE_TLMM                            571
+#define SC8280XP_SLAVE_UFS_CARD_CFG                    572
+#define SC8280XP_SLAVE_UFS_MEM_CFG                     573
+#define SC8280XP_SLAVE_USB3_0                          574
+#define SC8280XP_SLAVE_USB3_1                          575
+#define SC8280XP_SLAVE_USB3_MP                         576
+#define SC8280XP_SLAVE_USB4_0                          577
+#define SC8280XP_SLAVE_USB4_1                          578
+#define SC8280XP_SLAVE_VENUS_CFG                       579
+#define SC8280XP_SLAVE_VSENSE_CTRL_CFG                 580
+#define SC8280XP_SLAVE_VSENSE_CTRL_R_CFG               581
+#define SC8280XP_SLAVE_A1NOC_CFG                       582
+#define SC8280XP_SLAVE_A1NOC_SNOC                      583
+#define SC8280XP_SLAVE_A2NOC_CFG                       584
+#define SC8280XP_SLAVE_A2NOC_SNOC                      585
+#define SC8280XP_SLAVE_USB_NOC_SNOC                    586
+#define SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG            587
+#define SC8280XP_SLAVE_DDRSS_CFG                       588
+#define SC8280XP_SLAVE_GEM_NOC_CNOC                    589
+#define SC8280XP_SLAVE_GEM_NOC_CFG                     590
+#define SC8280XP_SLAVE_SNOC_GEM_NOC_GC                 591
+#define SC8280XP_SLAVE_SNOC_GEM_NOC_SF                 592
+#define SC8280XP_SLAVE_LLCC                            593
+#define SC8280XP_SLAVE_MNOC_HF_MEM_NOC                 594
+#define SC8280XP_SLAVE_MNOC_SF_MEM_NOC                 595
+#define SC8280XP_SLAVE_CNOC_MNOC_CFG                   596
+#define SC8280XP_SLAVE_CDSP_MEM_NOC                    597
+#define SC8280XP_SLAVE_CDSPB_MEM_NOC                   598
+#define SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC               599
+#define SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC               600
+#define SC8280XP_SLAVE_SNOC_CFG                                601
+#define SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG              602
+#define SC8280XP_SLAVE_LPASS_SNOC                      603
+#define SC8280XP_SLAVE_QUP_CORE_0                      604
+#define SC8280XP_SLAVE_QUP_CORE_1                      605
+#define SC8280XP_SLAVE_QUP_CORE_2                      606
+#define SC8280XP_SLAVE_IMEM                            607
+#define SC8280XP_SLAVE_NSP_XFR                         608
+#define SC8280XP_SLAVE_NSPB_XFR                                609
+#define SC8280XP_SLAVE_PIMEM                           610
+#define SC8280XP_SLAVE_SERVICE_NSP_NOC                 611
+#define SC8280XP_SLAVE_SERVICE_NSPB_NOC                        612
+#define SC8280XP_SLAVE_SERVICE_A1NOC                   613
+#define SC8280XP_SLAVE_SERVICE_A2NOC                   614
+#define SC8280XP_SLAVE_SERVICE_CNOC                    615
+#define SC8280XP_SLAVE_SERVICE_GEM_NOC_1               616
+#define SC8280XP_SLAVE_SERVICE_MNOC                    617
+#define SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC          618
+#define SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC            619
+#define SC8280XP_SLAVE_SERVICE_GEM_NOC_2               620
+#define SC8280XP_SLAVE_SERVICE_SNOC                    621
+#define SC8280XP_SLAVE_SERVICE_GEM_NOC                 622
+#define SC8280XP_SLAVE_PCIE_0                          623
+#define SC8280XP_SLAVE_PCIE_1                          624
+#define SC8280XP_SLAVE_PCIE_2A                         625
+#define SC8280XP_SLAVE_PCIE_2B                         626
+#define SC8280XP_SLAVE_PCIE_3A                         627
+#define SC8280XP_SLAVE_PCIE_3B                         628
+#define SC8280XP_SLAVE_PCIE_4                          629
+#define SC8280XP_SLAVE_QDSS_STM                                630
+#define SC8280XP_SLAVE_SMSS                            631
+#define SC8280XP_SLAVE_TCU                             632
+
+#endif
+
diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c
index 274a713..8d879b0 100644 (file)
@@ -1490,7 +1490,7 @@ static struct qcom_icc_node slv_srvc_snoc = {
        .slv_rpm_id = 29,
 };
 
-static struct qcom_icc_node *sdm660_a2noc_nodes[] = {
+static struct qcom_icc_node * const sdm660_a2noc_nodes[] = {
        [MASTER_IPA] = &mas_ipa,
        [MASTER_CNOC_A2NOC] = &mas_cnoc_a2noc,
        [MASTER_SDCC_1] = &mas_sdcc_1,
@@ -1512,7 +1512,7 @@ static const struct regmap_config sdm660_a2noc_regmap_config = {
        .fast_io        = true,
 };
 
-static struct qcom_icc_desc sdm660_a2noc = {
+static const struct qcom_icc_desc sdm660_a2noc = {
        .type = QCOM_ICC_NOC,
        .nodes = sdm660_a2noc_nodes,
        .num_nodes = ARRAY_SIZE(sdm660_a2noc_nodes),
@@ -1521,7 +1521,7 @@ static struct qcom_icc_desc sdm660_a2noc = {
        .regmap_cfg = &sdm660_a2noc_regmap_config,
 };
 
-static struct qcom_icc_node *sdm660_bimc_nodes[] = {
+static struct qcom_icc_node * const sdm660_bimc_nodes[] = {
        [MASTER_GNOC_BIMC] = &mas_gnoc_bimc,
        [MASTER_OXILI] = &mas_oxili,
        [MASTER_MNOC_BIMC] = &mas_mnoc_bimc,
@@ -1540,14 +1540,14 @@ static const struct regmap_config sdm660_bimc_regmap_config = {
        .fast_io        = true,
 };
 
-static struct qcom_icc_desc sdm660_bimc = {
+static const struct qcom_icc_desc sdm660_bimc = {
        .type = QCOM_ICC_BIMC,
        .nodes = sdm660_bimc_nodes,
        .num_nodes = ARRAY_SIZE(sdm660_bimc_nodes),
        .regmap_cfg = &sdm660_bimc_regmap_config,
 };
 
-static struct qcom_icc_node *sdm660_cnoc_nodes[] = {
+static struct qcom_icc_node * const sdm660_cnoc_nodes[] = {
        [MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
        [MASTER_QDSS_DAP] = &mas_qdss_dap,
        [SLAVE_CNOC_A2NOC] = &slv_cnoc_a2noc,
@@ -1594,14 +1594,14 @@ static const struct regmap_config sdm660_cnoc_regmap_config = {
        .fast_io        = true,
 };
 
-static struct qcom_icc_desc sdm660_cnoc = {
+static const struct qcom_icc_desc sdm660_cnoc = {
        .type = QCOM_ICC_NOC,
        .nodes = sdm660_cnoc_nodes,
        .num_nodes = ARRAY_SIZE(sdm660_cnoc_nodes),
        .regmap_cfg = &sdm660_cnoc_regmap_config,
 };
 
-static struct qcom_icc_node *sdm660_gnoc_nodes[] = {
+static struct qcom_icc_node * const sdm660_gnoc_nodes[] = {
        [MASTER_APSS_PROC] = &mas_apss_proc,
        [SLAVE_GNOC_BIMC] = &slv_gnoc_bimc,
        [SLAVE_GNOC_SNOC] = &slv_gnoc_snoc,
@@ -1615,14 +1615,14 @@ static const struct regmap_config sdm660_gnoc_regmap_config = {
        .fast_io        = true,
 };
 
-static struct qcom_icc_desc sdm660_gnoc = {
+static const struct qcom_icc_desc sdm660_gnoc = {
        .type = QCOM_ICC_NOC,
        .nodes = sdm660_gnoc_nodes,
        .num_nodes = ARRAY_SIZE(sdm660_gnoc_nodes),
        .regmap_cfg = &sdm660_gnoc_regmap_config,
 };
 
-static struct qcom_icc_node *sdm660_mnoc_nodes[] = {
+static struct qcom_icc_node * const sdm660_mnoc_nodes[] = {
        [MASTER_CPP] = &mas_cpp,
        [MASTER_JPEG] = &mas_jpeg,
        [MASTER_MDP_P0] = &mas_mdp_p0,
@@ -1655,7 +1655,7 @@ static const struct regmap_config sdm660_mnoc_regmap_config = {
        .fast_io        = true,
 };
 
-static struct qcom_icc_desc sdm660_mnoc = {
+static const struct qcom_icc_desc sdm660_mnoc = {
        .type = QCOM_ICC_NOC,
        .nodes = sdm660_mnoc_nodes,
        .num_nodes = ARRAY_SIZE(sdm660_mnoc_nodes),
@@ -1664,7 +1664,7 @@ static struct qcom_icc_desc sdm660_mnoc = {
        .regmap_cfg = &sdm660_mnoc_regmap_config,
 };
 
-static struct qcom_icc_node *sdm660_snoc_nodes[] = {
+static struct qcom_icc_node * const sdm660_snoc_nodes[] = {
        [MASTER_QDSS_ETR] = &mas_qdss_etr,
        [MASTER_QDSS_BAM] = &mas_qdss_bam,
        [MASTER_SNOC_CFG] = &mas_snoc_cfg,
@@ -1692,7 +1692,7 @@ static const struct regmap_config sdm660_snoc_regmap_config = {
        .fast_io        = true,
 };
 
-static struct qcom_icc_desc sdm660_snoc = {
+static const struct qcom_icc_desc sdm660_snoc = {
        .type = QCOM_ICC_NOC,
        .nodes = sdm660_snoc_nodes,
        .num_nodes = ARRAY_SIZE(sdm660_snoc_nodes),
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index d219507..954e7bd 100644 (file)
@@ -175,12 +175,12 @@ DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gladiator_sodv, &xm_gic);
 DEFINE_QBCM(bcm_sn14, "SN14", false, &qnm_pcie_anoc);
 DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_memnoc);
 
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
        &bcm_sn9,
        &bcm_qup0,
 };
 
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
        [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
        [MASTER_TSIF] = &qhm_tsif,
        [MASTER_SDCC_2] = &xm_sdc2,
@@ -201,13 +201,13 @@ static const struct qcom_icc_desc sdm845_aggre1_noc = {
        .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
 };
 
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
        &bcm_ce0,
        &bcm_sn11,
        &bcm_qup0,
 };
 
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
        [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
        [MASTER_QDSS_BAM] = &qhm_qdss_bam,
        [MASTER_CNOC_A2NOC] = &qnm_cnoc,
@@ -230,11 +230,11 @@ static const struct qcom_icc_desc sdm845_aggre2_noc = {
        .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
 };
 
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
        &bcm_cn0,
 };
 
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
        [MASTER_SPDM] = &qhm_spdm,
        [MASTER_TIC] = &qhm_tic,
        [MASTER_SNOC_CNOC] = &qnm_snoc,
@@ -291,10 +291,10 @@ static const struct qcom_icc_desc sdm845_config_noc = {
        .num_bcms = ARRAY_SIZE(config_noc_bcms),
 };
 
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
 };
 
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
        [MASTER_CNOC_DC_NOC] = &qhm_cnoc,
        [SLAVE_LLCC_CFG] = &qhs_llcc,
        [SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
@@ -307,10 +307,10 @@ static const struct qcom_icc_desc sdm845_dc_noc = {
        .num_bcms = ARRAY_SIZE(dc_noc_bcms),
 };
 
-static struct qcom_icc_bcm *gladiator_noc_bcms[] = {
+static struct qcom_icc_bcm * const gladiator_noc_bcms[] = {
 };
 
-static struct qcom_icc_node *gladiator_noc_nodes[] = {
+static struct qcom_icc_node * const gladiator_noc_nodes[] = {
        [MASTER_APPSS_PROC] = &acm_l3,
        [MASTER_GNOC_CFG] = &pm_gnoc_cfg,
        [SLAVE_GNOC_SNOC] = &qns_gladiator_sodv,
@@ -325,7 +325,7 @@ static const struct qcom_icc_desc sdm845_gladiator_noc = {
        .num_bcms = ARRAY_SIZE(gladiator_noc_bcms),
 };
 
-static struct qcom_icc_bcm *mem_noc_bcms[] = {
+static struct qcom_icc_bcm * const mem_noc_bcms[] = {
        &bcm_mc0,
        &bcm_acv,
        &bcm_sh0,
@@ -335,7 +335,7 @@ static struct qcom_icc_bcm *mem_noc_bcms[] = {
        &bcm_sh5,
 };
 
-static struct qcom_icc_node *mem_noc_nodes[] = {
+static struct qcom_icc_node * const mem_noc_nodes[] = {
        [MASTER_TCU_0] = &acm_tcu,
        [MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
        [MASTER_GNOC_MEM_NOC] = &qnm_apps,
@@ -360,14 +360,14 @@ static const struct qcom_icc_desc sdm845_mem_noc = {
        .num_bcms = ARRAY_SIZE(mem_noc_bcms),
 };
 
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
        &bcm_mm0,
        &bcm_mm1,
        &bcm_mm2,
        &bcm_mm3,
 };
 
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
        [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
        [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
        [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
@@ -394,7 +394,7 @@ static const struct qcom_icc_desc sdm845_mmss_noc = {
        .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
 };
 
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
        &bcm_sn0,
        &bcm_sn1,
        &bcm_sn2,
@@ -411,7 +411,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
        &bcm_sn15,
 };
 
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
        [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
        [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
        [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
diff --git a/drivers/interconnect/qcom/sdx55.c b/drivers/interconnect/qcom/sdx55.c
index e3ac25a..130a828 100644 (file)
@@ -99,11 +99,11 @@ DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_memnoc);
 DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_memnoc_pcie);
 DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_ipa, &xm_ipa2pcie_slv);
 
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
        &bcm_mc0,
 };
 
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
        [MASTER_LLCC] = &llcc_mc,
        [SLAVE_EBI_CH0] = &ebi,
 };
@@ -115,13 +115,13 @@ static const struct qcom_icc_desc sdx55_mc_virt = {
        .num_bcms = ARRAY_SIZE(mc_virt_bcms),
 };
 
-static struct qcom_icc_bcm *mem_noc_bcms[] = {
+static struct qcom_icc_bcm * const mem_noc_bcms[] = {
        &bcm_sh0,
        &bcm_sh3,
        &bcm_sh4,
 };
 
-static struct qcom_icc_node *mem_noc_nodes[] = {
+static struct qcom_icc_node * const mem_noc_nodes[] = {
        [MASTER_TCU_0] = &acm_tcu,
        [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
        [MASTER_AMPSS_M0] = &xm_apps_rdwr,
@@ -137,7 +137,7 @@ static const struct qcom_icc_desc sdx55_mem_noc = {
        .num_bcms = ARRAY_SIZE(mem_noc_bcms),
 };
 
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
        &bcm_ce0,
        &bcm_pn0,
        &bcm_pn1,
@@ -156,7 +156,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
        &bcm_sn11,
 };
 
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
        [MASTER_AUDIO] = &qhm_audio,
        [MASTER_BLSP_1] = &qhm_blsp1,
        [MASTER_QDSS_BAM] = &qhm_qdss_bam,
diff --git a/drivers/interconnect/qcom/sdx65.c b/drivers/interconnect/qcom/sdx65.c
new file mode 100644 (file)
index 0000000..b16d31d
--- /dev/null
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sdx65.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sdx65.h"
+
+DEFINE_QNODE(llcc_mc, SDX65_MASTER_LLCC, 1, 4, SDX65_SLAVE_EBI1);
+DEFINE_QNODE(acm_tcu, SDX65_MASTER_TCU_0, 1, 8, SDX65_SLAVE_LLCC, SDX65_SLAVE_MEM_NOC_SNOC, SDX65_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qnm_snoc_gc, SDX65_MASTER_SNOC_GC_MEM_NOC, 1, 16, SDX65_SLAVE_LLCC);
+DEFINE_QNODE(xm_apps_rdwr, SDX65_MASTER_APPSS_PROC, 1, 16, SDX65_SLAVE_LLCC, SDX65_SLAVE_MEM_NOC_SNOC, SDX65_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qhm_audio, SDX65_MASTER_AUDIO, 1, 4, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qhm_blsp1, SDX65_MASTER_BLSP_1, 1, 4, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qhm_qdss_bam, SDX65_MASTER_QDSS_BAM, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_TCU);
+DEFINE_QNODE(qhm_qpic, SDX65_MASTER_QPIC, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qhm_snoc_cfg, SDX65_MASTER_SNOC_CFG, 1, 4, SDX65_SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qhm_spmi_fetcher1, SDX65_MASTER_SPMI_FETCHER, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qnm_aggre_noc, SDX65_MASTER_ANOC_SNOC, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_APPSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_PCIE_0, SDX65_SLAVE_QDSS_STM, SDX65_SLAVE_TCU);
+DEFINE_QNODE(qnm_ipa, SDX65_MASTER_IPA, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_PCIE_0, SDX65_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_memnoc, SDX65_MASTER_MEM_NOC_SNOC, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_APPSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_IMEM, SDX65_SLAVE_QDSS_STM, SDX65_SLAVE_TCU);
+DEFINE_QNODE(qnm_memnoc_pcie, SDX65_MASTER_MEM_NOC_PCIE_SNOC, 1, 8, SDX65_SLAVE_PCIE_0);
+DEFINE_QNODE(qxm_crypto, SDX65_MASTER_CRYPTO, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(xm_ipa2pcie_slv, SDX65_MASTER_IPA_PCIE, 1, 8, SDX65_SLAVE_PCIE_0);
+DEFINE_QNODE(xm_pcie, SDX65_MASTER_PCIE_0, 1, 8, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(xm_qdss_etr, SDX65_MASTER_QDSS_ETR, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_TCU);
+DEFINE_QNODE(xm_sdc1, SDX65_MASTER_SDCC_1, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(xm_usb3, SDX65_MASTER_USB3, 1, 8, SDX65_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(ebi, SDX65_SLAVE_EBI1, 1, 4);
+DEFINE_QNODE(qns_llcc, SDX65_SLAVE_LLCC, 1, 16, SDX65_MASTER_LLCC);
+DEFINE_QNODE(qns_memnoc_snoc, SDX65_SLAVE_MEM_NOC_SNOC, 1, 8, SDX65_MASTER_MEM_NOC_SNOC);
+DEFINE_QNODE(qns_sys_pcie, SDX65_SLAVE_MEM_NOC_PCIE_SNOC, 1, 8, SDX65_MASTER_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qhs_aoss, SDX65_SLAVE_AOSS, 1, 4);
+DEFINE_QNODE(qhs_apss, SDX65_SLAVE_APPSS, 1, 4);
+DEFINE_QNODE(qhs_audio, SDX65_SLAVE_AUDIO, 1, 4);
+DEFINE_QNODE(qhs_blsp1, SDX65_SLAVE_BLSP_1, 1, 4);
+DEFINE_QNODE(qhs_clk_ctl, SDX65_SLAVE_CLK_CTL, 1, 4);
+DEFINE_QNODE(qhs_crypto0_cfg, SDX65_SLAVE_CRYPTO_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_ddrss_cfg, SDX65_SLAVE_CNOC_DDRSS, 1, 4);
+DEFINE_QNODE(qhs_ecc_cfg, SDX65_SLAVE_ECC_CFG, 1, 4);
+DEFINE_QNODE(qhs_imem_cfg, SDX65_SLAVE_IMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipa, SDX65_SLAVE_IPA_CFG, 1, 4);
+DEFINE_QNODE(qhs_mss_cfg, SDX65_SLAVE_CNOC_MSS, 1, 4);
+DEFINE_QNODE(qhs_pcie_parf, SDX65_SLAVE_PCIE_PARF, 1, 4);
+DEFINE_QNODE(qhs_pdm, SDX65_SLAVE_PDM, 1, 4);
+DEFINE_QNODE(qhs_prng, SDX65_SLAVE_PRNG, 1, 4);
+DEFINE_QNODE(qhs_qdss_cfg, SDX65_SLAVE_QDSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_qpic, SDX65_SLAVE_QPIC, 1, 4);
+DEFINE_QNODE(qhs_sdc1, SDX65_SLAVE_SDCC_1, 1, 4);
+DEFINE_QNODE(qhs_snoc_cfg, SDX65_SLAVE_SNOC_CFG, 1, 4, SDX65_MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_spmi_fetcher, SDX65_SLAVE_SPMI_FETCHER, 1, 4);
+DEFINE_QNODE(qhs_spmi_vgi_coex, SDX65_SLAVE_SPMI_VGI_COEX, 1, 4);
+DEFINE_QNODE(qhs_tcsr, SDX65_SLAVE_TCSR, 1, 4);
+DEFINE_QNODE(qhs_tlmm, SDX65_SLAVE_TLMM, 1, 4);
+DEFINE_QNODE(qhs_usb3, SDX65_SLAVE_USB3, 1, 4);
+DEFINE_QNODE(qhs_usb3_phy, SDX65_SLAVE_USB3_PHY_CFG, 1, 4);
+DEFINE_QNODE(qns_aggre_noc, SDX65_SLAVE_ANOC_SNOC, 1, 8, SDX65_MASTER_ANOC_SNOC);
+DEFINE_QNODE(qns_snoc_memnoc, SDX65_SLAVE_SNOC_MEM_NOC_GC, 1, 16, SDX65_MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SDX65_SLAVE_IMEM, 1, 8);
+DEFINE_QNODE(srvc_snoc, SDX65_SLAVE_SERVICE_SNOC, 1, 4);
+DEFINE_QNODE(xs_pcie, SDX65_SLAVE_PCIE_0, 1, 8);
+DEFINE_QNODE(xs_qdss_stm, SDX65_SLAVE_QDSS_STM, 1, 4);
+DEFINE_QNODE(xs_sys_tcu_cfg, SDX65_SLAVE_TCU, 1, 8);
+
+DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+DEFINE_QBCM(bcm_pn0, "PN0", true, &qhm_snoc_cfg, &qhs_aoss, &qhs_apss, &qhs_audio, &qhs_blsp1, &qhs_clk_ctl, &qhs_crypto0_cfg, &qhs_ddrss_cfg, &qhs_ecc_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mss_cfg, &qhs_pcie_parf, &qhs_pdm, &qhs_prng, &qhs_qdss_cfg, &qhs_qpic, &qhs_sdc1, &qhs_snoc_cfg, &qhs_spmi_fetcher, &qhs_spmi_vgi_coex, &qhs_tcsr, &qhs_tlmm, &qhs_usb3, &qhs_usb3_phy, &srvc_snoc);
+DEFINE_QBCM(bcm_pn1, "PN1", false, &xm_sdc1);
+DEFINE_QBCM(bcm_pn2, "PN2", false, &qhm_audio, &qhm_spmi_fetcher1);
+DEFINE_QBCM(bcm_pn3, "PN3", false, &qhm_blsp1, &qhm_qpic);
+DEFINE_QBCM(bcm_pn4, "PN4", false, &qxm_crypto);
+DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_memnoc_snoc);
+DEFINE_QBCM(bcm_sh3, "SH3", false, &xm_apps_rdwr);
+DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_snoc_memnoc);
+DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+DEFINE_QBCM(bcm_sn2, "SN2", false, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn3, "SN3", false, &xs_sys_tcu_cfg);
+DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_pcie);
+DEFINE_QBCM(bcm_sn6, "SN6", false, &qhm_qdss_bam, &xm_qdss_etr);
+DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre_noc, &xm_pcie, &xm_usb3, &qns_aggre_noc);
+DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_memnoc);
+DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_memnoc_pcie);
+DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_ipa, &xm_ipa2pcie_slv);
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+       &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+       [MASTER_LLCC] = &llcc_mc,
+       [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc sdx65_mc_virt = {
+       .nodes = mc_virt_nodes,
+       .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+       .bcms = mc_virt_bcms,
+       .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mem_noc_bcms[] = {
+       &bcm_sh0,
+       &bcm_sh1,
+       &bcm_sh3,
+};
+
+static struct qcom_icc_node * const mem_noc_nodes[] = {
+       [MASTER_TCU_0] = &acm_tcu,
+       [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+       [MASTER_APPSS_PROC] = &xm_apps_rdwr,
+       [SLAVE_LLCC] = &qns_llcc,
+       [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
+       [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_sys_pcie,
+};
+
+static const struct qcom_icc_desc sdx65_mem_noc = {
+       .nodes = mem_noc_nodes,
+       .num_nodes = ARRAY_SIZE(mem_noc_nodes),
+       .bcms = mem_noc_bcms,
+       .num_bcms = ARRAY_SIZE(mem_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+       &bcm_ce0,
+       &bcm_pn0,
+       &bcm_pn1,
+       &bcm_pn2,
+       &bcm_pn3,
+       &bcm_pn4,
+       &bcm_sn0,
+       &bcm_sn1,
+       &bcm_sn2,
+       &bcm_sn3,
+       &bcm_sn5,
+       &bcm_sn6,
+       &bcm_sn7,
+       &bcm_sn8,
+       &bcm_sn9,
+       &bcm_sn10,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+       [MASTER_AUDIO] = &qhm_audio,
+       [MASTER_BLSP_1] = &qhm_blsp1,
+       [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+       [MASTER_QPIC] = &qhm_qpic,
+       [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+       [MASTER_SPMI_FETCHER] = &qhm_spmi_fetcher1,
+       [MASTER_ANOC_SNOC] = &qnm_aggre_noc,
+       [MASTER_IPA] = &qnm_ipa,
+       [MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
+       [MASTER_MEM_NOC_PCIE_SNOC] = &qnm_memnoc_pcie,
+       [MASTER_CRYPTO] = &qxm_crypto,
+       [MASTER_IPA_PCIE] = &xm_ipa2pcie_slv,
+       [MASTER_PCIE_0] = &xm_pcie,
+       [MASTER_QDSS_ETR] = &xm_qdss_etr,
+       [MASTER_SDCC_1] = &xm_sdc1,
+       [MASTER_USB3] = &xm_usb3,
+       [SLAVE_AOSS] = &qhs_aoss,
+       [SLAVE_APPSS] = &qhs_apss,
+       [SLAVE_AUDIO] = &qhs_audio,
+       [SLAVE_BLSP_1] = &qhs_blsp1,
+       [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+       [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+       [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+       [SLAVE_ECC_CFG] = &qhs_ecc_cfg,
+       [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+       [SLAVE_IPA_CFG] = &qhs_ipa,
+       [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+       [SLAVE_PCIE_PARF] = &qhs_pcie_parf,
+       [SLAVE_PDM] = &qhs_pdm,
+       [SLAVE_PRNG] = &qhs_prng,
+       [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+       [SLAVE_QPIC] = &qhs_qpic,
+       [SLAVE_SDCC_1] = &qhs_sdc1,
+       [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+       [SLAVE_SPMI_FETCHER] = &qhs_spmi_fetcher,
+       [SLAVE_SPMI_VGI_COEX] = &qhs_spmi_vgi_coex,
+       [SLAVE_TCSR] = &qhs_tcsr,
+       [SLAVE_TLMM] = &qhs_tlmm,
+       [SLAVE_USB3] = &qhs_usb3,
+       [SLAVE_USB3_PHY_CFG] = &qhs_usb3_phy,
+       [SLAVE_ANOC_SNOC] = &qns_aggre_noc,
+       [SLAVE_SNOC_MEM_NOC_GC] = &qns_snoc_memnoc,
+       [SLAVE_IMEM] = &qxs_imem,
+       [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+       [SLAVE_PCIE_0] = &xs_pcie,
+       [SLAVE_QDSS_STM] = &xs_qdss_stm,
+       [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sdx65_system_noc = {
+       .nodes = system_noc_nodes,
+       .num_nodes = ARRAY_SIZE(system_noc_nodes),
+       .bcms = system_noc_bcms,
+       .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+       { .compatible = "qcom,sdx65-mc-virt",
+         .data = &sdx65_mc_virt},
+       { .compatible = "qcom,sdx65-mem-noc",
+         .data = &sdx65_mem_noc},
+       { .compatible = "qcom,sdx65-system-noc",
+         .data = &sdx65_system_noc},
+       { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+       .probe = qcom_icc_rpmh_probe,
+       .remove = qcom_icc_rpmh_remove,
+       .driver = {
+               .name = "qnoc-sdx65",
+               .of_match_table = qnoc_of_match,
+               .sync_state = icc_sync_state,
+       },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_DESCRIPTION("Qualcomm SDX65 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sdx65.h b/drivers/interconnect/qcom/sdx65.h
new file mode 100644 (file)
index 0000000..5dca6e8
--- /dev/null
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SDX65_H
+#define __DRIVERS_INTERCONNECT_QCOM_SDX65_H
+
+#define SDX65_MASTER_TCU_0             0
+#define SDX65_MASTER_LLCC              1
+#define SDX65_MASTER_AUDIO             2
+#define SDX65_MASTER_BLSP_1            3
+#define SDX65_MASTER_QDSS_BAM          4
+#define SDX65_MASTER_QPIC              5
+#define SDX65_MASTER_SNOC_CFG          6
+#define SDX65_MASTER_SPMI_FETCHER      7
+#define SDX65_MASTER_ANOC_SNOC         8
+#define SDX65_MASTER_IPA               9
+#define SDX65_MASTER_MEM_NOC_SNOC      10
+#define SDX65_MASTER_MEM_NOC_PCIE_SNOC 11
+#define SDX65_MASTER_SNOC_GC_MEM_NOC   12
+#define SDX65_MASTER_CRYPTO            13
+#define SDX65_MASTER_APPSS_PROC                14
+#define SDX65_MASTER_IPA_PCIE          15
+#define SDX65_MASTER_PCIE_0            16
+#define SDX65_MASTER_QDSS_ETR          17
+#define SDX65_MASTER_SDCC_1            18
+#define SDX65_MASTER_USB3              19
+#define SDX65_SLAVE_EBI1               512
+#define SDX65_SLAVE_AOSS               513
+#define SDX65_SLAVE_APPSS              514
+#define SDX65_SLAVE_AUDIO              515
+#define SDX65_SLAVE_BLSP_1             516
+#define SDX65_SLAVE_CLK_CTL            517
+#define SDX65_SLAVE_CRYPTO_0_CFG       518
+#define SDX65_SLAVE_CNOC_DDRSS         519
+#define SDX65_SLAVE_ECC_CFG            520
+#define SDX65_SLAVE_IMEM_CFG           521
+#define SDX65_SLAVE_IPA_CFG            522
+#define SDX65_SLAVE_CNOC_MSS           523
+#define SDX65_SLAVE_PCIE_PARF          524
+#define SDX65_SLAVE_PDM                        525
+#define SDX65_SLAVE_PRNG               526
+#define SDX65_SLAVE_QDSS_CFG           527
+#define SDX65_SLAVE_QPIC               528
+#define SDX65_SLAVE_SDCC_1             529
+#define SDX65_SLAVE_SNOC_CFG           530
+#define SDX65_SLAVE_SPMI_FETCHER       531
+#define SDX65_SLAVE_SPMI_VGI_COEX      532
+#define SDX65_SLAVE_TCSR               533
+#define SDX65_SLAVE_TLMM               534
+#define SDX65_SLAVE_USB3               535
+#define SDX65_SLAVE_USB3_PHY_CFG       536
+#define SDX65_SLAVE_ANOC_SNOC          537
+#define SDX65_SLAVE_LLCC               538
+#define SDX65_SLAVE_MEM_NOC_SNOC       539
+#define SDX65_SLAVE_SNOC_MEM_NOC_GC    540
+#define SDX65_SLAVE_MEM_NOC_PCIE_SNOC  541
+#define SDX65_SLAVE_IMEM               542
+#define SDX65_SLAVE_SERVICE_SNOC       543
+#define SDX65_SLAVE_PCIE_0             544
+#define SDX65_SLAVE_QDSS_STM           545
+#define SDX65_SLAVE_TCU                        546
+
+#endif
diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
index 745e3c3..1d04a4b 100644 (file)
@@ -186,12 +186,12 @@ DEFINE_QBCM(bcm_sn12, "SN12", false, &qxm_pimem, &xm_gic);
 DEFINE_QBCM(bcm_sn14, "SN14", false, &qns_pcie_mem_noc);
 DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_gemnoc);
 
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
        &bcm_qup0,
        &bcm_sn3,
 };
 
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
        [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
        [MASTER_QUP_0] = &qhm_qup0,
        [MASTER_EMAC] = &xm_emac,
@@ -202,21 +202,21 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
        [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
 };
 
-static struct qcom_icc_desc sm8150_aggre1_noc = {
+static const struct qcom_icc_desc sm8150_aggre1_noc = {
        .nodes = aggre1_noc_nodes,
        .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
        .bcms = aggre1_noc_bcms,
        .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
 };
 
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
        &bcm_ce0,
        &bcm_qup0,
        &bcm_sn14,
        &bcm_sn3,
 };
 
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
        [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
        [MASTER_QDSS_BAM] = &qhm_qdss_bam,
        [MASTER_QSPI] = &qhm_qspi,
@@ -237,53 +237,53 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
        [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
 };
 
-static struct qcom_icc_desc sm8150_aggre2_noc = {
+static const struct qcom_icc_desc sm8150_aggre2_noc = {
        .nodes = aggre2_noc_nodes,
        .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
        .bcms = aggre2_noc_bcms,
        .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
 };
 
-static struct qcom_icc_bcm *camnoc_virt_bcms[] = {
+static struct qcom_icc_bcm * const camnoc_virt_bcms[] = {
        &bcm_mm1,
 };
 
-static struct qcom_icc_node *camnoc_virt_nodes[] = {
+static struct qcom_icc_node * const camnoc_virt_nodes[] = {
        [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
        [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
        [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
        [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
 };
 
-static struct qcom_icc_desc sm8150_camnoc_virt = {
+static const struct qcom_icc_desc sm8150_camnoc_virt = {
        .nodes = camnoc_virt_nodes,
        .num_nodes = ARRAY_SIZE(camnoc_virt_nodes),
        .bcms = camnoc_virt_bcms,
        .num_bcms = ARRAY_SIZE(camnoc_virt_bcms),
 };
 
-static struct qcom_icc_bcm *compute_noc_bcms[] = {
+static struct qcom_icc_bcm * const compute_noc_bcms[] = {
        &bcm_co0,
        &bcm_co1,
 };
 
-static struct qcom_icc_node *compute_noc_nodes[] = {
+static struct qcom_icc_node * const compute_noc_nodes[] = {
        [MASTER_NPU] = &qnm_npu,
        [SLAVE_CDSP_MEM_NOC] = &qns_cdsp_mem_noc,
 };
 
-static struct qcom_icc_desc sm8150_compute_noc = {
+static const struct qcom_icc_desc sm8150_compute_noc = {
        .nodes = compute_noc_nodes,
        .num_nodes = ARRAY_SIZE(compute_noc_nodes),
        .bcms = compute_noc_bcms,
        .num_bcms = ARRAY_SIZE(compute_noc_bcms),
 };
 
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
        &bcm_cn0,
 };
 
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
        [MASTER_SPDM] = &qhm_spdm,
        [SNOC_CNOC_MAS] = &qnm_snoc,
        [MASTER_QDSS_DAP] = &xm_qdss_dap,
@@ -340,30 +340,30 @@ static struct qcom_icc_node *config_noc_nodes[] = {
        [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
 };
 
-static struct qcom_icc_desc sm8150_config_noc = {
+static const struct qcom_icc_desc sm8150_config_noc = {
        .nodes = config_noc_nodes,
        .num_nodes = ARRAY_SIZE(config_noc_nodes),
        .bcms = config_noc_bcms,
        .num_bcms = ARRAY_SIZE(config_noc_bcms),
 };
 
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
 };
 
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
        [MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
        [SLAVE_LLCC_CFG] = &qhs_llcc,
        [SLAVE_GEM_NOC_CFG] = &qhs_memnoc,
 };
 
-static struct qcom_icc_desc sm8150_dc_noc = {
+static const struct qcom_icc_desc sm8150_dc_noc = {
        .nodes = dc_noc_nodes,
        .num_nodes = ARRAY_SIZE(dc_noc_nodes),
        .bcms = dc_noc_bcms,
        .num_bcms = ARRAY_SIZE(dc_noc_bcms),
 };
 
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
        &bcm_sh0,
        &bcm_sh2,
        &bcm_sh3,
@@ -371,7 +371,7 @@ static struct qcom_icc_bcm *gem_noc_bcms[] = {
        &bcm_sh5,
 };
 
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
        [MASTER_AMPSS_M0] = &acm_apps,
        [MASTER_GPU_TCU] = &acm_gpu_tcu,
        [MASTER_SYS_TCU] = &acm_sys_tcu,
@@ -391,54 +391,54 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
        [SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
 };
 
-static struct qcom_icc_desc sm8150_gem_noc = {
+static const struct qcom_icc_desc sm8150_gem_noc = {
        .nodes = gem_noc_nodes,
        .num_nodes = ARRAY_SIZE(gem_noc_nodes),
        .bcms = gem_noc_bcms,
        .num_bcms = ARRAY_SIZE(gem_noc_bcms),
 };
 
-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
+static struct qcom_icc_bcm * const ipa_virt_bcms[] = {
        &bcm_ip0,
 };
 
-static struct qcom_icc_node *ipa_virt_nodes[] = {
+static struct qcom_icc_node * const ipa_virt_nodes[] = {
        [MASTER_IPA_CORE] = &ipa_core_master,
        [SLAVE_IPA_CORE] = &ipa_core_slave,
 };
 
-static struct qcom_icc_desc sm8150_ipa_virt = {
+static const struct qcom_icc_desc sm8150_ipa_virt = {
        .nodes = ipa_virt_nodes,
        .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
        .bcms = ipa_virt_bcms,
        .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
 };
 
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
        &bcm_acv,
        &bcm_mc0,
 };
 
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
        [MASTER_LLCC] = &llcc_mc,
        [SLAVE_EBI_CH0] = &ebi,
 };
 
-static struct qcom_icc_desc sm8150_mc_virt = {
+static const struct qcom_icc_desc sm8150_mc_virt = {
        .nodes = mc_virt_nodes,
        .num_nodes = ARRAY_SIZE(mc_virt_nodes),
        .bcms = mc_virt_bcms,
        .num_bcms = ARRAY_SIZE(mc_virt_bcms),
 };
 
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
        &bcm_mm0,
        &bcm_mm1,
        &bcm_mm2,
        &bcm_mm3,
 };
 
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
        [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
        [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
        [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
@@ -454,14 +454,14 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
        [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
 };
 
-static struct qcom_icc_desc sm8150_mmss_noc = {
+static const struct qcom_icc_desc sm8150_mmss_noc = {
        .nodes = mmss_noc_nodes,
        .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
        .bcms = mmss_noc_bcms,
        .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
 };
 
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
        &bcm_sn0,
        &bcm_sn1,
        &bcm_sn11,
@@ -475,7 +475,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
        &bcm_sn9,
 };
 
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
        [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
        [A1NOC_SNOC_MAS] = &qnm_aggre1_noc,
        [A2NOC_SNOC_MAS] = &qnm_aggre2_noc,
@@ -495,7 +495,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
        [SLAVE_TCU] = &xs_sys_tcu_cfg,
 };
 
-static struct qcom_icc_desc sm8150_system_noc = {
+static const struct qcom_icc_desc sm8150_system_noc = {
        .nodes = system_noc_nodes,
        .num_nodes = ARRAY_SIZE(system_noc_nodes),
        .bcms = system_noc_bcms,
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
index aa70758..5cdb058 100644 (file)
@@ -195,12 +195,12 @@ DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_gemnoc_pcie);
 DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_gemnoc);
 DEFINE_QBCM(bcm_sn12, "SN12", false, &qns_pcie_modem_mem_noc, &qns_pcie_mem_noc);
 
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
        &bcm_qup0,
        &bcm_sn12,
 };
 
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
        [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
        [MASTER_QSPI_0] = &qhm_qspi,
        [MASTER_QUP_1] = &qhm_qup1,
@@ -216,20 +216,20 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
        [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
 };
 
-static struct qcom_icc_desc sm8250_aggre1_noc = {
+static const struct qcom_icc_desc sm8250_aggre1_noc = {
        .nodes = aggre1_noc_nodes,
        .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
        .bcms = aggre1_noc_bcms,
        .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
 };
 
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
        &bcm_ce0,
        &bcm_qup0,
        &bcm_sn12,
 };
 
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
        [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
        [MASTER_QDSS_BAM] = &qhm_qdss_bam,
        [MASTER_QUP_0] = &qhm_qup0,
@@ -246,35 +246,35 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
        [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
 };
 
-static struct qcom_icc_desc sm8250_aggre2_noc = {
+static const struct qcom_icc_desc sm8250_aggre2_noc = {
        .nodes = aggre2_noc_nodes,
        .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
        .bcms = aggre2_noc_bcms,
        .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
 };
 
-static struct qcom_icc_bcm *compute_noc_bcms[] = {
+static struct qcom_icc_bcm * const compute_noc_bcms[] = {
        &bcm_co0,
        &bcm_co2,
 };
 
-static struct qcom_icc_node *compute_noc_nodes[] = {
+static struct qcom_icc_node * const compute_noc_nodes[] = {
        [MASTER_NPU] = &qnm_npu,
        [SLAVE_CDSP_MEM_NOC] = &qns_cdsp_mem_noc,
 };
 
-static struct qcom_icc_desc sm8250_compute_noc = {
+static const struct qcom_icc_desc sm8250_compute_noc = {
        .nodes = compute_noc_nodes,
        .num_nodes = ARRAY_SIZE(compute_noc_nodes),
        .bcms = compute_noc_bcms,
        .num_bcms = ARRAY_SIZE(compute_noc_bcms),
 };
 
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
        &bcm_cn0,
 };
 
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
        [SNOC_CNOC_MAS] = &qnm_snoc,
        [MASTER_QDSS_DAP] = &xm_qdss_dap,
        [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
@@ -329,37 +329,37 @@ static struct qcom_icc_node *config_noc_nodes[] = {
        [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
 };
 
-static struct qcom_icc_desc sm8250_config_noc = {
+static const struct qcom_icc_desc sm8250_config_noc = {
        .nodes = config_noc_nodes,
        .num_nodes = ARRAY_SIZE(config_noc_nodes),
        .bcms = config_noc_bcms,
        .num_bcms = ARRAY_SIZE(config_noc_bcms),
 };
 
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
 };
 
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
        [MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
        [SLAVE_LLCC_CFG] = &qhs_llcc,
        [SLAVE_GEM_NOC_CFG] = &qhs_memnoc,
 };
 
-static struct qcom_icc_desc sm8250_dc_noc = {
+static const struct qcom_icc_desc sm8250_dc_noc = {
        .nodes = dc_noc_nodes,
        .num_nodes = ARRAY_SIZE(dc_noc_nodes),
        .bcms = dc_noc_bcms,
        .num_bcms = ARRAY_SIZE(dc_noc_bcms),
 };
 
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
        &bcm_sh0,
        &bcm_sh2,
        &bcm_sh3,
        &bcm_sh4,
 };
 
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
        [MASTER_GPU_TCU] = &alm_gpu_tcu,
        [MASTER_SYS_TCU] = &alm_sys_tcu,
        [MASTER_AMPSS_M0] = &chm_apps,
@@ -379,54 +379,54 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
        [SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
 };
 
-static struct qcom_icc_desc sm8250_gem_noc = {
+static const struct qcom_icc_desc sm8250_gem_noc = {
        .nodes = gem_noc_nodes,
        .num_nodes = ARRAY_SIZE(gem_noc_nodes),
        .bcms = gem_noc_bcms,
        .num_bcms = ARRAY_SIZE(gem_noc_bcms),
 };
 
-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
+static struct qcom_icc_bcm * const ipa_virt_bcms[] = {
        &bcm_ip0,
 };
 
-static struct qcom_icc_node *ipa_virt_nodes[] = {
+static struct qcom_icc_node * const ipa_virt_nodes[] = {
        [MASTER_IPA_CORE] = &ipa_core_master,
        [SLAVE_IPA_CORE] = &ipa_core_slave,
 };
 
-static struct qcom_icc_desc sm8250_ipa_virt = {
+static const struct qcom_icc_desc sm8250_ipa_virt = {
        .nodes = ipa_virt_nodes,
        .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
        .bcms = ipa_virt_bcms,
        .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
 };
 
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
        &bcm_acv,
        &bcm_mc0,
 };
 
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
        [MASTER_LLCC] = &llcc_mc,
        [SLAVE_EBI_CH0] = &ebi,
 };
 
-static struct qcom_icc_desc sm8250_mc_virt = {
+static const struct qcom_icc_desc sm8250_mc_virt = {
        .nodes = mc_virt_nodes,
        .num_nodes = ARRAY_SIZE(mc_virt_nodes),
        .bcms = mc_virt_bcms,
        .num_bcms = ARRAY_SIZE(mc_virt_bcms),
 };
 
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
        &bcm_mm0,
        &bcm_mm1,
        &bcm_mm2,
        &bcm_mm3,
 };
 
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
        [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
        [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
        [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
@@ -442,17 +442,17 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
        [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
 };
 
-static struct qcom_icc_desc sm8250_mmss_noc = {
+static const struct qcom_icc_desc sm8250_mmss_noc = {
        .nodes = mmss_noc_nodes,
        .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
        .bcms = mmss_noc_bcms,
        .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
 };
 
-static struct qcom_icc_bcm *npu_noc_bcms[] = {
+static struct qcom_icc_bcm * const npu_noc_bcms[] = {
 };
 
-static struct qcom_icc_node *npu_noc_nodes[] = {
+static struct qcom_icc_node * const npu_noc_nodes[] = {
        [MASTER_NPU_SYS] = &amm_npu_sys,
        [MASTER_NPU_CDP] = &amm_npu_sys_cdp_w,
        [MASTER_NPU_NOC_CFG] = &qhm_cfg,
@@ -468,14 +468,14 @@ static struct qcom_icc_node *npu_noc_nodes[] = {
        [SLAVE_SERVICE_NPU_NOC] = &srvc_noc,
 };
 
-static struct qcom_icc_desc sm8250_npu_noc = {
+static const struct qcom_icc_desc sm8250_npu_noc = {
        .nodes = npu_noc_nodes,
        .num_nodes = ARRAY_SIZE(npu_noc_nodes),
        .bcms = npu_noc_bcms,
        .num_bcms = ARRAY_SIZE(npu_noc_bcms),
 };
 
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
        &bcm_sn0,
        &bcm_sn1,
        &bcm_sn11,
@@ -489,7 +489,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
        &bcm_sn9,
 };
 
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
        [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
        [A1NOC_SNOC_MAS] = &qnm_aggre1_noc,
        [A2NOC_SNOC_MAS] = &qnm_aggre2_noc,
@@ -511,7 +511,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
        [SLAVE_TCU] = &xs_sys_tcu_cfg,
 };
 
-static struct qcom_icc_desc sm8250_system_noc = {
+static const struct qcom_icc_desc sm8250_system_noc = {
        .nodes = system_noc_nodes,
        .num_nodes = ARRAY_SIZE(system_noc_nodes),
        .bcms = system_noc_bcms,
index c79f93a..5398e7c 100644
@@ -198,10 +198,10 @@ DEFINE_QBCM(bcm_mm4_disp, "MM4", false, &qns_mem_noc_sf_disp);
 DEFINE_QBCM(bcm_mm5_disp, "MM5", false, &qxm_rot_disp);
 DEFINE_QBCM(bcm_sh0_disp, "SH0", false, &qns_llcc_disp);
 
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
 };
 
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
        [MASTER_QSPI_0] = &qhm_qspi,
        [MASTER_QUP_1] = &qhm_qup1,
        [MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
@@ -213,21 +213,21 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
        [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
 };
 
-static struct qcom_icc_desc sm8350_aggre1_noc = {
+static const struct qcom_icc_desc sm8350_aggre1_noc = {
        .nodes = aggre1_noc_nodes,
        .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
        .bcms = aggre1_noc_bcms,
        .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
 };
 
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
        &bcm_ce0,
        &bcm_sn5,
        &bcm_sn6,
        &bcm_sn14,
 };
 
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
        [MASTER_QDSS_BAM] = &qhm_qdss_bam,
        [MASTER_QUP_0] = &qhm_qup0,
        [MASTER_QUP_2] = &qhm_qup2,
@@ -244,14 +244,14 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
        [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
 };
 
-static struct qcom_icc_desc sm8350_aggre2_noc = {
+static const struct qcom_icc_desc sm8350_aggre2_noc = {
        .nodes = aggre2_noc_nodes,
        .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
        .bcms = aggre2_noc_bcms,
        .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
 };
 
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
        &bcm_cn0,
        &bcm_cn1,
        &bcm_cn2,
@@ -259,7 +259,7 @@ static struct qcom_icc_bcm *config_noc_bcms[] = {
        &bcm_sn4,
 };
 
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
        [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
        [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
        [MASTER_QDSS_DAP] = &xm_qdss_dap,
@@ -323,30 +323,30 @@ static struct qcom_icc_node *config_noc_nodes[] = {
        [SLAVE_TCU] = &xs_sys_tcu_cfg,
 };
 
-static struct qcom_icc_desc sm8350_config_noc = {
+static const struct qcom_icc_desc sm8350_config_noc = {
        .nodes = config_noc_nodes,
        .num_nodes = ARRAY_SIZE(config_noc_nodes),
        .bcms = config_noc_bcms,
        .num_bcms = ARRAY_SIZE(config_noc_bcms),
 };
 
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
 };
 
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
        [MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
        [SLAVE_LLCC_CFG] = &qhs_llcc,
        [SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
 };
 
-static struct qcom_icc_desc sm8350_dc_noc = {
+static const struct qcom_icc_desc sm8350_dc_noc = {
        .nodes = dc_noc_nodes,
        .num_nodes = ARRAY_SIZE(dc_noc_nodes),
        .bcms = dc_noc_bcms,
        .num_bcms = ARRAY_SIZE(dc_noc_bcms),
 };
 
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
        &bcm_sh0,
        &bcm_sh2,
        &bcm_sh3,
@@ -354,7 +354,7 @@ static struct qcom_icc_bcm *gem_noc_bcms[] = {
        &bcm_sh0_disp,
 };
 
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
        [MASTER_GPU_TCU] = &alm_gpu_tcu,
        [MASTER_SYS_TCU] = &alm_sys_tcu,
        [MASTER_APPSS_PROC] = &chm_apps,
@@ -379,17 +379,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
        [SLAVE_LLCC_DISP] = &qns_llcc_disp,
 };
 
-static struct qcom_icc_desc sm8350_gem_noc = {
+static const struct qcom_icc_desc sm8350_gem_noc = {
        .nodes = gem_noc_nodes,
        .num_nodes = ARRAY_SIZE(gem_noc_nodes),
        .bcms = gem_noc_bcms,
        .num_bcms = ARRAY_SIZE(gem_noc_bcms),
 };
 
-static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
 };
 
-static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
        [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
        [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
        [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
@@ -399,35 +399,35 @@ static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
        [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
 };
 
-static struct qcom_icc_desc sm8350_lpass_ag_noc = {
+static const struct qcom_icc_desc sm8350_lpass_ag_noc = {
        .nodes = lpass_ag_noc_nodes,
        .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
        .bcms = lpass_ag_noc_bcms,
        .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
 };
 
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
        &bcm_acv,
        &bcm_mc0,
        &bcm_acv_disp,
        &bcm_mc0_disp,
 };
 
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
        [MASTER_LLCC] = &llcc_mc,
        [SLAVE_EBI1] = &ebi,
        [MASTER_LLCC_DISP] = &llcc_mc_disp,
        [SLAVE_EBI1_DISP] = &ebi_disp,
 };
 
-static struct qcom_icc_desc sm8350_mc_virt = {
+static const struct qcom_icc_desc sm8350_mc_virt = {
        .nodes = mc_virt_nodes,
        .num_nodes = ARRAY_SIZE(mc_virt_nodes),
        .bcms = mc_virt_bcms,
        .num_bcms = ARRAY_SIZE(mc_virt_bcms),
 };
 
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
        &bcm_mm0,
        &bcm_mm1,
        &bcm_mm4,
@@ -438,7 +438,7 @@ static struct qcom_icc_bcm *mmss_noc_bcms[] = {
        &bcm_mm5_disp,
 };
 
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
        [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
        [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
        [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
@@ -459,40 +459,40 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
        [SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp,
 };
 
-static struct qcom_icc_desc sm8350_mmss_noc = {
+static const struct qcom_icc_desc sm8350_mmss_noc = {
        .nodes = mmss_noc_nodes,
        .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
        .bcms = mmss_noc_bcms,
        .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
 };
 
-static struct qcom_icc_bcm *nsp_noc_bcms[] = {
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
        &bcm_co0,
        &bcm_co3,
 };
 
-static struct qcom_icc_node *nsp_noc_nodes[] = {
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
        [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
        [MASTER_CDSP_PROC] = &qxm_nsp,
        [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
        [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
 };
 
-static struct qcom_icc_desc sm8350_compute_noc = {
+static const struct qcom_icc_desc sm8350_compute_noc = {
        .nodes = nsp_noc_nodes,
        .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
        .bcms = nsp_noc_bcms,
        .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
 };
 
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
        &bcm_sn0,
        &bcm_sn2,
        &bcm_sn7,
        &bcm_sn8,
 };
 
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
        [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
        [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
        [MASTER_SNOC_CFG] = &qnm_snoc_cfg,
@@ -503,7 +503,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
        [SLAVE_SERVICE_SNOC] = &srvc_snoc,
 };
 
-static struct qcom_icc_desc sm8350_system_noc = {
+static const struct qcom_icc_desc sm8350_system_noc = {
        .nodes = system_noc_nodes,
        .num_nodes = ARRAY_SIZE(system_noc_nodes),
        .bcms = system_noc_bcms,
index 8d99ee6..7e3d372 100644
@@ -1526,10 +1526,10 @@ static struct qcom_icc_bcm bcm_sh1_disp = {
        .nodes = { &qnm_pcie_disp },
 };
 
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
 };
 
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
        [MASTER_QSPI_0] = &qhm_qspi,
        [MASTER_QUP_1] = &qhm_qup1,
        [MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
@@ -1540,18 +1540,18 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
        [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
 };
 
-static struct qcom_icc_desc sm8450_aggre1_noc = {
+static const struct qcom_icc_desc sm8450_aggre1_noc = {
        .nodes = aggre1_noc_nodes,
        .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
        .bcms = aggre1_noc_bcms,
        .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
 };
 
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
        &bcm_ce0,
 };
 
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
        [MASTER_QDSS_BAM] = &qhm_qdss_bam,
        [MASTER_QUP_0] = &qhm_qup0,
        [MASTER_QUP_2] = &qhm_qup2,
@@ -1567,20 +1567,20 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
        [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
 };
 
-static struct qcom_icc_desc sm8450_aggre2_noc = {
+static const struct qcom_icc_desc sm8450_aggre2_noc = {
        .nodes = aggre2_noc_nodes,
        .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
        .bcms = aggre2_noc_bcms,
        .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
 };
 
-static struct qcom_icc_bcm *clk_virt_bcms[] = {
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
        &bcm_qup0,
        &bcm_qup1,
        &bcm_qup2,
 };
 
-static struct qcom_icc_node *clk_virt_nodes[] = {
+static struct qcom_icc_node * const clk_virt_nodes[] = {
        [MASTER_QUP_CORE_0] = &qup0_core_master,
        [MASTER_QUP_CORE_1] = &qup1_core_master,
        [MASTER_QUP_CORE_2] = &qup2_core_master,
@@ -1589,18 +1589,18 @@ static struct qcom_icc_node *clk_virt_nodes[] = {
        [SLAVE_QUP_CORE_2] = &qup2_core_slave,
 };
 
-static struct qcom_icc_desc sm8450_clk_virt = {
+static const struct qcom_icc_desc sm8450_clk_virt = {
        .nodes = clk_virt_nodes,
        .num_nodes = ARRAY_SIZE(clk_virt_nodes),
        .bcms = clk_virt_bcms,
        .num_bcms = ARRAY_SIZE(clk_virt_bcms),
 };
 
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
        &bcm_cn0,
 };
 
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
        [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
        [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
        [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
@@ -1658,21 +1658,21 @@ static struct qcom_icc_node *config_noc_nodes[] = {
        [SLAVE_TCU] = &xs_sys_tcu_cfg,
 };
 
-static struct qcom_icc_desc sm8450_config_noc = {
+static const struct qcom_icc_desc sm8450_config_noc = {
        .nodes = config_noc_nodes,
        .num_nodes = ARRAY_SIZE(config_noc_nodes),
        .bcms = config_noc_bcms,
        .num_bcms = ARRAY_SIZE(config_noc_bcms),
 };
 
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
        &bcm_sh0,
        &bcm_sh1,
        &bcm_sh0_disp,
        &bcm_sh1_disp,
 };
 
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
        [MASTER_GPU_TCU] = &alm_gpu_tcu,
        [MASTER_SYS_TCU] = &alm_sys_tcu,
        [MASTER_APPSS_PROC] = &chm_apps,
@@ -1693,17 +1693,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = {
        [SLAVE_LLCC_DISP] = &qns_llcc_disp,
 };
 
-static struct qcom_icc_desc sm8450_gem_noc = {
+static const struct qcom_icc_desc sm8450_gem_noc = {
        .nodes = gem_noc_nodes,
        .num_nodes = ARRAY_SIZE(gem_noc_nodes),
        .bcms = gem_noc_bcms,
        .num_bcms = ARRAY_SIZE(gem_noc_bcms),
 };
 
-static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
 };
 
-static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
        [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
        [MASTER_LPASS_PROC] = &qxm_lpass_dsp,
        [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
@@ -1715,42 +1715,42 @@ static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
        [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
 };
 
-static struct qcom_icc_desc sm8450_lpass_ag_noc = {
+static const struct qcom_icc_desc sm8450_lpass_ag_noc = {
        .nodes = lpass_ag_noc_nodes,
        .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
        .bcms = lpass_ag_noc_bcms,
        .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
 };
 
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
        &bcm_acv,
        &bcm_mc0,
        &bcm_acv_disp,
        &bcm_mc0_disp,
 };
 
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
        [MASTER_LLCC] = &llcc_mc,
        [SLAVE_EBI1] = &ebi,
        [MASTER_LLCC_DISP] = &llcc_mc_disp,
        [SLAVE_EBI1_DISP] = &ebi_disp,
 };
 
-static struct qcom_icc_desc sm8450_mc_virt = {
+static const struct qcom_icc_desc sm8450_mc_virt = {
        .nodes = mc_virt_nodes,
        .num_nodes = ARRAY_SIZE(mc_virt_nodes),
        .bcms = mc_virt_bcms,
        .num_bcms = ARRAY_SIZE(mc_virt_bcms),
 };
 
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
        &bcm_mm0,
        &bcm_mm1,
        &bcm_mm0_disp,
        &bcm_mm1_disp,
 };
 
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
        [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
        [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
        [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
@@ -1771,36 +1771,36 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
        [SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp,
 };
 
-static struct qcom_icc_desc sm8450_mmss_noc = {
+static const struct qcom_icc_desc sm8450_mmss_noc = {
        .nodes = mmss_noc_nodes,
        .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
        .bcms = mmss_noc_bcms,
        .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
 };
 
-static struct qcom_icc_bcm *nsp_noc_bcms[] = {
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
        &bcm_co0,
 };
 
-static struct qcom_icc_node *nsp_noc_nodes[] = {
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
        [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
        [MASTER_CDSP_PROC] = &qxm_nsp,
        [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
        [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
 };
 
-static struct qcom_icc_desc sm8450_nsp_noc = {
+static const struct qcom_icc_desc sm8450_nsp_noc = {
        .nodes = nsp_noc_nodes,
        .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
        .bcms = nsp_noc_bcms,
        .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
 };
 
-static struct qcom_icc_bcm *pcie_anoc_bcms[] = {
+static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
        &bcm_sn7,
 };
 
-static struct qcom_icc_node *pcie_anoc_nodes[] = {
+static struct qcom_icc_node * const pcie_anoc_nodes[] = {
        [MASTER_PCIE_ANOC_CFG] = &qnm_pcie_anoc_cfg,
        [MASTER_PCIE_0] = &xm_pcie3_0,
        [MASTER_PCIE_1] = &xm_pcie3_1,
@@ -1808,14 +1808,14 @@ static struct qcom_icc_node *pcie_anoc_nodes[] = {
        [SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc,
 };
 
-static struct qcom_icc_desc sm8450_pcie_anoc = {
+static const struct qcom_icc_desc sm8450_pcie_anoc = {
        .nodes = pcie_anoc_nodes,
        .num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
        .bcms = pcie_anoc_bcms,
        .num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
 };
 
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
        &bcm_sn0,
        &bcm_sn1,
        &bcm_sn2,
@@ -1823,7 +1823,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
        &bcm_sn4,
 };
 
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
        [MASTER_GIC_AHB] = &qhm_gic,
        [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
        [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
@@ -1836,7 +1836,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
        [SLAVE_SERVICE_SNOC] = &srvc_snoc,
 };
 
-static struct qcom_icc_desc sm8450_system_noc = {
+static const struct qcom_icc_desc sm8450_system_noc = {
        .nodes = system_noc_nodes,
        .num_nodes = ARRAY_SIZE(system_noc_nodes),
        .bcms = system_noc_bcms,
@@ -1848,7 +1848,7 @@ static int qnoc_probe(struct platform_device *pdev)
        const struct qcom_icc_desc *desc;
        struct icc_onecell_data *data;
        struct icc_provider *provider;
-       struct qcom_icc_node **qnodes;
+       struct qcom_icc_node * const *qnodes;
        struct qcom_icc_provider *qp;
        struct icc_node *node;
        size_t num_nodes, i;
index 47108ed..72d0f5e 100644
 /* IOMMU IVINFO */
 #define IOMMU_IVINFO_OFFSET     36
 #define IOMMU_IVINFO_EFRSUP     BIT(0)
+#define IOMMU_IVINFO_DMA_REMAP  BIT(1)
 
 /* IOMMU Feature Reporting Field (for IVHD type 10h) */
 #define IOMMU_FEAT_GASUP_SHIFT 6
@@ -449,6 +450,9 @@ extern struct irq_remap_table **irq_lookup_table;
 /* Interrupt remapping feature used? */
 extern bool amd_iommu_irq_remap;
 
+/* IVRS indicates that pre-boot remapping was enabled */
+extern bool amdr_ivrs_remap_support;
+
 /* kmem_cache to get tables with 128 byte alignment */
 extern struct kmem_cache *amd_iommu_irq_cache;
 
index 1a3ad58..1d08f87 100644
@@ -83,7 +83,7 @@
 #define ACPI_DEVFLAG_LINT1              0x80
 #define ACPI_DEVFLAG_ATSDIS             0x10000000
 
-#define LOOP_TIMEOUT   100000
+#define LOOP_TIMEOUT   2000000
 /*
  * ACPI table definitions
  *
@@ -181,6 +181,7 @@ u32 amd_iommu_max_pasid __read_mostly = ~0;
 
 bool amd_iommu_v2_present __read_mostly;
 static bool amd_iommu_pc_present __read_mostly;
+bool amdr_ivrs_remap_support __read_mostly;
 
 bool amd_iommu_force_isolation __read_mostly;
 
@@ -325,6 +326,8 @@ static void __init early_iommu_features_init(struct amd_iommu *iommu,
 {
        if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
                iommu->features = h->efr_reg;
+       if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
+               amdr_ivrs_remap_support = true;
 }
 
 /* Access to l1 and l2 indexed register spaces */
@@ -1985,8 +1988,7 @@ static int __init amd_iommu_init_pci(void)
        for_each_iommu(iommu)
                iommu_flush_all_caches(iommu);
 
-       if (!ret)
-               print_iommu_info();
+       print_iommu_info();
 
 out:
        return ret;
index b47220a..840831d 100644
@@ -1838,20 +1838,10 @@ void amd_iommu_domain_update(struct protection_domain *domain)
        amd_iommu_domain_flush_complete(domain);
 }
 
-static void __init amd_iommu_init_dma_ops(void)
-{
-       if (iommu_default_passthrough() || sme_me_mask)
-               x86_swiotlb_enable = true;
-       else
-               x86_swiotlb_enable = false;
-}
-
 int __init amd_iommu_init_api(void)
 {
        int err;
 
-       amd_iommu_init_dma_ops();
-
        err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
        if (err)
                return err;
@@ -2165,6 +2155,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
                return (irq_remapping_enabled == 1);
        case IOMMU_CAP_NOEXEC:
                return false;
+       case IOMMU_CAP_PRE_BOOT_PROTECTION:
+               return amdr_ivrs_remap_support;
        default:
                break;
        }
@@ -2274,6 +2266,12 @@ static int amd_iommu_def_domain_type(struct device *dev)
        return 0;
 }
 
+static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
+{
+       /* IOMMU_PTE_FC is always set */
+       return true;
+}
+
 const struct iommu_ops amd_iommu_ops = {
        .capable = amd_iommu_capable,
        .domain_alloc = amd_iommu_domain_alloc,
@@ -2296,6 +2294,7 @@ const struct iommu_ops amd_iommu_ops = {
                .flush_iotlb_all = amd_iommu_flush_iotlb_all,
                .iotlb_sync     = amd_iommu_iotlb_sync,
                .free           = amd_iommu_domain_free,
+               .enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
        }
 };
 
index e56b137..afb3efd 100644
@@ -956,6 +956,7 @@ static void __exit amd_iommu_v2_exit(void)
 {
        struct device_state *dev_state, *next;
        unsigned long flags;
+       LIST_HEAD(freelist);
 
        if (!amd_iommu_v2_supported())
                return;
@@ -975,11 +976,20 @@ static void __exit amd_iommu_v2_exit(void)
 
                put_device_state(dev_state);
                list_del(&dev_state->list);
-               free_device_state(dev_state);
+               list_add_tail(&dev_state->list, &freelist);
        }
 
        spin_unlock_irqrestore(&state_lock, flags);
 
+       /*
+        * Since free_device_state waits on the count to be zero,
+        * we need to free dev_state outside the spinlock.
+        */
+       list_for_each_entry_safe(dev_state, next, &freelist, list) {
+               list_del(&dev_state->list);
+               free_device_state(dev_state);
+       }
+
        destroy_workqueue(iommu_wq);
 }
 
index c623dae..1ef7bbb 100644
@@ -6,6 +6,7 @@
 #include <linux/mm.h>
 #include <linux/mmu_context.h>
 #include <linux/mmu_notifier.h>
+#include <linux/sched/mm.h>
 #include <linux/slab.h>
 
 #include "arm-smmu-v3.h"
@@ -96,9 +97,14 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
        struct arm_smmu_ctx_desc *cd;
        struct arm_smmu_ctx_desc *ret = NULL;
 
+       /* Don't free the mm until we release the ASID */
+       mmgrab(mm);
+
        asid = arm64_mm_context_get(mm);
-       if (!asid)
-               return ERR_PTR(-ESRCH);
+       if (!asid) {
+               err = -ESRCH;
+               goto out_drop_mm;
+       }
 
        cd = kzalloc(sizeof(*cd), GFP_KERNEL);
        if (!cd) {
@@ -165,6 +171,8 @@ out_free_cd:
        kfree(cd);
 out_put_context:
        arm64_mm_context_put(mm);
+out_drop_mm:
+       mmdrop(mm);
        return err < 0 ? ERR_PTR(err) : ret;
 }
 
@@ -173,6 +181,7 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
        if (arm_smmu_free_asid(cd)) {
                /* Unpin ASID */
                arm64_mm_context_put(cd->mm);
+               mmdrop(cd->mm);
                kfree(cd);
        }
 }
index 627a3ed..88817a3 100644
@@ -3770,6 +3770,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 
        /* Base address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -EINVAL;
        if (resource_size(res) < arm_smmu_resource_size(smmu)) {
                dev_err(dev, "MMIO region too small (%pr)\n", res);
                return -EINVAL;
index 2c25cce..658f3cc 100644
@@ -211,7 +211,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
        if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
                smmu->impl = &calxeda_impl;
 
-       if (of_device_is_compatible(np, "nvidia,tegra194-smmu") ||
+       if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
+           of_device_is_compatible(np, "nvidia,tegra194-smmu") ||
            of_device_is_compatible(np, "nvidia,tegra186-smmu"))
                return nvidia_smmu_impl_init(smmu);
 
index ba6298c..7820711 100644
@@ -408,6 +408,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
        { .compatible = "qcom,sc7180-smmu-500" },
        { .compatible = "qcom,sc7280-smmu-500" },
        { .compatible = "qcom,sc8180x-smmu-500" },
+       { .compatible = "qcom,sc8280xp-smmu-500" },
        { .compatible = "qcom,sdm630-smmu-v2" },
        { .compatible = "qcom,sdm845-smmu-500" },
        { .compatible = "qcom,sm6125-smmu-500" },
index 568cce5..2ed3594 100644
@@ -1574,6 +1574,9 @@ static int arm_smmu_def_domain_type(struct device *dev)
        struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
        const struct arm_smmu_impl *impl = cfg->smmu->impl;
 
+       if (using_legacy_binding)
+               return IOMMU_DOMAIN_IDENTITY;
+
        if (impl && impl->def_domain_type)
                return impl->def_domain_type(dev);
 
@@ -2092,11 +2095,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
        if (err)
                return err;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ioaddr = res->start;
-       smmu->base = devm_ioremap_resource(dev, res);
+       smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
        if (IS_ERR(smmu->base))
                return PTR_ERR(smmu->base);
+       ioaddr = res->start;
        /*
         * The resource size should effectively match the value of SMMU_TOP;
         * stash that temporarily until we know PAGESIZE to validate it with.
index 09f6e1c..f902515 100644
@@ -20,6 +20,7 @@
 #include <linux/iommu.h>
 #include <linux/iova.h>
 #include <linux/irq.h>
+#include <linux/list_sort.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/pci.h>
@@ -414,6 +415,15 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
        return 0;
 }
 
+static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
+               const struct list_head *b)
+{
+       struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
+       struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);
+
+       return res_a->res->start > res_b->res->start;
+}
+
 static int iova_reserve_pci_windows(struct pci_dev *dev,
                struct iova_domain *iovad)
 {
@@ -432,6 +442,7 @@ static int iova_reserve_pci_windows(struct pci_dev *dev,
        }
 
        /* Get reserved DMA windows from host bridge */
+       list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
        resource_list_for_each_entry(window, &bridge->dma_ranges) {
                end = window->res->start - window->offset;
 resv_iova:
@@ -440,7 +451,7 @@ resv_iova:
                        hi = iova_pfn(iovad, end);
                        reserve_iova(iovad, lo, hi);
                } else if (end < start) {
-                       /* dma_ranges list should be sorted */
+                       /* DMA ranges should be non-overlapping */
                        dev_err(&dev->dev,
                                "Failed to reserve IOVA [%pa-%pa]\n",
                                &start, &end);
@@ -776,6 +787,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
        struct page **pages;
        dma_addr_t iova;
+       ssize_t ret;
 
        if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
            iommu_deferred_attach(dev, domain))
@@ -813,8 +825,8 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
                        arch_dma_prep_coherent(sg_page(sg), sg->length);
        }
 
-       if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
-                       < size)
+       ret = iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot);
+       if (ret < 0 || ret < size)
                goto out_free_sg;
 
        sgt->sgl->dma_address = iova;
@@ -971,6 +983,11 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                void *padding_start;
                size_t padding_size, aligned_size;
 
+               if (!is_swiotlb_active(dev)) {
+                       dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
+                       return DMA_MAPPING_ERROR;
+               }
+
                aligned_size = iova_align(iovad, size);
                phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
                                              iova_mask(iovad), dir, attrs);
@@ -1209,7 +1226,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
         * implementation - it knows better than we do.
         */
        ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
-       if (ret < iova_len)
+       if (ret < 0 || ret < iova_len)
                goto out_free_iova;
 
        return __finalise_sg(dev, sg, nents, iova);
index fc38b1f..0d03f83 100644
@@ -11,6 +11,9 @@
 #include <linux/fsl/guts.h>
 #include <linux/interrupt.h>
 #include <linux/genalloc.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
 
 #include <asm/mpc85xx.h>
 
index 69a4a62..94b4589 100644
@@ -9,6 +9,7 @@
 
 #include "fsl_pamu_domain.h"
 
+#include <linux/platform_device.h>
 #include <sysdev/fsl_pci.h>
 
 /*
index ba9a63c..4401659 100644
@@ -533,33 +533,6 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
        rcu_read_unlock();
 }
 
-static bool domain_update_iommu_snooping(struct intel_iommu *skip)
-{
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu;
-       bool ret = true;
-
-       rcu_read_lock();
-       for_each_active_iommu(iommu, drhd) {
-               if (iommu != skip) {
-                       /*
-                        * If the hardware is operating in the scalable mode,
-                        * the snooping control is always supported since we
-                        * always set PASID-table-entry.PGSNP bit if the domain
-                        * is managed outside (UNMANAGED).
-                        */
-                       if (!sm_supported(iommu) &&
-                           !ecap_sc_support(iommu->ecap)) {
-                               ret = false;
-                               break;
-                       }
-               }
-       }
-       rcu_read_unlock();
-
-       return ret;
-}
-
 static int domain_update_iommu_superpage(struct dmar_domain *domain,
                                         struct intel_iommu *skip)
 {
@@ -641,7 +614,6 @@ static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
 static void domain_update_iommu_cap(struct dmar_domain *domain)
 {
        domain_update_iommu_coherency(domain);
-       domain->iommu_snooping = domain_update_iommu_snooping(NULL);
        domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
 
        /*
@@ -2460,7 +2432,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
        if (level == 5)
                flags |= PASID_FLAG_FL5LP;
 
-       if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
+       if (domain->force_snooping)
                flags |= PASID_FLAG_PAGE_SNOOP;
 
        return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
@@ -2474,64 +2446,6 @@ static bool dev_is_real_dma_subdevice(struct device *dev)
               pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
 }
 
-static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
-                                                   int bus, int devfn,
-                                                   struct device *dev,
-                                                   struct dmar_domain *domain)
-{
-       struct device_domain_info *info = dev_iommu_priv_get(dev);
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&device_domain_lock, flags);
-       info->domain = domain;
-       spin_lock(&iommu->lock);
-       ret = domain_attach_iommu(domain, iommu);
-       spin_unlock(&iommu->lock);
-       if (ret) {
-               spin_unlock_irqrestore(&device_domain_lock, flags);
-               return NULL;
-       }
-       list_add(&info->link, &domain->devices);
-       spin_unlock_irqrestore(&device_domain_lock, flags);
-
-       /* PASID table is mandatory for a PCI device in scalable mode. */
-       if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
-               ret = intel_pasid_alloc_table(dev);
-               if (ret) {
-                       dev_err(dev, "PASID table allocation failed\n");
-                       dmar_remove_one_dev_info(dev);
-                       return NULL;
-               }
-
-               /* Setup the PASID entry for requests without PASID: */
-               spin_lock_irqsave(&iommu->lock, flags);
-               if (hw_pass_through && domain_type_is_si(domain))
-                       ret = intel_pasid_setup_pass_through(iommu, domain,
-                                       dev, PASID_RID2PASID);
-               else if (domain_use_first_level(domain))
-                       ret = domain_setup_first_level(iommu, domain, dev,
-                                       PASID_RID2PASID);
-               else
-                       ret = intel_pasid_setup_second_level(iommu, domain,
-                                       dev, PASID_RID2PASID);
-               spin_unlock_irqrestore(&iommu->lock, flags);
-               if (ret) {
-                       dev_err(dev, "Setup RID2PASID failed\n");
-                       dmar_remove_one_dev_info(dev);
-                       return NULL;
-               }
-       }
-
-       if (dev && domain_context_mapping(domain, dev)) {
-               dev_err(dev, "Domain context map failed\n");
-               dmar_remove_one_dev_info(dev);
-               return NULL;
-       }
-
-       return domain;
-}
-
 static int iommu_domain_identity_map(struct dmar_domain *domain,
                                     unsigned long first_vpfn,
                                     unsigned long last_vpfn)
@@ -2607,17 +2521,62 @@ static int __init si_domain_init(int hw)
 
 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 {
-       struct dmar_domain *ndomain;
+       struct device_domain_info *info = dev_iommu_priv_get(dev);
        struct intel_iommu *iommu;
+       unsigned long flags;
        u8 bus, devfn;
+       int ret;
 
        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;
 
-       ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
-       if (ndomain != domain)
-               return -EBUSY;
+       spin_lock_irqsave(&device_domain_lock, flags);
+       info->domain = domain;
+       spin_lock(&iommu->lock);
+       ret = domain_attach_iommu(domain, iommu);
+       spin_unlock(&iommu->lock);
+       if (ret) {
+               spin_unlock_irqrestore(&device_domain_lock, flags);
+               return ret;
+       }
+       list_add(&info->link, &domain->devices);
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+       /* PASID table is mandatory for a PCI device in scalable mode. */
+       if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
+               ret = intel_pasid_alloc_table(dev);
+               if (ret) {
+                       dev_err(dev, "PASID table allocation failed\n");
+                       dmar_remove_one_dev_info(dev);
+                       return ret;
+               }
+
+               /* Setup the PASID entry for requests without PASID: */
+               spin_lock_irqsave(&iommu->lock, flags);
+               if (hw_pass_through && domain_type_is_si(domain))
+                       ret = intel_pasid_setup_pass_through(iommu, domain,
+                                       dev, PASID_RID2PASID);
+               else if (domain_use_first_level(domain))
+                       ret = domain_setup_first_level(iommu, domain, dev,
+                                       PASID_RID2PASID);
+               else
+                       ret = intel_pasid_setup_second_level(iommu, domain,
+                                       dev, PASID_RID2PASID);
+               spin_unlock_irqrestore(&iommu->lock, flags);
+               if (ret) {
+                       dev_err(dev, "Setup RID2PASID failed\n");
+                       dmar_remove_one_dev_info(dev);
+                       return ret;
+               }
+       }
+
+       ret = domain_context_mapping(domain, dev);
+       if (ret) {
+               dev_err(dev, "Domain context map failed\n");
+               dmar_remove_one_dev_info(dev);
+               return ret;
+       }
 
        return 0;
 }
@@ -3607,12 +3566,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
                        iommu->name);
                return -ENXIO;
        }
-       if (!ecap_sc_support(iommu->ecap) &&
-           domain_update_iommu_snooping(iommu)) {
-               pr_warn("%s: Doesn't support snooping.\n",
-                       iommu->name);
-               return -ENXIO;
-       }
+
        sp = domain_update_iommu_superpage(NULL, iommu) - 1;
        if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
                pr_warn("%s: Doesn't support large page.\n",
@@ -4304,7 +4258,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
        domain->agaw = width_to_agaw(adjust_width);
 
        domain->iommu_coherency = false;
-       domain->iommu_snooping = false;
        domain->iommu_superpage = 0;
        domain->max_addr = 0;
 
@@ -4369,6 +4322,9 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
        if (!iommu)
                return -ENODEV;
 
+       if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
+               return -EOPNOTSUPP;
+
        /* check if this iommu agaw is sufficient for max mapped address */
        addr_width = agaw_to_width(iommu->agaw);
        if (addr_width > cap_mgaw(iommu->cap))
@@ -4443,7 +4399,7 @@ static int intel_iommu_map(struct iommu_domain *domain,
                prot |= DMA_PTE_READ;
        if (iommu_prot & IOMMU_WRITE)
                prot |= DMA_PTE_WRITE;
-       if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
+       if (dmar_domain->set_pte_snp)
                prot |= DMA_PTE_SNP;
 
        max_addr = iova + size;
@@ -4566,12 +4522,71 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
        return phys;
 }
 
+static bool domain_support_force_snooping(struct dmar_domain *domain)
+{
+       struct device_domain_info *info;
+       bool support = true;
+
+       assert_spin_locked(&device_domain_lock);
+       list_for_each_entry(info, &domain->devices, link) {
+               if (!ecap_sc_support(info->iommu->ecap)) {
+                       support = false;
+                       break;
+               }
+       }
+
+       return support;
+}
+
+static void domain_set_force_snooping(struct dmar_domain *domain)
+{
+       struct device_domain_info *info;
+
+       assert_spin_locked(&device_domain_lock);
+
+       /*
+        * Second level page table supports per-PTE snoop control. The
+        * iommu_map() interface will handle this by setting SNP bit.
+        */
+       if (!domain_use_first_level(domain)) {
+               domain->set_pte_snp = true;
+               return;
+       }
+
+       list_for_each_entry(info, &domain->devices, link)
+               intel_pasid_setup_page_snoop_control(info->iommu, info->dev,
+                                                    PASID_RID2PASID);
+}
+
+static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
+{
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+       unsigned long flags;
+
+       if (dmar_domain->force_snooping)
+               return true;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       if (!domain_support_force_snooping(dmar_domain)) {
+               spin_unlock_irqrestore(&device_domain_lock, flags);
+               return false;
+       }
+
+       domain_set_force_snooping(dmar_domain);
+       dmar_domain->force_snooping = true;
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+       return true;
+}
+
 static bool intel_iommu_capable(enum iommu_cap cap)
 {
        if (cap == IOMMU_CAP_CACHE_COHERENCY)
-               return domain_update_iommu_snooping(NULL);
+               return true;
        if (cap == IOMMU_CAP_INTR_REMAP)
                return irq_remapping_enabled == 1;
+       if (cap == IOMMU_CAP_PRE_BOOT_PROTECTION)
+               return dmar_platform_optin();
 
        return false;
 }
@@ -4919,6 +4934,7 @@ const struct iommu_ops intel_iommu_ops = {
                .iotlb_sync             = intel_iommu_tlb_sync,
                .iova_to_phys           = intel_iommu_iova_to_phys,
                .free                   = intel_iommu_domain_free,
+               .enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
        }
 };
 
index f8d215d..cb4c1d0 100644
@@ -710,9 +710,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
        pasid_set_fault_enable(pte);
        pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
 
-       if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
-               pasid_set_pgsnp(pte);
-
        /*
         * Since it is a second level only translation setup, we should
         * set SRE bit as well (addresses are expected to be GPAs).
@@ -762,3 +759,45 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
 
        return 0;
 }
+
+/*
+ * Set the page snoop control for a pasid entry which has been set up.
+ */
+void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
+                                         struct device *dev, u32 pasid)
+{
+       struct pasid_entry *pte;
+       u16 did;
+
+       spin_lock(&iommu->lock);
+       pte = intel_pasid_get_entry(dev, pasid);
+       if (WARN_ON(!pte || !pasid_pte_is_present(pte))) {
+               spin_unlock(&iommu->lock);
+               return;
+       }
+
+       pasid_set_pgsnp(pte);
+       did = pasid_get_domain_id(pte);
+       spin_unlock(&iommu->lock);
+
+       if (!ecap_coherent(iommu->ecap))
+               clflush_cache_range(pte, sizeof(*pte));
+
+       /*
+        * VT-d spec 3.4 table23 states guides for cache invalidation:
+        *
+        * - PASID-selective-within-Domain PASID-cache invalidation
+        * - PASID-selective PASID-based IOTLB invalidation
+        * - If (pasid is RID_PASID)
+        *    - Global Device-TLB invalidation to affected functions
+        *   Else
+        *    - PASID-based Device-TLB invalidation (with S=1 and
+        *      Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
+        */
+       pasid_cache_invalidation_with_pasid(iommu, did, pasid);
+       qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+
+       /* Device IOTLB doesn't need to be flushed in caching mode. */
+       if (!cap_caching_mode(iommu->cap))
+               devtlb_invalidation_with_pasid(iommu, dev, pasid);
+}
index ab4408c..583ea67 100644
@@ -123,4 +123,6 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
                                 bool fault_ignore);
 int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid);
 void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid);
+void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
+                                         struct device *dev, u32 pasid);
 #endif /* __INTEL_PASID_H */
index 857d4c2..847ad47 100644
@@ -18,7 +18,6 @@
 #include <linux/errno.h>
 #include <linux/iommu.h>
 #include <linux/idr.h>
-#include <linux/notifier.h>
 #include <linux/err.h>
 #include <linux/pci.h>
 #include <linux/bitops.h>
@@ -40,14 +39,16 @@ struct iommu_group {
        struct kobject *devices_kobj;
        struct list_head devices;
        struct mutex mutex;
-       struct blocking_notifier_head notifier;
        void *iommu_data;
        void (*iommu_data_release)(void *iommu_data);
        char *name;
        int id;
        struct iommu_domain *default_domain;
+       struct iommu_domain *blocking_domain;
        struct iommu_domain *domain;
        struct list_head entry;
+       unsigned int owner_cnt;
+       void *owner;
 };
 
 struct group_device {
@@ -82,8 +83,8 @@ static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev);
 static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group);
-static void __iommu_detach_group(struct iommu_domain *domain,
-                                struct iommu_group *group);
+static int __iommu_group_set_domain(struct iommu_group *group,
+                                   struct iommu_domain *new_domain);
 static int iommu_create_device_direct_mappings(struct iommu_group *group,
                                               struct device *dev);
 static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
@@ -294,7 +295,11 @@ int iommu_probe_device(struct device *dev)
        mutex_lock(&group->mutex);
        iommu_alloc_default_domain(group, dev);
 
-       if (group->default_domain) {
+       /*
+        * If device joined an existing group which has been claimed, don't
+        * attach the default domain.
+        */
+       if (group->default_domain && !group->owner) {
                ret = __iommu_attach_device(group->default_domain, dev);
                if (ret) {
                        mutex_unlock(&group->mutex);
@@ -599,6 +604,8 @@ static void iommu_group_release(struct kobject *kobj)
 
        if (group->default_domain)
                iommu_domain_free(group->default_domain);
+       if (group->blocking_domain)
+               iommu_domain_free(group->blocking_domain);
 
        kfree(group->name);
        kfree(group);
@@ -633,7 +640,6 @@ struct iommu_group *iommu_group_alloc(void)
        mutex_init(&group->mutex);
        INIT_LIST_HEAD(&group->devices);
        INIT_LIST_HEAD(&group->entry);
-       BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
 
        ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
        if (ret < 0) {
@@ -906,10 +912,6 @@ rename:
        if (ret)
                goto err_put_group;
 
-       /* Notify any listeners about change to group. */
-       blocking_notifier_call_chain(&group->notifier,
-                                    IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
-
        trace_add_device_to_group(group->id, dev);
 
        dev_info(dev, "Adding to iommu group %d\n", group->id);
@@ -951,10 +953,6 @@ void iommu_group_remove_device(struct device *dev)
 
        dev_info(dev, "Removing from iommu group %d\n", group->id);
 
-       /* Pre-notify listeners that a device is being removed. */
-       blocking_notifier_call_chain(&group->notifier,
-                                    IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
-
        mutex_lock(&group->mutex);
        list_for_each_entry(tmp_device, &group->devices, list) {
                if (tmp_device->dev == dev) {
@@ -1077,36 +1075,6 @@ void iommu_group_put(struct iommu_group *group)
 EXPORT_SYMBOL_GPL(iommu_group_put);
 
 /**
- * iommu_group_register_notifier - Register a notifier for group changes
- * @group: the group to watch
- * @nb: notifier block to signal
- *
- * This function allows iommu group users to track changes in a group.
- * See include/linux/iommu.h for actions sent via this notifier.  Caller
- * should hold a reference to the group throughout notifier registration.
- */
-int iommu_group_register_notifier(struct iommu_group *group,
-                                 struct notifier_block *nb)
-{
-       return blocking_notifier_chain_register(&group->notifier, nb);
-}
-EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
-
-/**
- * iommu_group_unregister_notifier - Unregister a notifier
- * @group: the group to watch
- * @nb: notifier block to signal
- *
- * Unregister a previously registered group notifier block.
- */
-int iommu_group_unregister_notifier(struct iommu_group *group,
-                                   struct notifier_block *nb)
-{
-       return blocking_notifier_chain_unregister(&group->notifier, nb);
-}
-EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
-
-/**
  * iommu_register_device_fault_handler() - Register a device fault handler
  * @dev: the device
  * @handler: the fault handler
@@ -1651,14 +1619,8 @@ static int remove_iommu_group(struct device *dev, void *data)
 static int iommu_bus_notifier(struct notifier_block *nb,
                              unsigned long action, void *data)
 {
-       unsigned long group_action = 0;
        struct device *dev = data;
-       struct iommu_group *group;
 
-       /*
-        * ADD/DEL call into iommu driver ops if provided, which may
-        * result in ADD/DEL notifiers to group->notifier
-        */
        if (action == BUS_NOTIFY_ADD_DEVICE) {
                int ret;
 
@@ -1669,34 +1631,6 @@ static int iommu_bus_notifier(struct notifier_block *nb,
                return NOTIFY_OK;
        }
 
-       /*
-        * Remaining BUS_NOTIFYs get filtered and republished to the
-        * group, if anyone is listening
-        */
-       group = iommu_group_get(dev);
-       if (!group)
-               return 0;
-
-       switch (action) {
-       case BUS_NOTIFY_BIND_DRIVER:
-               group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
-               break;
-       case BUS_NOTIFY_BOUND_DRIVER:
-               group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
-               break;
-       case BUS_NOTIFY_UNBIND_DRIVER:
-               group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
-               break;
-       case BUS_NOTIFY_UNBOUND_DRIVER:
-               group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
-               break;
-       }
-
-       if (group_action)
-               blocking_notifier_call_chain(&group->notifier,
-                                            group_action, dev);
-
-       iommu_group_put(group);
        return 0;
 }
 
@@ -1913,6 +1847,29 @@ bool iommu_present(struct bus_type *bus)
 }
 EXPORT_SYMBOL_GPL(iommu_present);
 
+/**
+ * device_iommu_capable() - check for a general IOMMU capability
+ * @dev: device to which the capability would be relevant, if available
+ * @cap: IOMMU capability
+ *
+ * Return: true if an IOMMU is present and supports the given capability
+ * for the given device, otherwise false.
+ */
+bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
+{
+       const struct iommu_ops *ops;
+
+       if (!dev->iommu || !dev->iommu->iommu_dev)
+               return false;
+
+       ops = dev_iommu_ops(dev);
+       if (!ops->capable)
+               return false;
+
+       return ops->capable(cap);
+}
+EXPORT_SYMBOL_GPL(device_iommu_capable);
+
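A minimal sketch of how a consumer might use the new per-device helper, assuming the existing IOMMU_CAP_CACHE_COHERENCY capability value; the wrapper name is hypothetical and not part of this patch.

/* Illustration only; assumes #include <linux/iommu.h>. */
static bool example_dev_dma_is_coherent(struct device *dev)
{
        /* True only when an IOMMU translates this device and it reports coherency. */
        return device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY);
}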
 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
 {
        if (!bus->iommu_ops || !bus->iommu_ops->capable)
@@ -1983,6 +1940,24 @@ void iommu_domain_free(struct iommu_domain *domain)
 }
 EXPORT_SYMBOL_GPL(iommu_domain_free);
 
+/*
+ * Put the group's domain back to the appropriate core-owned domain - either the
+ * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
+ */
+static void __iommu_group_set_core_domain(struct iommu_group *group)
+{
+       struct iommu_domain *new_domain;
+       int ret;
+
+       if (group->owner)
+               new_domain = group->blocking_domain;
+       else
+               new_domain = group->default_domain;
+
+       ret = __iommu_group_set_domain(group, new_domain);
+       WARN(ret, "iommu driver failed to attach the default/blocking domain");
+}
+
 static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev)
 {
@@ -2039,9 +2014,6 @@ static void __iommu_detach_device(struct iommu_domain *domain,
        if (iommu_is_attach_deferred(dev))
                return;
 
-       if (unlikely(domain->ops->detach_dev == NULL))
-               return;
-
        domain->ops->detach_dev(domain, dev);
        trace_detach_device_from_domain(dev);
 }
@@ -2055,12 +2027,10 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
                return;
 
        mutex_lock(&group->mutex);
-       if (iommu_group_device_count(group) != 1) {
-               WARN_ON(1);
+       if (WARN_ON(domain != group->domain) ||
+           WARN_ON(iommu_group_device_count(group) != 1))
                goto out_unlock;
-       }
-
-       __iommu_detach_group(domain, group);
+       __iommu_group_set_core_domain(group);
 
 out_unlock:
        mutex_unlock(&group->mutex);
@@ -2116,7 +2086,8 @@ static int __iommu_attach_group(struct iommu_domain *domain,
 {
        int ret;
 
-       if (group->default_domain && group->domain != group->default_domain)
+       if (group->domain && group->domain != group->default_domain &&
+           group->domain != group->blocking_domain)
                return -EBUSY;
 
        ret = __iommu_group_for_each_dev(group, domain,
@@ -2148,34 +2119,49 @@ static int iommu_group_do_detach_device(struct device *dev, void *data)
        return 0;
 }
 
-static void __iommu_detach_group(struct iommu_domain *domain,
-                                struct iommu_group *group)
+static int __iommu_group_set_domain(struct iommu_group *group,
+                                   struct iommu_domain *new_domain)
 {
        int ret;
 
-       if (!group->default_domain) {
-               __iommu_group_for_each_dev(group, domain,
+       if (group->domain == new_domain)
+               return 0;
+
+       /*
+        * New drivers should support default domains and so the detach_dev() op
+        * will never be called. Otherwise the NULL domain represents some
+        * platform specific behavior.
+        */
+       if (!new_domain) {
+               if (WARN_ON(!group->domain->ops->detach_dev))
+                       return -EINVAL;
+               __iommu_group_for_each_dev(group, group->domain,
                                           iommu_group_do_detach_device);
                group->domain = NULL;
-               return;
+               return 0;
        }
 
-       if (group->domain == group->default_domain)
-               return;
-
-       /* Detach by re-attaching to the default domain */
-       ret = __iommu_group_for_each_dev(group, group->default_domain,
+       /*
+        * Changing the domain is done by calling attach_dev() on the new
+        * domain. This switch does not have to be atomic and DMA can be
+        * discarded during the transition. DMA must only be able to access
+        * either new_domain or group->domain, never something else.
+        *
+        * Note that this is called in error unwind paths, so attaching to a
+        * domain that has already been attached cannot fail.
+        */
+       ret = __iommu_group_for_each_dev(group, new_domain,
                                         iommu_group_do_attach_device);
-       if (ret != 0)
-               WARN_ON(1);
-       else
-               group->domain = group->default_domain;
+       if (ret)
+               return ret;
+       group->domain = new_domain;
+       return 0;
 }
 
 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
 {
        mutex_lock(&group->mutex);
-       __iommu_detach_group(domain, group);
+       __iommu_group_set_core_domain(group);
        mutex_unlock(&group->mutex);
 }
 EXPORT_SYMBOL_GPL(iommu_detach_group);
@@ -3102,3 +3088,167 @@ out:
 
        return ret;
 }
+
+/**
+ * iommu_device_use_default_domain() - Device driver wants to handle device
+ *                                     DMA through the kernel DMA API.
+ * @dev: The device.
+ *
+ * The device driver about to bind @dev wants to do DMA through the kernel
+ * DMA API. Return 0 if it is allowed, otherwise an error.
+ */
+int iommu_device_use_default_domain(struct device *dev)
+{
+       struct iommu_group *group = iommu_group_get(dev);
+       int ret = 0;
+
+       if (!group)
+               return 0;
+
+       mutex_lock(&group->mutex);
+       if (group->owner_cnt) {
+               if (group->domain != group->default_domain ||
+                   group->owner) {
+                       ret = -EBUSY;
+                       goto unlock_out;
+               }
+       }
+
+       group->owner_cnt++;
+
+unlock_out:
+       mutex_unlock(&group->mutex);
+       iommu_group_put(group);
+
+       return ret;
+}
+
+/**
+ * iommu_device_unuse_default_domain() - Device driver stops handling device
+ *                                       DMA through the kernel DMA API.
+ * @dev: The device.
+ *
+ * The device driver no longer wants to do DMA through the kernel DMA API.
+ * It must be called after iommu_device_use_default_domain().
+ */
+void iommu_device_unuse_default_domain(struct device *dev)
+{
+       struct iommu_group *group = iommu_group_get(dev);
+
+       if (!group)
+               return;
+
+       mutex_lock(&group->mutex);
+       if (!WARN_ON(!group->owner_cnt))
+               group->owner_cnt--;
+
+       mutex_unlock(&group->mutex);
+       iommu_group_put(group);
+}
+
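A minimal sketch of how callers are expected to pair these two helpers around driver binding; the hook names below are hypothetical, and only the two iommu_device_*_default_domain() calls come from the patch.

/* Illustration only; assumes #include <linux/iommu.h>. */
static int example_before_driver_bind(struct device *dev)
{
        int ret;

        ret = iommu_device_use_default_domain(dev);
        if (ret)
                return ret;     /* the group is currently claimed for user DMA */

        /* ... bind the driver and use the kernel DMA API as usual ... */
        return 0;
}

static void example_after_driver_unbind(struct device *dev)
{
        /* ... driver teardown ... */
        iommu_device_unuse_default_domain(dev);
}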
+static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
+{
+       struct group_device *dev =
+               list_first_entry(&group->devices, struct group_device, list);
+
+       if (group->blocking_domain)
+               return 0;
+
+       group->blocking_domain =
+               __iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
+       if (!group->blocking_domain) {
+               /*
+                * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED,
+                * create an empty domain instead.
+                */
+               group->blocking_domain = __iommu_domain_alloc(
+                       dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
+               if (!group->blocking_domain)
+                       return -EINVAL;
+       }
+       return 0;
+}
+
+/**
+ * iommu_group_claim_dma_owner() - Set DMA ownership of a group
+ * @group: The group.
+ * @owner: Caller specified pointer. Used for exclusive ownership.
+ *
+ * This is to support backward compatibility for vfio, which manages
+ * DMA ownership at the iommu_group level. New invocations of this
+ * interface should be prohibited.
+ */
+int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
+{
+       int ret = 0;
+
+       mutex_lock(&group->mutex);
+       if (group->owner_cnt) {
+               ret = -EPERM;
+               goto unlock_out;
+       } else {
+               if (group->domain && group->domain != group->default_domain) {
+                       ret = -EBUSY;
+                       goto unlock_out;
+               }
+
+               ret = __iommu_group_alloc_blocking_domain(group);
+               if (ret)
+                       goto unlock_out;
+
+               ret = __iommu_group_set_domain(group, group->blocking_domain);
+               if (ret)
+                       goto unlock_out;
+               group->owner = owner;
+       }
+
+       group->owner_cnt++;
+unlock_out:
+       mutex_unlock(&group->mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
+
+/**
+ * iommu_group_release_dma_owner() - Release DMA ownership of a group
+ * @group: The group.
+ *
+ * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
+ */
+void iommu_group_release_dma_owner(struct iommu_group *group)
+{
+       int ret;
+
+       mutex_lock(&group->mutex);
+       if (WARN_ON(!group->owner_cnt || !group->owner))
+               goto unlock_out;
+
+       group->owner_cnt = 0;
+       group->owner = NULL;
+       ret = __iommu_group_set_domain(group, group->default_domain);
+       WARN(ret, "iommu driver failed to attach the default domain");
+
+unlock_out:
+       mutex_unlock(&group->mutex);
+}
+EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);
+
+/**
+ * iommu_group_dma_owner_claimed() - Query group dma ownership status
+ * @group: The group.
+ *
+ * This provides a status query on the given group. It is racy and only for
+ * non-binding status reporting.
+ */
+bool iommu_group_dma_owner_claimed(struct iommu_group *group)
+{
+       unsigned int user;
+
+       mutex_lock(&group->mutex);
+       user = group->owner_cnt;
+       mutex_unlock(&group->mutex);
+
+       return user;
+}
+EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
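A minimal sketch of the intended claim/release pattern for a VFIO-like user; the function names and the "container" cookie are hypothetical, only the two iommu_group_*_dma_owner() calls come from the patch.

/* Illustration only; assumes #include <linux/iommu.h>. */
static int example_group_begin_user_dma(struct iommu_group *group, void *container)
{
        int ret;

        ret = iommu_group_claim_dma_owner(group, container);
        if (ret)
                return ret;     /* kernel DMA users or another owner hold the group */

        /* Devices now sit behind the blocking domain until a user domain is attached. */
        return 0;
}

static void example_group_end_user_dma(struct iommu_group *group)
{
        iommu_group_release_dma_owner(group);
}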
index 50f5762..f09aedf 100644 (file)
@@ -583,7 +583,7 @@ static void print_ctx_regs(void __iomem *base, int ctx)
               GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
 }
 
-static void insert_iommu_master(struct device *dev,
+static int insert_iommu_master(struct device *dev,
                                struct msm_iommu_dev **iommu,
                                struct of_phandle_args *spec)
 {
@@ -592,6 +592,10 @@ static void insert_iommu_master(struct device *dev,
 
        if (list_empty(&(*iommu)->ctx_list)) {
                master = kzalloc(sizeof(*master), GFP_ATOMIC);
+               if (!master) {
+                       dev_err(dev, "Failed to allocate iommu_master\n");
+                       return -ENOMEM;
+               }
                master->of_node = dev->of_node;
                list_add(&master->list, &(*iommu)->ctx_list);
                dev_iommu_priv_set(dev, master);
@@ -601,30 +605,34 @@ static void insert_iommu_master(struct device *dev,
                if (master->mids[sid] == spec->args[0]) {
                        dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
                                 sid);
-                       return;
+                       return 0;
                }
 
        master->mids[master->num_mids++] = spec->args[0];
+       return 0;
 }
 
 static int qcom_iommu_of_xlate(struct device *dev,
                               struct of_phandle_args *spec)
 {
-       struct msm_iommu_dev *iommu;
+       struct msm_iommu_dev *iommu = NULL, *iter;
        unsigned long flags;
        int ret = 0;
 
        spin_lock_irqsave(&msm_iommu_lock, flags);
-       list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
-               if (iommu->dev->of_node == spec->np)
+       list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
+               if (iter->dev->of_node == spec->np) {
+                       iommu = iter;
                        break;
+               }
+       }
 
-       if (!iommu || iommu->dev->of_node != spec->np) {
+       if (!iommu) {
                ret = -ENODEV;
                goto fail;
        }
 
-       insert_iommu_master(dev, &iommu, spec);
+       ret = insert_iommu_master(dev, &iommu, spec);
 fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
 
index 6fd75a6..bb9dd92 100644 (file)
 #include <linux/io.h>
 #include <linux/iommu.h>
 #include <linux/iopoll.h>
+#include <linux/io-pgtable.h>
 #include <linux/list.h>
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
+#include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
@@ -29,7 +31,7 @@
 #include <asm/barrier.h>
 #include <soc/mediatek/smi.h>
 
-#include "mtk_iommu.h"
+#include <dt-bindings/memory/mtk-memory-port.h>
 
 #define REG_MMU_PT_BASE_ADDR                   0x000
 #define MMU_PT_ADDR_MASK                       GENMASK(31, 7)
@@ -51,6 +53,8 @@
 #define F_MMU_STANDARD_AXI_MODE_MASK           (BIT(3) | BIT(19))
 
 #define REG_MMU_DCM_DIS                                0x050
+#define F_MMU_DCM                              BIT(8)
+
 #define REG_MMU_WR_LEN_CTRL                    0x054
 #define F_MMU_WR_THROT_DIS_MASK                        (BIT(5) | BIT(21))
 
 #define REG_MMU1_INT_ID                                0x154
 #define F_MMU_INT_ID_COMM_ID(a)                        (((a) >> 9) & 0x7)
 #define F_MMU_INT_ID_SUB_COMM_ID(a)            (((a) >> 7) & 0x3)
+#define F_MMU_INT_ID_COMM_ID_EXT(a)            (((a) >> 10) & 0x7)
+#define F_MMU_INT_ID_SUB_COMM_ID_EXT(a)                (((a) >> 7) & 0x7)
 #define F_MMU_INT_ID_LARB_ID(a)                        (((a) >> 7) & 0x7)
 #define F_MMU_INT_ID_PORT_ID(a)                        (((a) >> 2) & 0x1f)
 
 #define MTK_PROTECT_PA_ALIGN                   256
+#define MTK_IOMMU_BANK_SZ                      0x1000
+
+#define PERICFG_IOMMU_1                                0x714
 
 #define HAS_4GB_MODE                   BIT(0)
 /* HW will use the EMI clock if there isn't the "bclk". */
 #define HAS_VLD_PA_RNG                 BIT(2)
 #define RESET_AXI                      BIT(3)
 #define OUT_ORDER_WR_EN                        BIT(4)
-#define HAS_SUB_COMM                   BIT(5)
-#define WR_THROT_EN                    BIT(6)
-#define HAS_LEGACY_IVRP_PADDR          BIT(7)
-#define IOVA_34_EN                     BIT(8)
+#define HAS_SUB_COMM_2BITS             BIT(5)
+#define HAS_SUB_COMM_3BITS             BIT(6)
+#define WR_THROT_EN                    BIT(7)
+#define HAS_LEGACY_IVRP_PADDR          BIT(8)
+#define IOVA_34_EN                     BIT(9)
+#define SHARE_PGTABLE                  BIT(10) /* 2 HW share pgtable */
+#define DCM_DISABLE                    BIT(11)
+#define STD_AXI_MODE                   BIT(12) /* For non MM iommu */
+/* 2 bits: iommu type */
+#define MTK_IOMMU_TYPE_MM              (0x0 << 13)
+#define MTK_IOMMU_TYPE_INFRA           (0x1 << 13)
+#define MTK_IOMMU_TYPE_MASK            (0x3 << 13)
+/* PM and clock are always on, e.g. the infra iommu */
+#define PM_CLK_AO                      BIT(15)
+#define IFA_IOMMU_PCIE_SUPPORT         BIT(16)
+
+#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask)       \
+                               ((((pdata)->flags) & (mask)) == (_x))
+
+#define MTK_IOMMU_HAS_FLAG(pdata, _x)  MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, _x)
+#define MTK_IOMMU_IS_TYPE(pdata, _x)   MTK_IOMMU_HAS_FLAG_MASK(pdata, _x,\
+                                                       MTK_IOMMU_TYPE_MASK)
+
+#define MTK_INVALID_LARBID             MTK_LARB_NR_MAX
+
+#define MTK_LARB_COM_MAX       8
+#define MTK_LARB_SUBCOM_MAX    8
+
+#define MTK_IOMMU_GROUP_MAX    8
+#define MTK_IOMMU_BANK_MAX     5
+
+enum mtk_iommu_plat {
+       M4U_MT2712,
+       M4U_MT6779,
+       M4U_MT8167,
+       M4U_MT8173,
+       M4U_MT8183,
+       M4U_MT8186,
+       M4U_MT8192,
+       M4U_MT8195,
+};
+
+struct mtk_iommu_iova_region {
+       dma_addr_t              iova_base;
+       unsigned long long      size;
+};
+
+struct mtk_iommu_suspend_reg {
+       u32                     misc_ctrl;
+       u32                     dcm_dis;
+       u32                     ctrl_reg;
+       u32                     vld_pa_rng;
+       u32                     wr_len_ctrl;
+
+       u32                     int_control[MTK_IOMMU_BANK_MAX];
+       u32                     int_main_control[MTK_IOMMU_BANK_MAX];
+       u32                     ivrp_paddr[MTK_IOMMU_BANK_MAX];
+};
+
+struct mtk_iommu_plat_data {
+       enum mtk_iommu_plat     m4u_plat;
+       u32                     flags;
+       u32                     inv_sel_reg;
+
+       char                    *pericfg_comp_str;
+       struct list_head        *hw_list;
+       unsigned int            iova_region_nr;
+       const struct mtk_iommu_iova_region      *iova_region;
+
+       u8                  banks_num;
+       bool                banks_enable[MTK_IOMMU_BANK_MAX];
+       unsigned int        banks_portmsk[MTK_IOMMU_BANK_MAX];
+       unsigned char       larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX];
+};
+
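A minimal sketch of how the new bank fields fit together in a platform-data entry; every value below is invented for illustration and does not describe any real SoC table.

/* Illustration only; made-up port masks and bank layout. */
static const struct mtk_iommu_plat_data example_plat_data = {
        .m4u_plat      = M4U_MT8195,
        .flags         = MTK_IOMMU_TYPE_INFRA | IFA_IOMMU_PCIE_SUPPORT,
        .banks_num     = 5,
        .banks_enable  = {true, false, false, false, true},
        /* bank 0 would serve ports 0-15, bank 4 ports 16-31 (invented) */
        .banks_portmsk = {[0] = GENMASK(15, 0), [4] = GENMASK(31, 16)},
};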
+struct mtk_iommu_bank_data {
+       void __iomem                    *base;
+       int                             irq;
+       u8                              id;
+       struct device                   *parent_dev;
+       struct mtk_iommu_data           *parent_data;
+       spinlock_t                      tlb_lock; /* lock for tlb range flush */
+       struct mtk_iommu_domain         *m4u_dom; /* Each bank has a domain */
+};
+
+struct mtk_iommu_data {
+       struct device                   *dev;
+       struct clk                      *bclk;
+       phys_addr_t                     protect_base; /* protect memory base */
+       struct mtk_iommu_suspend_reg    reg;
+       struct iommu_group              *m4u_group[MTK_IOMMU_GROUP_MAX];
+       bool                            enable_4GB;
 
-#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
-               ((((pdata)->flags) & (_x)) == (_x))
+       struct iommu_device             iommu;
+       const struct mtk_iommu_plat_data *plat_data;
+       struct device                   *smicomm_dev;
+
+       struct mtk_iommu_bank_data      *bank;
+
+       struct dma_iommu_mapping        *mapping; /* For mtk_iommu_v1.c */
+       struct regmap                   *pericfg;
+
+       struct mutex                    mutex; /* Protect m4u_group/m4u_dom above */
+
+       /*
+        * In the sharing pgtable case, add data->list to the global list such as m4ulist.
+        * In the non-sharing pgtable case, add data->list to its own hw_list_head.
+        */
+       struct list_head                *hw_list;
+       struct list_head                hw_list_head;
+       struct list_head                list;
+       struct mtk_smi_larb_iommu       larb_imu[MTK_LARB_NR_MAX];
+};
 
 struct mtk_iommu_domain {
        struct io_pgtable_cfg           cfg;
        struct io_pgtable_ops           *iop;
 
-       struct mtk_iommu_data           *data;
+       struct mtk_iommu_bank_data      *bank;
        struct iommu_domain             domain;
+
+       struct mutex                    mutex; /* Protect "data" in this structure */
 };
 
+static int mtk_iommu_bind(struct device *dev)
+{
+       struct mtk_iommu_data *data = dev_get_drvdata(dev);
+
+       return component_bind_all(dev, &data->larb_imu);
+}
+
+static void mtk_iommu_unbind(struct device *dev)
+{
+       struct mtk_iommu_data *data = dev_get_drvdata(dev);
+
+       component_unbind_all(dev, &data->larb_imu);
+}
+
 static const struct iommu_ops mtk_iommu_ops;
 
-static int mtk_iommu_hw_init(const struct mtk_iommu_data *data);
+static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int bankid);
 
 #define MTK_IOMMU_TLB_ADDR(iova) ({                                    \
        dma_addr_t _addr = iova;                                        \
@@ -165,42 +296,28 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data);
 
 static LIST_HEAD(m4ulist);     /* List all the M4U HWs */
 
-#define for_each_m4u(data)     list_for_each_entry(data, &m4ulist, list)
-
-struct mtk_iommu_iova_region {
-       dma_addr_t              iova_base;
-       unsigned long long      size;
-};
+#define for_each_m4u(data, head)  list_for_each_entry(data, head, list)
 
 static const struct mtk_iommu_iova_region single_domain[] = {
        {.iova_base = 0,                .size = SZ_4G},
 };
 
 static const struct mtk_iommu_iova_region mt8192_multi_dom[] = {
-       { .iova_base = 0x0,             .size = SZ_4G},         /* disp: 0 ~ 4G */
+       { .iova_base = 0x0,             .size = SZ_4G},         /* 0 ~ 4G */
        #if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)
-       { .iova_base = SZ_4G,           .size = SZ_4G},         /* vdec: 4G ~ 8G */
-       { .iova_base = SZ_4G * 2,       .size = SZ_4G},         /* CAM/MDP: 8G ~ 12G */
+       { .iova_base = SZ_4G,           .size = SZ_4G},         /* 4G ~ 8G */
+       { .iova_base = SZ_4G * 2,       .size = SZ_4G},         /* 8G ~ 12G */
+       { .iova_base = SZ_4G * 3,       .size = SZ_4G},         /* 12G ~ 16G */
+
        { .iova_base = 0x240000000ULL,  .size = 0x4000000},     /* CCU0 */
        { .iova_base = 0x244000000ULL,  .size = 0x4000000},     /* CCU1 */
        #endif
 };
 
-/*
- * There may be 1 or 2 M4U HWs, But we always expect they are in the same domain
- * for the performance.
- *
- * Here always return the mtk_iommu_data of the first probed M4U where the
- * iommu domain information is recorded.
- */
-static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
+/* If 2 M4Us share a domain (use the same hwlist), put the corresponding info in the first data. */
+static struct mtk_iommu_data *mtk_iommu_get_frst_data(struct list_head *hwlist)
 {
-       struct mtk_iommu_data *data;
-
-       for_each_m4u(data)
-               return data;
-
-       return NULL;
+       return list_first_entry(hwlist, struct mtk_iommu_data, list);
 }
 
 static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
@@ -210,46 +327,72 @@ static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
 
 static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
 {
+       /* The tlb flush-all is always done via bank0. */
+       struct mtk_iommu_bank_data *bank = &data->bank[0];
+       void __iomem *base = bank->base;
        unsigned long flags;
 
-       spin_lock_irqsave(&data->tlb_lock, flags);
-       writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
-                      data->base + data->plat_data->inv_sel_reg);
-       writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
+       spin_lock_irqsave(&bank->tlb_lock, flags);
+       writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, base + data->plat_data->inv_sel_reg);
+       writel_relaxed(F_ALL_INVLD, base + REG_MMU_INVALIDATE);
        wmb(); /* Make sure the tlb flush all done */
-       spin_unlock_irqrestore(&data->tlb_lock, flags);
+       spin_unlock_irqrestore(&bank->tlb_lock, flags);
 }
 
 static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
-                                          size_t granule,
-                                          struct mtk_iommu_data *data)
+                                          struct mtk_iommu_bank_data *bank)
 {
+       struct list_head *head = bank->parent_data->hw_list;
+       struct mtk_iommu_bank_data *curbank;
+       struct mtk_iommu_data *data;
+       bool check_pm_status;
        unsigned long flags;
+       void __iomem *base;
        int ret;
        u32 tmp;
 
-       for_each_m4u(data) {
-               if (pm_runtime_get_if_in_use(data->dev) <= 0)
-                       continue;
+       for_each_m4u(data, head) {
+               /*
+                * To avoid resuming the iommu device frequently when it is not
+                * active, pm_runtime_get is not always called here; the tlb
+                * flush then relies on the tlb flush-all done in the runtime
+                * resume.
+                *
+                * There are 2 special cases:
+                *
+                * Case1: The iommu dev doesn't have a power domain but has bclk
+                * (e.g. mt8173). This case should also avoid the tlb flush while
+                * the dev is not active, to mute the tlb timeout log.
+                *
+                * Case2: The power/clock of the infra iommu is always on, and it
+                * doesn't have a device link with the master devices. This case
+                * should skip the PM status check.
+                */
+               check_pm_status = !MTK_IOMMU_HAS_FLAG(data->plat_data, PM_CLK_AO);
 
-               spin_lock_irqsave(&data->tlb_lock, flags);
+               if (check_pm_status) {
+                       if (pm_runtime_get_if_in_use(data->dev) <= 0)
+                               continue;
+               }
+
+               curbank = &data->bank[bank->id];
+               base = curbank->base;
+
+               spin_lock_irqsave(&curbank->tlb_lock, flags);
                writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
-                              data->base + data->plat_data->inv_sel_reg);
+                              base + data->plat_data->inv_sel_reg);
 
-               writel_relaxed(MTK_IOMMU_TLB_ADDR(iova),
-                              data->base + REG_MMU_INVLD_START_A);
+               writel_relaxed(MTK_IOMMU_TLB_ADDR(iova), base + REG_MMU_INVLD_START_A);
                writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1),
-                              data->base + REG_MMU_INVLD_END_A);
-               writel_relaxed(F_MMU_INV_RANGE,
-                              data->base + REG_MMU_INVALIDATE);
+                              base + REG_MMU_INVLD_END_A);
+               writel_relaxed(F_MMU_INV_RANGE, base + REG_MMU_INVALIDATE);
 
                /* tlb sync */
-               ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
+               ret = readl_poll_timeout_atomic(base + REG_MMU_CPE_DONE,
                                                tmp, tmp != 0, 10, 1000);
 
                /* Clear the CPE status */
-               writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
-               spin_unlock_irqrestore(&data->tlb_lock, flags);
+               writel_relaxed(0, base + REG_MMU_CPE_DONE);
+               spin_unlock_irqrestore(&curbank->tlb_lock, flags);
 
                if (ret) {
                        dev_warn(data->dev,
@@ -257,70 +400,103 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
                        mtk_iommu_tlb_flush_all(data);
                }
 
-               pm_runtime_put(data->dev);
+               if (check_pm_status)
+                       pm_runtime_put(data->dev);
        }
 }
 
 static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
 {
-       struct mtk_iommu_data *data = dev_id;
-       struct mtk_iommu_domain *dom = data->m4u_dom;
-       unsigned int fault_larb, fault_port, sub_comm = 0;
+       struct mtk_iommu_bank_data *bank = dev_id;
+       struct mtk_iommu_data *data = bank->parent_data;
+       struct mtk_iommu_domain *dom = bank->m4u_dom;
+       unsigned int fault_larb = MTK_INVALID_LARBID, fault_port = 0, sub_comm = 0;
        u32 int_state, regval, va34_32, pa34_32;
+       const struct mtk_iommu_plat_data *plat_data = data->plat_data;
+       void __iomem *base = bank->base;
        u64 fault_iova, fault_pa;
        bool layer, write;
 
        /* Read error info from registers */
-       int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
+       int_state = readl_relaxed(base + REG_MMU_FAULT_ST1);
        if (int_state & F_REG_MMU0_FAULT_MASK) {
-               regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
-               fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
-               fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
+               regval = readl_relaxed(base + REG_MMU0_INT_ID);
+               fault_iova = readl_relaxed(base + REG_MMU0_FAULT_VA);
+               fault_pa = readl_relaxed(base + REG_MMU0_INVLD_PA);
        } else {
-               regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
-               fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
-               fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
+               regval = readl_relaxed(base + REG_MMU1_INT_ID);
+               fault_iova = readl_relaxed(base + REG_MMU1_FAULT_VA);
+               fault_pa = readl_relaxed(base + REG_MMU1_INVLD_PA);
        }
        layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
        write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
-       if (MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN)) {
+       if (MTK_IOMMU_HAS_FLAG(plat_data, IOVA_34_EN)) {
                va34_32 = FIELD_GET(F_MMU_INVAL_VA_34_32_MASK, fault_iova);
-               pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova);
                fault_iova = fault_iova & F_MMU_INVAL_VA_31_12_MASK;
                fault_iova |= (u64)va34_32 << 32;
-               fault_pa |= (u64)pa34_32 << 32;
        }
-
-       fault_port = F_MMU_INT_ID_PORT_ID(regval);
-       if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
-               fault_larb = F_MMU_INT_ID_COMM_ID(regval);
-               sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
-       } else {
-               fault_larb = F_MMU_INT_ID_LARB_ID(regval);
+       pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova);
+       fault_pa |= (u64)pa34_32 << 32;
+
+       if (MTK_IOMMU_IS_TYPE(plat_data, MTK_IOMMU_TYPE_MM)) {
+               fault_port = F_MMU_INT_ID_PORT_ID(regval);
+               if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_2BITS)) {
+                       fault_larb = F_MMU_INT_ID_COMM_ID(regval);
+                       sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
+               } else if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_3BITS)) {
+                       fault_larb = F_MMU_INT_ID_COMM_ID_EXT(regval);
+                       sub_comm = F_MMU_INT_ID_SUB_COMM_ID_EXT(regval);
+               } else {
+                       fault_larb = F_MMU_INT_ID_LARB_ID(regval);
+               }
+               fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
        }
-       fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
 
-       if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
+       if (report_iommu_fault(&dom->domain, bank->parent_dev, fault_iova,
                               write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
                dev_err_ratelimited(
-                       data->dev,
-                       "fault type=0x%x iova=0x%llx pa=0x%llx larb=%d port=%d layer=%d %s\n",
-                       int_state, fault_iova, fault_pa, fault_larb, fault_port,
+                       bank->parent_dev,
+                       "fault type=0x%x iova=0x%llx pa=0x%llx master=0x%x(larb=%d port=%d) layer=%d %s\n",
+                       int_state, fault_iova, fault_pa, regval, fault_larb, fault_port,
                        layer, write ? "write" : "read");
        }
 
        /* Interrupt clear */
-       regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
+       regval = readl_relaxed(base + REG_MMU_INT_CONTROL0);
        regval |= F_INT_CLR_BIT;
-       writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
+       writel_relaxed(regval, base + REG_MMU_INT_CONTROL0);
 
        mtk_iommu_tlb_flush_all(data);
 
        return IRQ_HANDLED;
 }
 
-static int mtk_iommu_get_domain_id(struct device *dev,
-                                  const struct mtk_iommu_plat_data *plat_data)
+static unsigned int mtk_iommu_get_bank_id(struct device *dev,
+                                         const struct mtk_iommu_plat_data *plat_data)
+{
+       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+       unsigned int i, portmsk = 0, bankid = 0;
+
+       if (plat_data->banks_num == 1)
+               return bankid;
+
+       for (i = 0; i < fwspec->num_ids; i++)
+               portmsk |= BIT(MTK_M4U_TO_PORT(fwspec->ids[i]));
+
+       for (i = 0; i < plat_data->banks_num && i < MTK_IOMMU_BANK_MAX; i++) {
+               if (!plat_data->banks_enable[i])
+                       continue;
+
+               if (portmsk & plat_data->banks_portmsk[i]) {
+                       bankid = i;
+                       break;
+               }
+       }
+       return bankid; /* default is 0 */
+}
+
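A worked example, with invented numbers, of the bank selection above:

/*
 * A master whose fwspec ids decode to ports 4 and 7 yields
 * portmsk = BIT(4) | BIT(7) = 0x90. The first enabled bank whose
 * banks_portmsk overlaps 0x90 is returned; if none matches, bank 0
 * remains the default.
 */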
+static int mtk_iommu_get_iova_region_id(struct device *dev,
+                                       const struct mtk_iommu_plat_data *plat_data)
 {
        const struct mtk_iommu_iova_region *rgn = plat_data->iova_region;
        const struct bus_dma_region *dma_rgn = dev->dma_range_map;
@@ -349,46 +525,65 @@ static int mtk_iommu_get_domain_id(struct device *dev,
        return -EINVAL;
 }
 
-static void mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
-                            bool enable, unsigned int domid)
+static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
+                           bool enable, unsigned int regionid)
 {
        struct mtk_smi_larb_iommu    *larb_mmu;
        unsigned int                 larbid, portid;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        const struct mtk_iommu_iova_region *region;
-       int i;
+       u32 peri_mmuen, peri_mmuen_msk;
+       int i, ret = 0;
 
        for (i = 0; i < fwspec->num_ids; ++i) {
                larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
                portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
 
-               larb_mmu = &data->larb_imu[larbid];
-
-               region = data->plat_data->iova_region + domid;
-               larb_mmu->bank[portid] = upper_32_bits(region->iova_base);
-
-               dev_dbg(dev, "%s iommu for larb(%s) port %d dom %d bank %d.\n",
-                       enable ? "enable" : "disable", dev_name(larb_mmu->dev),
-                       portid, domid, larb_mmu->bank[portid]);
-
-               if (enable)
-                       larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
-               else
-                       larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
+               if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
+                       larb_mmu = &data->larb_imu[larbid];
+
+                       region = data->plat_data->iova_region + regionid;
+                       larb_mmu->bank[portid] = upper_32_bits(region->iova_base);
+
+                       dev_dbg(dev, "%s iommu for larb(%s) port %d region %d rgn-bank %d.\n",
+                               enable ? "enable" : "disable", dev_name(larb_mmu->dev),
+                               portid, regionid, larb_mmu->bank[portid]);
+
+                       if (enable)
+                               larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
+                       else
+                               larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
+               } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
+                       peri_mmuen_msk = BIT(portid);
+                       /* A PCI dev has only one output id; also enable the next (write) bit for PCIe */
+                       if (dev_is_pci(dev))
+                               peri_mmuen_msk |= BIT(portid + 1);
+
+                       peri_mmuen = enable ? peri_mmuen_msk : 0;
+                       ret = regmap_update_bits(data->pericfg, PERICFG_IOMMU_1,
+                                                peri_mmuen_msk, peri_mmuen);
+                       if (ret)
+                               dev_err(dev, "%s iommu(%s) inframaster 0x%x fail(%d).\n",
+                                       enable ? "enable" : "disable",
+                                       dev_name(data->dev), peri_mmuen_msk, ret);
+               }
        }
+       return ret;
 }
 
 static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
                                     struct mtk_iommu_data *data,
-                                    unsigned int domid)
+                                    unsigned int region_id)
 {
        const struct mtk_iommu_iova_region *region;
-
-       /* Use the exist domain as there is only one pgtable here. */
-       if (data->m4u_dom) {
-               dom->iop = data->m4u_dom->iop;
-               dom->cfg = data->m4u_dom->cfg;
-               dom->domain.pgsize_bitmap = data->m4u_dom->cfg.pgsize_bitmap;
+       struct mtk_iommu_domain *m4u_dom;
+
+       /* Always use bank0 in sharing pgtable case */
+       m4u_dom = data->bank[0].m4u_dom;
+       if (m4u_dom) {
+               dom->iop = m4u_dom->iop;
+               dom->cfg = m4u_dom->cfg;
+               dom->domain.pgsize_bitmap = m4u_dom->cfg.pgsize_bitmap;
                goto update_iova_region;
        }
 
@@ -417,7 +612,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
 
 update_iova_region:
        /* Update the iova region for this domain */
-       region = data->plat_data->iova_region + domid;
+       region = data->plat_data->iova_region + region_id;
        dom->domain.geometry.aperture_start = region->iova_base;
        dom->domain.geometry.aperture_end = region->iova_base + region->size - 1;
        dom->domain.geometry.force_aperture = true;
@@ -428,12 +623,13 @@ static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
 {
        struct mtk_iommu_domain *dom;
 
-       if (type != IOMMU_DOMAIN_DMA)
+       if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;
 
        dom = kzalloc(sizeof(*dom), GFP_KERNEL);
        if (!dom)
                return NULL;
+       mutex_init(&dom->mutex);
 
        return &dom->domain;
 }
@@ -446,40 +642,60 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain)
 static int mtk_iommu_attach_device(struct iommu_domain *domain,
                                   struct device *dev)
 {
-       struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
+       struct mtk_iommu_data *data = dev_iommu_priv_get(dev), *frstdata;
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+       struct list_head *hw_list = data->hw_list;
        struct device *m4udev = data->dev;
-       int ret, domid;
+       struct mtk_iommu_bank_data *bank;
+       unsigned int bankid;
+       int ret, region_id;
 
-       domid = mtk_iommu_get_domain_id(dev, data->plat_data);
-       if (domid < 0)
-               return domid;
+       region_id = mtk_iommu_get_iova_region_id(dev, data->plat_data);
+       if (region_id < 0)
+               return region_id;
 
-       if (!dom->data) {
-               if (mtk_iommu_domain_finalise(dom, data, domid))
+       bankid = mtk_iommu_get_bank_id(dev, data->plat_data);
+       mutex_lock(&dom->mutex);
+       if (!dom->bank) {
+               /* The pgtable info is kept in the first data (frstdata) in the sharing pgtable case. */
+               frstdata = mtk_iommu_get_frst_data(hw_list);
+
+               ret = mtk_iommu_domain_finalise(dom, frstdata, region_id);
+               if (ret) {
+                       mutex_unlock(&dom->mutex);
                        return -ENODEV;
-               dom->data = data;
+               }
+               dom->bank = &data->bank[bankid];
        }
+       mutex_unlock(&dom->mutex);
 
-       if (!data->m4u_dom) { /* Initialize the M4U HW */
+       mutex_lock(&data->mutex);
+       bank = &data->bank[bankid];
+       if (!bank->m4u_dom) { /* Initialize the M4U HW for each bank */
                ret = pm_runtime_resume_and_get(m4udev);
-               if (ret < 0)
-                       return ret;
+               if (ret < 0) {
+                       dev_err(m4udev, "pm get fail(%d) in attach.\n", ret);
+                       goto err_unlock;
+               }
 
-               ret = mtk_iommu_hw_init(data);
+               ret = mtk_iommu_hw_init(data, bankid);
                if (ret) {
                        pm_runtime_put(m4udev);
-                       return ret;
+                       goto err_unlock;
                }
-               data->m4u_dom = dom;
+               bank->m4u_dom = dom;
                writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
-                      data->base + REG_MMU_PT_BASE_ADDR);
+                      bank->base + REG_MMU_PT_BASE_ADDR);
 
                pm_runtime_put(m4udev);
        }
+       mutex_unlock(&data->mutex);
 
-       mtk_iommu_config(data, dev, true, domid);
-       return 0;
+       return mtk_iommu_config(data, dev, true, region_id);
+
+err_unlock:
+       mutex_unlock(&data->mutex);
+       return ret;
 }
 
 static void mtk_iommu_detach_device(struct iommu_domain *domain,
@@ -496,7 +712,7 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 
        /* The "4GB mode" M4U physically can not use the lower remap of Dram. */
-       if (dom->data->enable_4GB)
+       if (dom->bank->parent_data->enable_4GB)
                paddr |= BIT_ULL(32);
 
        /* Synchronize with the tlb_lock */
@@ -517,7 +733,7 @@ static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 
-       mtk_iommu_tlb_flush_all(dom->data);
+       mtk_iommu_tlb_flush_all(dom->bank->parent_data);
 }
 
 static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
@@ -526,8 +742,7 @@ static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        size_t length = gather->end - gather->start + 1;
 
-       mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
-                                      dom->data);
+       mtk_iommu_tlb_flush_range_sync(gather->start, length, dom->bank);
 }
 
 static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
@@ -535,7 +750,7 @@ static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
 {
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 
-       mtk_iommu_tlb_flush_range_sync(iova, size, size, dom->data);
+       mtk_iommu_tlb_flush_range_sync(iova, size, dom->bank);
 }
 
 static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -546,7 +761,7 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
 
        pa = dom->iop->iova_to_phys(dom->iop, iova);
        if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) &&
-           dom->data->enable_4GB &&
+           dom->bank->parent_data->enable_4GB &&
            pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
                pa &= ~BIT_ULL(32);
 
@@ -566,12 +781,18 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
 
        data = dev_iommu_priv_get(dev);
 
+       if (!MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
+               return &data->iommu;
+
        /*
         * Link the consumer device with the smi-larb device(supplier).
         * The device that connects with each a larb is a independent HW.
         * All the ports in each a device should be in the same larbs.
         */
        larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
+       if (larbid >= MTK_LARB_NR_MAX)
+               return ERR_PTR(-EINVAL);
+
        for (i = 1; i < fwspec->num_ids; i++) {
                larbidx = MTK_M4U_TO_LARB(fwspec->ids[i]);
                if (larbid != larbidx) {
@@ -581,6 +802,9 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
                }
        }
        larbdev = data->larb_imu[larbid].dev;
+       if (!larbdev)
+               return ERR_PTR(-EINVAL);
+
        link = device_link_add(dev, larbdev,
                               DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
        if (!link)
@@ -599,34 +823,55 @@ static void mtk_iommu_release_device(struct device *dev)
                return;
 
        data = dev_iommu_priv_get(dev);
-       larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
-       larbdev = data->larb_imu[larbid].dev;
-       device_link_remove(dev, larbdev);
+       if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
+               larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
+               larbdev = data->larb_imu[larbid].dev;
+               device_link_remove(dev, larbdev);
+       }
 
        iommu_fwspec_free(dev);
 }
 
+static int mtk_iommu_get_group_id(struct device *dev, const struct mtk_iommu_plat_data *plat_data)
+{
+       unsigned int bankid;
+
+       /*
+        * If the bank function is enabled, each bank is an iommu group/domain.
+        * Otherwise, each iova region is an iommu group/domain.
+        */
+       bankid = mtk_iommu_get_bank_id(dev, plat_data);
+       if (bankid)
+               return bankid;
+
+       return mtk_iommu_get_iova_region_id(dev, plat_data);
+}
+
 static struct iommu_group *mtk_iommu_device_group(struct device *dev)
 {
-       struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+       struct mtk_iommu_data *c_data = dev_iommu_priv_get(dev), *data;
+       struct list_head *hw_list = c_data->hw_list;
        struct iommu_group *group;
-       int domid;
+       int groupid;
 
+       data = mtk_iommu_get_frst_data(hw_list);
        if (!data)
                return ERR_PTR(-ENODEV);
 
-       domid = mtk_iommu_get_domain_id(dev, data->plat_data);
-       if (domid < 0)
-               return ERR_PTR(domid);
+       groupid = mtk_iommu_get_group_id(dev, data->plat_data);
+       if (groupid < 0)
+               return ERR_PTR(groupid);
 
-       group = data->m4u_group[domid];
+       mutex_lock(&data->mutex);
+       group = data->m4u_group[groupid];
        if (!group) {
                group = iommu_group_alloc();
                if (!IS_ERR(group))
-                       data->m4u_group[domid] = group;
+                       data->m4u_group[groupid] = group;
        } else {
                iommu_group_ref_get(group);
        }
+       mutex_unlock(&data->mutex);
        return group;
 }
 
@@ -656,14 +901,14 @@ static void mtk_iommu_get_resv_regions(struct device *dev,
                                       struct list_head *head)
 {
        struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
-       unsigned int domid = mtk_iommu_get_domain_id(dev, data->plat_data), i;
+       unsigned int regionid = mtk_iommu_get_iova_region_id(dev, data->plat_data), i;
        const struct mtk_iommu_iova_region *resv, *curdom;
        struct iommu_resv_region *region;
        int prot = IOMMU_WRITE | IOMMU_READ;
 
-       if ((int)domid < 0)
+       if ((int)regionid < 0)
                return;
-       curdom = data->plat_data->iova_region + domid;
+       curdom = data->plat_data->iova_region + regionid;
        for (i = 0; i < data->plat_data->iova_region_nr; i++) {
                resv = data->plat_data->iova_region + i;
 
@@ -704,42 +949,24 @@ static const struct iommu_ops mtk_iommu_ops = {
        }
 };
 
-static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
+static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int bankid)
 {
+       const struct mtk_iommu_bank_data *bankx = &data->bank[bankid];
+       const struct mtk_iommu_bank_data *bank0 = &data->bank[0];
        u32 regval;
 
+       /*
+        * Global control settings are in bank0. These global registers may be
+        * re-initialized here since it is not certain that bank0 has any consumer.
+        */
        if (data->plat_data->m4u_plat == M4U_MT8173) {
                regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
                         F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
        } else {
-               regval = readl_relaxed(data->base + REG_MMU_CTRL_REG);
+               regval = readl_relaxed(bank0->base + REG_MMU_CTRL_REG);
                regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR;
        }
-       writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
-
-       regval = F_L2_MULIT_HIT_EN |
-               F_TABLE_WALK_FAULT_INT_EN |
-               F_PREETCH_FIFO_OVERFLOW_INT_EN |
-               F_MISS_FIFO_OVERFLOW_INT_EN |
-               F_PREFETCH_FIFO_ERR_INT_EN |
-               F_MISS_FIFO_ERR_INT_EN;
-       writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
-
-       regval = F_INT_TRANSLATION_FAULT |
-               F_INT_MAIN_MULTI_HIT_FAULT |
-               F_INT_INVALID_PA_FAULT |
-               F_INT_ENTRY_REPLACEMENT_FAULT |
-               F_INT_TLB_MISS_FAULT |
-               F_INT_MISS_TRANSACTION_FIFO_FAULT |
-               F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
-       writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
-
-       if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR))
-               regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
-       else
-               regval = lower_32_bits(data->protect_base) |
-                        upper_32_bits(data->protect_base);
-       writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
+       writel_relaxed(regval, bank0->base + REG_MMU_CTRL_REG);
 
        if (data->enable_4GB &&
            MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
@@ -748,31 +975,61 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
                 * 0x1_0000_0000 to 0x1_ffff_ffff. here record bit[32:30].
                 */
                regval = F_MMU_VLD_PA_RNG(7, 4);
-               writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
+               writel_relaxed(regval, bank0->base + REG_MMU_VLD_PA_RNG);
        }
-       writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
+       if (MTK_IOMMU_HAS_FLAG(data->plat_data, DCM_DISABLE))
+               writel_relaxed(F_MMU_DCM, bank0->base + REG_MMU_DCM_DIS);
+       else
+               writel_relaxed(0, bank0->base + REG_MMU_DCM_DIS);
+
        if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
                /* write command throttling mode */
-               regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL);
+               regval = readl_relaxed(bank0->base + REG_MMU_WR_LEN_CTRL);
                regval &= ~F_MMU_WR_THROT_DIS_MASK;
-               writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL);
+               writel_relaxed(regval, bank0->base + REG_MMU_WR_LEN_CTRL);
        }
 
        if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
                /* The register is called STANDARD_AXI_MODE in this case */
                regval = 0;
        } else {
-               regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL);
-               regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
+               regval = readl_relaxed(bank0->base + REG_MMU_MISC_CTRL);
+               if (!MTK_IOMMU_HAS_FLAG(data->plat_data, STD_AXI_MODE))
+                       regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
                if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
                        regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
        }
-       writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL);
+       writel_relaxed(regval, bank0->base + REG_MMU_MISC_CTRL);
 
-       if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
-                            dev_name(data->dev), (void *)data)) {
-               writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
-               dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
+       /* Independent settings for each bank */
+       regval = F_L2_MULIT_HIT_EN |
+               F_TABLE_WALK_FAULT_INT_EN |
+               F_PREETCH_FIFO_OVERFLOW_INT_EN |
+               F_MISS_FIFO_OVERFLOW_INT_EN |
+               F_PREFETCH_FIFO_ERR_INT_EN |
+               F_MISS_FIFO_ERR_INT_EN;
+       writel_relaxed(regval, bankx->base + REG_MMU_INT_CONTROL0);
+
+       regval = F_INT_TRANSLATION_FAULT |
+               F_INT_MAIN_MULTI_HIT_FAULT |
+               F_INT_INVALID_PA_FAULT |
+               F_INT_ENTRY_REPLACEMENT_FAULT |
+               F_INT_TLB_MISS_FAULT |
+               F_INT_MISS_TRANSACTION_FIFO_FAULT |
+               F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
+       writel_relaxed(regval, bankx->base + REG_MMU_INT_MAIN_CONTROL);
+
+       if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR))
+               regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
+       else
+               regval = lower_32_bits(data->protect_base) |
+                        upper_32_bits(data->protect_base);
+       writel_relaxed(regval, bankx->base + REG_MMU_IVRP_PADDR);
+
+       if (devm_request_irq(bankx->parent_dev, bankx->irq, mtk_iommu_isr, 0,
+                            dev_name(bankx->parent_dev), (void *)bankx)) {
+               writel_relaxed(0, bankx->base + REG_MMU_PT_BASE_ADDR);
+               dev_err(bankx->parent_dev, "Failed @ IRQ-%d Request\n", bankx->irq);
                return -ENODEV;
        }
 
@@ -784,21 +1041,91 @@ static const struct component_master_ops mtk_iommu_com_ops = {
        .unbind         = mtk_iommu_unbind,
 };
 
+static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **match,
+                                 struct mtk_iommu_data *data)
+{
+       struct device_node *larbnode, *smicomm_node, *smi_subcomm_node;
+       struct platform_device *plarbdev;
+       struct device_link *link;
+       int i, larb_nr, ret;
+
+       larb_nr = of_count_phandle_with_args(dev->of_node, "mediatek,larbs", NULL);
+       if (larb_nr < 0)
+               return larb_nr;
+
+       for (i = 0; i < larb_nr; i++) {
+               u32 id;
+
+               larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
+               if (!larbnode)
+                       return -EINVAL;
+
+               if (!of_device_is_available(larbnode)) {
+                       of_node_put(larbnode);
+                       continue;
+               }
+
+               ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
+               if (ret)/* The id is consecutive if this property is absent */
+                       id = i;
+
+               plarbdev = of_find_device_by_node(larbnode);
+               if (!plarbdev) {
+                       of_node_put(larbnode);
+                       return -ENODEV;
+               }
+               if (!plarbdev->dev.driver) {
+                       of_node_put(larbnode);
+                       return -EPROBE_DEFER;
+               }
+               data->larb_imu[id].dev = &plarbdev->dev;
+
+               component_match_add_release(dev, match, component_release_of,
+                                           component_compare_of, larbnode);
+       }
+
+       /* Get smi-(sub)-common dev from the last larb. */
+       smi_subcomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0);
+       if (!smi_subcomm_node)
+               return -EINVAL;
+
+       /*
+        * There may be two levels of smi-common. The node is a smi-sub-common
+        * if it has a new mediatek,smi property; otherwise it is the smi-common.
+        */
+       smicomm_node = of_parse_phandle(smi_subcomm_node, "mediatek,smi", 0);
+       if (smicomm_node)
+               of_node_put(smi_subcomm_node);
+       else
+               smicomm_node = smi_subcomm_node;
+
+       plarbdev = of_find_device_by_node(smicomm_node);
+       of_node_put(smicomm_node);
+       data->smicomm_dev = &plarbdev->dev;
+
+       link = device_link_add(data->smicomm_dev, dev,
+                              DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+       if (!link) {
+               dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev));
+               return -EINVAL;
+       }
+       return 0;
+}
+
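For reference, a device-tree topology that mtk_iommu_mm_dts_parse() can walk might look roughly like the sketch below. Only the mediatek,larbs, mediatek,larb-id and mediatek,smi properties are taken from the code above; the node names, labels and unit addresses are illustrative placeholders, and compatibles are omitted.

    iommu_mm: iommu@14018000 {
            /* One phandle per local arbiter; the index doubles as the larb id
             * when a larb node has no mediatek,larb-id property.
             */
            mediatek,larbs = <&larb0 &larb1>;
    };

    larb1: larb@15002000 {
            mediatek,larb-id = <1>;
            mediatek,smi = <&smi_sub_common>;       /* first level */
    };

    smi_sub_common: smi@15010000 {
            /* has another mediatek,smi property, so this is smi-sub-common */
            mediatek,smi = <&smi_common>;
    };

    smi_common: smi@14022000 {
            /* no mediatek,smi property here: the parser treats it as smi-common */
    };

The parser takes the mediatek,smi phandle of the last larb, follows it at most one more level, and then links the resulting smi-common device to the IOMMU with device_link_add().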
 static int mtk_iommu_probe(struct platform_device *pdev)
 {
        struct mtk_iommu_data   *data;
        struct device           *dev = &pdev->dev;
-       struct device_node      *larbnode, *smicomm_node;
-       struct platform_device  *plarbdev;
-       struct device_link      *link;
        struct resource         *res;
        resource_size_t         ioaddr;
        struct component_match  *match = NULL;
        struct regmap           *infracfg;
        void                    *protect;
-       int                     i, larb_nr, ret;
+       int                     ret, banks_num, i = 0;
        u32                     val;
        char                    *p;
+       struct mtk_iommu_bank_data *bank;
+       void __iomem            *base;
 
        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
@@ -835,15 +1162,36 @@ static int mtk_iommu_probe(struct platform_device *pdev)
                data->enable_4GB = !!(val & F_DDR_4GB_SUPPORT_EN);
        }
 
+       banks_num = data->plat_data->banks_num;
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       data->base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(data->base))
-               return PTR_ERR(data->base);
+       if (resource_size(res) < banks_num * MTK_IOMMU_BANK_SZ) {
+               dev_err(dev, "banknr %d. res %pR is not enough.\n", banks_num, res);
+               return -EINVAL;
+       }
+       base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
        ioaddr = res->start;
 
-       data->irq = platform_get_irq(pdev, 0);
-       if (data->irq < 0)
-               return data->irq;
+       data->bank = devm_kmalloc(dev, banks_num * sizeof(*data->bank), GFP_KERNEL);
+       if (!data->bank)
+               return -ENOMEM;
+
+       do {
+               if (!data->plat_data->banks_enable[i])
+                       continue;
+               bank = &data->bank[i];
+               bank->id = i;
+               bank->base = base + i * MTK_IOMMU_BANK_SZ;
+               bank->m4u_dom = NULL;
+
+               bank->irq = platform_get_irq(pdev, i);
+               if (bank->irq < 0)
+                       return bank->irq;
+               bank->parent_dev = dev;
+               bank->parent_data = data;
+               spin_lock_init(&bank->tlb_lock);
+       } while (++i < banks_num);
 
        if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
                data->bclk = devm_clk_get(dev, "bclk");
@@ -851,62 +1199,27 @@ static int mtk_iommu_probe(struct platform_device *pdev)
                        return PTR_ERR(data->bclk);
        }
 
-       larb_nr = of_count_phandle_with_args(dev->of_node,
-                                            "mediatek,larbs", NULL);
-       if (larb_nr < 0)
-               return larb_nr;
-
-       for (i = 0; i < larb_nr; i++) {
-               u32 id;
-
-               larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
-               if (!larbnode)
-                       return -EINVAL;
-
-               if (!of_device_is_available(larbnode)) {
-                       of_node_put(larbnode);
-                       continue;
-               }
-
-               ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
-               if (ret)/* The id is consecutive if there is no this property */
-                       id = i;
+       pm_runtime_enable(dev);
 
-               plarbdev = of_find_device_by_node(larbnode);
-               if (!plarbdev) {
-                       of_node_put(larbnode);
-                       return -ENODEV;
+       if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
+               ret = mtk_iommu_mm_dts_parse(dev, &match, data);
+               if (ret) {
+                       dev_err(dev, "mm dts parse fail(%d).", ret);
+                       goto out_runtime_disable;
                }
-               if (!plarbdev->dev.driver) {
-                       of_node_put(larbnode);
-                       return -EPROBE_DEFER;
+       } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
+                  data->plat_data->pericfg_comp_str) {
+               infracfg = syscon_regmap_lookup_by_compatible(data->plat_data->pericfg_comp_str);
+               if (IS_ERR(infracfg)) {
+                       ret = PTR_ERR(infracfg);
+                       goto out_runtime_disable;
                }
-               data->larb_imu[id].dev = &plarbdev->dev;
-
-               component_match_add_release(dev, &match, component_release_of,
-                                           component_compare_of, larbnode);
-       }
-
-       /* Get smi-common dev from the last larb. */
-       smicomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0);
-       if (!smicomm_node)
-               return -EINVAL;
-
-       plarbdev = of_find_device_by_node(smicomm_node);
-       of_node_put(smicomm_node);
-       data->smicomm_dev = &plarbdev->dev;
-
-       pm_runtime_enable(dev);
 
-       link = device_link_add(data->smicomm_dev, dev,
-                       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
-       if (!link) {
-               dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev));
-               ret = -EINVAL;
-               goto out_runtime_disable;
+               data->pericfg = infracfg;
        }
 
        platform_set_drvdata(pdev, data);
+       mutex_init(&data->mutex);
 
        ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
                                     "mtk-iommu.%pa", &ioaddr);
@@ -917,8 +1230,14 @@ static int mtk_iommu_probe(struct platform_device *pdev)
        if (ret)
                goto out_sysfs_remove;
 
-       spin_lock_init(&data->tlb_lock);
-       list_add_tail(&data->list, &m4ulist);
+       if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) {
+               list_add_tail(&data->list, data->plat_data->hw_list);
+               data->hw_list = data->plat_data->hw_list;
+       } else {
+               INIT_LIST_HEAD(&data->hw_list_head);
+               list_add_tail(&data->list, &data->hw_list_head);
+               data->hw_list = &data->hw_list_head;
+       }
 
        if (!iommu_present(&platform_bus_type)) {
                ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
@@ -926,9 +1245,20 @@ static int mtk_iommu_probe(struct platform_device *pdev)
                        goto out_list_del;
        }
 
-       ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
-       if (ret)
-               goto out_bus_set_null;
+       if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
+               ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+               if (ret)
+                       goto out_bus_set_null;
+       } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
+                  MTK_IOMMU_HAS_FLAG(data->plat_data, IFA_IOMMU_PCIE_SUPPORT)) {
+#ifdef CONFIG_PCI
+               if (!iommu_present(&pci_bus_type)) {
+                       ret = bus_set_iommu(&pci_bus_type, &mtk_iommu_ops);
+                       if (ret) /* PCIe fail don't affect platform_bus. */
+                               goto out_list_del;
+               }
+#endif
+       }
        return ret;
 
 out_bus_set_null:
@@ -939,7 +1269,8 @@ out_list_del:
 out_sysfs_remove:
        iommu_device_sysfs_remove(&data->iommu);
 out_link_remove:
-       device_link_remove(data->smicomm_dev, dev);
+       if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
+               device_link_remove(data->smicomm_dev, dev);
 out_runtime_disable:
        pm_runtime_disable(dev);
        return ret;
@@ -948,18 +1279,30 @@ out_runtime_disable:
 static int mtk_iommu_remove(struct platform_device *pdev)
 {
        struct mtk_iommu_data *data = platform_get_drvdata(pdev);
+       struct mtk_iommu_bank_data *bank;
+       int i;
 
        iommu_device_sysfs_remove(&data->iommu);
        iommu_device_unregister(&data->iommu);
 
-       if (iommu_present(&platform_bus_type))
-               bus_set_iommu(&platform_bus_type, NULL);
+       list_del(&data->list);
 
-       clk_disable_unprepare(data->bclk);
-       device_link_remove(data->smicomm_dev, &pdev->dev);
+       if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
+               device_link_remove(data->smicomm_dev, &pdev->dev);
+               component_master_del(&pdev->dev, &mtk_iommu_com_ops);
+       } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
+                  MTK_IOMMU_HAS_FLAG(data->plat_data, IFA_IOMMU_PCIE_SUPPORT)) {
+#ifdef CONFIG_PCI
+               bus_set_iommu(&pci_bus_type, NULL);
+#endif
+       }
        pm_runtime_disable(&pdev->dev);
-       devm_free_irq(&pdev->dev, data->irq, data);
-       component_master_del(&pdev->dev, &mtk_iommu_com_ops);
+       for (i = 0; i < data->plat_data->banks_num; i++) {
+               bank = &data->bank[i];
+               if (!bank->m4u_dom)
+                       continue;
+               devm_free_irq(&pdev->dev, bank->irq, bank);
+       }
        return 0;
 }
 
@@ -967,16 +1310,23 @@ static int __maybe_unused mtk_iommu_runtime_suspend(struct device *dev)
 {
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
-       void __iomem *base = data->base;
+       void __iomem *base;
+       int i = 0;
 
+       base = data->bank[i].base;
        reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL);
        reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
        reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
        reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
-       reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
-       reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
-       reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
        reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
+       do {
+               if (!data->plat_data->banks_enable[i])
+                       continue;
+               base = data->bank[i].base;
+               reg->int_control[i] = readl_relaxed(base + REG_MMU_INT_CONTROL0);
+               reg->int_main_control[i] = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
+               reg->ivrp_paddr[i] = readl_relaxed(base + REG_MMU_IVRP_PADDR);
+       } while (++i < data->plat_data->banks_num);
        clk_disable_unprepare(data->bclk);
        return 0;
 }
@@ -985,9 +1335,9 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
 {
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
-       struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
-       void __iomem *base = data->base;
-       int ret;
+       struct mtk_iommu_domain *m4u_dom;
+       void __iomem *base;
+       int ret, i = 0;
 
        ret = clk_prepare_enable(data->bclk);
        if (ret) {
@@ -999,18 +1349,26 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
         * Upon first resume, only enable the clk and return, since the values of the
         * registers are not yet set.
         */
-       if (!m4u_dom)
+       if (!reg->wr_len_ctrl)
                return 0;
 
+       base = data->bank[i].base;
        writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
        writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
        writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
        writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
-       writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
-       writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
-       writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
        writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
-       writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, base + REG_MMU_PT_BASE_ADDR);
+       do {
+               m4u_dom = data->bank[i].m4u_dom;
+               if (!data->plat_data->banks_enable[i] || !m4u_dom)
+                       continue;
+               base = data->bank[i].base;
+               writel_relaxed(reg->int_control[i], base + REG_MMU_INT_CONTROL0);
+               writel_relaxed(reg->int_main_control[i], base + REG_MMU_INT_MAIN_CONTROL);
+               writel_relaxed(reg->ivrp_paddr[i], base + REG_MMU_IVRP_PADDR);
+               writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
+                      base + REG_MMU_PT_BASE_ADDR);
+       } while (++i < data->plat_data->banks_num);
 
        /*
         * Users may allocate dma buffer before they call pm_runtime_get,
@@ -1029,17 +1387,24 @@ static const struct dev_pm_ops mtk_iommu_pm_ops = {
 
 static const struct mtk_iommu_plat_data mt2712_data = {
        .m4u_plat     = M4U_MT2712,
-       .flags        = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG,
+       .flags        = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG | SHARE_PGTABLE |
+                       MTK_IOMMU_TYPE_MM,
+       .hw_list      = &m4ulist,
        .inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
        .iova_region  = single_domain,
+       .banks_num    = 1,
+       .banks_enable = {true},
        .iova_region_nr = ARRAY_SIZE(single_domain),
        .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
 };
 
 static const struct mtk_iommu_plat_data mt6779_data = {
        .m4u_plat      = M4U_MT6779,
-       .flags         = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN,
+       .flags         = HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | WR_THROT_EN |
+                        MTK_IOMMU_TYPE_MM,
        .inv_sel_reg   = REG_MMU_INV_SEL_GEN2,
+       .banks_num    = 1,
+       .banks_enable = {true},
        .iova_region   = single_domain,
        .iova_region_nr = ARRAY_SIZE(single_domain),
        .larbid_remap  = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
@@ -1047,8 +1412,10 @@ static const struct mtk_iommu_plat_data mt6779_data = {
 
 static const struct mtk_iommu_plat_data mt8167_data = {
        .m4u_plat     = M4U_MT8167,
-       .flags        = RESET_AXI | HAS_LEGACY_IVRP_PADDR,
+       .flags        = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
        .inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
+       .banks_num    = 1,
+       .banks_enable = {true},
        .iova_region  = single_domain,
        .iova_region_nr = ARRAY_SIZE(single_domain),
        .larbid_remap = {{0}, {1}, {2}}, /* Linear mapping. */
@@ -1057,8 +1424,10 @@ static const struct mtk_iommu_plat_data mt8167_data = {
 static const struct mtk_iommu_plat_data mt8173_data = {
        .m4u_plat     = M4U_MT8173,
        .flags        = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
-                       HAS_LEGACY_IVRP_PADDR,
+                       HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
        .inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
+       .banks_num    = 1,
+       .banks_enable = {true},
        .iova_region  = single_domain,
        .iova_region_nr = ARRAY_SIZE(single_domain),
        .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
@@ -1066,31 +1435,100 @@ static const struct mtk_iommu_plat_data mt8173_data = {
 
 static const struct mtk_iommu_plat_data mt8183_data = {
        .m4u_plat     = M4U_MT8183,
-       .flags        = RESET_AXI,
+       .flags        = RESET_AXI | MTK_IOMMU_TYPE_MM,
        .inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
+       .banks_num    = 1,
+       .banks_enable = {true},
        .iova_region  = single_domain,
        .iova_region_nr = ARRAY_SIZE(single_domain),
        .larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
 };
 
+static const struct mtk_iommu_plat_data mt8186_data_mm = {
+       .m4u_plat       = M4U_MT8186,
+       .flags          = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
+                         WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM,
+       .larbid_remap   = {{0}, {1, MTK_INVALID_LARBID, 8}, {4}, {7}, {2}, {9, 11, 19, 20},
+                          {MTK_INVALID_LARBID, 14, 16},
+                          {MTK_INVALID_LARBID, 13, MTK_INVALID_LARBID, 17}},
+       .inv_sel_reg    = REG_MMU_INV_SEL_GEN2,
+       .banks_num      = 1,
+       .banks_enable   = {true},
+       .iova_region    = mt8192_multi_dom,
+       .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+};
+
 static const struct mtk_iommu_plat_data mt8192_data = {
        .m4u_plat       = M4U_MT8192,
-       .flags          = HAS_BCLK | HAS_SUB_COMM | OUT_ORDER_WR_EN |
-                         WR_THROT_EN | IOVA_34_EN,
+       .flags          = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
+                         WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM,
        .inv_sel_reg    = REG_MMU_INV_SEL_GEN2,
+       .banks_num      = 1,
+       .banks_enable   = {true},
        .iova_region    = mt8192_multi_dom,
        .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
        .larbid_remap   = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20},
                           {0, 14, 16}, {0, 13, 18, 17}},
 };
 
+static const struct mtk_iommu_plat_data mt8195_data_infra = {
+       .m4u_plat         = M4U_MT8195,
+       .flags            = WR_THROT_EN | DCM_DISABLE | STD_AXI_MODE | PM_CLK_AO |
+                           MTK_IOMMU_TYPE_INFRA | IFA_IOMMU_PCIE_SUPPORT,
+       .pericfg_comp_str = "mediatek,mt8195-pericfg_ao",
+       .inv_sel_reg      = REG_MMU_INV_SEL_GEN2,
+       .banks_num        = 5,
+       .banks_enable     = {true, false, false, false, true},
+       .banks_portmsk    = {[0] = GENMASK(19, 16),     /* PCIe */
+                            [4] = GENMASK(31, 20),     /* USB */
+                           },
+       .iova_region      = single_domain,
+       .iova_region_nr   = ARRAY_SIZE(single_domain),
+};
+
+static const struct mtk_iommu_plat_data mt8195_data_vdo = {
+       .m4u_plat       = M4U_MT8195,
+       .flags          = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
+                         WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM,
+       .hw_list        = &m4ulist,
+       .inv_sel_reg    = REG_MMU_INV_SEL_GEN2,
+       .banks_num      = 1,
+       .banks_enable   = {true},
+       .iova_region    = mt8192_multi_dom,
+       .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+       .larbid_remap   = {{2, 0}, {21}, {24}, {7}, {19}, {9, 10, 11},
+                          {13, 17, 15/* 17b */, 25}, {5}},
+};
+
+static const struct mtk_iommu_plat_data mt8195_data_vpp = {
+       .m4u_plat       = M4U_MT8195,
+       .flags          = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN |
+                         WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM,
+       .hw_list        = &m4ulist,
+       .inv_sel_reg    = REG_MMU_INV_SEL_GEN2,
+       .banks_num      = 1,
+       .banks_enable   = {true},
+       .iova_region    = mt8192_multi_dom,
+       .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+       .larbid_remap   = {{1}, {3},
+                          {22, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 23},
+                          {8}, {20}, {12},
+                          /* 16: 16a; 29: 16b; 30: CCUtop0; 31: CCUtop1 */
+                          {14, 16, 29, 26, 30, 31, 18},
+                          {4, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 6}},
+};
+
 static const struct of_device_id mtk_iommu_of_ids[] = {
        { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
        { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
        { .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
        { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
        { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
+       { .compatible = "mediatek,mt8186-iommu-mm",    .data = &mt8186_data_mm}, /* mm: m4u */
        { .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data},
+       { .compatible = "mediatek,mt8195-iommu-infra", .data = &mt8195_data_infra},
+       { .compatible = "mediatek,mt8195-iommu-vdo",   .data = &mt8195_data_vdo},
+       { .compatible = "mediatek,mt8195-iommu-vpp",   .data = &mt8195_data_vpp},
        {}
 };
 
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
deleted file mode 100644 (file)
index b742432..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015-2016 MediaTek Inc.
- * Author: Honghui Zhang <honghui.zhang@mediatek.com>
- */
-
-#ifndef _MTK_IOMMU_H_
-#define _MTK_IOMMU_H_
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/io-pgtable.h>
-#include <linux/iommu.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
-#include <soc/mediatek/smi.h>
-#include <dt-bindings/memory/mtk-memory-port.h>
-
-#define MTK_LARB_COM_MAX       8
-#define MTK_LARB_SUBCOM_MAX    4
-
-#define MTK_IOMMU_GROUP_MAX    8
-
-struct mtk_iommu_suspend_reg {
-       union {
-               u32                     standard_axi_mode;/* v1 */
-               u32                     misc_ctrl;/* v2 */
-       };
-       u32                             dcm_dis;
-       u32                             ctrl_reg;
-       u32                             int_control0;
-       u32                             int_main_control;
-       u32                             ivrp_paddr;
-       u32                             vld_pa_rng;
-       u32                             wr_len_ctrl;
-};
-
-enum mtk_iommu_plat {
-       M4U_MT2701,
-       M4U_MT2712,
-       M4U_MT6779,
-       M4U_MT8167,
-       M4U_MT8173,
-       M4U_MT8183,
-       M4U_MT8192,
-};
-
-struct mtk_iommu_iova_region;
-
-struct mtk_iommu_plat_data {
-       enum mtk_iommu_plat m4u_plat;
-       u32                 flags;
-       u32                 inv_sel_reg;
-
-       unsigned int                            iova_region_nr;
-       const struct mtk_iommu_iova_region      *iova_region;
-       unsigned char       larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX];
-};
-
-struct mtk_iommu_domain;
-
-struct mtk_iommu_data {
-       void __iomem                    *base;
-       int                             irq;
-       struct device                   *dev;
-       struct clk                      *bclk;
-       phys_addr_t                     protect_base; /* protect memory base */
-       struct mtk_iommu_suspend_reg    reg;
-       struct mtk_iommu_domain         *m4u_dom;
-       struct iommu_group              *m4u_group[MTK_IOMMU_GROUP_MAX];
-       bool                            enable_4GB;
-       spinlock_t                      tlb_lock; /* lock for tlb range flush */
-
-       struct iommu_device             iommu;
-       const struct mtk_iommu_plat_data *plat_data;
-       struct device                   *smicomm_dev;
-
-       struct dma_iommu_mapping        *mapping; /* For mtk_iommu_v1.c */
-
-       struct list_head                list;
-       struct mtk_smi_larb_iommu       larb_imu[MTK_LARB_NR_MAX];
-};
-
-static inline int mtk_iommu_bind(struct device *dev)
-{
-       struct mtk_iommu_data *data = dev_get_drvdata(dev);
-
-       return component_bind_all(dev, &data->larb_imu);
-}
-
-static inline void mtk_iommu_unbind(struct device *dev)
-{
-       struct mtk_iommu_data *data = dev_get_drvdata(dev);
-
-       component_unbind_all(dev, &data->larb_imu);
-}
-
-#endif
index ecff800..e1cb51b 100644 (file)
@@ -7,7 +7,6 @@
  *
  * Based on driver/iommu/mtk_iommu.c
  */
-#include <linux/memblock.h>
 #include <linux/bug.h>
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/spinlock.h>
 #include <asm/barrier.h>
 #include <asm/dma-iommu.h>
-#include <linux/init.h>
+#include <dt-bindings/memory/mtk-memory-port.h>
 #include <dt-bindings/memory/mt2701-larb-port.h>
 #include <soc/mediatek/smi.h>
-#include "mtk_iommu.h"
 
 #define REG_MMU_PT_BASE_ADDR                   0x000
 
@@ -80,6 +78,7 @@
 /* MTK generation one iommu HW only supports 4K size mapping */
 #define MT2701_IOMMU_PAGE_SHIFT                        12
 #define MT2701_IOMMU_PAGE_SIZE                 (1UL << MT2701_IOMMU_PAGE_SHIFT)
+#define MT2701_LARB_NR_MAX                     3
 
 /*
  * MTK m4u support 4GB iova address space, and only support 4K page
  */
 #define M2701_IOMMU_PGT_SIZE                   SZ_4M
 
-struct mtk_iommu_domain {
+struct mtk_iommu_v1_suspend_reg {
+       u32                     standard_axi_mode;
+       u32                     dcm_dis;
+       u32                     ctrl_reg;
+       u32                     int_control0;
+};
+
+struct mtk_iommu_v1_data {
+       void __iomem                    *base;
+       int                             irq;
+       struct device                   *dev;
+       struct clk                      *bclk;
+       phys_addr_t                     protect_base; /* protect memory base */
+       struct mtk_iommu_v1_domain      *m4u_dom;
+
+       struct iommu_device             iommu;
+       struct dma_iommu_mapping        *mapping;
+       struct mtk_smi_larb_iommu       larb_imu[MTK_LARB_NR_MAX];
+
+       struct mtk_iommu_v1_suspend_reg reg;
+};
+
+struct mtk_iommu_v1_domain {
        spinlock_t                      pgtlock; /* lock for page table */
        struct iommu_domain             domain;
        u32                             *pgt_va;
        dma_addr_t                      pgt_pa;
-       struct mtk_iommu_data           *data;
+       struct mtk_iommu_v1_data        *data;
 };
 
-static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
+static int mtk_iommu_v1_bind(struct device *dev)
+{
+       struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
+
+       return component_bind_all(dev, &data->larb_imu);
+}
+
+static void mtk_iommu_v1_unbind(struct device *dev)
+{
+       struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
+
+       component_unbind_all(dev, &data->larb_imu);
+}
+
+static struct mtk_iommu_v1_domain *to_mtk_domain(struct iommu_domain *dom)
 {
-       return container_of(dom, struct mtk_iommu_domain, domain);
+       return container_of(dom, struct mtk_iommu_v1_domain, domain);
 }
 
 static const int mt2701_m4u_in_larb[] = {
@@ -123,7 +158,7 @@ static inline int mt2701_m4u_to_port(int id)
        return id - mt2701_m4u_in_larb[larb];
 }
 
-static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
+static void mtk_iommu_v1_tlb_flush_all(struct mtk_iommu_v1_data *data)
 {
        writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
                        data->base + REG_MMU_INV_SEL);
@@ -131,8 +166,8 @@ static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
        wmb(); /* Make sure the tlb flush all done */
 }
 
-static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
-                               unsigned long iova, size_t size)
+static void mtk_iommu_v1_tlb_flush_range(struct mtk_iommu_v1_data *data,
+                                        unsigned long iova, size_t size)
 {
        int ret;
        u32 tmp;
@@ -150,16 +185,16 @@ static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
        if (ret) {
                dev_warn(data->dev,
                         "Partial TLB flush timed out, falling back to full flush\n");
-               mtk_iommu_tlb_flush_all(data);
+               mtk_iommu_v1_tlb_flush_all(data);
        }
        /* Clear the CPE status */
        writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
 }
 
-static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
+static irqreturn_t mtk_iommu_v1_isr(int irq, void *dev_id)
 {
-       struct mtk_iommu_data *data = dev_id;
-       struct mtk_iommu_domain *dom = data->m4u_dom;
+       struct mtk_iommu_v1_data *data = dev_id;
+       struct mtk_iommu_v1_domain *dom = data->m4u_dom;
        u32 int_state, regval, fault_iova, fault_pa;
        unsigned int fault_larb, fault_port;
 
@@ -189,13 +224,13 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
        regval |= F_INT_CLR_BIT;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);
 
-       mtk_iommu_tlb_flush_all(data);
+       mtk_iommu_v1_tlb_flush_all(data);
 
        return IRQ_HANDLED;
 }
 
-static void mtk_iommu_config(struct mtk_iommu_data *data,
-                            struct device *dev, bool enable)
+static void mtk_iommu_v1_config(struct mtk_iommu_v1_data *data,
+                               struct device *dev, bool enable)
 {
        struct mtk_smi_larb_iommu    *larb_mmu;
        unsigned int                 larbid, portid;
@@ -217,9 +252,9 @@ static void mtk_iommu_config(struct mtk_iommu_data *data,
        }
 }
 
-static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
+static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data)
 {
-       struct mtk_iommu_domain *dom = data->m4u_dom;
+       struct mtk_iommu_v1_domain *dom = data->m4u_dom;
 
        spin_lock_init(&dom->pgtlock);
 
@@ -235,9 +270,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
        return 0;
 }
 
-static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *mtk_iommu_v1_domain_alloc(unsigned type)
 {
-       struct mtk_iommu_domain *dom;
+       struct mtk_iommu_v1_domain *dom;
 
        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;
@@ -249,21 +284,20 @@ static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
        return &dom->domain;
 }
 
-static void mtk_iommu_domain_free(struct iommu_domain *domain)
+static void mtk_iommu_v1_domain_free(struct iommu_domain *domain)
 {
-       struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-       struct mtk_iommu_data *data = dom->data;
+       struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
+       struct mtk_iommu_v1_data *data = dom->data;
 
        dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
                        dom->pgt_va, dom->pgt_pa);
        kfree(to_mtk_domain(domain));
 }
 
-static int mtk_iommu_attach_device(struct iommu_domain *domain,
-                                  struct device *dev)
+static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device *dev)
 {
-       struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
-       struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+       struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
+       struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
        struct dma_iommu_mapping *mtk_mapping;
        int ret;
 
@@ -274,29 +308,28 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
 
        if (!data->m4u_dom) {
                data->m4u_dom = dom;
-               ret = mtk_iommu_domain_finalise(data);
+               ret = mtk_iommu_v1_domain_finalise(data);
                if (ret) {
                        data->m4u_dom = NULL;
                        return ret;
                }
        }
 
-       mtk_iommu_config(data, dev, true);
+       mtk_iommu_v1_config(data, dev, true);
        return 0;
 }
 
-static void mtk_iommu_detach_device(struct iommu_domain *domain,
-                                   struct device *dev)
+static void mtk_iommu_v1_detach_device(struct iommu_domain *domain, struct device *dev)
 {
-       struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
+       struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
 
-       mtk_iommu_config(data, dev, false);
+       mtk_iommu_v1_config(data, dev, false);
 }
 
-static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
-                        phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova,
+                           phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
-       struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+       struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
        unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
        unsigned long flags;
        unsigned int i;
@@ -317,16 +350,15 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
 
        spin_unlock_irqrestore(&dom->pgtlock, flags);
 
-       mtk_iommu_tlb_flush_range(dom->data, iova, size);
+       mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);
 
        return map_size == size ? 0 : -EEXIST;
 }
 
-static size_t mtk_iommu_unmap(struct iommu_domain *domain,
-                             unsigned long iova, size_t size,
-                             struct iommu_iotlb_gather *gather)
+static size_t mtk_iommu_v1_unmap(struct iommu_domain *domain, unsigned long iova,
+                                size_t size, struct iommu_iotlb_gather *gather)
 {
-       struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+       struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
        unsigned long flags;
        u32 *pgt_base_iova = dom->pgt_va + (iova  >> MT2701_IOMMU_PAGE_SHIFT);
        unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
@@ -335,15 +367,14 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
        memset(pgt_base_iova, 0, page_num * sizeof(u32));
        spin_unlock_irqrestore(&dom->pgtlock, flags);
 
-       mtk_iommu_tlb_flush_range(dom->data, iova, size);
+       mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);
 
        return size;
 }
 
-static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
-                                         dma_addr_t iova)
+static phys_addr_t mtk_iommu_v1_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
 {
-       struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+       struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
        unsigned long flags;
        phys_addr_t pa;
 
@@ -355,17 +386,16 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
        return pa;
 }
 
-static const struct iommu_ops mtk_iommu_ops;
+static const struct iommu_ops mtk_iommu_v1_ops;
 
 /*
  * MTK generation one iommu HW only supports one iommu domain, and all the
  * clients share the same iova address space.
  */
-static int mtk_iommu_create_mapping(struct device *dev,
-                                   struct of_phandle_args *args)
+static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_args *args)
 {
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-       struct mtk_iommu_data *data;
+       struct mtk_iommu_v1_data *data;
        struct platform_device *m4updev;
        struct dma_iommu_mapping *mtk_mapping;
        int ret;
@@ -377,11 +407,11 @@ static int mtk_iommu_create_mapping(struct device *dev,
        }
 
        if (!fwspec) {
-               ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_ops);
+               ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_v1_ops);
                if (ret)
                        return ret;
                fwspec = dev_iommu_fwspec_get(dev);
-       } else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_ops) {
+       } else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_v1_ops) {
                return -EINVAL;
        }
 
@@ -413,16 +443,16 @@ static int mtk_iommu_create_mapping(struct device *dev,
        return 0;
 }
 
-static int mtk_iommu_def_domain_type(struct device *dev)
+static int mtk_iommu_v1_def_domain_type(struct device *dev)
 {
        return IOMMU_DOMAIN_UNMANAGED;
 }
 
-static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
+static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
 {
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct of_phandle_args iommu_spec;
-       struct mtk_iommu_data *data;
+       struct mtk_iommu_v1_data *data;
        int err, idx = 0, larbid, larbidx;
        struct device_link *link;
        struct device *larbdev;
@@ -440,7 +470,7 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
                                           "#iommu-cells",
                                           idx, &iommu_spec)) {
 
-               err = mtk_iommu_create_mapping(dev, &iommu_spec);
+               err = mtk_iommu_v1_create_mapping(dev, &iommu_spec);
                of_node_put(iommu_spec.np);
                if (err)
                        return ERR_PTR(err);
@@ -450,13 +480,16 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
                idx++;
        }
 
-       if (!fwspec || fwspec->ops != &mtk_iommu_ops)
+       if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
                return ERR_PTR(-ENODEV); /* Not an iommu client device */
 
        data = dev_iommu_priv_get(dev);
 
        /* Link the consumer device with the smi-larb device(supplier) */
        larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
+       if (larbid >= MT2701_LARB_NR_MAX)
+               return ERR_PTR(-EINVAL);
+
        for (idx = 1; idx < fwspec->num_ids; idx++) {
                larbidx = mt2701_m4u_to_larb(fwspec->ids[idx]);
                if (larbid != larbidx) {
@@ -467,6 +500,9 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
        }
 
        larbdev = data->larb_imu[larbid].dev;
+       if (!larbdev)
+               return ERR_PTR(-EINVAL);
+
        link = device_link_add(dev, larbdev,
                               DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
        if (!link)
@@ -475,10 +511,10 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
        return &data->iommu;
 }
 
-static void mtk_iommu_probe_finalize(struct device *dev)
+static void mtk_iommu_v1_probe_finalize(struct device *dev)
 {
        struct dma_iommu_mapping *mtk_mapping;
-       struct mtk_iommu_data *data;
+       struct mtk_iommu_v1_data *data;
        int err;
 
        data        = dev_iommu_priv_get(dev);
@@ -489,14 +525,14 @@ static void mtk_iommu_probe_finalize(struct device *dev)
                dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
 }
 
-static void mtk_iommu_release_device(struct device *dev)
+static void mtk_iommu_v1_release_device(struct device *dev)
 {
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-       struct mtk_iommu_data *data;
+       struct mtk_iommu_v1_data *data;
        struct device *larbdev;
        unsigned int larbid;
 
-       if (!fwspec || fwspec->ops != &mtk_iommu_ops)
+       if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
                return;
 
        data = dev_iommu_priv_get(dev);
@@ -507,7 +543,7 @@ static void mtk_iommu_release_device(struct device *dev)
        iommu_fwspec_free(dev);
 }
 
-static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
+static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
 {
        u32 regval;
        int ret;
@@ -537,7 +573,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
 
        writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);
 
-       if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
+       if (devm_request_irq(data->dev, data->irq, mtk_iommu_v1_isr, 0,
                             dev_name(data->dev), (void *)data)) {
                writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
                clk_disable_unprepare(data->bclk);
@@ -548,39 +584,39 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
        return 0;
 }
 
-static const struct iommu_ops mtk_iommu_ops = {
-       .domain_alloc   = mtk_iommu_domain_alloc,
-       .probe_device   = mtk_iommu_probe_device,
-       .probe_finalize = mtk_iommu_probe_finalize,
-       .release_device = mtk_iommu_release_device,
-       .def_domain_type = mtk_iommu_def_domain_type,
+static const struct iommu_ops mtk_iommu_v1_ops = {
+       .domain_alloc   = mtk_iommu_v1_domain_alloc,
+       .probe_device   = mtk_iommu_v1_probe_device,
+       .probe_finalize = mtk_iommu_v1_probe_finalize,
+       .release_device = mtk_iommu_v1_release_device,
+       .def_domain_type = mtk_iommu_v1_def_domain_type,
        .device_group   = generic_device_group,
        .pgsize_bitmap  = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
        .owner          = THIS_MODULE,
        .default_domain_ops = &(const struct iommu_domain_ops) {
-               .attach_dev     = mtk_iommu_attach_device,
-               .detach_dev     = mtk_iommu_detach_device,
-               .map            = mtk_iommu_map,
-               .unmap          = mtk_iommu_unmap,
-               .iova_to_phys   = mtk_iommu_iova_to_phys,
-               .free           = mtk_iommu_domain_free,
+               .attach_dev     = mtk_iommu_v1_attach_device,
+               .detach_dev     = mtk_iommu_v1_detach_device,
+               .map            = mtk_iommu_v1_map,
+               .unmap          = mtk_iommu_v1_unmap,
+               .iova_to_phys   = mtk_iommu_v1_iova_to_phys,
+               .free           = mtk_iommu_v1_domain_free,
        }
 };
 
-static const struct of_device_id mtk_iommu_of_ids[] = {
+static const struct of_device_id mtk_iommu_v1_of_ids[] = {
        { .compatible = "mediatek,mt2701-m4u", },
        {}
 };
 
-static const struct component_master_ops mtk_iommu_com_ops = {
-       .bind           = mtk_iommu_bind,
-       .unbind         = mtk_iommu_unbind,
+static const struct component_master_ops mtk_iommu_v1_com_ops = {
+       .bind           = mtk_iommu_v1_bind,
+       .unbind         = mtk_iommu_v1_unbind,
 };
 
-static int mtk_iommu_probe(struct platform_device *pdev)
+static int mtk_iommu_v1_probe(struct platform_device *pdev)
 {
-       struct mtk_iommu_data           *data;
        struct device                   *dev = &pdev->dev;
+       struct mtk_iommu_v1_data        *data;
        struct resource                 *res;
        struct component_match          *match = NULL;
        void                            *protect;
@@ -647,7 +683,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, data);
 
-       ret = mtk_iommu_hw_init(data);
+       ret = mtk_iommu_v1_hw_init(data);
        if (ret)
                return ret;
 
@@ -656,17 +692,17 @@ static int mtk_iommu_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
+       ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev);
        if (ret)
                goto out_sysfs_remove;
 
        if (!iommu_present(&platform_bus_type)) {
-               ret = bus_set_iommu(&platform_bus_type,  &mtk_iommu_ops);
+               ret = bus_set_iommu(&platform_bus_type,  &mtk_iommu_v1_ops);
                if (ret)
                        goto out_dev_unreg;
        }
 
-       ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+       ret = component_master_add_with_match(dev, &mtk_iommu_v1_com_ops, match);
        if (ret)
                goto out_bus_set_null;
        return ret;
@@ -680,9 +716,9 @@ out_sysfs_remove:
        return ret;
 }
 
-static int mtk_iommu_remove(struct platform_device *pdev)
+static int mtk_iommu_v1_remove(struct platform_device *pdev)
 {
-       struct mtk_iommu_data *data = platform_get_drvdata(pdev);
+       struct mtk_iommu_v1_data *data = platform_get_drvdata(pdev);
 
        iommu_device_sysfs_remove(&data->iommu);
        iommu_device_unregister(&data->iommu);
@@ -692,14 +728,14 @@ static int mtk_iommu_remove(struct platform_device *pdev)
 
        clk_disable_unprepare(data->bclk);
        devm_free_irq(&pdev->dev, data->irq, data);
-       component_master_del(&pdev->dev, &mtk_iommu_com_ops);
+       component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops);
        return 0;
 }
 
-static int __maybe_unused mtk_iommu_suspend(struct device *dev)
+static int __maybe_unused mtk_iommu_v1_suspend(struct device *dev)
 {
-       struct mtk_iommu_data *data = dev_get_drvdata(dev);
-       struct mtk_iommu_suspend_reg *reg = &data->reg;
+       struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
+       struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
        void __iomem *base = data->base;
 
        reg->standard_axi_mode = readl_relaxed(base +
@@ -710,10 +746,10 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
        return 0;
 }
 
-static int __maybe_unused mtk_iommu_resume(struct device *dev)
+static int __maybe_unused mtk_iommu_v1_resume(struct device *dev)
 {
-       struct mtk_iommu_data *data = dev_get_drvdata(dev);
-       struct mtk_iommu_suspend_reg *reg = &data->reg;
+       struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
+       struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
        void __iomem *base = data->base;
 
        writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
@@ -726,20 +762,20 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
        return 0;
 }
 
-static const struct dev_pm_ops mtk_iommu_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
+static const struct dev_pm_ops mtk_iommu_v1_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_v1_suspend, mtk_iommu_v1_resume)
 };
 
-static struct platform_driver mtk_iommu_driver = {
-       .probe  = mtk_iommu_probe,
-       .remove = mtk_iommu_remove,
+static struct platform_driver mtk_iommu_v1_driver = {
+       .probe  = mtk_iommu_v1_probe,
+       .remove = mtk_iommu_v1_remove,
        .driver = {
                .name = "mtk-iommu-v1",
-               .of_match_table = mtk_iommu_of_ids,
-               .pm = &mtk_iommu_pm_ops,
+               .of_match_table = mtk_iommu_v1_of_ids,
+               .pm = &mtk_iommu_v1_pm_ops,
        }
 };
-module_platform_driver(mtk_iommu_driver);
+module_platform_driver(mtk_iommu_v1_driver);
 
 MODULE_DESCRIPTION("IOMMU API for MediaTek M4U v1 implementations");
 MODULE_LICENSE("GPL v2");
index 3833e86..c898bcb 100644 (file)
@@ -99,7 +99,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
        if (!domain_device)
                return -ENOMEM;
 
-       if (zdev->dma_table) {
+       if (zdev->dma_table && !zdev->s390_domain) {
                cc = zpci_dma_exit_device(zdev);
                if (cc) {
                        rc = -EIO;
@@ -107,6 +107,9 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
                }
        }
 
+       if (zdev->s390_domain)
+               zpci_unregister_ioat(zdev, 0);
+
        zdev->dma_table = s390_domain->dma_table;
        cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
                                virt_to_phys(zdev->dma_table));
@@ -136,7 +139,13 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
        return 0;
 
 out_restore:
-       zpci_dma_init_device(zdev);
+       if (!zdev->s390_domain) {
+               zpci_dma_init_device(zdev);
+       } else {
+               zdev->dma_table = zdev->s390_domain->dma_table;
+               zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+                                  virt_to_phys(zdev->dma_table));
+       }
 out_free:
        kfree(domain_device);
 
@@ -167,7 +176,7 @@ static void s390_iommu_detach_device(struct iommu_domain *domain,
        }
        spin_unlock_irqrestore(&s390_domain->list_lock, flags);
 
-       if (found) {
+       if (found && (zdev->s390_domain == s390_domain)) {
                zdev->s390_domain = NULL;
                zpci_unregister_ioat(zdev, 0);
                zpci_dma_init_device(zdev);
index 44fb884..4ab1038 100644 (file)
@@ -557,7 +557,7 @@ config LOONGSON_LIOINTC
 
 config LOONGSON_HTPIC
        bool "Loongson3 HyperTransport PIC Controller"
-       depends on MACH_LOONGSON64
+       depends on MACH_LOONGSON64 && MIPS
        default y
        select IRQ_DOMAIN
        select GENERIC_IRQ_CHIP
@@ -565,12 +565,12 @@ config LOONGSON_HTPIC
          Support for the Loongson-3 HyperTransport PIC Controller.
 
 config LOONGSON_HTVEC
-       bool "Loongson3 HyperTransport Interrupt Vector Controller"
+       bool "Loongson HyperTransport Interrupt Vector Controller"
        depends on MACH_LOONGSON64
        default MACH_LOONGSON64
        select IRQ_DOMAIN_HIERARCHY
        help
-         Support for the Loongson3 HyperTransport Interrupt Vector Controller.
+         Support for the Loongson HyperTransport Interrupt Vector Controller.
 
 config LOONGSON_PCH_PIC
        bool "Loongson PCH PIC Controller"
index 649c583..aed8885 100644 (file)
 #include <linux/smp.h>
 #include <linux/irqchip/chained_irq.h>
 
+#ifdef CONFIG_MIPS
 #include <loongson.h>
+#else
+#include <asm/loongson.h>
+#endif
 
 #define LIOINTC_CHIP_IRQ       32
 #define LIOINTC_NUM_PARENT 4
@@ -53,7 +57,7 @@ static void liointc_chained_handle_irq(struct irq_desc *desc)
        struct liointc_handler_data *handler = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct irq_chip_generic *gc = handler->priv->gc;
-       int core = get_ebase_cpunum() % LIOINTC_NUM_CORES;
+       int core = cpu_logical_map(smp_processor_id()) % LIOINTC_NUM_CORES;
        u32 pending;
 
        chained_irq_enter(chip, desc);
index 6090e64..a49979f 100644 (file)
@@ -869,6 +869,9 @@ source "drivers/leds/blink/Kconfig"
 comment "Flash and Torch LED drivers"
 source "drivers/leds/flash/Kconfig"
 
+comment "RGB LED drivers"
+source "drivers/leds/rgb/Kconfig"
+
 comment "LED Triggers"
 source "drivers/leds/trigger/Kconfig"
 
index e58ecb3..4fd2f92 100644 (file)
@@ -99,6 +99,9 @@ obj-$(CONFIG_LEDS_USER)                       += uleds.o
 # Flash and Torch LED Drivers
 obj-$(CONFIG_LEDS_CLASS_FLASH)         += flash/
 
+# RGB LED Drivers
+obj-$(CONFIG_LEDS_CLASS_MULTICOLOR)    += rgb/
+
 # LED Triggers
 obj-$(CONFIG_LEDS_TRIGGERS)            += trigger/
 
index ed1f20a..670f3bf 100644 (file)
@@ -279,17 +279,12 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
 
        led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS);
        ret = PTR_ERR_OR_ZERO(led->ctrl_gpio);
-       if (ret) {
-               dev_err(dev, "cannot get ctrl-gpios %d\n", ret);
-               return ret;
-       }
+       if (ret)
+               return dev_err_probe(dev, ret, "cannot get ctrl-gpios\n");
 
-       led->aux_gpio = devm_gpiod_get(dev, "aux", GPIOD_ASIS);
-       ret = PTR_ERR_OR_ZERO(led->aux_gpio);
-       if (ret) {
-               dev_err(dev, "cannot get aux-gpios %d\n", ret);
-               return ret;
-       }
+       led->aux_gpio = devm_gpiod_get_optional(dev, "aux", GPIOD_ASIS);
+       if (IS_ERR(led->aux_gpio))
+               return dev_err_probe(dev, PTR_ERR(led->aux_gpio), "cannot get aux-gpios\n");
 
        led->regulator = devm_regulator_get(dev, "vin");
        if (IS_ERR(led->regulator))
index 22c092a..fc63fce 100644 (file)
@@ -460,8 +460,14 @@ static int is31fl32xx_probe(struct i2c_client *client,
 static int is31fl32xx_remove(struct i2c_client *client)
 {
        struct is31fl32xx_priv *priv = i2c_get_clientdata(client);
+       int ret;
 
-       return is31fl32xx_reset_regs(priv);
+       ret = is31fl32xx_reset_regs(priv);
+       if (ret)
+               dev_err(&client->dev, "Failed to reset registers on removal (%pe)\n",
+                       ERR_PTR(ret));
+
+       return 0;
 }
 
 /*
index 42dc46e..9aa3fcc 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/device.h>
 #include <linux/leds.h>
 
-#include <mach/hardware.h>
 #include <asm/hardware/locomo.h>
 
 static void locomoled_brightness_set(struct led_classdev *led_cdev,
index 50b195f..e129dcc 100644 (file)
@@ -569,10 +569,8 @@ static int lp50xx_remove(struct i2c_client *client)
        int ret;
 
        ret = lp50xx_enable_disable(led, 0);
-       if (ret) {
+       if (ret)
                dev_err(led->dev, "Failed to disable chip\n");
-               return ret;
-       }
 
        if (led->regulator) {
                ret = regulator_disable(led->regulator);
index 017794b..f72b5d1 100644 (file)
@@ -318,13 +318,10 @@ static int pca9532_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
 }
 #endif /* CONFIG_LEDS_PCA9532_GPIO */
 
-static int pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
+static void pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
 {
        int i = n_devs;
 
-       if (!data)
-               return -EINVAL;
-
        while (--i >= 0) {
                switch (data->leds[i].type) {
                case PCA9532_TYPE_NONE:
@@ -346,8 +343,6 @@ static int pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
        if (data->gpio.parent)
                gpiochip_remove(&data->gpio);
 #endif
-
-       return 0;
 }
 
 static int pca9532_configure(struct i2c_client *client,
@@ -555,7 +550,9 @@ static int pca9532_remove(struct i2c_client *client)
 {
        struct pca9532_data *data = i2c_get_clientdata(client);
 
-       return pca9532_destroy_devices(data, data->chip_info->num_leds);
+       pca9532_destroy_devices(data, data->chip_info->num_leds);
+
+       return 0;
 }
 
 module_i2c_driver(pca9532_driver);
index 208c989..8a8b73b 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/mod_devicetable.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/leds.h>
@@ -123,34 +124,37 @@ static int regulator_led_probe(struct platform_device *pdev)
 {
        struct led_regulator_platform_data *pdata =
                        dev_get_platdata(&pdev->dev);
+       struct device *dev = &pdev->dev;
+       struct led_init_data init_data = {};
        struct regulator_led *led;
        struct regulator *vcc;
        int ret = 0;
 
-       if (pdata == NULL) {
-               dev_err(&pdev->dev, "no platform data\n");
-               return -ENODEV;
-       }
-
-       vcc = devm_regulator_get_exclusive(&pdev->dev, "vled");
+       vcc = devm_regulator_get_exclusive(dev, "vled");
        if (IS_ERR(vcc)) {
-               dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name);
+               dev_err(dev, "Cannot get vcc\n");
                return PTR_ERR(vcc);
        }
 
-       led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+       led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
        if (led == NULL)
                return -ENOMEM;
 
+       init_data.fwnode = dev->fwnode;
+
        led->cdev.max_brightness = led_regulator_get_max_brightness(vcc);
-       if (pdata->brightness > led->cdev.max_brightness) {
-               dev_err(&pdev->dev, "Invalid default brightness %d\n",
+       /* Legacy platform data label assignment */
+       if (pdata) {
+               if (pdata->brightness > led->cdev.max_brightness) {
+                       dev_err(dev, "Invalid default brightness %d\n",
                                pdata->brightness);
-               return -EINVAL;
+                       return -EINVAL;
+               }
+               led->cdev.brightness = pdata->brightness;
+               init_data.default_label = pdata->name;
        }
 
        led->cdev.brightness_set_blocking = regulator_led_brightness_set;
-       led->cdev.name = pdata->name;
        led->cdev.flags |= LED_CORE_SUSPENDRESUME;
        led->vcc = vcc;
 
@@ -162,16 +166,10 @@ static int regulator_led_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, led);
 
-       ret = led_classdev_register(&pdev->dev, &led->cdev);
+       ret = led_classdev_register_ext(dev, &led->cdev, &init_data);
        if (ret < 0)
                return ret;
 
-       /* to expose the default value to userspace */
-       led->cdev.brightness = pdata->brightness;
-
-       /* Set the default led status */
-       regulator_led_brightness_set(&led->cdev, led->cdev.brightness);
-
        return 0;
 }
 
@@ -184,10 +182,17 @@ static int regulator_led_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id regulator_led_of_match[] = {
+       { .compatible = "regulator-led", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, regulator_led_of_match);
+
 static struct platform_driver regulator_led_driver = {
        .driver = {
-                  .name  = "leds-regulator",
-                  },
+               .name  = "leds-regulator",
+               .of_match_table = regulator_led_of_match,
+       },
        .probe  = regulator_led_probe,
        .remove = regulator_led_remove,
 };
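With the of_match table and the fwnode-based init_data above, a regulator-driven LED can now be described directly in the device tree. A minimal sketch, assuming a placeholder regulator phandle, might look like this; only the "regulator-led" compatible and the "vled" consumer name come from the code above, the supply property follows the common regulator consumer convention.

    led-status {
            compatible = "regulator-led";
            /* the "vled" consumer in the driver maps to the vled-supply property */
            vled-supply = <&vled_reg>;
    };

Because init_data.fwnode is set, the LED name is derived from the node through the common LED bindings rather than from the legacy platform-data label.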
diff --git a/drivers/leds/rgb/Kconfig b/drivers/leds/rgb/Kconfig
new file mode 100644 (file)
index 0000000..204cf47
--- /dev/null
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+
+if LEDS_CLASS_MULTICOLOR
+
+config LEDS_PWM_MULTICOLOR
+       tristate "PWM driven multi-color LED Support"
+       depends on PWM
+       help
+         This option enables support for PWM driven monochrome LEDs that are
+         grouped into multicolor LEDs.
+
+         To compile this driver as a module, choose M here: the module
+         will be called leds-pwm-multicolor.
+
+config LEDS_QCOM_LPG
+       tristate "LED support for Qualcomm LPG"
+       depends on OF
+       depends on PWM
+       depends on SPMI
+       help
+         This option enables support for the Light Pulse Generator found in a
+         wide variety of Qualcomm PMICs. The LPG consists of a number of PWM
+         channels and typically a shared pattern lookup table and a current
+         sink, intended to drive RGB LEDs. Each channel can either be used as
+         an LED, be grouped with others to represent an RGB LED, or be exposed
+         as a PWM channel.
+
+         If compiled as a module, the module will be named leds-qcom-lpg.
+
+endif # LEDS_CLASS_MULTICOLOR
diff --git a/drivers/leds/rgb/Makefile b/drivers/leds/rgb/Makefile
new file mode 100644 (file)
index 0000000..0675bc0
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_LEDS_PWM_MULTICOLOR)      += leds-pwm-multicolor.o
+obj-$(CONFIG_LEDS_QCOM_LPG)            += leds-qcom-lpg.o
diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
new file mode 100644 (file)
index 0000000..45e3870
--- /dev/null
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PWM-based multi-color LED control
+ *
+ * Copyright 2022 Sven Schwermer <sven.schwermer@disruptive-technologies.com>
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/pwm.h>
+
+struct pwm_led {
+       struct pwm_device *pwm;
+       struct pwm_state state;
+};
+
+struct pwm_mc_led {
+       struct led_classdev_mc mc_cdev;
+       struct mutex lock;
+       struct pwm_led leds[];
+};
+
+static int led_pwm_mc_set(struct led_classdev *cdev,
+                         enum led_brightness brightness)
+{
+       struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev);
+       struct pwm_mc_led *priv = container_of(mc_cdev, struct pwm_mc_led, mc_cdev);
+       unsigned long long duty;
+       int ret = 0;
+       int i;
+
+       led_mc_calc_color_components(mc_cdev, brightness);
+
+       mutex_lock(&priv->lock);
+
+       for (i = 0; i < mc_cdev->num_colors; i++) {
+               duty = priv->leds[i].state.period;
+               duty *= mc_cdev->subled_info[i].brightness;
+               do_div(duty, cdev->max_brightness);
+
+               priv->leds[i].state.duty_cycle = duty;
+               priv->leds[i].state.enabled = duty > 0;
+               ret = pwm_apply_state(priv->leds[i].pwm,
+                                     &priv->leds[i].state);
+               if (ret)
+                       break;
+       }
+
+       mutex_unlock(&priv->lock);
+
+       return ret;
+}
+
+static int iterate_subleds(struct device *dev, struct pwm_mc_led *priv,
+                          struct fwnode_handle *mcnode)
+{
+       struct mc_subled *subled = priv->mc_cdev.subled_info;
+       struct fwnode_handle *fwnode;
+       struct pwm_led *pwmled;
+       u32 color;
+       int ret;
+
+       /* iterate over the nodes inside the multi-led node */
+       fwnode_for_each_child_node(mcnode, fwnode) {
+               pwmled = &priv->leds[priv->mc_cdev.num_colors];
+               pwmled->pwm = devm_fwnode_pwm_get(dev, fwnode, NULL);
+               if (IS_ERR(pwmled->pwm)) {
+                       ret = PTR_ERR(pwmled->pwm);
+                       dev_err(dev, "unable to request PWM: %d\n", ret);
+                       goto release_fwnode;
+               }
+               pwm_init_state(pwmled->pwm, &pwmled->state);
+
+               ret = fwnode_property_read_u32(fwnode, "color", &color);
+               if (ret) {
+                       dev_err(dev, "cannot read color: %d\n", ret);
+                       goto release_fwnode;
+               }
+
+               subled[priv->mc_cdev.num_colors].color_index = color;
+               priv->mc_cdev.num_colors++;
+       }
+
+       return 0;
+
+release_fwnode:
+       fwnode_handle_put(fwnode);
+       return ret;
+}
+
+static int led_pwm_mc_probe(struct platform_device *pdev)
+{
+       struct fwnode_handle *mcnode, *fwnode;
+       struct led_init_data init_data = {};
+       struct led_classdev *cdev;
+       struct mc_subled *subled;
+       struct pwm_mc_led *priv;
+       int count = 0;
+       int ret = 0;
+
+       mcnode = device_get_named_child_node(&pdev->dev, "multi-led");
+       if (!mcnode)
+               return dev_err_probe(&pdev->dev, -ENODEV,
+                                    "expected multi-led node\n");
+
+       /* count the nodes inside the multi-led node */
+       fwnode_for_each_child_node(mcnode, fwnode)
+               count++;
+
+       priv = devm_kzalloc(&pdev->dev, struct_size(priv, leds, count),
+                           GFP_KERNEL);
+       if (!priv) {
+               ret = -ENOMEM;
+               goto release_mcnode;
+       }
+       mutex_init(&priv->lock);
+
+       subled = devm_kcalloc(&pdev->dev, count, sizeof(*subled), GFP_KERNEL);
+       if (!subled) {
+               ret = -ENOMEM;
+               goto release_mcnode;
+       }
+       priv->mc_cdev.subled_info = subled;
+
+       /* initialize the multicolor LED class device */
+       cdev = &priv->mc_cdev.led_cdev;
+       fwnode_property_read_u32(mcnode, "max-brightness",
+                                &cdev->max_brightness);
+       cdev->flags = LED_CORE_SUSPENDRESUME;
+       cdev->brightness_set_blocking = led_pwm_mc_set;
+
+       ret = iterate_subleds(&pdev->dev, priv, mcnode);
+       if (ret)
+               goto release_mcnode;
+
+       init_data.fwnode = mcnode;
+       ret = devm_led_classdev_multicolor_register_ext(&pdev->dev,
+                                                       &priv->mc_cdev,
+                                                       &init_data);
+       if (ret) {
+               dev_err(&pdev->dev,
+                       "failed to register multicolor PWM led for %s: %d\n",
+                       cdev->name, ret);
+               goto release_mcnode;
+       }
+
+       ret = led_pwm_mc_set(cdev, cdev->brightness);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret,
+                                    "failed to set led PWM value for %s: %d",
+                                    cdev->name, ret);
+
+       platform_set_drvdata(pdev, priv);
+       return 0;
+
+release_mcnode:
+       fwnode_handle_put(mcnode);
+       return ret;
+}
+
+static const struct of_device_id of_pwm_leds_mc_match[] = {
+       { .compatible = "pwm-leds-multicolor", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, of_pwm_leds_mc_match);
+
+static struct platform_driver led_pwm_mc_driver = {
+       .probe          = led_pwm_mc_probe,
+       .driver         = {
+               .name   = "leds_pwm_multicolor",
+               .of_match_table = of_pwm_leds_mc_match,
+       },
+};
+module_platform_driver(led_pwm_mc_driver);
+
+MODULE_AUTHOR("Sven Schwermer <sven.schwermer@disruptive-technologies.com>");
+MODULE_DESCRIPTION("multi-color PWM LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:leds-pwm-multicolor");
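
As a quick sanity check of the per-colour scaling in led_pwm_mc_set() above, here is a small stand-alone C snippet (user space, illustrative values only) that reproduces duty = period * brightness / max_brightness:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the per-channel duty computation in led_pwm_mc_set(). */
static uint64_t scale_duty(uint64_t period_ns, unsigned int brightness,
			   unsigned int max_brightness)
{
	uint64_t duty = period_ns * brightness;

	return duty / max_brightness;	/* do_div() in the kernel */
}

int main(void)
{
	/* Example: 1 ms PWM period, 8-bit brightness scale. */
	const uint64_t period_ns = 1000000;
	const unsigned int max_brightness = 255;
	unsigned int b;

	for (b = 0; b <= max_brightness; b += 85)
		printf("brightness %3u -> duty %llu ns\n", b,
		       (unsigned long long)scale_duty(period_ns, b, max_brightness));

	return 0;
}
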
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
new file mode 100644 (file)
index 0000000..02f51cc
--- /dev/null
@@ -0,0 +1,1451 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2022 Linaro Ltd
+ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ */
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define LPG_SUBTYPE_REG                0x05
+#define  LPG_SUBTYPE_LPG       0x2
+#define  LPG_SUBTYPE_PWM       0xb
+#define  LPG_SUBTYPE_LPG_LITE  0x11
+#define LPG_PATTERN_CONFIG_REG 0x40
+#define LPG_SIZE_CLK_REG       0x41
+#define  PWM_CLK_SELECT_MASK   GENMASK(1, 0)
+#define LPG_PREDIV_CLK_REG     0x42
+#define  PWM_FREQ_PRE_DIV_MASK GENMASK(6, 5)
+#define  PWM_FREQ_EXP_MASK     GENMASK(2, 0)
+#define PWM_TYPE_CONFIG_REG    0x43
+#define PWM_VALUE_REG          0x44
+#define PWM_ENABLE_CONTROL_REG 0x46
+#define PWM_SYNC_REG           0x47
+#define LPG_RAMP_DURATION_REG  0x50
+#define LPG_HI_PAUSE_REG       0x52
+#define LPG_LO_PAUSE_REG       0x54
+#define LPG_HI_IDX_REG         0x56
+#define LPG_LO_IDX_REG         0x57
+#define PWM_SEC_ACCESS_REG     0xd0
+#define PWM_DTEST_REG(x)       (0xe2 + (x) - 1)
+
+#define TRI_LED_SRC_SEL                0x45
+#define TRI_LED_EN_CTL         0x46
+#define TRI_LED_ATC_CTL                0x47
+
+#define LPG_LUT_REG(x)         (0x40 + (x) * 2)
+#define RAMP_CONTROL_REG       0xc8
+
+#define LPG_RESOLUTION         512
+#define LPG_MAX_M              7
+
+struct lpg_channel;
+struct lpg_data;
+
+/**
+ * struct lpg - LPG device context
+ * @dev:       pointer to LPG device
+ * @map:       regmap for register access
+ * @lock:      used to synchronize LED and pwm callback requests
+ * @pwm:       PWM-chip object, if operating in PWM mode
+ * @data:      reference to version specific data
+ * @lut_base:  base address of the LUT block (optional)
+ * @lut_size:  number of entries in the LUT block
+ * @lut_bitmap:        allocation bitmap for LUT entries
+ * @triled_base: base address of the TRILED block (optional)
+ * @triled_src:        power-source for the TRILED
+ * @triled_has_atc_ctl:	true if there is a TRI_LED_ATC_CTL register
+ * @triled_has_src_sel:	true if there is a TRI_LED_SRC_SEL register
+ * @channels:  list of PWM channels
+ * @num_channels: number of @channels
+ */
+struct lpg {
+       struct device *dev;
+       struct regmap *map;
+
+       struct mutex lock;
+
+       struct pwm_chip pwm;
+
+       const struct lpg_data *data;
+
+       u32 lut_base;
+       u32 lut_size;
+       unsigned long *lut_bitmap;
+
+       u32 triled_base;
+       u32 triled_src;
+       bool triled_has_atc_ctl;
+       bool triled_has_src_sel;
+
+       struct lpg_channel *channels;
+       unsigned int num_channels;
+};
+
+/**
+ * struct lpg_channel - per channel data
+ * @lpg:       reference to parent lpg
+ * @base:      base address of the PWM channel
+ * @triled_mask: mask in TRILED to enable this channel
+ * @lut_mask:  mask in LUT to start pattern generator for this channel
+ * @subtype:   PMIC hardware block subtype
+ * @in_use:    channel is exposed to LED framework
+ * @color:     color of the LED attached to this channel
+ * @dtest_line:        DTEST line for output, or 0 if disabled
+ * @dtest_value: DTEST line configuration
+ * @pwm_value: duty (in microseconds) of the generated pulses, overridden by LUT
+ * @enabled:   output enabled?
+ * @period:    period (in nanoseconds) of the generated pulses
+ * @clk_sel:   reference clock frequency selector
+ * @pre_div_sel: divider selector of the reference clock
+ * @pre_div_exp: exponential divider of the reference clock
+ * @ramp_enabled: duty cycle is driven by iterating over lookup table
+ * @ramp_ping_pong: reverse through pattern, rather than wrapping to start
+ * @ramp_oneshot: perform only a single pass over the pattern
+ * @ramp_reverse: iterate over pattern backwards
+ * @ramp_tick_ms: length (in milliseconds) of one step in the pattern
+ * @ramp_lo_pause_ms: pause (in milliseconds) before iterating over pattern
+ * @ramp_hi_pause_ms: pause (in milliseconds) after iterating over pattern
+ * @pattern_lo_idx: start index of associated pattern
+ * @pattern_hi_idx: last index of associated pattern
+ */
+struct lpg_channel {
+       struct lpg *lpg;
+
+       u32 base;
+       unsigned int triled_mask;
+       unsigned int lut_mask;
+       unsigned int subtype;
+
+       bool in_use;
+
+       int color;
+
+       u32 dtest_line;
+       u32 dtest_value;
+
+       u16 pwm_value;
+       bool enabled;
+
+       u64 period;
+       unsigned int clk_sel;
+       unsigned int pre_div_sel;
+       unsigned int pre_div_exp;
+
+       bool ramp_enabled;
+       bool ramp_ping_pong;
+       bool ramp_oneshot;
+       bool ramp_reverse;
+       unsigned short ramp_tick_ms;
+       unsigned long ramp_lo_pause_ms;
+       unsigned long ramp_hi_pause_ms;
+
+       unsigned int pattern_lo_idx;
+       unsigned int pattern_hi_idx;
+};
+
+/**
+ * struct lpg_led - logical LED object
+ * @lpg:               lpg context reference
+ * @cdev:              LED class device
+ * @mcdev:             Multicolor LED class device
+ * @num_channels:      number of @channels
+ * @channels:          list of channels associated with the LED
+ */
+struct lpg_led {
+       struct lpg *lpg;
+
+       struct led_classdev cdev;
+       struct led_classdev_mc mcdev;
+
+       unsigned int num_channels;
+       struct lpg_channel *channels[];
+};
+
+/**
+ * struct lpg_channel_data - per channel initialization data
+ * @base:              base address for PWM channel registers
+ * @triled_mask:       bitmask for controlling this channel in TRILED
+ */
+struct lpg_channel_data {
+       unsigned int base;
+       u8 triled_mask;
+};
+
+/**
+ * struct lpg_data - initialization data
+ * @lut_base:          base address of LUT block
+ * @lut_size:          number of entries in LUT
+ * @triled_base:       base address of TRILED
+ * @triled_has_atc_ctl:	true if there is a TRI_LED_ATC_CTL register
+ * @triled_has_src_sel:	true if there is a TRI_LED_SRC_SEL register
+ * @num_channels:      number of channels in LPG
+ * @channels:          list of channel initialization data
+ */
+struct lpg_data {
+       unsigned int lut_base;
+       unsigned int lut_size;
+       unsigned int triled_base;
+       bool triled_has_atc_ctl;
+       bool triled_has_src_sel;
+       int num_channels;
+       const struct lpg_channel_data *channels;
+};
+
+static int triled_set(struct lpg *lpg, unsigned int mask, unsigned int enable)
+{
+       /* Skip if we don't have a triled block */
+       if (!lpg->triled_base)
+               return 0;
+
+       return regmap_update_bits(lpg->map, lpg->triled_base + TRI_LED_EN_CTL,
+                                 mask, enable);
+}
+
+static int lpg_lut_store(struct lpg *lpg, struct led_pattern *pattern,
+                        size_t len, unsigned int *lo_idx, unsigned int *hi_idx)
+{
+       unsigned int idx;
+       u16 val;
+       int i;
+
+       idx = bitmap_find_next_zero_area(lpg->lut_bitmap, lpg->lut_size,
+                                        0, len, 0);
+       if (idx >= lpg->lut_size)
+               return -ENOMEM;
+
+       for (i = 0; i < len; i++) {
+               val = pattern[i].brightness;
+
+               regmap_bulk_write(lpg->map, lpg->lut_base + LPG_LUT_REG(idx + i),
+                                 &val, sizeof(val));
+       }
+
+       bitmap_set(lpg->lut_bitmap, idx, len);
+
+       *lo_idx = idx;
+       *hi_idx = idx + len - 1;
+
+       return 0;
+}
+
+static void lpg_lut_free(struct lpg *lpg, unsigned int lo_idx, unsigned int hi_idx)
+{
+       int len;
+
+       len = hi_idx - lo_idx + 1;
+       if (len == 1)
+               return;
+
+       bitmap_clear(lpg->lut_bitmap, lo_idx, len);
+}
+
+static int lpg_lut_sync(struct lpg *lpg, unsigned int mask)
+{
+       return regmap_write(lpg->map, lpg->lut_base + RAMP_CONTROL_REG, mask);
+}
+
+static const unsigned int lpg_clk_rates[] = {0, 1024, 32768, 19200000};
+static const unsigned int lpg_pre_divs[] = {1, 3, 5, 6};
+
+static int lpg_calc_freq(struct lpg_channel *chan, uint64_t period)
+{
+       unsigned int clk_sel, best_clk = 0;
+       unsigned int div, best_div = 0;
+       unsigned int m, best_m = 0;
+       unsigned int error;
+       unsigned int best_err = UINT_MAX;
+       u64 best_period = 0;
+       u64 max_period;
+
+       /*
+        * The PWM period is determined by:
+        *
+        *          resolution * pre_div * 2^M
+        * period = --------------------------
+        *                   refclk
+        *
+        * With resolution fixed at 2^9 bits, pre_div = {1, 3, 5, 6} and
+        * M = [0..7].
+        *
+        * This allows for periods between 27us and 384s. As the PWM framework
+        * wants the implemented period to be equal to or shorter than the
+        * requested one, reject anything below 27us.
+        */
+       if (period <= (u64)NSEC_PER_SEC * LPG_RESOLUTION / 19200000)
+               return -EINVAL;
+
+       /* Limit period to largest possible value, to avoid overflows */
+       max_period = (u64)NSEC_PER_SEC * LPG_RESOLUTION * 6 * (1 << LPG_MAX_M) / 1024;
+       if (period > max_period)
+               period = max_period;
+
+       /*
+        * Search for the pre_div, refclk and M by solving the rewritten formula
+        * for each refclk and pre_div value:
+        *
+        *                     period * refclk
+        * M = log2 -------------------------------------
+        *           NSEC_PER_SEC * pre_div * resolution
+        */
+       for (clk_sel = 1; clk_sel < ARRAY_SIZE(lpg_clk_rates); clk_sel++) {
+               u64 numerator = period * lpg_clk_rates[clk_sel];
+
+               for (div = 0; div < ARRAY_SIZE(lpg_pre_divs); div++) {
+                       u64 denominator = (u64)NSEC_PER_SEC * lpg_pre_divs[div] * LPG_RESOLUTION;
+                       u64 actual;
+                       u64 ratio;
+
+                       if (numerator < denominator)
+                               continue;
+
+                       ratio = div64_u64(numerator, denominator);
+                       m = ilog2(ratio);
+                       if (m > LPG_MAX_M)
+                               m = LPG_MAX_M;
+
+                       actual = DIV_ROUND_UP_ULL(denominator * (1 << m), lpg_clk_rates[clk_sel]);
+
+                       error = period - actual;
+                       if (error < best_err) {
+                               best_err = error;
+
+                               best_div = div;
+                               best_m = m;
+                               best_clk = clk_sel;
+                               best_period = actual;
+                       }
+               }
+       }
+
+       chan->clk_sel = best_clk;
+       chan->pre_div_sel = best_div;
+       chan->pre_div_exp = best_m;
+       chan->period = best_period;
+
+       return 0;
+}
+
+static void lpg_calc_duty(struct lpg_channel *chan, uint64_t duty)
+{
+       unsigned int max = LPG_RESOLUTION - 1;
+       unsigned int val;
+
+       val = div64_u64(duty * lpg_clk_rates[chan->clk_sel],
+                       (u64)NSEC_PER_SEC * lpg_pre_divs[chan->pre_div_sel] * (1 << chan->pre_div_exp));
+
+       chan->pwm_value = min(val, max);
+}
+
+static void lpg_apply_freq(struct lpg_channel *chan)
+{
+       unsigned long val;
+       struct lpg *lpg = chan->lpg;
+
+       if (!chan->enabled)
+               return;
+
+       val = chan->clk_sel;
+
+       /* Specify 9-bit resolution, based on the subtype of the channel */
+       switch (chan->subtype) {
+       case LPG_SUBTYPE_LPG:
+               val |= GENMASK(5, 4);
+               break;
+       case LPG_SUBTYPE_PWM:
+               val |= BIT(2);
+               break;
+       case LPG_SUBTYPE_LPG_LITE:
+       default:
+               val |= BIT(4);
+               break;
+       }
+
+       regmap_write(lpg->map, chan->base + LPG_SIZE_CLK_REG, val);
+
+       val = FIELD_PREP(PWM_FREQ_PRE_DIV_MASK, chan->pre_div_sel) |
+             FIELD_PREP(PWM_FREQ_EXP_MASK, chan->pre_div_exp);
+       regmap_write(lpg->map, chan->base + LPG_PREDIV_CLK_REG, val);
+}
+
+#define LPG_ENABLE_GLITCH_REMOVAL      BIT(5)
+
+static void lpg_enable_glitch(struct lpg_channel *chan)
+{
+       struct lpg *lpg = chan->lpg;
+
+       regmap_update_bits(lpg->map, chan->base + PWM_TYPE_CONFIG_REG,
+                          LPG_ENABLE_GLITCH_REMOVAL, 0);
+}
+
+static void lpg_disable_glitch(struct lpg_channel *chan)
+{
+       struct lpg *lpg = chan->lpg;
+
+       regmap_update_bits(lpg->map, chan->base + PWM_TYPE_CONFIG_REG,
+                          LPG_ENABLE_GLITCH_REMOVAL,
+                          LPG_ENABLE_GLITCH_REMOVAL);
+}
+
+static void lpg_apply_pwm_value(struct lpg_channel *chan)
+{
+       struct lpg *lpg = chan->lpg;
+       u16 val = chan->pwm_value;
+
+       if (!chan->enabled)
+               return;
+
+       regmap_bulk_write(lpg->map, chan->base + PWM_VALUE_REG, &val, sizeof(val));
+}
+
+#define LPG_PATTERN_CONFIG_LO_TO_HI    BIT(4)
+#define LPG_PATTERN_CONFIG_REPEAT      BIT(3)
+#define LPG_PATTERN_CONFIG_TOGGLE      BIT(2)
+#define LPG_PATTERN_CONFIG_PAUSE_HI    BIT(1)
+#define LPG_PATTERN_CONFIG_PAUSE_LO    BIT(0)
+
+static void lpg_apply_lut_control(struct lpg_channel *chan)
+{
+       struct lpg *lpg = chan->lpg;
+       unsigned int hi_pause;
+       unsigned int lo_pause;
+       unsigned int conf = 0;
+       unsigned int lo_idx = chan->pattern_lo_idx;
+       unsigned int hi_idx = chan->pattern_hi_idx;
+       u16 step = chan->ramp_tick_ms;
+
+       if (!chan->ramp_enabled || chan->pattern_lo_idx == chan->pattern_hi_idx)
+               return;
+
+       hi_pause = DIV_ROUND_UP(chan->ramp_hi_pause_ms, step);
+       lo_pause = DIV_ROUND_UP(chan->ramp_lo_pause_ms, step);
+
+       if (!chan->ramp_reverse)
+               conf |= LPG_PATTERN_CONFIG_LO_TO_HI;
+       if (!chan->ramp_oneshot)
+               conf |= LPG_PATTERN_CONFIG_REPEAT;
+       if (chan->ramp_ping_pong)
+               conf |= LPG_PATTERN_CONFIG_TOGGLE;
+       if (chan->ramp_hi_pause_ms)
+               conf |= LPG_PATTERN_CONFIG_PAUSE_HI;
+       if (chan->ramp_lo_pause_ms)
+               conf |= LPG_PATTERN_CONFIG_PAUSE_LO;
+
+       regmap_write(lpg->map, chan->base + LPG_PATTERN_CONFIG_REG, conf);
+       regmap_write(lpg->map, chan->base + LPG_HI_IDX_REG, hi_idx);
+       regmap_write(lpg->map, chan->base + LPG_LO_IDX_REG, lo_idx);
+
+       regmap_bulk_write(lpg->map, chan->base + LPG_RAMP_DURATION_REG, &step, sizeof(step));
+       regmap_write(lpg->map, chan->base + LPG_HI_PAUSE_REG, hi_pause);
+       regmap_write(lpg->map, chan->base + LPG_LO_PAUSE_REG, lo_pause);
+}
+
+#define LPG_ENABLE_CONTROL_OUTPUT              BIT(7)
+#define LPG_ENABLE_CONTROL_BUFFER_TRISTATE     BIT(5)
+#define LPG_ENABLE_CONTROL_SRC_PWM             BIT(2)
+#define LPG_ENABLE_CONTROL_RAMP_GEN            BIT(1)
+
+static void lpg_apply_control(struct lpg_channel *chan)
+{
+       unsigned int ctrl;
+       struct lpg *lpg = chan->lpg;
+
+       ctrl = LPG_ENABLE_CONTROL_BUFFER_TRISTATE;
+
+       if (chan->enabled)
+               ctrl |= LPG_ENABLE_CONTROL_OUTPUT;
+
+       if (chan->pattern_lo_idx != chan->pattern_hi_idx)
+               ctrl |= LPG_ENABLE_CONTROL_RAMP_GEN;
+       else
+               ctrl |= LPG_ENABLE_CONTROL_SRC_PWM;
+
+       regmap_write(lpg->map, chan->base + PWM_ENABLE_CONTROL_REG, ctrl);
+
+       /*
+        * Due to an LPG hardware bug, in PWM mode the PWM value has to be
+        * written one more time after the output has been enabled.
+        */
+       if (chan->enabled)
+               lpg_apply_pwm_value(chan);
+}
+
+#define LPG_SYNC_PWM   BIT(0)
+
+static void lpg_apply_sync(struct lpg_channel *chan)
+{
+       struct lpg *lpg = chan->lpg;
+
+       regmap_write(lpg->map, chan->base + PWM_SYNC_REG, LPG_SYNC_PWM);
+}
+
+static int lpg_parse_dtest(struct lpg *lpg)
+{
+       struct lpg_channel *chan;
+       struct device_node *np = lpg->dev->of_node;
+       int count;
+       int ret;
+       int i;
+
+       count = of_property_count_u32_elems(np, "qcom,dtest");
+       if (count == -EINVAL) {
+               return 0;
+       } else if (count < 0) {
+               ret = count;
+               goto err_malformed;
+       } else if (count != lpg->data->num_channels * 2) {
+               dev_err(lpg->dev, "qcom,dtest needs to be %d items\n",
+                       lpg->data->num_channels * 2);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < lpg->data->num_channels; i++) {
+               chan = &lpg->channels[i];
+
+               ret = of_property_read_u32_index(np, "qcom,dtest", i * 2,
+                                                &chan->dtest_line);
+               if (ret)
+                       goto err_malformed;
+
+               ret = of_property_read_u32_index(np, "qcom,dtest", i * 2 + 1,
+                                                &chan->dtest_value);
+               if (ret)
+                       goto err_malformed;
+       }
+
+       return 0;
+
+err_malformed:
+       dev_err(lpg->dev, "malformed qcom,dtest\n");
+       return ret;
+}
+
+static void lpg_apply_dtest(struct lpg_channel *chan)
+{
+       struct lpg *lpg = chan->lpg;
+
+       if (!chan->dtest_line)
+               return;
+
+       regmap_write(lpg->map, chan->base + PWM_SEC_ACCESS_REG, 0xa5);
+       regmap_write(lpg->map, chan->base + PWM_DTEST_REG(chan->dtest_line),
+                    chan->dtest_value);
+}
+
+static void lpg_apply(struct lpg_channel *chan)
+{
+       lpg_disable_glitch(chan);
+       lpg_apply_freq(chan);
+       lpg_apply_pwm_value(chan);
+       lpg_apply_control(chan);
+       lpg_apply_sync(chan);
+       lpg_apply_lut_control(chan);
+       lpg_enable_glitch(chan);
+}
+
+static void lpg_brightness_set(struct lpg_led *led, struct led_classdev *cdev,
+                              struct mc_subled *subleds)
+{
+       enum led_brightness brightness;
+       struct lpg_channel *chan;
+       unsigned int triled_enabled = 0;
+       unsigned int triled_mask = 0;
+       unsigned int lut_mask = 0;
+       unsigned int duty;
+       struct lpg *lpg = led->lpg;
+       int i;
+
+       for (i = 0; i < led->num_channels; i++) {
+               chan = led->channels[i];
+               brightness = subleds[i].brightness;
+
+               if (brightness == LED_OFF) {
+                       chan->enabled = false;
+                       chan->ramp_enabled = false;
+               } else if (chan->pattern_lo_idx != chan->pattern_hi_idx) {
+                       lpg_calc_freq(chan, NSEC_PER_MSEC);
+
+                       chan->enabled = true;
+                       chan->ramp_enabled = true;
+
+                       lut_mask |= chan->lut_mask;
+                       triled_enabled |= chan->triled_mask;
+               } else {
+                       lpg_calc_freq(chan, NSEC_PER_MSEC);
+
+                       duty = div_u64(brightness * chan->period, cdev->max_brightness);
+                       lpg_calc_duty(chan, duty);
+                       chan->enabled = true;
+                       chan->ramp_enabled = false;
+
+                       triled_enabled |= chan->triled_mask;
+               }
+
+               triled_mask |= chan->triled_mask;
+
+               lpg_apply(chan);
+       }
+
+       /* Toggle triled lines */
+       if (triled_mask)
+               triled_set(lpg, triled_mask, triled_enabled);
+
+       /* Trigger start of ramp generator(s) */
+       if (lut_mask)
+               lpg_lut_sync(lpg, lut_mask);
+}
+
+static void lpg_brightness_single_set(struct led_classdev *cdev,
+                                     enum led_brightness value)
+{
+       struct lpg_led *led = container_of(cdev, struct lpg_led, cdev);
+       struct mc_subled info;
+
+       mutex_lock(&led->lpg->lock);
+
+       info.brightness = value;
+       lpg_brightness_set(led, cdev, &info);
+
+       mutex_unlock(&led->lpg->lock);
+}
+
+static void lpg_brightness_mc_set(struct led_classdev *cdev,
+                                 enum led_brightness value)
+{
+       struct led_classdev_mc *mc = lcdev_to_mccdev(cdev);
+       struct lpg_led *led = container_of(mc, struct lpg_led, mcdev);
+
+       mutex_lock(&led->lpg->lock);
+
+       led_mc_calc_color_components(mc, value);
+       lpg_brightness_set(led, cdev, mc->subled_info);
+
+       mutex_unlock(&led->lpg->lock);
+}
+
+static int lpg_blink_set(struct lpg_led *led,
+                        unsigned long *delay_on, unsigned long *delay_off)
+{
+       struct lpg_channel *chan;
+       unsigned int period;
+       unsigned int triled_mask = 0;
+       struct lpg *lpg = led->lpg;
+       u64 duty;
+       int i;
+
+       if (!*delay_on && !*delay_off) {
+               *delay_on = 500;
+               *delay_off = 500;
+       }
+
+       duty = *delay_on * NSEC_PER_MSEC;
+       period = (*delay_on + *delay_off) * NSEC_PER_MSEC;
+
+       for (i = 0; i < led->num_channels; i++) {
+               chan = led->channels[i];
+
+               lpg_calc_freq(chan, period);
+               lpg_calc_duty(chan, duty);
+
+               chan->enabled = true;
+               chan->ramp_enabled = false;
+
+               triled_mask |= chan->triled_mask;
+
+               lpg_apply(chan);
+       }
+
+       /* Enable triled lines */
+       triled_set(lpg, triled_mask, triled_mask);
+
+       chan = led->channels[0];
+       duty = div_u64(chan->pwm_value * chan->period, LPG_RESOLUTION);
+       *delay_on = div_u64(duty, NSEC_PER_MSEC);
+       *delay_off = div_u64(chan->period - duty, NSEC_PER_MSEC);
+
+       return 0;
+}
+
+static int lpg_blink_single_set(struct led_classdev *cdev,
+                               unsigned long *delay_on, unsigned long *delay_off)
+{
+       struct lpg_led *led = container_of(cdev, struct lpg_led, cdev);
+       int ret;
+
+       mutex_lock(&led->lpg->lock);
+
+       ret = lpg_blink_set(led, delay_on, delay_off);
+
+       mutex_unlock(&led->lpg->lock);
+
+       return ret;
+}
+
+static int lpg_blink_mc_set(struct led_classdev *cdev,
+                           unsigned long *delay_on, unsigned long *delay_off)
+{
+       struct led_classdev_mc *mc = lcdev_to_mccdev(cdev);
+       struct lpg_led *led = container_of(mc, struct lpg_led, mcdev);
+       int ret;
+
+       mutex_lock(&led->lpg->lock);
+
+       ret = lpg_blink_set(led, delay_on, delay_off);
+
+       mutex_unlock(&led->lpg->lock);
+
+       return ret;
+}
+
+static int lpg_pattern_set(struct lpg_led *led, struct led_pattern *led_pattern,
+                          u32 len, int repeat)
+{
+       struct lpg_channel *chan;
+       struct lpg *lpg = led->lpg;
+       struct led_pattern *pattern;
+       unsigned int brightness_a;
+       unsigned int brightness_b;
+       unsigned int actual_len;
+       unsigned int hi_pause;
+       unsigned int lo_pause;
+       unsigned int delta_t;
+       unsigned int lo_idx;
+       unsigned int hi_idx;
+       unsigned int i;
+       bool ping_pong = true;
+       int ret = -EINVAL;
+
+       /* Hardware only supports oneshot or indefinite loops */
+       if (repeat != -1 && repeat != 1)
+               return -EINVAL;
+
+       /*
+        * The standardized leds-trigger-pattern format defines that the
+        * brightness of the LED follows a linear transition from one entry
+        * in the pattern to the next, over the given delta_t time. To perform
+        * an instant transition instead, it specifies that a zero-length entry
+        * should be added following a pattern entry.
+        *
+        * The LPG hardware is only able to perform the latter (no linear
+        * transitions), so require each entry in the pattern to be followed by
+        * a zero-length transition.
+        */
+       if (len % 2)
+               return -EINVAL;
+
+       pattern = kcalloc(len / 2, sizeof(*pattern), GFP_KERNEL);
+       if (!pattern)
+               return -ENOMEM;
+
+       for (i = 0; i < len; i += 2) {
+               if (led_pattern[i].brightness != led_pattern[i + 1].brightness)
+                       goto out_free_pattern;
+               if (led_pattern[i + 1].delta_t != 0)
+                       goto out_free_pattern;
+
+               pattern[i / 2].brightness = led_pattern[i].brightness;
+               pattern[i / 2].delta_t = led_pattern[i].delta_t;
+       }
+
+       len /= 2;
+
+       /*
+        * Specifying a pattern of length 1 causes the hardware to iterate
+        * through the entire LUT, so prohibit this.
+        */
+       if (len < 2)
+               goto out_free_pattern;
+
+       /*
+        * The LPG plays patterns at a fixed pace; a "low pause" can be
+        * used to stretch the first delay of the pattern and a "high pause"
+        * the last one.
+        *
+        * In order to save space the pattern can be played in "ping pong"
+        * mode, in which the pattern is first played forward, then "high
+        * pause" is applied, then the pattern is played backwards and finally
+        * the "low pause" is applied.
+        *
+        * The middle elements of the pattern are used to determine delta_t and
+        * the "low pause" and "high pause" multipliers are derrived from this.
+        *
+        * The first element in the pattern is used to determine "low pause".
+        *
+        * If the specified pattern is a palindrome the ping pong mode is
+        * enabled. In this scenario the delta_t of the middle entry (i.e. the
+        * last in the programmed pattern) determines the "high pause".
+        */
+
+       /* Detect palindromes and use "ping pong" to reduce LUT usage */
+       for (i = 0; i < len / 2; i++) {
+               brightness_a = pattern[i].brightness;
+               brightness_b = pattern[len - i - 1].brightness;
+
+               if (brightness_a != brightness_b) {
+                       ping_pong = false;
+                       break;
+               }
+       }
+
+       /* The pattern length to be written to the LUT */
+       if (ping_pong)
+               actual_len = (len + 1) / 2;
+       else
+               actual_len = len;
+
+       /*
+        * Validate that all delta_t in the pattern are the same, with the
+        * exception of the middle element in case of ping_pong.
+        */
+       delta_t = pattern[1].delta_t;
+       for (i = 2; i < len; i++) {
+               if (pattern[i].delta_t != delta_t) {
+                       /*
+                        * Allow last entry in the full or shortened pattern to
+                        * specify hi pause. Reject other variations.
+                        */
+                       if (i != actual_len - 1)
+                               goto out_free_pattern;
+               }
+       }
+
+       /* LPG_RAMP_DURATION_REG is a 9-bit register */
+       if (delta_t >= BIT(9))
+               goto out_free_pattern;
+
+       /* Find "low pause" and "high pause" in the pattern */
+       lo_pause = pattern[0].delta_t;
+       hi_pause = pattern[actual_len - 1].delta_t;
+
+       mutex_lock(&lpg->lock);
+       ret = lpg_lut_store(lpg, pattern, actual_len, &lo_idx, &hi_idx);
+       if (ret < 0)
+               goto out_unlock;
+
+       for (i = 0; i < led->num_channels; i++) {
+               chan = led->channels[i];
+
+               chan->ramp_tick_ms = delta_t;
+               chan->ramp_ping_pong = ping_pong;
+               chan->ramp_oneshot = repeat != -1;
+
+               chan->ramp_lo_pause_ms = lo_pause;
+               chan->ramp_hi_pause_ms = hi_pause;
+
+               chan->pattern_lo_idx = lo_idx;
+               chan->pattern_hi_idx = hi_idx;
+       }
+
+out_unlock:
+       mutex_unlock(&lpg->lock);
+out_free_pattern:
+       kfree(pattern);
+
+       return ret;
+}
+
+static int lpg_pattern_single_set(struct led_classdev *cdev,
+                                 struct led_pattern *pattern, u32 len,
+                                 int repeat)
+{
+       struct lpg_led *led = container_of(cdev, struct lpg_led, cdev);
+       int ret;
+
+       ret = lpg_pattern_set(led, pattern, len, repeat);
+       if (ret < 0)
+               return ret;
+
+       lpg_brightness_single_set(cdev, LED_FULL);
+
+       return 0;
+}
+
+static int lpg_pattern_mc_set(struct led_classdev *cdev,
+                             struct led_pattern *pattern, u32 len,
+                             int repeat)
+{
+       struct led_classdev_mc *mc = lcdev_to_mccdev(cdev);
+       struct lpg_led *led = container_of(mc, struct lpg_led, mcdev);
+       int ret;
+
+       ret = lpg_pattern_set(led, pattern, len, repeat);
+       if (ret < 0)
+               return ret;
+
+       led_mc_calc_color_components(mc, LED_FULL);
+       lpg_brightness_set(led, cdev, mc->subled_info);
+
+       return 0;
+}
+
+static int lpg_pattern_clear(struct lpg_led *led)
+{
+       struct lpg_channel *chan;
+       struct lpg *lpg = led->lpg;
+       int i;
+
+       mutex_lock(&lpg->lock);
+
+       chan = led->channels[0];
+       lpg_lut_free(lpg, chan->pattern_lo_idx, chan->pattern_hi_idx);
+
+       for (i = 0; i < led->num_channels; i++) {
+               chan = led->channels[i];
+               chan->pattern_lo_idx = 0;
+               chan->pattern_hi_idx = 0;
+       }
+
+       mutex_unlock(&lpg->lock);
+
+       return 0;
+}
+
+static int lpg_pattern_single_clear(struct led_classdev *cdev)
+{
+       struct lpg_led *led = container_of(cdev, struct lpg_led, cdev);
+
+       return lpg_pattern_clear(led);
+}
+
+static int lpg_pattern_mc_clear(struct led_classdev *cdev)
+{
+       struct led_classdev_mc *mc = lcdev_to_mccdev(cdev);
+       struct lpg_led *led = container_of(mc, struct lpg_led, mcdev);
+
+       return lpg_pattern_clear(led);
+}
+
+static int lpg_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       struct lpg *lpg = container_of(chip, struct lpg, pwm);
+       struct lpg_channel *chan = &lpg->channels[pwm->hwpwm];
+
+       return chan->in_use ? -EBUSY : 0;
+}
+
+/*
+ * Limitations:
+ * - Updating both duty and period is not done atomically, so the output signal
+ *   will momentarily be a mix of the settings.
+ * - Changed parameters take effect immediately.
+ * - A disabled channel outputs a logical 0.
+ */
+static int lpg_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                        const struct pwm_state *state)
+{
+       struct lpg *lpg = container_of(chip, struct lpg, pwm);
+       struct lpg_channel *chan = &lpg->channels[pwm->hwpwm];
+       int ret = 0;
+
+       if (state->polarity != PWM_POLARITY_NORMAL)
+               return -EINVAL;
+
+       mutex_lock(&lpg->lock);
+
+       if (state->enabled) {
+               ret = lpg_calc_freq(chan, state->period);
+               if (ret < 0)
+                       goto out_unlock;
+
+               lpg_calc_duty(chan, state->duty_cycle);
+       }
+       chan->enabled = state->enabled;
+
+       lpg_apply(chan);
+
+       triled_set(lpg, chan->triled_mask, chan->enabled ? chan->triled_mask : 0);
+
+out_unlock:
+       mutex_unlock(&lpg->lock);
+
+       return ret;
+}
+
+static void lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+                             struct pwm_state *state)
+{
+       struct lpg *lpg = container_of(chip, struct lpg, pwm);
+       struct lpg_channel *chan = &lpg->channels[pwm->hwpwm];
+       unsigned int pre_div;
+       unsigned int refclk;
+       unsigned int val;
+       unsigned int m;
+       u16 pwm_value;
+       int ret;
+
+       ret = regmap_read(lpg->map, chan->base + LPG_SIZE_CLK_REG, &val);
+       if (ret)
+               return;
+
+       refclk = lpg_clk_rates[val & PWM_CLK_SELECT_MASK];
+       if (refclk) {
+               ret = regmap_read(lpg->map, chan->base + LPG_PREDIV_CLK_REG, &val);
+               if (ret)
+                       return;
+
+               pre_div = lpg_pre_divs[FIELD_GET(PWM_FREQ_PRE_DIV_MASK, val)];
+               m = FIELD_GET(PWM_FREQ_EXP_MASK, val);
+
+               ret = regmap_bulk_read(lpg->map, chan->base + PWM_VALUE_REG, &pwm_value, sizeof(pwm_value));
+               if (ret)
+                       return;
+
+               state->period = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * LPG_RESOLUTION * pre_div * (1 << m), refclk);
+               state->duty_cycle = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pwm_value * pre_div * (1 << m), refclk);
+       } else {
+               state->period = 0;
+               state->duty_cycle = 0;
+       }
+
+       ret = regmap_read(lpg->map, chan->base + PWM_ENABLE_CONTROL_REG, &val);
+       if (ret)
+               return;
+
+       state->enabled = FIELD_GET(LPG_ENABLE_CONTROL_OUTPUT, val);
+       state->polarity = PWM_POLARITY_NORMAL;
+
+       if (state->duty_cycle > state->period)
+               state->duty_cycle = state->period;
+}
+
+static const struct pwm_ops lpg_pwm_ops = {
+       .request = lpg_pwm_request,
+       .apply = lpg_pwm_apply,
+       .get_state = lpg_pwm_get_state,
+       .owner = THIS_MODULE,
+};
+
+static int lpg_add_pwm(struct lpg *lpg)
+{
+       int ret;
+
+       lpg->pwm.base = -1;
+       lpg->pwm.dev = lpg->dev;
+       lpg->pwm.npwm = lpg->num_channels;
+       lpg->pwm.ops = &lpg_pwm_ops;
+
+       ret = pwmchip_add(&lpg->pwm);
+       if (ret)
+               dev_err(lpg->dev, "failed to add PWM chip: ret %d\n", ret);
+
+       return ret;
+}
+
+static int lpg_parse_channel(struct lpg *lpg, struct device_node *np,
+                            struct lpg_channel **channel)
+{
+       struct lpg_channel *chan;
+       u32 color = LED_COLOR_ID_GREEN;
+       u32 reg;
+       int ret;
+
+       ret = of_property_read_u32(np, "reg", &reg);
+       if (ret || !reg || reg > lpg->num_channels) {
+               dev_err(lpg->dev, "invalid \"reg\" of %pOFn\n", np);
+               return -EINVAL;
+       }
+
+       chan = &lpg->channels[reg - 1];
+       chan->in_use = true;
+
+       ret = of_property_read_u32(np, "color", &color);
+       if (ret < 0 && ret != -EINVAL) {
+               dev_err(lpg->dev, "failed to parse \"color\" of %pOF\n", np);
+               return ret;
+       }
+
+       chan->color = color;
+
+       *channel = chan;
+
+       return 0;
+}
+
+static int lpg_add_led(struct lpg *lpg, struct device_node *np)
+{
+       struct led_init_data init_data = {};
+       struct led_classdev *cdev;
+       struct device_node *child;
+       struct mc_subled *info;
+       struct lpg_led *led;
+       const char *state;
+       int num_channels;
+       u32 color = 0;
+       int ret;
+       int i;
+
+       ret = of_property_read_u32(np, "color", &color);
+       if (ret < 0 && ret != -EINVAL) {
+               dev_err(lpg->dev, "failed to parse \"color\" of %pOF\n", np);
+               return ret;
+       }
+
+       if (color == LED_COLOR_ID_RGB)
+               num_channels = of_get_available_child_count(np);
+       else
+               num_channels = 1;
+
+       led = devm_kzalloc(lpg->dev, struct_size(led, channels, num_channels), GFP_KERNEL);
+       if (!led)
+               return -ENOMEM;
+
+       led->lpg = lpg;
+       led->num_channels = num_channels;
+
+       if (color == LED_COLOR_ID_RGB) {
+               info = devm_kcalloc(lpg->dev, num_channels, sizeof(*info), GFP_KERNEL);
+               if (!info)
+                       return -ENOMEM;
+               i = 0;
+               for_each_available_child_of_node(np, child) {
+                       ret = lpg_parse_channel(lpg, child, &led->channels[i]);
+                       if (ret < 0)
+                               return ret;
+
+                       info[i].color_index = led->channels[i]->color;
+                       info[i].intensity = 0;
+                       i++;
+               }
+
+               led->mcdev.subled_info = info;
+               led->mcdev.num_colors = num_channels;
+
+               cdev = &led->mcdev.led_cdev;
+               cdev->brightness_set = lpg_brightness_mc_set;
+               cdev->blink_set = lpg_blink_mc_set;
+
+               /* Register pattern accessors only if we have a LUT block */
+               if (lpg->lut_base) {
+                       cdev->pattern_set = lpg_pattern_mc_set;
+                       cdev->pattern_clear = lpg_pattern_mc_clear;
+               }
+       } else {
+               ret = lpg_parse_channel(lpg, np, &led->channels[0]);
+               if (ret < 0)
+                       return ret;
+
+               cdev = &led->cdev;
+               cdev->brightness_set = lpg_brightness_single_set;
+               cdev->blink_set = lpg_blink_single_set;
+
+               /* Register pattern accessors only if we have a LUT block */
+               if (lpg->lut_base) {
+                       cdev->pattern_set = lpg_pattern_single_set;
+                       cdev->pattern_clear = lpg_pattern_single_clear;
+               }
+       }
+
+       cdev->default_trigger = of_get_property(np, "linux,default-trigger", NULL);
+       cdev->max_brightness = LPG_RESOLUTION - 1;
+
+       if (!of_property_read_string(np, "default-state", &state) &&
+           !strcmp(state, "on"))
+               cdev->brightness = cdev->max_brightness;
+       else
+               cdev->brightness = LED_OFF;
+
+       cdev->brightness_set(cdev, cdev->brightness);
+
+       init_data.fwnode = of_fwnode_handle(np);
+
+       if (color == LED_COLOR_ID_RGB)
+               ret = devm_led_classdev_multicolor_register_ext(lpg->dev, &led->mcdev, &init_data);
+       else
+               ret = devm_led_classdev_register_ext(lpg->dev, &led->cdev, &init_data);
+       if (ret)
+               dev_err(lpg->dev, "unable to register %s\n", cdev->name);
+
+       return ret;
+}
+
+static int lpg_init_channels(struct lpg *lpg)
+{
+       const struct lpg_data *data = lpg->data;
+       struct lpg_channel *chan;
+       int i;
+
+       lpg->num_channels = data->num_channels;
+       lpg->channels = devm_kcalloc(lpg->dev, data->num_channels,
+                                    sizeof(struct lpg_channel), GFP_KERNEL);
+       if (!lpg->channels)
+               return -ENOMEM;
+
+       for (i = 0; i < data->num_channels; i++) {
+               chan = &lpg->channels[i];
+
+               chan->lpg = lpg;
+               chan->base = data->channels[i].base;
+               chan->triled_mask = data->channels[i].triled_mask;
+               chan->lut_mask = BIT(i);
+
+               regmap_read(lpg->map, chan->base + LPG_SUBTYPE_REG, &chan->subtype);
+       }
+
+       return 0;
+}
+
+static int lpg_init_triled(struct lpg *lpg)
+{
+       struct device_node *np = lpg->dev->of_node;
+       int ret;
+
+       /* Skip initialization if we don't have a triled block */
+       if (!lpg->data->triled_base)
+               return 0;
+
+       lpg->triled_base = lpg->data->triled_base;
+       lpg->triled_has_atc_ctl = lpg->data->triled_has_atc_ctl;
+       lpg->triled_has_src_sel = lpg->data->triled_has_src_sel;
+
+       if (lpg->triled_has_src_sel) {
+               ret = of_property_read_u32(np, "qcom,power-source", &lpg->triled_src);
+               if (ret || lpg->triled_src == 2 || lpg->triled_src > 3) {
+                       dev_err(lpg->dev, "invalid power source\n");
+                       return -EINVAL;
+               }
+       }
+
+       /* Disable automatic trickle charge LED */
+       if (lpg->triled_has_atc_ctl)
+               regmap_write(lpg->map, lpg->triled_base + TRI_LED_ATC_CTL, 0);
+
+       /* Configure power source */
+       if (lpg->triled_has_src_sel)
+               regmap_write(lpg->map, lpg->triled_base + TRI_LED_SRC_SEL, lpg->triled_src);
+
+       /* Default all outputs to off */
+       regmap_write(lpg->map, lpg->triled_base + TRI_LED_EN_CTL, 0);
+
+       return 0;
+}
+
+static int lpg_init_lut(struct lpg *lpg)
+{
+       const struct lpg_data *data = lpg->data;
+
+       if (!data->lut_base)
+               return 0;
+
+       lpg->lut_base = data->lut_base;
+       lpg->lut_size = data->lut_size;
+
+       lpg->lut_bitmap = devm_bitmap_zalloc(lpg->dev, lpg->lut_size, GFP_KERNEL);
+       if (!lpg->lut_bitmap)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static int lpg_probe(struct platform_device *pdev)
+{
+       struct device_node *np;
+       struct lpg *lpg;
+       int ret;
+       int i;
+
+       lpg = devm_kzalloc(&pdev->dev, sizeof(*lpg), GFP_KERNEL);
+       if (!lpg)
+               return -ENOMEM;
+
+       lpg->data = of_device_get_match_data(&pdev->dev);
+       if (!lpg->data)
+               return -EINVAL;
+
+       platform_set_drvdata(pdev, lpg);
+
+       lpg->dev = &pdev->dev;
+       mutex_init(&lpg->lock);
+
+       lpg->map = dev_get_regmap(pdev->dev.parent, NULL);
+       if (!lpg->map)
+               return dev_err_probe(&pdev->dev, -ENXIO, "parent regmap unavailable\n");
+
+       ret = lpg_init_channels(lpg);
+       if (ret < 0)
+               return ret;
+
+       ret = lpg_parse_dtest(lpg);
+       if (ret < 0)
+               return ret;
+
+       ret = lpg_init_triled(lpg);
+       if (ret < 0)
+               return ret;
+
+       ret = lpg_init_lut(lpg);
+       if (ret < 0)
+               return ret;
+
+       for_each_available_child_of_node(pdev->dev.of_node, np) {
+               ret = lpg_add_led(lpg, np);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < lpg->num_channels; i++)
+               lpg_apply_dtest(&lpg->channels[i]);
+
+       return lpg_add_pwm(lpg);
+}
+
+static int lpg_remove(struct platform_device *pdev)
+{
+       struct lpg *lpg = platform_get_drvdata(pdev);
+
+       pwmchip_remove(&lpg->pwm);
+
+       return 0;
+}
+
+static const struct lpg_data pm8916_pwm_data = {
+       .num_channels = 1,
+       .channels = (const struct lpg_channel_data[]) {
+               { .base = 0xbc00 },
+       },
+};
+
+static const struct lpg_data pm8941_lpg_data = {
+       .lut_base = 0xb000,
+       .lut_size = 64,
+
+       .triled_base = 0xd000,
+       .triled_has_atc_ctl = true,
+       .triled_has_src_sel = true,
+
+       .num_channels = 8,
+       .channels = (const struct lpg_channel_data[]) {
+               { .base = 0xb100 },
+               { .base = 0xb200 },
+               { .base = 0xb300 },
+               { .base = 0xb400 },
+               { .base = 0xb500, .triled_mask = BIT(5) },
+               { .base = 0xb600, .triled_mask = BIT(6) },
+               { .base = 0xb700, .triled_mask = BIT(7) },
+               { .base = 0xb800 },
+       },
+};
+
+static const struct lpg_data pm8994_lpg_data = {
+       .lut_base = 0xb000,
+       .lut_size = 64,
+
+       .num_channels = 6,
+       .channels = (const struct lpg_channel_data[]) {
+               { .base = 0xb100 },
+               { .base = 0xb200 },
+               { .base = 0xb300 },
+               { .base = 0xb400 },
+               { .base = 0xb500 },
+               { .base = 0xb600 },
+       },
+};
+
+static const struct lpg_data pmi8994_lpg_data = {
+       .lut_base = 0xb000,
+       .lut_size = 24,
+
+       .triled_base = 0xd000,
+       .triled_has_atc_ctl = true,
+       .triled_has_src_sel = true,
+
+       .num_channels = 4,
+       .channels = (const struct lpg_channel_data[]) {
+               { .base = 0xb100, .triled_mask = BIT(5) },
+               { .base = 0xb200, .triled_mask = BIT(6) },
+               { .base = 0xb300, .triled_mask = BIT(7) },
+               { .base = 0xb400 },
+       },
+};
+
+static const struct lpg_data pmi8998_lpg_data = {
+       .lut_base = 0xb000,
+       .lut_size = 49,
+
+       .triled_base = 0xd000,
+
+       .num_channels = 6,
+       .channels = (const struct lpg_channel_data[]) {
+               { .base = 0xb100 },
+               { .base = 0xb200 },
+               { .base = 0xb300, .triled_mask = BIT(5) },
+               { .base = 0xb400, .triled_mask = BIT(6) },
+               { .base = 0xb500, .triled_mask = BIT(7) },
+               { .base = 0xb600 },
+       },
+};
+
+static const struct lpg_data pm8150b_lpg_data = {
+       .lut_base = 0xb000,
+       .lut_size = 24,
+
+       .triled_base = 0xd000,
+
+       .num_channels = 2,
+       .channels = (const struct lpg_channel_data[]) {
+               { .base = 0xb100, .triled_mask = BIT(7) },
+               { .base = 0xb200, .triled_mask = BIT(6) },
+       },
+};
+
+static const struct lpg_data pm8150l_lpg_data = {
+       .lut_base = 0xb000,
+       .lut_size = 48,
+
+       .triled_base = 0xd000,
+
+       .num_channels = 5,
+       .channels = (const struct lpg_channel_data[]) {
+               { .base = 0xb100, .triled_mask = BIT(7) },
+               { .base = 0xb200, .triled_mask = BIT(6) },
+               { .base = 0xb300, .triled_mask = BIT(5) },
+               { .base = 0xbc00 },
+               { .base = 0xbd00 },
+       },
+};
+
+static const struct lpg_data pm8350c_pwm_data = {
+       .triled_base = 0xef00,
+
+       .num_channels = 4,
+       .channels = (const struct lpg_channel_data[]) {
+               { .base = 0xe800, .triled_mask = BIT(7) },
+               { .base = 0xe900, .triled_mask = BIT(6) },
+               { .base = 0xea00, .triled_mask = BIT(5) },
+               { .base = 0xeb00 },
+       },
+};
+
+static const struct of_device_id lpg_of_table[] = {
+       { .compatible = "qcom,pm8150b-lpg", .data = &pm8150b_lpg_data },
+       { .compatible = "qcom,pm8150l-lpg", .data = &pm8150l_lpg_data },
+       { .compatible = "qcom,pm8350c-pwm", .data = &pm8350c_pwm_data },
+       { .compatible = "qcom,pm8916-pwm", .data = &pm8916_pwm_data },
+       { .compatible = "qcom,pm8941-lpg", .data = &pm8941_lpg_data },
+       { .compatible = "qcom,pm8994-lpg", .data = &pm8994_lpg_data },
+       { .compatible = "qcom,pmi8994-lpg", .data = &pmi8994_lpg_data },
+       { .compatible = "qcom,pmi8998-lpg", .data = &pmi8998_lpg_data },
+       { .compatible = "qcom,pmc8180c-lpg", .data = &pm8150l_lpg_data },
+       {}
+};
+MODULE_DEVICE_TABLE(of, lpg_of_table);
+
+static struct platform_driver lpg_driver = {
+       .probe = lpg_probe,
+       .remove = lpg_remove,
+       .driver = {
+               .name = "qcom-spmi-lpg",
+               .of_match_table = lpg_of_table,
+       },
+};
+module_platform_driver(lpg_driver);
+
+MODULE_DESCRIPTION("Qualcomm LPG LED driver");
+MODULE_LICENSE("GPL v2");
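
To make the period formula documented in lpg_calc_freq() above concrete, here is a small stand-alone C snippet that evaluates the two extremes of period = NSEC_PER_SEC * resolution * pre_div * 2^M / refclk and confirms the roughly 27 us to 384 s range mentioned in the comment; the resolution, clock rates and dividers are copied from the driver, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

#define LPG_RESOLUTION	512
#define NSEC_PER_SEC	1000000000ULL

/* period (in ns) = NSEC_PER_SEC * resolution * pre_div * 2^M / refclk */
static uint64_t lpg_period_ns(unsigned int refclk, unsigned int pre_div,
			      unsigned int m)
{
	return NSEC_PER_SEC * LPG_RESOLUTION * pre_div * (1ULL << m) / refclk;
}

int main(void)
{
	/* Shortest period: fastest refclk (19.2 MHz), smallest divider, M = 0. */
	uint64_t min_ns = lpg_period_ns(19200000, 1, 0);
	/* Longest period: slowest refclk (1024 Hz), largest divider, M = 7. */
	uint64_t max_ns = lpg_period_ns(1024, 6, 7);

	printf("min period: %llu ns (~27 us)\n", (unsigned long long)min_ns);
	printf("max period: %llu ns (384 s)\n", (unsigned long long)max_ns);

	return 0;
}
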
index 9ed9c95..2acda9c 100644 (file)
@@ -395,6 +395,13 @@ struct cached_dev {
        atomic_t                io_errors;
        unsigned int            error_limit;
        unsigned int            offline_seconds;
+
+       /*
+        * Retry updating writeback_rate if there is contention on
+        * down_read(dc->writeback_lock) in update_writeback_rate().
+        */
+#define BCH_WBRATE_UPDATE_MAX_SKIPS    15
+       unsigned int            rate_update_retry;
 };
 
 enum alloc_reserve {
index ad9f166..e136d6e 100644 (file)
@@ -2006,8 +2006,7 @@ int bch_btree_check(struct cache_set *c)
        int i;
        struct bkey *k = NULL;
        struct btree_iter iter;
-       struct btree_check_state *check_state;
-       char name[32];
+       struct btree_check_state check_state;
 
        /* check and mark root node keys */
        for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
@@ -2018,63 +2017,59 @@ int bch_btree_check(struct cache_set *c)
        if (c->root->level == 0)
                return 0;
 
-       check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL);
-       if (!check_state)
-               return -ENOMEM;
-
-       check_state->c = c;
-       check_state->total_threads = bch_btree_chkthread_nr();
-       check_state->key_idx = 0;
-       spin_lock_init(&check_state->idx_lock);
-       atomic_set(&check_state->started, 0);
-       atomic_set(&check_state->enough, 0);
-       init_waitqueue_head(&check_state->wait);
+       memset(&check_state, 0, sizeof(struct btree_check_state));
+       check_state.c = c;
+       check_state.total_threads = bch_btree_chkthread_nr();
+       check_state.key_idx = 0;
+       spin_lock_init(&check_state.idx_lock);
+       atomic_set(&check_state.started, 0);
+       atomic_set(&check_state.enough, 0);
+       init_waitqueue_head(&check_state.wait);
 
+       rw_lock(0, c->root, c->root->level);
        /*
         * Run multiple threads to check btree nodes in parallel,
-        * if check_state->enough is non-zero, it means current
+        * if check_state.enough is non-zero, it means current
         * running check threads are enough, unnecessary to create
         * more.
         */
-       for (i = 0; i < check_state->total_threads; i++) {
-               /* fetch latest check_state->enough earlier */
+       for (i = 0; i < check_state.total_threads; i++) {
+               /* fetch latest check_state.enough earlier */
                smp_mb__before_atomic();
-               if (atomic_read(&check_state->enough))
+               if (atomic_read(&check_state.enough))
                        break;
 
-               check_state->infos[i].result = 0;
-               check_state->infos[i].state = check_state;
-               snprintf(name, sizeof(name), "bch_btrchk[%u]", i);
-               atomic_inc(&check_state->started);
+               check_state.infos[i].result = 0;
+               check_state.infos[i].state = &check_state;
 
-               check_state->infos[i].thread =
+               check_state.infos[i].thread =
                        kthread_run(bch_btree_check_thread,
-                                   &check_state->infos[i],
-                                   name);
-               if (IS_ERR(check_state->infos[i].thread)) {
+                                   &check_state.infos[i],
+                                   "bch_btrchk[%d]", i);
+               if (IS_ERR(check_state.infos[i].thread)) {
                        pr_err("fails to run thread bch_btrchk[%d]\n", i);
                        for (--i; i >= 0; i--)
-                               kthread_stop(check_state->infos[i].thread);
+                               kthread_stop(check_state.infos[i].thread);
                        ret = -ENOMEM;
                        goto out;
                }
+               atomic_inc(&check_state.started);
        }
 
        /*
         * Must wait for all threads to stop.
         */
-       wait_event_interruptible(check_state->wait,
-                                atomic_read(&check_state->started) == 0);
+       wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
 
-       for (i = 0; i < check_state->total_threads; i++) {
-               if (check_state->infos[i].result) {
-                       ret = check_state->infos[i].result;
+       for (i = 0; i < check_state.total_threads; i++) {
+               if (check_state.infos[i].result) {
+                       ret = check_state.infos[i].result;
                        goto out;
                }
        }
 
 out:
-       kfree(check_state);
+       rw_unlock(0, c->root);
        return ret;
 }
 
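
The hunk above moves btree_check_state from a kzalloc()'d object onto bch_btree_check()'s own stack, holds the root node locked (rw_lock(0, c->root, ...)) across the whole check, bumps the started counter only after kthread_run() succeeds, and waits with wait_event() rather than wait_event_interruptible(), so the function can never return and release the stack frame while worker threads still reference it. Below is a minimal userspace sketch of that lifetime rule using pthreads and C11 atomics; the names are illustrative, not bcache code, and the sketch counts a worker up front and uncounts it on failure, which gives the same bookkeeping as counting after a successful kthread_run().

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_WORKERS 4

struct check_state {                    /* lives on the caller's stack */
        atomic_int started;
        pthread_mutex_t lock;
        pthread_cond_t done;
};

static void *worker(void *arg)
{
        struct check_state *st = arg;

        /* ... per-thread checking work would happen here ... */

        pthread_mutex_lock(&st->lock);
        if (atomic_fetch_sub(&st->started, 1) == 1)
                pthread_cond_signal(&st->done);  /* last one out */
        pthread_mutex_unlock(&st->lock);
        return NULL;
}

int main(void)
{
        struct check_state st = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .done = PTHREAD_COND_INITIALIZER,
        };
        pthread_t tid;

        for (int i = 0; i < NR_WORKERS; i++) {
                /* count the worker before it can possibly exit ... */
                atomic_fetch_add(&st.started, 1);
                if (pthread_create(&tid, NULL, worker, &st)) {
                        /* ... and uncount it if creation failed */
                        atomic_fetch_sub(&st.started, 1);
                        fprintf(stderr, "worker %d failed to start\n", i);
                        break;
                }
                pthread_detach(tid);
        }

        /* must not return (freeing &st) while any worker still runs */
        pthread_mutex_lock(&st.lock);
        while (atomic_load(&st.started) != 0)
                pthread_cond_wait(&st.done, &st.lock);
        pthread_mutex_unlock(&st.lock);

        puts("all checker threads finished");
        return 0;
}
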
index 5048210..1b5fdbc 100644 (file)
@@ -226,7 +226,7 @@ struct btree_check_info {
        int                             result;
 };
 
-#define BCH_BTR_CHKTHREAD_MAX  64
+#define BCH_BTR_CHKTHREAD_MAX  12
 struct btree_check_state {
        struct cache_set                *c;
        int                             total_threads;
index df5347e..e5da469 100644 (file)
@@ -405,6 +405,11 @@ err:
        return ret;
 }
 
+void bch_journal_space_reserve(struct journal *j)
+{
+       j->do_reserve = true;
+}
+
 /* Journalling */
 
 static void btree_flush_write(struct cache_set *c)
@@ -621,12 +626,30 @@ static void do_journal_discard(struct cache *ca)
        }
 }
 
+static unsigned int free_journal_buckets(struct cache_set *c)
+{
+       struct journal *j = &c->journal;
+       struct cache *ca = c->cache;
+       struct journal_device *ja = &c->cache->journal;
+       unsigned int n;
+
+       /* In case njournal_buckets is not power of 2 */
+       if (ja->cur_idx >= ja->discard_idx)
+               n = ca->sb.njournal_buckets +  ja->discard_idx - ja->cur_idx;
+       else
+               n = ja->discard_idx - ja->cur_idx;
+
+       if (n > (1 + j->do_reserve))
+               return n - (1 + j->do_reserve);
+
+       return 0;
+}
+
 static void journal_reclaim(struct cache_set *c)
 {
        struct bkey *k = &c->journal.key;
        struct cache *ca = c->cache;
        uint64_t last_seq;
-       unsigned int next;
        struct journal_device *ja = &ca->journal;
        atomic_t p __maybe_unused;
 
@@ -649,12 +672,10 @@ static void journal_reclaim(struct cache_set *c)
        if (c->journal.blocks_free)
                goto out;
 
-       next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
-       /* No space available on this device */
-       if (next == ja->discard_idx)
+       if (!free_journal_buckets(c))
                goto out;
 
-       ja->cur_idx = next;
+       ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
        k->ptr[0] = MAKE_PTR(0,
                             bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
                             ca->sb.nr_this_dev);
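
The new free_journal_buckets() counts free slots in the circular journal: the distance from cur_idx forward around the ring to discard_idx, computed explicitly because njournal_buckets need not be a power of two, minus a reserve of 1 bucket (2 once j->do_reserve has been armed by bch_journal_space_reserve(), which a later hunk calls from run_cache_set()). journal_reclaim() then only advances cur_idx while that count is non-zero. A small standalone C sketch of the same arithmetic, with illustrative field names:

#include <stdbool.h>
#include <stdio.h>

struct journal {
        unsigned int nbuckets;    /* total buckets in the ring         */
        unsigned int cur_idx;     /* next bucket to write              */
        unsigned int discard_idx; /* oldest bucket not yet reclaimed   */
        bool do_reserve;          /* armed once the cache set is up    */
};

static unsigned int free_buckets(const struct journal *j)
{
        unsigned int n;

        /* handles nbuckets that are not a power of two */
        if (j->cur_idx >= j->discard_idx)
                n = j->nbuckets + j->discard_idx - j->cur_idx;
        else
                n = j->discard_idx - j->cur_idx;

        /* always hold back 1 bucket, 2 once the reserve is armed */
        unsigned int reserve = 1 + j->do_reserve;

        return n > reserve ? n - reserve : 0;
}

int main(void)
{
        struct journal j = { .nbuckets = 8, .cur_idx = 6, .discard_idx = 2 };

        printf("free (reserve not armed): %u\n", free_buckets(&j));
        j.do_reserve = true;
        printf("free (reserve armed):     %u\n", free_buckets(&j));
        return 0;
}
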
index f2ea34d..cd316b4 100644 (file)
@@ -105,6 +105,7 @@ struct journal {
        spinlock_t              lock;
        spinlock_t              flush_write_lock;
        bool                    btree_flushing;
+       bool                    do_reserve;
        /* used when waiting because the journal was full */
        struct closure_waitlist wait;
        struct closure          io;
@@ -182,5 +183,6 @@ int bch_journal_replay(struct cache_set *c, struct list_head *list);
 
 void bch_journal_free(struct cache_set *c);
 int bch_journal_alloc(struct cache_set *c);
+void bch_journal_space_reserve(struct journal *j);
 
 #endif /* _BCACHE_JOURNAL_H */
index 9c5dde7..f2c5a7e 100644 (file)
@@ -1105,6 +1105,12 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
         * which would call closure_get(&dc->disk.cl)
         */
        ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
+       if (!ddip) {
+               bio->bi_status = BLK_STS_RESOURCE;
+               bio->bi_end_io(bio);
+               return;
+       }
+
        ddip->d = d;
        /* Count on the bcache device */
        ddip->orig_bdev = orig_bdev;
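
The added check lets detached_dev_do_request() fail the bio with BLK_STS_RESOURCE when the GFP_NOIO allocation fails, instead of dereferencing a NULL ddip. The same "complete the request with an error rather than crash" shape in a tiny userspace sketch (hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct request {
        int status;                      /* 0 = ok, nonzero = error */
        void (*end_io)(struct request *);
};

static void end_io(struct request *rq)
{
        printf("request completed, status %d\n", rq->status);
}

static void submit(struct request *rq)
{
        void *priv = malloc(64);         /* per-request bookkeeping */

        if (!priv) {
                /* out of memory: finish the request with a resource
                 * error and never touch the missing private data */
                rq->status = -12;
                rq->end_io(rq);
                return;
        }

        /* ... normal submission path would use priv here ... */
        free(priv);
        rq->status = 0;
        rq->end_io(rq);
}

int main(void)
{
        struct request rq = { .end_io = end_io };

        submit(&rq);
        return 0;
}
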
index 2f49e31..3563d15 100644 (file)
@@ -2127,6 +2127,7 @@ static int run_cache_set(struct cache_set *c)
 
        flash_devs_run(c);
 
+       bch_journal_space_reserve(&c->journal);
        set_bit(CACHE_SET_RUNNING, &c->flags);
        return 0;
 err:
index 9ee0005..3f0ff3a 100644 (file)
@@ -235,19 +235,27 @@ static void update_writeback_rate(struct work_struct *work)
                return;
        }
 
-       if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
-               /*
-                * If the whole cache set is idle, set_at_max_writeback_rate()
-                * will set writeback rate to a max number. Then it is
-                * unncessary to update writeback rate for an idle cache set
-                * in maximum writeback rate number(s).
-                */
-               if (!set_at_max_writeback_rate(c, dc)) {
-                       down_read(&dc->writeback_lock);
+       /*
+        * If the whole cache set is idle, set_at_max_writeback_rate()
+        * will set writeback rate to a max number. Then it is
+        * unnecessary to update writeback rate for an idle cache set
+        * in maximum writeback rate number(s).
+        */
+       if (atomic_read(&dc->has_dirty) && dc->writeback_percent &&
+           !set_at_max_writeback_rate(c, dc)) {
+               do {
+                       if (!down_read_trylock((&dc->writeback_lock))) {
+                               dc->rate_update_retry++;
+                               if (dc->rate_update_retry <=
+                                   BCH_WBRATE_UPDATE_MAX_SKIPS)
+                                       break;
+                               down_read(&dc->writeback_lock);
+                               dc->rate_update_retry = 0;
+                       }
                        __update_writeback_rate(dc);
                        update_gc_after_writeback(c);
                        up_read(&dc->writeback_lock);
-               }
+               } while (0);
        }
 
 
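
Instead of always sleeping on down_read(&dc->writeback_lock), the reworked update_writeback_rate() first attempts a non-blocking down_read_trylock(); on contention it skips this update, and only after more than BCH_WBRATE_UPDATE_MAX_SKIPS skipped updates does it fall back to the blocking down_read() and reset the counter, so the periodic worker does not pile up behind a long-held write lock. A standalone pthread sketch of that try-then-eventually-block pattern (illustrative names and limits):

#include <pthread.h>
#include <stdio.h>

#define MAX_SKIPS 15

static pthread_rwlock_t wb_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int rate_update_retry;

/* periodic worker: prefer skipping an update over blocking on writers */
static void update_rate(void)
{
        if (pthread_rwlock_tryrdlock(&wb_lock) != 0) {
                if (++rate_update_retry <= MAX_SKIPS) {
                        printf("contended, skipping update (%u)\n",
                               rate_update_retry);
                        return;
                }
                /* skipped too often: take the slow, blocking path once */
                pthread_rwlock_rdlock(&wb_lock);
                rate_update_retry = 0;
        }

        /* ... recompute the writeback rate under the read lock ... */
        printf("rate updated\n");
        pthread_rwlock_unlock(&wb_lock);
}

int main(void)
{
        /* uncontended: the trylock succeeds and the rate is updated */
        update_rate();

        /* simulate a long-held write lock: updates are skipped
         * (kept well below MAX_SKIPS so the demo never blocks) */
        pthread_rwlock_wrlock(&wb_lock);
        for (int i = 0; i < 3; i++)
                update_rate();
        pthread_rwlock_unlock(&wb_lock);

        update_rate();
        return 0;
}
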
@@ -805,13 +813,11 @@ static int bch_writeback_thread(void *arg)
 
 /* Init */
 #define INIT_KEYS_EACH_TIME    500000
-#define INIT_KEYS_SLEEP_MS     100
 
 struct sectors_dirty_init {
        struct btree_op op;
        unsigned int    inode;
        size_t          count;
-       struct bkey     start;
 };
 
 static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
@@ -827,11 +833,8 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
                                             KEY_START(k), KEY_SIZE(k));
 
        op->count++;
-       if (atomic_read(&b->c->search_inflight) &&
-           !(op->count % INIT_KEYS_EACH_TIME)) {
-               bkey_copy_key(&op->start, k);
-               return -EAGAIN;
-       }
+       if (!(op->count % INIT_KEYS_EACH_TIME))
+               cond_resched();
 
        return MAP_CONTINUE;
 }
@@ -846,24 +849,16 @@ static int bch_root_node_dirty_init(struct cache_set *c,
        bch_btree_op_init(&op.op, -1);
        op.inode = d->id;
        op.count = 0;
-       op.start = KEY(op.inode, 0, 0);
-
-       do {
-               ret = bcache_btree(map_keys_recurse,
-                                  k,
-                                  c->root,
-                                  &op.op,
-                                  &op.start,
-                                  sectors_dirty_init_fn,
-                                  0);
-               if (ret == -EAGAIN)
-                       schedule_timeout_interruptible(
-                               msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
-               else if (ret < 0) {
-                       pr_warn("sectors dirty init failed, ret=%d!\n", ret);
-                       break;
-               }
-       } while (ret == -EAGAIN);
+
+       ret = bcache_btree(map_keys_recurse,
+                          k,
+                          c->root,
+                          &op.op,
+                          &KEY(op.inode, 0, 0),
+                          sectors_dirty_init_fn,
+                          0);
+       if (ret < 0)
+               pr_warn("sectors dirty init failed, ret=%d!\n", ret);
 
        return ret;
 }
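
The old sectors_dirty_init_fn() aborted the btree walk with -EAGAIN every INIT_KEYS_EACH_TIME keys while foreground searches were in flight, and bch_root_node_dirty_init() restarted it from a saved op->start after a 100 ms sleep; the new code keeps the walk as a single pass and merely calls cond_resched() every 500000 keys. A userspace sketch of that "yield every N items instead of aborting and restarting" shape, with sched_yield() standing in for cond_resched():

#include <sched.h>
#include <stdio.h>

#define KEYS_EACH_TIME 500000

/* single pass over all keys; never bails out with -EAGAIN */
static long count_dirty(long nkeys)
{
        long dirty = 0;

        for (long i = 0; i < nkeys; i++) {
                dirty++;                        /* ... per-key work ... */

                /* give the scheduler a chance every so often */
                if ((i + 1) % KEYS_EACH_TIME == 0)
                        sched_yield();
        }
        return dirty;
}

int main(void)
{
        printf("counted %ld keys in one pass\n", count_dirty(2000000));
        return 0;
}
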
@@ -907,7 +902,6 @@ static int bch_dirty_init_thread(void *arg)
                                goto out;
                        }
                        skip_nr--;
-                       cond_resched();
                }
 
                if (p) {
@@ -917,7 +911,6 @@ static int bch_dirty_init_thread(void *arg)
 
                p = NULL;
                prev_idx = cur_idx;
-               cond_resched();
        }
 
 out:
@@ -948,67 +941,56 @@ void bch_sectors_dirty_init(struct bcache_device *d)
        struct btree_iter iter;
        struct sectors_dirty_init op;
        struct cache_set *c = d->c;
-       struct bch_dirty_init_state *state;
-       char name[32];
+       struct bch_dirty_init_state state;
 
        /* Just count root keys if no leaf node */
+       rw_lock(0, c->root, c->root->level);
        if (c->root->level == 0) {
                bch_btree_op_init(&op.op, -1);
                op.inode = d->id;
                op.count = 0;
-               op.start = KEY(op.inode, 0, 0);
 
                for_each_key_filter(&c->root->keys,
                                    k, &iter, bch_ptr_invalid)
                        sectors_dirty_init_fn(&op.op, c->root, k);
-               return;
-       }
 
-       state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
-       if (!state) {
-               pr_warn("sectors dirty init failed: cannot allocate memory\n");
+               rw_unlock(0, c->root);
                return;
        }
 
-       state->c = c;
-       state->d = d;
-       state->total_threads = bch_btre_dirty_init_thread_nr();
-       state->key_idx = 0;
-       spin_lock_init(&state->idx_lock);
-       atomic_set(&state->started, 0);
-       atomic_set(&state->enough, 0);
-       init_waitqueue_head(&state->wait);
-
-       for (i = 0; i < state->total_threads; i++) {
-               /* Fetch latest state->enough earlier */
+       memset(&state, 0, sizeof(struct bch_dirty_init_state));
+       state.c = c;
+       state.d = d;
+       state.total_threads = bch_btre_dirty_init_thread_nr();
+       state.key_idx = 0;
+       spin_lock_init(&state.idx_lock);
+       atomic_set(&state.started, 0);
+       atomic_set(&state.enough, 0);
+       init_waitqueue_head(&state.wait);
+
+       for (i = 0; i < state.total_threads; i++) {
+               /* Fetch latest state.enough earlier */
                smp_mb__before_atomic();
-               if (atomic_read(&state->enough))
+               if (atomic_read(&state.enough))
                        break;
 
-               state->infos[i].state = state;
-               atomic_inc(&state->started);
-               snprintf(name, sizeof(name), "bch_dirty_init[%d]", i);
-
-               state->infos[i].thread =
-                       kthread_run(bch_dirty_init_thread,
-                                   &state->infos[i],
-                                   name);
-               if (IS_ERR(state->infos[i].thread)) {
+               state.infos[i].state = &state;
+               state.infos[i].thread =
+                       kthread_run(bch_dirty_init_thread, &state.infos[i],
+                                   "bch_dirtcnt[%d]", i);
+               if (IS_ERR(state.infos[i].thread)) {
                        pr_err("fails to run thread bch_dirty_init[%d]\n", i);
                        for (--i; i >= 0; i--)
-                               kthread_stop(state->infos[i].thread);
+                               kthread_stop(state.infos[i].thread);
                        goto out;
                }
+               atomic_inc(&state.started);
        }
 
-       /*
-        * Must wait for all threads to stop.
-        */
-       wait_event_interruptible(state->wait,
-                atomic_read(&state->started) == 0);
-
 out:
-       kfree(state);
+       /* Must wait for all threads to stop. */
+       wait_event(state.wait, atomic_read(&state.started) == 0);
+       rw_unlock(0, c->root);
 }
 
 void bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -1032,6 +1014,9 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
        dc->writeback_rate_fp_term_high = 1000;
        dc->writeback_rate_i_term_inverse = 10000;
 
+       /* For dc->writeback_lock contention in update_writeback_rate() */
+       dc->rate_update_retry = 0;
+
        WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
        INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
 }
index 02b2f9d..31df716 100644 (file)
@@ -20,7 +20,7 @@
 #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
 #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64
 
-#define BCH_DIRTY_INIT_THRD_MAX        64
+#define BCH_DIRTY_INIT_THRD_MAX        12
 /*
  * 14 (16384ths) is chosen here as something that each backing device
  * should be a reasonable fraction of the share, and not to blow up
index 9526ccb..5e41fba 100644 (file)
@@ -3725,7 +3725,7 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
        if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
                if (mddev->sync_thread) {
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                       md_reap_sync_thread(mddev);
+                       md_reap_sync_thread(mddev, false);
                }
        } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
                return -EBUSY;
index a37c7b7..0e833a1 100644 (file)
@@ -1005,7 +1005,7 @@ bool dm_table_request_based(struct dm_table *t)
        return __table_type_request_based(dm_table_get_type(t));
 }
 
-static int dm_table_supports_poll(struct dm_table *t);
+static bool dm_table_supports_poll(struct dm_table *t);
 
 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
 {
@@ -1027,7 +1027,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
                        per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
                        min_pool_size = max(min_pool_size, ti->num_flush_bios);
                }
-               poll_supported = !!dm_table_supports_poll(t);
+               poll_supported = dm_table_supports_poll(t);
        }
 
        t->mempools = dm_alloc_md_mempools(md, type, per_io_data_size, min_pool_size,
@@ -1547,9 +1547,20 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev,
        return 0;
 }
 
-static int dm_table_supports_poll(struct dm_table *t)
+static bool dm_table_supports_poll(struct dm_table *t)
 {
-       return !dm_table_any_dev_attr(t, device_not_poll_capable, NULL);
+       struct dm_target *ti;
+       unsigned i = 0;
+
+       while (i < dm_table_get_num_targets(t)) {
+               ti = dm_table_get_target(t, i++);
+
+               if (!ti->type->iterate_devices ||
+                   ti->type->iterate_devices(ti, device_not_poll_capable, NULL))
+                       return false;
+       }
+
+       return true;
 }
 
 /*
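
dm_table_supports_poll() now returns bool and answers the question directly: every target must provide an iterate_devices hook and none of its devices may fail the poll-capability check, and the !! conversion at the call site disappears since the return type is already bool. The "all targets must pass the predicate" shape in a small standalone sketch with hypothetical types:

#include <stdbool.h>
#include <stdio.h>

struct target {
        const char *name;
        bool (*all_devs_pollable)(const struct target *t); /* may be NULL */
};

/* the table supports polling only if every target can prove that all
 * of its devices do; a target that cannot report its devices
 * disqualifies the whole table */
static bool table_supports_poll(const struct target *tgts, int n)
{
        for (int i = 0; i < n; i++) {
                const struct target *t = &tgts[i];

                if (!t->all_devs_pollable || !t->all_devs_pollable(t))
                        return false;
        }
        return true;
}

static bool yes(const struct target *t) { (void)t; return true; }

int main(void)
{
        struct target ok[]  = { { "linear", yes }, { "crypt", yes } };
        struct target bad[] = { { "linear", yes }, { "zero", NULL } };

        printf("table 1 pollable: %d\n", table_supports_poll(ok, 2));
        printf("table 2 pollable: %d\n", table_supports_poll(bad, 2));
        return 0;
}
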
index 80133aa..d6dbd47 100644 (file)
@@ -1312,6 +1312,7 @@ bad:
 
 static struct target_type verity_target = {
        .name           = "verity",
+       .features       = DM_TARGET_IMMUTABLE,
        .version        = {1, 8, 0},
        .module         = THIS_MODULE,
        .ctr            = verity_ctr,
index 138a3b2..6e7797b 100644 (file)
@@ -206,7 +206,6 @@ static void linear_free(struct mddev *mddev, void *priv)
 
 static bool linear_make_request(struct mddev *mddev, struct bio *bio)
 {
-       char b[BDEVNAME_SIZE];
        struct dev_info *tmp_dev;
        sector_t start_sector, end_sector, data_offset;
        sector_t bio_sector = bio->bi_iter.bi_sector;
@@ -256,10 +255,10 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
        return true;
 
 out_of_bounds:
-       pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n",
+       pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %pg: %llu sectors, offset %llu\n",
               mdname(mddev),
               (unsigned long long)bio->bi_iter.bi_sector,
-              bdevname(tmp_dev->rdev->bdev, b),
+              tmp_dev->rdev->bdev,
               (unsigned long long)tmp_dev->rdev->sectors,
               (unsigned long long)start_sector);
        bio_io_error(bio);
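
This linear.c hunk, and the matching ones in multipath, md, raid0, raid1 and raid10 further down, are mechanical: every bdevname(rdev->bdev, b) call that formatted the device name into an on-stack char b[BDEVNAME_SIZE] scratch buffer is replaced by printk's %pg specifier, which prints the name straight from the struct block_device pointer, so the scratch buffers and the extra argument shuffling go away. A userspace analogue of the buffer-removal half of that change (hypothetical struct and helper; %pg itself exists only inside the kernel's printk):

#include <stdio.h>

struct blockdev { const char *disk; int partno; };

/* old style: every caller supplies its own scratch buffer */
static const char *devname(const struct blockdev *d, char *buf, size_t len)
{
        snprintf(buf, len, "%s%d", d->disk, d->partno);
        return buf;
}

/* new style: the printing path formats the name directly from the
 * object, which is roughly what %pg does for a struct block_device * */
static void pr_dev(const char *msg, const struct blockdev *d)
{
        printf("%s %s%d\n", msg, d->disk, d->partno);
}

int main(void)
{
        struct blockdev dev = { .disk = "sda", .partno = 1 };
        char b[32];

        printf("old: unbind<%s>\n", devname(&dev, b, sizeof(b)));
        pr_dev("new: unbind", &dev);
        return 0;
}
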
index 1c6dbf9..66edf5e 100644 (file)
@@ -87,10 +87,9 @@ static void multipath_end_request(struct bio *bio)
                /*
                 * oops, IO error:
                 */
-               char b[BDEVNAME_SIZE];
                md_error (mp_bh->mddev, rdev);
-               pr_info("multipath: %s: rescheduling sector %llu\n",
-                       bdevname(rdev->bdev,b),
+               pr_info("multipath: %pg: rescheduling sector %llu\n",
+                       rdev->bdev,
                        (unsigned long long)bio->bi_iter.bi_sector);
                multipath_reschedule_retry(mp_bh);
        } else
@@ -154,7 +153,6 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev)
 static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
 {
        struct mpconf *conf = mddev->private;
-       char b[BDEVNAME_SIZE];
 
        if (conf->raid_disks - mddev->degraded <= 1) {
                /*
@@ -177,9 +175,9 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
        }
        set_bit(Faulty, &rdev->flags);
        set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
-       pr_err("multipath: IO failure on %s, disabling IO path.\n"
+       pr_err("multipath: IO failure on %pg, disabling IO path.\n"
               "multipath: Operation continuing on %d IO paths.\n",
-              bdevname(rdev->bdev, b),
+              rdev->bdev,
               conf->raid_disks - mddev->degraded);
 }
 
@@ -197,12 +195,11 @@ static void print_multipath_conf (struct mpconf *conf)
                 conf->raid_disks);
 
        for (i = 0; i < conf->raid_disks; i++) {
-               char b[BDEVNAME_SIZE];
                tmp = conf->multipaths + i;
                if (tmp->rdev)
-                       pr_debug(" disk%d, o:%d, dev:%s\n",
+                       pr_debug(" disk%d, o:%d, dev:%pg\n",
                                 i,!test_bit(Faulty, &tmp->rdev->flags),
-                                bdevname(tmp->rdev->bdev,b));
+                                tmp->rdev->bdev);
        }
 }
 
index 707e802..8273ac5 100644 (file)
@@ -1021,8 +1021,6 @@ EXPORT_SYMBOL_GPL(sync_page_io);
 
 static int read_disk_sb(struct md_rdev *rdev, int size)
 {
-       char b[BDEVNAME_SIZE];
-
        if (rdev->sb_loaded)
                return 0;
 
@@ -1032,8 +1030,8 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
        return 0;
 
 fail:
-       pr_err("md: disabled device %s, could not read superblock.\n",
-              bdevname(rdev->bdev,b));
+       pr_err("md: disabled device %pg, could not read superblock.\n",
+              rdev->bdev);
        return -EINVAL;
 }
 
@@ -1179,7 +1177,6 @@ EXPORT_SYMBOL(md_check_no_bitmap);
  */
 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
 {
-       char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        mdp_super_t *sb;
        int ret;
        bool spare_disk = true;
@@ -1198,19 +1195,19 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
 
        ret = -EINVAL;
 
-       bdevname(rdev->bdev, b);
        sb = page_address(rdev->sb_page);
 
        if (sb->md_magic != MD_SB_MAGIC) {
-               pr_warn("md: invalid raid superblock magic on %s\n", b);
+               pr_warn("md: invalid raid superblock magic on %pg\n",
+                       rdev->bdev);
                goto abort;
        }
 
        if (sb->major_version != 0 ||
            sb->minor_version < 90 ||
            sb->minor_version > 91) {
-               pr_warn("Bad version number %d.%d on %s\n",
-                       sb->major_version, sb->minor_version, b);
+               pr_warn("Bad version number %d.%d on %pg\n",
+                       sb->major_version, sb->minor_version, rdev->bdev);
                goto abort;
        }
 
@@ -1218,7 +1215,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
                goto abort;
 
        if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
-               pr_warn("md: invalid superblock checksum on %s\n", b);
+               pr_warn("md: invalid superblock checksum on %pg\n", rdev->bdev);
                goto abort;
        }
 
@@ -1250,13 +1247,13 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
                __u64 ev1, ev2;
                mdp_super_t *refsb = page_address(refdev->sb_page);
                if (!md_uuid_equal(refsb, sb)) {
-                       pr_warn("md: %s has different UUID to %s\n",
-                               b, bdevname(refdev->bdev,b2));
+                       pr_warn("md: %pg has different UUID to %pg\n",
+                               rdev->bdev, refdev->bdev);
                        goto abort;
                }
                if (!md_sb_equal(refsb, sb)) {
-                       pr_warn("md: %s has same UUID but different superblock to %s\n",
-                               b, bdevname(refdev->bdev, b2));
+                       pr_warn("md: %pg has same UUID but different superblock to %pg\n",
+                               rdev->bdev, refdev->bdev);
                        goto abort;
                }
                ev1 = md_event(sb);
@@ -1620,7 +1617,6 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
        int ret;
        sector_t sb_start;
        sector_t sectors;
-       char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        int bmask;
        bool spare_disk = true;
 
@@ -1664,13 +1660,13 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
                return -EINVAL;
 
        if (calc_sb_1_csum(sb) != sb->sb_csum) {
-               pr_warn("md: invalid superblock checksum on %s\n",
-                       bdevname(rdev->bdev,b));
+               pr_warn("md: invalid superblock checksum on %pg\n",
+                       rdev->bdev);
                return -EINVAL;
        }
        if (le64_to_cpu(sb->data_size) < 10) {
-               pr_warn("md: data_size too small on %s\n",
-                       bdevname(rdev->bdev,b));
+               pr_warn("md: data_size too small on %pg\n",
+                       rdev->bdev);
                return -EINVAL;
        }
        if (sb->pad0 ||
@@ -1776,9 +1772,9 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
                    sb->level != refsb->level ||
                    sb->layout != refsb->layout ||
                    sb->chunksize != refsb->chunksize) {
-                       pr_warn("md: %s has strangely different superblock to %s\n",
-                               bdevname(rdev->bdev,b),
-                               bdevname(refdev->bdev,b2));
+                       pr_warn("md: %pg has strangely different superblock to %pg\n",
+                               rdev->bdev,
+                               refdev->bdev);
                        return -EINVAL;
                }
                ev1 = le64_to_cpu(sb->events);
@@ -2365,7 +2361,6 @@ EXPORT_SYMBOL(md_integrity_register);
 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
 {
        struct blk_integrity *bi_mddev;
-       char name[BDEVNAME_SIZE];
 
        if (!mddev->gendisk)
                return 0;
@@ -2376,8 +2371,8 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
                return 0;
 
        if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
-               pr_err("%s: incompatible integrity profile for %s\n",
-                      mdname(mddev), bdevname(rdev->bdev, name));
+               pr_err("%s: incompatible integrity profile for %pg\n",
+                      mdname(mddev), rdev->bdev);
                return -ENXIO;
        }
 
@@ -2486,11 +2481,9 @@ static void rdev_delayed_delete(struct work_struct *ws)
 
 static void unbind_rdev_from_array(struct md_rdev *rdev)
 {
-       char b[BDEVNAME_SIZE];
-
        bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
        list_del_rcu(&rdev->same_set);
-       pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
+       pr_debug("md: unbind<%pg>\n", rdev->bdev);
        mddev_destroy_serial_pool(rdev->mddev, rdev, false);
        rdev->mddev = NULL;
        sysfs_remove_link(&rdev->kobj, "block");
@@ -2543,9 +2536,7 @@ void md_autodetect_dev(dev_t dev);
 
 static void export_rdev(struct md_rdev *rdev)
 {
-       char b[BDEVNAME_SIZE];
-
-       pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
+       pr_debug("md: export_rdev(%pg)\n", rdev->bdev);
        md_rdev_clear(rdev);
 #ifndef MODULE
        if (test_bit(AutoDetected, &rdev->flags))
@@ -2803,8 +2794,6 @@ repeat:
 rewrite:
        md_bitmap_update_sb(mddev->bitmap);
        rdev_for_each(rdev, mddev) {
-               char b[BDEVNAME_SIZE];
-
                if (rdev->sb_loaded != 1)
                        continue; /* no noise on spare devices */
 
@@ -2812,8 +2801,8 @@ rewrite:
                        md_super_write(mddev,rdev,
                                       rdev->sb_start, rdev->sb_size,
                                       rdev->sb_page);
-                       pr_debug("md: (write) %s's sb offset: %llu\n",
-                                bdevname(rdev->bdev, b),
+                       pr_debug("md: (write) %pg's sb offset: %llu\n",
+                                rdev->bdev,
                                 (unsigned long long)rdev->sb_start);
                        rdev->sb_events = mddev->events;
                        if (rdev->badblocks.size) {
@@ -2825,8 +2814,8 @@ rewrite:
                        }
 
                } else
-                       pr_debug("md: %s (skipping faulty)\n",
-                                bdevname(rdev->bdev, b));
+                       pr_debug("md: %pg (skipping faulty)\n",
+                                rdev->bdev);
 
                if (mddev->level == LEVEL_MULTIPATH)
                        /* only need to write one superblock... */
@@ -3701,7 +3690,6 @@ EXPORT_SYMBOL_GPL(md_rdev_init);
  */
 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
 {
-       char b[BDEVNAME_SIZE];
        int err;
        struct md_rdev *rdev;
        sector_t size;
@@ -3725,8 +3713,8 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
 
        size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
        if (!size) {
-               pr_warn("md: %s has zero or unknown size, marking faulty!\n",
-                       bdevname(rdev->bdev,b));
+               pr_warn("md: %pg has zero or unknown size, marking faulty!\n",
+                       rdev->bdev);
                err = -EINVAL;
                goto abort_free;
        }
@@ -3735,14 +3723,14 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
                err = super_types[super_format].
                        load_super(rdev, NULL, super_minor);
                if (err == -EINVAL) {
-                       pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
-                               bdevname(rdev->bdev,b),
+                       pr_warn("md: %pg does not have a valid v%d.%d superblock, not importing!\n",
+                               rdev->bdev,
                                super_format, super_minor);
                        goto abort_free;
                }
                if (err < 0) {
-                       pr_warn("md: could not read %s's sb, not importing!\n",
-                               bdevname(rdev->bdev,b));
+                       pr_warn("md: could not read %pg's sb, not importing!\n",
+                               rdev->bdev);
                        goto abort_free;
                }
        }
@@ -3765,7 +3753,6 @@ static int analyze_sbs(struct mddev *mddev)
 {
        int i;
        struct md_rdev *rdev, *freshest, *tmp;
-       char b[BDEVNAME_SIZE];
 
        freshest = NULL;
        rdev_for_each_safe(rdev, tmp, mddev)
@@ -3777,8 +3764,8 @@ static int analyze_sbs(struct mddev *mddev)
                case 0:
                        break;
                default:
-                       pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
-                               bdevname(rdev->bdev,b));
+                       pr_warn("md: fatal superblock inconsistency in %pg -- removing from array\n",
+                               rdev->bdev);
                        md_kick_rdev_from_array(rdev);
                }
 
@@ -3796,8 +3783,8 @@ static int analyze_sbs(struct mddev *mddev)
                if (mddev->max_disks &&
                    (rdev->desc_nr >= mddev->max_disks ||
                     i > mddev->max_disks)) {
-                       pr_warn("md: %s: %s: only %d devices permitted\n",
-                               mdname(mddev), bdevname(rdev->bdev, b),
+                       pr_warn("md: %s: %pg: only %d devices permitted\n",
+                               mdname(mddev), rdev->bdev,
                                mddev->max_disks);
                        md_kick_rdev_from_array(rdev);
                        continue;
@@ -3805,8 +3792,8 @@ static int analyze_sbs(struct mddev *mddev)
                if (rdev != freshest) {
                        if (super_types[mddev->major_version].
                            validate_super(mddev, rdev)) {
-                               pr_warn("md: kicking non-fresh %s from array!\n",
-                                       bdevname(rdev->bdev,b));
+                               pr_warn("md: kicking non-fresh %pg from array!\n",
+                                       rdev->bdev);
                                md_kick_rdev_from_array(rdev);
                                continue;
                        }
@@ -4844,7 +4831,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
                                flush_workqueue(md_misc_wq);
                        if (mddev->sync_thread) {
                                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                               md_reap_sync_thread(mddev);
+                               md_reap_sync_thread(mddev, true);
                        }
                        mddev_unlock(mddev);
                }
@@ -5598,8 +5585,6 @@ static void md_free(struct kobject *ko)
 
        bioset_exit(&mddev->bio_set);
        bioset_exit(&mddev->sync_set);
-       if (mddev->level != 1 && mddev->level != 10)
-               bioset_exit(&mddev->io_acct_set);
        kfree(mddev);
 }
 
@@ -5912,7 +5897,6 @@ int md_run(struct mddev *mddev)
                /* Warn if this is a potentially silly
                 * configuration.
                 */
-               char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
                struct md_rdev *rdev2;
                int warned = 0;
 
@@ -5921,10 +5905,10 @@ int md_run(struct mddev *mddev)
                                if (rdev < rdev2 &&
                                    rdev->bdev->bd_disk ==
                                    rdev2->bdev->bd_disk) {
-                                       pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
+                                       pr_warn("%s: WARNING: %pg appears to be on the same physical disk as %pg.\n",
                                                mdname(mddev),
-                                               bdevname(rdev->bdev,b),
-                                               bdevname(rdev2->bdev,b2));
+                                               rdev->bdev,
+                                               rdev2->bdev);
                                        warned = 1;
                                }
                        }
@@ -6213,7 +6197,7 @@ static void __md_stop_writes(struct mddev *mddev)
                flush_workqueue(md_misc_wq);
        if (mddev->sync_thread) {
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-               md_reap_sync_thread(mddev);
+               md_reap_sync_thread(mddev, true);
        }
 
        del_timer_sync(&mddev->safemode_timer);
@@ -6285,8 +6269,6 @@ void md_stop(struct mddev *mddev)
        __md_stop(mddev);
        bioset_exit(&mddev->bio_set);
        bioset_exit(&mddev->sync_set);
-       if (mddev->level != 1 && mddev->level != 10)
-               bioset_exit(&mddev->io_acct_set);
 }
 
 EXPORT_SYMBOL_GPL(md_stop);
@@ -6452,8 +6434,7 @@ static void autorun_array(struct mddev *mddev)
        pr_info("md: running: ");
 
        rdev_for_each(rdev, mddev) {
-               char b[BDEVNAME_SIZE];
-               pr_cont("<%s>", bdevname(rdev->bdev,b));
+               pr_cont("<%pg>", rdev->bdev);
        }
        pr_cont("\n");
 
@@ -6480,7 +6461,6 @@ static void autorun_devices(int part)
 {
        struct md_rdev *rdev0, *rdev, *tmp;
        struct mddev *mddev;
-       char b[BDEVNAME_SIZE];
 
        pr_info("md: autorun ...\n");
        while (!list_empty(&pending_raid_disks)) {
@@ -6490,12 +6470,12 @@ static void autorun_devices(int part)
                rdev0 = list_entry(pending_raid_disks.next,
                                         struct md_rdev, same_set);
 
-               pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
+               pr_debug("md: considering %pg ...\n", rdev0->bdev);
                INIT_LIST_HEAD(&candidates);
                rdev_for_each_list(rdev, tmp, &pending_raid_disks)
                        if (super_90_load(rdev, rdev0, 0) >= 0) {
-                               pr_debug("md:  adding %s ...\n",
-                                        bdevname(rdev->bdev,b));
+                               pr_debug("md:  adding %pg ...\n",
+                                        rdev->bdev);
                                list_move(&rdev->same_set, &candidates);
                        }
                /*
@@ -6512,8 +6492,8 @@ static void autorun_devices(int part)
                        unit = MINOR(dev);
                }
                if (rdev0->preferred_minor != unit) {
-                       pr_warn("md: unit number in %s is bad: %d\n",
-                               bdevname(rdev0->bdev, b), rdev0->preferred_minor);
+                       pr_warn("md: unit number in %pg is bad: %d\n",
+                               rdev0->bdev, rdev0->preferred_minor);
                        break;
                }
 
@@ -6526,8 +6506,8 @@ static void autorun_devices(int part)
                        pr_warn("md: %s locked, cannot run\n", mdname(mddev));
                else if (mddev->raid_disks || mddev->major_version
                         || !list_empty(&mddev->disks)) {
-                       pr_warn("md: %s already running, cannot run %s\n",
-                               mdname(mddev), bdevname(rdev0->bdev,b));
+                       pr_warn("md: %s already running, cannot run %pg\n",
+                               mdname(mddev), rdev0->bdev);
                        mddev_unlock(mddev);
                } else {
                        pr_debug("md: created %s\n", mdname(mddev));
@@ -6701,7 +6681,6 @@ static int get_disk_info(struct mddev *mddev, void __user * arg)
 
 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
 {
-       char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        struct md_rdev *rdev;
        dev_t dev = MKDEV(info->major,info->minor);
 
@@ -6731,9 +6710,9 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
                        err = super_types[mddev->major_version]
                                .load_super(rdev, rdev0, mddev->minor_version);
                        if (err < 0) {
-                               pr_warn("md: %s has different UUID to %s\n",
-                                       bdevname(rdev->bdev,b),
-                                       bdevname(rdev0->bdev,b2));
+                               pr_warn("md: %pg has different UUID to %pg\n",
+                                       rdev->bdev,
+                                       rdev0->bdev);
                                export_rdev(rdev);
                                return -EINVAL;
                        }
@@ -6908,7 +6887,6 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
 
 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
 {
-       char b[BDEVNAME_SIZE];
        struct md_rdev *rdev;
 
        if (!mddev->pers)
@@ -6943,14 +6921,13 @@ kick_rdev:
 
        return 0;
 busy:
-       pr_debug("md: cannot remove active disk %s from %s ...\n",
-                bdevname(rdev->bdev,b), mdname(mddev));
+       pr_debug("md: cannot remove active disk %pg from %s ...\n",
+                rdev->bdev, mdname(mddev));
        return -EBUSY;
 }
 
 static int hot_add_disk(struct mddev *mddev, dev_t dev)
 {
-       char b[BDEVNAME_SIZE];
        int err;
        struct md_rdev *rdev;
 
@@ -6983,8 +6960,8 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
        rdev->sectors = rdev->sb_start;
 
        if (test_bit(Faulty, &rdev->flags)) {
-               pr_warn("md: can not hot-add faulty %s disk to %s!\n",
-                       bdevname(rdev->bdev,b), mdname(mddev));
+               pr_warn("md: can not hot-add faulty %pg disk to %s!\n",
+                       rdev->bdev, mdname(mddev));
                err = -EINVAL;
                goto abort_export;
        }
@@ -7011,8 +6988,8 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
         * disable on the whole MD.
         */
        if (!blk_queue_nowait(bdev_get_queue(rdev->bdev))) {
-               pr_info("%s: Disabling nowait because %s does not support nowait\n",
-                       mdname(mddev), bdevname(rdev->bdev, b));
+               pr_info("%s: Disabling nowait because %pg does not support nowait\n",
+                       mdname(mddev), rdev->bdev);
                blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
        }
        /*
@@ -7963,17 +7940,22 @@ EXPORT_SYMBOL(md_register_thread);
 
 void md_unregister_thread(struct md_thread **threadp)
 {
-       struct md_thread *thread = *threadp;
-       if (!thread)
-               return;
-       pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
-       /* Locking ensures that mddev_unlock does not wake_up a
+       struct md_thread *thread;
+
+       /*
+        * Locking ensures that mddev_unlock does not wake_up a
         * non-existent thread
         */
        spin_lock(&pers_lock);
+       thread = *threadp;
+       if (!thread) {
+               spin_unlock(&pers_lock);
+               return;
+       }
        *threadp = NULL;
        spin_unlock(&pers_lock);
 
+       pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
        kthread_stop(thread->tsk);
        kfree(thread);
 }
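
md_unregister_thread() now loads and clears *threadp inside pers_lock rather than testing the pointer before taking the lock, so a racing caller cannot observe the same non-NULL thread and stop or free it twice; the actual kthread_stop()/kfree() still happens outside the lock, exactly once. A compact pthread sketch of that claim-and-clear-under-the-lock pattern (illustrative names):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t pers_lock = PTHREAD_MUTEX_INITIALIZER;

struct md_thread { int dummy; };

/* at most one caller gets a non-NULL thread back; everyone else sees
 * NULL under the lock and returns early */
static void unregister_thread(struct md_thread **threadp)
{
        struct md_thread *thread;

        pthread_mutex_lock(&pers_lock);
        thread = *threadp;
        if (!thread) {
                pthread_mutex_unlock(&pers_lock);
                return;
        }
        *threadp = NULL;
        pthread_mutex_unlock(&pers_lock);

        /* stop/free happens outside the lock, exactly once */
        printf("stopping thread %p\n", (void *)thread);
        free(thread);
}

int main(void)
{
        struct md_thread *t = malloc(sizeof(*t));

        if (!t)
                return 1;
        unregister_thread(&t);   /* stops and frees */
        unregister_thread(&t);   /* sees NULL, does nothing */
        return 0;
}
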
@@ -8012,10 +7994,8 @@ static void status_unused(struct seq_file *seq)
        seq_printf(seq, "unused devices: ");
 
        list_for_each_entry(rdev, &pending_raid_disks, same_set) {
-               char b[BDEVNAME_SIZE];
                i++;
-               seq_printf(seq, "%s ",
-                             bdevname(rdev->bdev,b));
+               seq_printf(seq, "%pg ", rdev->bdev);
        }
        if (!i)
                seq_printf(seq, "<none>");
@@ -8255,9 +8235,8 @@ static int md_seq_show(struct seq_file *seq, void *v)
                sectors = 0;
                rcu_read_lock();
                rdev_for_each_rcu(rdev, mddev) {
-                       char b[BDEVNAME_SIZE];
-                       seq_printf(seq, " %s[%d]",
-                               bdevname(rdev->bdev,b), rdev->desc_nr);
+                       seq_printf(seq, " %pg[%d]", rdev->bdev, rdev->desc_nr);
+
                        if (test_bit(WriteMostly, &rdev->flags))
                                seq_printf(seq, "(W)");
                        if (test_bit(Journal, &rdev->flags))
@@ -9324,7 +9303,7 @@ void md_check_recovery(struct mddev *mddev)
                         * ->spare_active and clear saved_raid_disk
                         */
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                       md_reap_sync_thread(mddev);
+                       md_reap_sync_thread(mddev, true);
                        clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                        clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                        clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
@@ -9359,7 +9338,7 @@ void md_check_recovery(struct mddev *mddev)
                        goto unlock;
                }
                if (mddev->sync_thread) {
-                       md_reap_sync_thread(mddev);
+                       md_reap_sync_thread(mddev, true);
                        goto unlock;
                }
                /* Set RUNNING before clearing NEEDED to avoid
@@ -9432,14 +9411,18 @@ void md_check_recovery(struct mddev *mddev)
 }
 EXPORT_SYMBOL(md_check_recovery);
 
-void md_reap_sync_thread(struct mddev *mddev)
+void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held)
 {
        struct md_rdev *rdev;
        sector_t old_dev_sectors = mddev->dev_sectors;
        bool is_reshaped = false;
 
+       if (reconfig_mutex_held)
+               mddev_unlock(mddev);
        /* resync has finished, collect result */
        md_unregister_thread(&mddev->sync_thread);
+       if (reconfig_mutex_held)
+               mddev_lock_nointr(mddev);
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
            !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
            mddev->degraded != mddev->raid_disks) {
@@ -9652,7 +9635,6 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
        struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
        struct md_rdev *rdev2, *tmp;
        int role, ret;
-       char b[BDEVNAME_SIZE];
 
        /*
         * If size is changed in another node then we need to
@@ -9676,7 +9658,8 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
 
                if (test_bit(Candidate, &rdev2->flags)) {
                        if (role == MD_DISK_ROLE_FAULTY) {
-                               pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
+                               pr_info("md: Removing Candidate device %pg because add failed\n",
+                                       rdev2->bdev);
                                md_kick_rdev_from_array(rdev2);
                                continue;
                        }
@@ -9693,8 +9676,8 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
                              MD_FEATURE_RESHAPE_ACTIVE)) {
                                rdev2->saved_raid_disk = role;
                                ret = remove_and_add_spares(mddev, rdev2);
-                               pr_info("Activated spare: %s\n",
-                                       bdevname(rdev2->bdev,b));
+                               pr_info("Activated spare: %pg\n",
+                                       rdev2->bdev);
                                /* wakeup mddev->thread here, so array could
                                 * perform resync with the new activated disk */
                                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
index cf2cbb1..5f62c46 100644 (file)
@@ -719,7 +719,7 @@ extern struct md_thread *md_register_thread(
 extern void md_unregister_thread(struct md_thread **threadp);
 extern void md_wakeup_thread(struct md_thread *thread);
 extern void md_check_recovery(struct mddev *mddev);
-extern void md_reap_sync_thread(struct mddev *mddev);
+extern void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held);
 extern int mddev_init_writes_pending(struct mddev *mddev);
 extern bool md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);
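
md_reap_sync_thread() grew a reconfig_mutex_held flag: callers that already hold mddev's reconfig mutex (action_store(), __md_stop_writes(), md_check_recovery()) now have it dropped around md_unregister_thread(&mddev->sync_thread) and retaken afterwards, presumably because the sync thread may itself need that mutex before it can exit, while dm-raid's raid_message() passes false since it never held it. A small pthread sketch of the drop-the-lock-around-a-blocking-join shape (illustrative names; the real locking rules are the kernel's, not this toy's):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t reconfig_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t sync_thread;

/* the "sync thread": needs reconfig_mutex once before it can finish */
static void *sync_worker(void *arg)
{
        (void)arg;
        sleep(1);
        pthread_mutex_lock(&reconfig_mutex);
        puts("sync worker: final bookkeeping under the mutex");
        pthread_mutex_unlock(&reconfig_mutex);
        return NULL;
}

/* joining the worker while holding the mutex would deadlock, so a
 * caller that holds it drops it around the join and retakes it after */
static void reap_sync_thread(int mutex_held)
{
        if (mutex_held)
                pthread_mutex_unlock(&reconfig_mutex);

        pthread_join(sync_thread, NULL);   /* the blocking "unregister" */

        if (mutex_held)
                pthread_mutex_lock(&reconfig_mutex);
        puts("sync thread reaped, results collected");
}

int main(void)
{
        if (pthread_create(&sync_thread, NULL, sync_worker, NULL))
                return 1;

        pthread_mutex_lock(&reconfig_mutex);   /* caller already holds it */
        reap_sync_thread(1);
        pthread_mutex_unlock(&reconfig_mutex);
        return 0;
}
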
index e11701e..78addfe 100644 (file)
@@ -37,7 +37,6 @@ static void dump_zones(struct mddev *mddev)
        int j, k;
        sector_t zone_size = 0;
        sector_t zone_start = 0;
-       char b[BDEVNAME_SIZE];
        struct r0conf *conf = mddev->private;
        int raid_disks = conf->strip_zone[0].nb_dev;
        pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
@@ -48,9 +47,8 @@ static void dump_zones(struct mddev *mddev)
                int len = 0;
 
                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
-                       len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
-                                       bdevname(conf->devlist[j*raid_disks
-                                                              + k]->bdev, b));
+                       len += snprintf(line+len, 200-len, "%s%pg", k?"/":"",
+                               conf->devlist[j * raid_disks + k]->bdev);
                pr_debug("md: zone%d=[%s]\n", j, line);
 
                zone_size  = conf->strip_zone[j].zone_end - zone_start;
@@ -69,8 +67,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
        struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
        struct strip_zone *zone;
        int cnt;
-       char b[BDEVNAME_SIZE];
-       char b2[BDEVNAME_SIZE];
        struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
        unsigned blksize = 512;
 
@@ -78,9 +74,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
        if (!conf)
                return -ENOMEM;
        rdev_for_each(rdev1, mddev) {
-               pr_debug("md/raid0:%s: looking at %s\n",
+               pr_debug("md/raid0:%s: looking at %pg\n",
                         mdname(mddev),
-                        bdevname(rdev1->bdev, b));
+                        rdev1->bdev);
                c = 0;
 
                /* round size to chunk_size */
@@ -92,12 +88,12 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
                                      rdev1->bdev->bd_disk->queue));
 
                rdev_for_each(rdev2, mddev) {
-                       pr_debug("md/raid0:%s:   comparing %s(%llu)"
-                                " with %s(%llu)\n",
+                       pr_debug("md/raid0:%s:   comparing %pg(%llu)"
+                                " with %pg(%llu)\n",
                                 mdname(mddev),
-                                bdevname(rdev1->bdev,b),
+                                rdev1->bdev,
                                 (unsigned long long)rdev1->sectors,
-                                bdevname(rdev2->bdev,b2),
+                                rdev2->bdev,
                                 (unsigned long long)rdev2->sectors);
                        if (rdev2 == rdev1) {
                                pr_debug("md/raid0:%s:   END\n",
@@ -225,15 +221,15 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
                for (j=0; j<cnt; j++) {
                        rdev = conf->devlist[j];
                        if (rdev->sectors <= zone->dev_start) {
-                               pr_debug("md/raid0:%s: checking %s ... nope\n",
+                               pr_debug("md/raid0:%s: checking %pg ... nope\n",
                                         mdname(mddev),
-                                        bdevname(rdev->bdev, b));
+                                        rdev->bdev);
                                continue;
                        }
-                       pr_debug("md/raid0:%s: checking %s ..."
+                       pr_debug("md/raid0:%s: checking %pg ..."
                                 " contained as device %d\n",
                                 mdname(mddev),
-                                bdevname(rdev->bdev, b), c);
+                                rdev->bdev, c);
                        dev[c] = rdev;
                        c++;
                        if (!smallest || rdev->sectors < smallest->sectors) {
@@ -362,7 +358,6 @@ static void free_conf(struct mddev *mddev, struct r0conf *conf)
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
-       mddev->private = NULL;
 }
 
 static void raid0_free(struct mddev *mddev, void *priv)
index 99d5af1..258d4eb 100644 (file)
@@ -402,10 +402,9 @@ static void raid1_end_read_request(struct bio *bio)
                /*
                 * oops, read error:
                 */
-               char b[BDEVNAME_SIZE];
-               pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
+               pr_err_ratelimited("md/raid1:%s: %pg: rescheduling sector %llu\n",
                                   mdname(conf->mddev),
-                                  bdevname(rdev->bdev, b),
+                                  rdev->bdev,
                                   (unsigned long long)r1_bio->sector);
                set_bit(R1BIO_ReadError, &r1_bio->state);
                reschedule_retry(r1_bio);
@@ -1283,10 +1282,10 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
        mirror = conf->mirrors + rdisk;
 
        if (r1bio_existed)
-               pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
+               pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %pg\n",
                                    mdname(mddev),
                                    (unsigned long long)r1_bio->sector,
-                                   bdevname(mirror->rdev->bdev, b));
+                                   mirror->rdev->bdev);
 
        if (test_bit(WriteMostly, &mirror->rdev->flags) &&
            bitmap) {
@@ -1659,7 +1658,6 @@ static void raid1_status(struct seq_file *seq, struct mddev *mddev)
  */
 static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 {
-       char b[BDEVNAME_SIZE];
        struct r1conf *conf = mddev->private;
        unsigned long flags;
 
@@ -1686,9 +1684,9 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        set_mask_bits(&mddev->sb_flags, 0,
                      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
-       pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
+       pr_crit("md/raid1:%s: Disk failure on %pg, disabling device.\n"
                "md/raid1:%s: Operation continuing on %d devices.\n",
-               mdname(mddev), bdevname(rdev->bdev, b),
+               mdname(mddev), rdev->bdev,
                mdname(mddev), conf->raid_disks - mddev->degraded);
 }
 
@@ -1706,13 +1704,12 @@ static void print_conf(struct r1conf *conf)
 
        rcu_read_lock();
        for (i = 0; i < conf->raid_disks; i++) {
-               char b[BDEVNAME_SIZE];
                struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
                if (rdev)
-                       pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+                       pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
                                 i, !test_bit(In_sync, &rdev->flags),
                                 !test_bit(Faulty, &rdev->flags),
-                                bdevname(rdev->bdev,b));
+                                rdev->bdev);
        }
        rcu_read_unlock();
 }
@@ -2347,7 +2344,6 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
                }
                d = start;
                while (d != read_disk) {
-                       char b[BDEVNAME_SIZE];
                        if (d==0)
                                d = conf->raid_disks * 2;
                        d--;
@@ -2360,11 +2356,11 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
                                if (r1_sync_page_io(rdev, sect, s,
                                                    conf->tmppage, READ)) {
                                        atomic_add(s, &rdev->corrected_errors);
-                                       pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
+                                       pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
                                                mdname(mddev), s,
                                                (unsigned long long)(sect +
                                                                     rdev->data_offset),
-                                               bdevname(rdev->bdev, b));
+                                               rdev->bdev);
                                }
                                rdev_dec_pending(rdev, mddev);
                        } else
index dfa576c..d589f82 100644 (file)
@@ -397,10 +397,9 @@ static void raid10_end_read_request(struct bio *bio)
                /*
                 * oops, read error - keep the refcount on the rdev
                 */
-               char b[BDEVNAME_SIZE];
-               pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
+               pr_err_ratelimited("md/raid10:%s: %pg: rescheduling sector %llu\n",
                                   mdname(conf->mddev),
-                                  bdevname(rdev->bdev, b),
+                                  rdev->bdev,
                                   (unsigned long long)r10_bio->sector);
                set_bit(R10BIO_ReadError, &r10_bio->state);
                reschedule_retry(r10_bio);
@@ -1187,9 +1186,9 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
                return;
        }
        if (err_rdev)
-               pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
+               pr_err_ratelimited("md/raid10:%s: %pg: redirecting sector %llu to another mirror\n",
                                   mdname(mddev),
-                                  bdevname(rdev->bdev, b),
+                                  rdev->bdev,
                                   (unsigned long long)r10_bio->sector);
        if (max_sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, max_sectors,
@@ -1987,7 +1986,6 @@ static int enough(struct r10conf *conf, int ignore)
  */
 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
 {
-       char b[BDEVNAME_SIZE];
        struct r10conf *conf = mddev->private;
        unsigned long flags;
 
@@ -2010,9 +2008,9 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
        set_mask_bits(&mddev->sb_flags, 0,
                      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
        spin_unlock_irqrestore(&conf->device_lock, flags);
-       pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
+       pr_crit("md/raid10:%s: Disk failure on %pg, disabling device.\n"
                "md/raid10:%s: Operation continuing on %d devices.\n",
-               mdname(mddev), bdevname(rdev->bdev, b),
+               mdname(mddev), rdev->bdev,
                mdname(mddev), conf->geo.raid_disks - mddev->degraded);
 }
 
@@ -2032,13 +2030,12 @@ static void print_conf(struct r10conf *conf)
        /* This is only called with ->reconfig_mutex held, so
         * rcu protection of rdev is not needed */
        for (i = 0; i < conf->geo.raid_disks; i++) {
-               char b[BDEVNAME_SIZE];
                rdev = conf->mirrors[i].rdev;
                if (rdev)
-                       pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+                       pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
                                 i, !test_bit(In_sync, &rdev->flags),
                                 !test_bit(Faulty, &rdev->flags),
-                                bdevname(rdev->bdev,b));
+                                rdev->bdev);
        }
 }
 
@@ -2691,14 +2688,11 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
        check_decay_read_errors(mddev, rdev);
        atomic_inc(&rdev->read_errors);
        if (atomic_read(&rdev->read_errors) > max_read_errors) {
-               char b[BDEVNAME_SIZE];
-               bdevname(rdev->bdev, b);
-
-               pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
-                         mdname(mddev), b,
+               pr_notice("md/raid10:%s: %pg: Raid device exceeded read_error threshold [cur %d:max %d]\n",
+                         mdname(mddev), rdev->bdev,
                          atomic_read(&rdev->read_errors), max_read_errors);
-               pr_notice("md/raid10:%s: %s: Failing raid device\n",
-                         mdname(mddev), b);
+               pr_notice("md/raid10:%s: %pg: Failing raid device\n",
+                         mdname(mddev), rdev->bdev);
                md_error(mddev, rdev);
                r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
                return;
@@ -2768,8 +2762,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                /* write it back and re-read */
                rcu_read_lock();
                while (sl != r10_bio->read_slot) {
-                       char b[BDEVNAME_SIZE];
-
                        if (sl==0)
                                sl = conf->copies;
                        sl--;
@@ -2788,24 +2780,22 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                                             s, conf->tmppage, WRITE)
                            == 0) {
                                /* Well, this device is dead */
-                               pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
+                               pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %pg)\n",
                                          mdname(mddev), s,
                                          (unsigned long long)(
                                                  sect +
                                                  choose_data_offset(r10_bio,
                                                                     rdev)),
-                                         bdevname(rdev->bdev, b));
-                               pr_notice("md/raid10:%s: %s: failing drive\n",
+                                         rdev->bdev);
+                               pr_notice("md/raid10:%s: %pg: failing drive\n",
                                          mdname(mddev),
-                                         bdevname(rdev->bdev, b));
+                                         rdev->bdev);
                        }
                        rdev_dec_pending(rdev, mddev);
                        rcu_read_lock();
                }
                sl = start;
                while (sl != r10_bio->read_slot) {
-                       char b[BDEVNAME_SIZE];
-
                        if (sl==0)
                                sl = conf->copies;
                        sl--;
@@ -2825,23 +2815,23 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                                                 READ)) {
                        case 0:
                                /* Well, this device is dead */
-                               pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
+                               pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %pg)\n",
                                       mdname(mddev), s,
                                       (unsigned long long)(
                                               sect +
                                               choose_data_offset(r10_bio, rdev)),
-                                      bdevname(rdev->bdev, b));
-                               pr_notice("md/raid10:%s: %s: failing drive\n",
+                                      rdev->bdev);
+                               pr_notice("md/raid10:%s: %pg: failing drive\n",
                                       mdname(mddev),
-                                      bdevname(rdev->bdev, b));
+                                      rdev->bdev);
                                break;
                        case 1:
-                               pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
+                               pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %pg)\n",
                                       mdname(mddev), s,
                                       (unsigned long long)(
                                               sect +
                                               choose_data_offset(r10_bio, rdev)),
-                                      bdevname(rdev->bdev, b));
+                                      rdev->bdev);
                                atomic_add(s, &rdev->corrected_errors);
                        }
 
index 094a404..83c184e 100644 (file)
@@ -3064,11 +3064,10 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 {
        struct request_queue *q = bdev_get_queue(rdev->bdev);
        struct r5l_log *log;
-       char b[BDEVNAME_SIZE];
        int ret;
 
-       pr_debug("md/raid:%s: using device %s as journal\n",
-                mdname(conf->mddev), bdevname(rdev->bdev, b));
+       pr_debug("md/raid:%s: using device %pg as journal\n",
+                mdname(conf->mddev), rdev->bdev);
 
        if (PAGE_SIZE != 4096)
                return -EINVAL;
index 55d065a..973e2e0 100644 (file)
@@ -798,7 +798,6 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
        int data_disks;
        int i;
        int ret = 0;
-       char b[BDEVNAME_SIZE];
        unsigned int pp_size = le32_to_cpu(e->pp_size);
        unsigned int data_size = le32_to_cpu(e->data_size);
 
@@ -894,8 +893,8 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
                                break;
                        }
 
-                       pr_debug("%s:%*s reading data member disk %s sector %llu\n",
-                                __func__, indent, "", bdevname(rdev->bdev, b),
+                       pr_debug("%s:%*s reading data member disk %pg sector %llu\n",
+                                __func__, indent, "", rdev->bdev,
                                 (unsigned long long)sector);
                        if (!sync_page_io(rdev, sector, block_size, page2,
                                        REQ_OP_READ, 0, false)) {
@@ -942,10 +941,10 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
                                        conf->disks[sh.pd_idx].rdev, 1);
 
                BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
-               pr_debug("%s:%*s write parity at sector %llu, disk %s\n",
+               pr_debug("%s:%*s write parity at sector %llu, disk %pg\n",
                         __func__, indent, "",
                         (unsigned long long)parity_sector,
-                        bdevname(parity_rdev->bdev, b));
+                        parity_rdev->bdev);
                if (!sync_page_io(parity_rdev, parity_sector, block_size,
                                page1, REQ_OP_WRITE, 0, false)) {
                        pr_debug("%s:%*s parity write error!\n", __func__,
@@ -1255,7 +1254,6 @@ void ppl_exit_log(struct r5conf *conf)
 
 static int ppl_validate_rdev(struct md_rdev *rdev)
 {
-       char b[BDEVNAME_SIZE];
        int ppl_data_sectors;
        int ppl_size_new;
 
@@ -1272,8 +1270,8 @@ static int ppl_validate_rdev(struct md_rdev *rdev)
                                RAID5_STRIPE_SECTORS((struct r5conf *)rdev->mddev->private));
 
        if (ppl_data_sectors <= 0) {
-               pr_warn("md/raid:%s: PPL space too small on %s\n",
-                       mdname(rdev->mddev), bdevname(rdev->bdev, b));
+               pr_warn("md/raid:%s: PPL space too small on %pg\n",
+                       mdname(rdev->mddev), rdev->bdev);
                return -ENOSPC;
        }
 
@@ -1283,16 +1281,16 @@ static int ppl_validate_rdev(struct md_rdev *rdev)
             rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
            (rdev->ppl.sector >= rdev->data_offset &&
             rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
-               pr_warn("md/raid:%s: PPL space overlaps with data on %s\n",
-                       mdname(rdev->mddev), bdevname(rdev->bdev, b));
+               pr_warn("md/raid:%s: PPL space overlaps with data on %pg\n",
+                       mdname(rdev->mddev), rdev->bdev);
                return -EINVAL;
        }
 
        if (!rdev->mddev->external &&
            ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
             (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
-               pr_warn("md/raid:%s: PPL space overlaps with superblock on %s\n",
-                       mdname(rdev->mddev), bdevname(rdev->bdev, b));
+               pr_warn("md/raid:%s: PPL space overlaps with superblock on %pg\n",
+                       mdname(rdev->mddev), rdev->bdev);
                return -EINVAL;
        }
 
@@ -1463,14 +1461,13 @@ int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
        struct ppl_conf *ppl_conf = conf->log_private;
        struct ppl_log *log;
        int ret = 0;
-       char b[BDEVNAME_SIZE];
 
        if (!rdev)
                return -EINVAL;
 
-       pr_debug("%s: disk: %d operation: %s dev: %s\n",
+       pr_debug("%s: disk: %d operation: %s dev: %pg\n",
                 __func__, rdev->raid_disk, add ? "add" : "remove",
-                bdevname(rdev->bdev, b));
+                rdev->bdev);
 
        if (rdev->raid_disk < 0)
                return 0;
index 39038fa..5d09256 100644 (file)
@@ -2686,7 +2686,6 @@ static void raid5_end_read_request(struct bio * bi)
        struct stripe_head *sh = bi->bi_private;
        struct r5conf *conf = sh->raid_conf;
        int disks = sh->disks, i;
-       char b[BDEVNAME_SIZE];
        struct md_rdev *rdev = NULL;
        sector_t s;
 
@@ -2723,10 +2722,10 @@ static void raid5_end_read_request(struct bio * bi)
                         * any error
                         */
                        pr_info_ratelimited(
-                               "md/raid:%s: read error corrected (%lu sectors at %llu on %s)\n",
+                               "md/raid:%s: read error corrected (%lu sectors at %llu on %pg)\n",
                                mdname(conf->mddev), RAID5_STRIPE_SECTORS(conf),
                                (unsigned long long)s,
-                               bdevname(rdev->bdev, b));
+                               rdev->bdev);
                        atomic_add(RAID5_STRIPE_SECTORS(conf), &rdev->corrected_errors);
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
@@ -2743,7 +2742,6 @@ static void raid5_end_read_request(struct bio * bi)
                if (atomic_read(&rdev->read_errors))
                        atomic_set(&rdev->read_errors, 0);
        } else {
-               const char *bdn = bdevname(rdev->bdev, b);
                int retry = 0;
                int set_bad = 0;
 
@@ -2752,25 +2750,25 @@ static void raid5_end_read_request(struct bio * bi)
                        atomic_inc(&rdev->read_errors);
                if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
                        pr_warn_ratelimited(
-                               "md/raid:%s: read error on replacement device (sector %llu on %s).\n",
+                               "md/raid:%s: read error on replacement device (sector %llu on %pg).\n",
                                mdname(conf->mddev),
                                (unsigned long long)s,
-                               bdn);
+                               rdev->bdev);
                else if (conf->mddev->degraded >= conf->max_degraded) {
                        set_bad = 1;
                        pr_warn_ratelimited(
-                               "md/raid:%s: read error not correctable (sector %llu on %s).\n",
+                               "md/raid:%s: read error not correctable (sector %llu on %pg).\n",
                                mdname(conf->mddev),
                                (unsigned long long)s,
-                               bdn);
+                               rdev->bdev);
                } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
                        /* Oh, no!!! */
                        set_bad = 1;
                        pr_warn_ratelimited(
-                               "md/raid:%s: read error NOT corrected!! (sector %llu on %s).\n",
+                               "md/raid:%s: read error NOT corrected!! (sector %llu on %pg).\n",
                                mdname(conf->mddev),
                                (unsigned long long)s,
-                               bdn);
+                               rdev->bdev);
                } else if (atomic_read(&rdev->read_errors)
                         > conf->max_nr_stripes) {
                        if (!test_bit(Faulty, &rdev->flags)) {
@@ -2778,8 +2776,8 @@ static void raid5_end_read_request(struct bio * bi)
                                    mdname(conf->mddev),
                                    atomic_read(&rdev->read_errors),
                                    conf->max_nr_stripes);
-                               pr_warn("md/raid:%s: Too many read errors, failing device %s.\n",
-                                   mdname(conf->mddev), bdn);
+                               pr_warn("md/raid:%s: Too many read errors, failing device %pg.\n",
+                                   mdname(conf->mddev), rdev->bdev);
                        }
                } else
                        retry = 1;
@@ -2891,13 +2889,12 @@ static void raid5_end_write_request(struct bio *bi)
 
 static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
 {
-       char b[BDEVNAME_SIZE];
        struct r5conf *conf = mddev->private;
        unsigned long flags;
        pr_debug("raid456: error called\n");
 
-       pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n",
-               mdname(mddev), bdevname(rdev->bdev, b));
+       pr_crit("md/raid:%s: Disk failure on %pg, disabling device.\n",
+               mdname(mddev), rdev->bdev);
 
        spin_lock_irqsave(&conf->device_lock, flags);
        set_bit(Faulty, &rdev->flags);
@@ -7359,9 +7356,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
                }
 
                if (test_bit(In_sync, &rdev->flags)) {
-                       char b[BDEVNAME_SIZE];
-                       pr_info("md/raid:%s: device %s operational as raid disk %d\n",
-                               mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
+                       pr_info("md/raid:%s: device %pg operational as raid disk %d\n",
+                               mdname(mddev), rdev->bdev, raid_disk);
                } else if (rdev->saved_raid_disk != raid_disk)
                        /* Cannot rely on bitmap to complete recovery */
                        conf->fullsync = 1;
@@ -7877,12 +7873,11 @@ static void print_raid5_conf (struct r5conf *conf)
 
        rcu_read_lock();
        for (i = 0; i < conf->raid_disks; i++) {
-               char b[BDEVNAME_SIZE];
                rdev = rcu_dereference(conf->disks[i].rdev);
                if (rdev)
-                       pr_debug(" disk %d, o:%d, dev:%s\n",
+                       pr_debug(" disk %d, o:%d, dev:%pg\n",
                               i, !test_bit(Faulty, &rdev->flags),
-                              bdevname(rdev->bdev, b));
+                              rdev->bdev);
        }
        rcu_read_unlock();
 }
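
All of the md/raid hunks above make the same conversion: the on-stack char b[BDEVNAME_SIZE] scratch buffer plus bdevname() is dropped in favour of the %pg printk format specifier, which lets vsprintf print the device name directly from a struct block_device pointer. A minimal before/after sketch of the shape of the change (the message text is illustrative, not taken from the merge):

	/* before: caller supplies a name buffer for bdevname() */
	char b[BDEVNAME_SIZE];

	pr_info("md/raid:%s: using %s\n",
		mdname(mddev), bdevname(rdev->bdev, b));

	/* after: %pg resolves the name from the block_device itself */
	pr_info("md/raid:%s: using %pg\n",
		mdname(mddev), rdev->bdev);
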
index c12dda7..3155e87 100644 (file)
@@ -773,7 +773,7 @@ static int ati_remote_initialize(struct ati_remote *ati_remote)
 
        /* Set up irq_urb */
        pipe = usb_rcvintpipe(udev, ati_remote->endpoint_in->bEndpointAddress);
-       maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+       maxp = usb_maxpacket(udev, pipe);
        maxp = (maxp > DATA_BUFSIZE) ? DATA_BUFSIZE : maxp;
 
        usb_fill_int_urb(ati_remote->irq_urb, udev, pipe, ati_remote->inbuf,
@@ -784,7 +784,7 @@ static int ati_remote_initialize(struct ati_remote *ati_remote)
 
        /* Set up out_urb */
        pipe = usb_sndintpipe(udev, ati_remote->endpoint_out->bEndpointAddress);
-       maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+       maxp = usb_maxpacket(udev, pipe);
        maxp = (maxp > DATA_BUFSIZE) ? DATA_BUFSIZE : maxp;
 
        usb_fill_int_urb(ati_remote->out_urb, udev, pipe, ati_remote->outbuf,
index 2dc810f..0834d5f 100644 (file)
@@ -1728,7 +1728,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
                pipe = usb_rcvintpipe(dev, ep_in->bEndpointAddress);
        else
                pipe = usb_rcvbulkpipe(dev, ep_in->bEndpointAddress);
-       maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+       maxp = usb_maxpacket(dev, pipe);
 
        ir = kzalloc(sizeof(struct mceusb_dev), GFP_KERNEL);
        if (!ir)
index 16ba85d..deb8533 100644 (file)
@@ -307,7 +307,7 @@ static int streamzap_probe(struct usb_interface *intf,
        }
 
        pipe = usb_rcvintpipe(usbdev, endpoint->bEndpointAddress);
-       maxp = usb_maxpacket(usbdev, pipe, usb_pipeout(pipe));
+       maxp = usb_maxpacket(usbdev, pipe);
 
        if (maxp == 0) {
                dev_err(&intf->dev, "%s: endpoint Max Packet Size is 0!?!\n",
index 98d0b43..7424b20 100644 (file)
@@ -171,7 +171,7 @@ static int xbox_remote_initialize(struct xbox_remote *xbox_remote,
 
        /* Set up irq_urb */
        pipe = usb_rcvintpipe(udev, endpoint_in->bEndpointAddress);
-       maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+       maxp = usb_maxpacket(udev, pipe);
        maxp = (maxp > DATA_BUFSIZE) ? DATA_BUFSIZE : maxp;
 
        usb_fill_int_urb(xbox_remote->irq_urb, udev, pipe, xbox_remote->inbuf,
index 8c2725e..ee04973 100644 (file)
@@ -120,7 +120,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
        pipe = usb_rcvbulkpipe(dev->udev, dev->bulk_in.endp->desc.bEndpointAddress
                                                          & USB_ENDPOINT_NUMBER_MASK);
 
-       size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
+       size = usb_maxpacket(dev->udev, pipe);
        size = size * 15; /* 512 x 8 or 12 or 15 */
 
        dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
index 84602ed..5136e9e 100644 (file)
@@ -340,7 +340,7 @@ static int __tm6000_ir_int_start(struct rc_dev *rc)
                dev->int_in.endp->desc.bEndpointAddress
                & USB_ENDPOINT_NUMBER_MASK);
 
-       size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
+       size = usb_maxpacket(dev->udev, pipe);
        dprintk(1, "IR max size: %d\n", size);
 
        ir->int_urb->transfer_buffer = kzalloc(size, GFP_ATOMIC);
index e293f6f..d855a19 100644 (file)
@@ -570,7 +570,7 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
                               dev->isoc_in.endp->desc.bEndpointAddress &
                               USB_ENDPOINT_NUMBER_MASK);
 
-       size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
+       size = usb_maxpacket(dev->udev, pipe);
 
        if (size > dev->isoc_in.maxsize)
                size = dev->isoc_in.maxsize;
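
The media hunks above track the USB core change that drops usb_maxpacket()'s third argument: the pipe value already encodes the transfer direction, so passing usb_pipeout(pipe) separately was redundant. A hedged caller-side sketch (ep and DATA_BUFSIZE stand in for driver-specific names):

	unsigned int pipe = usb_rcvintpipe(udev, ep->bEndpointAddress);
	int maxp = usb_maxpacket(udev, pipe);	/* was: usb_maxpacket(udev, pipe, usb_pipeout(pipe)) */

	if (maxp == 0)
		return -ENODEV;			/* endpoint reports no packet size */
	if (maxp > DATA_BUFSIZE)
		maxp = DATA_BUFSIZE;		/* clamp to the driver's buffer */
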
index 6c2a421..f305643 100644 (file)
@@ -630,7 +630,7 @@ static irqreturn_t emif_threaded_isr(int irq, void *dev_id)
                dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");
 
                /* If we have Power OFF ability, use it, else try restarting */
-               if (pm_power_off) {
+               if (kernel_can_power_off()) {
                        kernel_power_off();
                } else {
                        WARN(1, "FIXME: NO pm_power_off!!! trying restart\n");
index 3d5b14c..0be5731 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/mfd/tmio.h>
 #include <linux/mfd/tc6393xb.h>
 #include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
 #include <linux/slab.h>
 
 #define SCR_REVID      0x08            /* b Revision ID        */
 
 struct tc6393xb {
        void __iomem            *scr;
+       struct device           *dev;
 
        struct gpio_chip        gpio;
+       struct gpio_desc        *vcc_on;
 
        struct clk              *clk; /* 3,6 Mhz */
 
@@ -497,17 +501,93 @@ static int tc6393xb_gpio_direction_output(struct gpio_chip *chip,
        return 0;
 }
 
-static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base)
+/*
+ * TC6393XB GPIOs as used on TOSA, are the only user of this chip.
+ * GPIOs 2, 5, 8 and 13 are not connected.
+ */
+#define TOSA_GPIO_TG_ON                        0
+#define TOSA_GPIO_L_MUTE               1
+#define TOSA_GPIO_BL_C20MA             3
+#define TOSA_GPIO_CARD_VCC_ON          4
+#define TOSA_GPIO_CHARGE_OFF           6
+#define TOSA_GPIO_CHARGE_OFF_JC                7
+#define TOSA_GPIO_BAT0_V_ON            9
+#define TOSA_GPIO_BAT1_V_ON            10
+#define TOSA_GPIO_BU_CHRG_ON           11
+#define TOSA_GPIO_BAT_SW_ON            12
+#define TOSA_GPIO_BAT0_TH_ON           14
+#define TOSA_GPIO_BAT1_TH_ON           15
+
+
+GPIO_LOOKUP_SINGLE(tosa_lcd_gpio_lookup, "spi2.0", "tc6393xb",
+                  TOSA_GPIO_TG_ON, "tg #pwr", GPIO_ACTIVE_HIGH);
+
+GPIO_LOOKUP_SINGLE(tosa_lcd_bl_gpio_lookup, "i2c-tos-bl", "tc6393xb",
+                  TOSA_GPIO_BL_C20MA, "backlight", GPIO_ACTIVE_HIGH);
+
+GPIO_LOOKUP_SINGLE(tosa_audio_gpio_lookup, "tosa-audio", "tc6393xb",
+                  TOSA_GPIO_L_MUTE, NULL, GPIO_ACTIVE_HIGH);
+
+static struct gpiod_lookup_table tosa_battery_gpio_lookup = {
+       .dev_id = "wm97xx-battery",
+       .table = {
+               GPIO_LOOKUP("tc6393xb", TOSA_GPIO_CHARGE_OFF,
+                           "main charge off", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("tc6393xb", TOSA_GPIO_CHARGE_OFF_JC,
+                           "jacket charge off", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT0_V_ON,
+                           "main battery", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT1_V_ON,
+                           "jacket battery", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BU_CHRG_ON,
+                           "backup battery", GPIO_ACTIVE_HIGH),
+               /* BAT1 and BAT0 thermistors appear to be swapped */
+               GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT1_TH_ON,
+                           "main battery temp", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT0_TH_ON,
+                           "jacket battery temp", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT_SW_ON,
+                           "battery switch", GPIO_ACTIVE_HIGH),
+               { },
+       },
+};
+
+static struct gpiod_lookup_table *tc6393xb_gpio_lookups[] = {
+       &tosa_lcd_gpio_lookup,
+       &tosa_lcd_bl_gpio_lookup,
+       &tosa_audio_gpio_lookup,
+       &tosa_battery_gpio_lookup,
+};
+
+static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb)
 {
-       tc6393xb->gpio.label = "tc6393xb";
-       tc6393xb->gpio.base = gpio_base;
-       tc6393xb->gpio.ngpio = 16;
-       tc6393xb->gpio.set = tc6393xb_gpio_set;
-       tc6393xb->gpio.get = tc6393xb_gpio_get;
-       tc6393xb->gpio.direction_input = tc6393xb_gpio_direction_input;
-       tc6393xb->gpio.direction_output = tc6393xb_gpio_direction_output;
-
-       return gpiochip_add_data(&tc6393xb->gpio, tc6393xb);
+       struct gpio_chip *gc = &tc6393xb->gpio;
+       struct device *dev = tc6393xb->dev;
+       int ret;
+
+       gc->label = "tc6393xb";
+       gc->base = -1; /* Dynamic allocation */
+       gc->ngpio = 16;
+       gc->set = tc6393xb_gpio_set;
+       gc->get = tc6393xb_gpio_get;
+       gc->direction_input = tc6393xb_gpio_direction_input;
+       gc->direction_output = tc6393xb_gpio_direction_output;
+
+       ret = devm_gpiochip_add_data(dev, gc, tc6393xb);
+       if (ret)
+               return dev_err_probe(dev, ret, "failed to add GPIO chip\n");
+
+       /* Register descriptor look-ups for consumers */
+       gpiod_add_lookup_tables(tc6393xb_gpio_lookups, ARRAY_SIZE(tc6393xb_gpio_lookups));
+
+       /* Request some of our own GPIOs */
+       tc6393xb->vcc_on = gpiochip_request_own_desc(gc, TOSA_GPIO_CARD_VCC_ON, "VCC ON",
+                                                    GPIO_ACTIVE_HIGH, GPIOD_OUT_HIGH);
+       if (IS_ERR(tc6393xb->vcc_on))
+               return dev_err_probe(dev, PTR_ERR(tc6393xb->vcc_on),
+                                    "failed to request VCC ON GPIO\n");
+
+       return 0;
 }
 
 /*--------------------------------------------------------------------------*/
@@ -617,6 +697,7 @@ static int tc6393xb_probe(struct platform_device *dev)
                ret = -ENOMEM;
                goto err_kzalloc;
        }
+       tc6393xb->dev = &dev->dev;
 
        raw_spin_lock_init(&tc6393xb->lock);
 
@@ -676,22 +757,12 @@ static int tc6393xb_probe(struct platform_device *dev)
                        tmio_ioread8(tc6393xb->scr + SCR_REVID),
                        (unsigned long) iomem->start, tc6393xb->irq);
 
-       tc6393xb->gpio.base = -1;
-
-       if (tcpd->gpio_base >= 0) {
-               ret = tc6393xb_register_gpio(tc6393xb, tcpd->gpio_base);
-               if (ret)
-                       goto err_gpio_add;
-       }
+       ret = tc6393xb_register_gpio(tc6393xb);
+       if (ret)
+               goto err_gpio_add;
 
        tc6393xb_attach_irq(dev);
 
-       if (tcpd->setup) {
-               ret = tcpd->setup(dev);
-               if (ret)
-                       goto err_setup;
-       }
-
        tc6393xb_cells[TC6393XB_CELL_NAND].platform_data = tcpd->nand_data;
        tc6393xb_cells[TC6393XB_CELL_NAND].pdata_size =
                                                sizeof(*tcpd->nand_data);
@@ -705,15 +776,8 @@ static int tc6393xb_probe(struct platform_device *dev)
        if (!ret)
                return 0;
 
-       if (tcpd->teardown)
-               tcpd->teardown(dev);
-
-err_setup:
        tc6393xb_detach_irq(dev);
-
 err_gpio_add:
-       if (tc6393xb->gpio.base != -1)
-               gpiochip_remove(&tc6393xb->gpio);
        tcpd->disable(dev);
 err_enable:
        clk_disable_unprepare(tc6393xb->clk);
@@ -738,14 +802,8 @@ static int tc6393xb_remove(struct platform_device *dev)
 
        mfd_remove_devices(&dev->dev);
 
-       if (tcpd->teardown)
-               tcpd->teardown(dev);
-
        tc6393xb_detach_irq(dev);
 
-       if (tc6393xb->gpio.base != -1)
-               gpiochip_remove(&tc6393xb->gpio);
-
        ret = tcpd->disable(dev);
        clk_disable_unprepare(tc6393xb->clk);
        iounmap(tc6393xb->scr);
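
With the TC6393XB GPIOs now exposed through gpiod lookup tables and a dynamically allocated base, board code no longer hands out global GPIO numbers; consumers ask for lines by connection ID. A hypothetical consumer-side sketch, assuming the "tg #pwr" entry registered for dev_id "spi2.0" above (the driver and probe context are illustrative, not part of this merge):

	/* in the probe() of the SPI panel driver bound to spi2.0 */
	struct gpio_desc *tg_pwr;

	tg_pwr = devm_gpiod_get(&spi->dev, "tg #pwr", GPIOD_OUT_LOW);
	if (IS_ERR(tg_pwr))
		return dev_err_probe(&spi->dev, PTR_ERR(tg_pwr),
				     "failed to get TG power GPIO\n");

	gpiod_set_value_cansleep(tg_pwr, 1);	/* power up the timing generator */
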
index 92c0611..075f3a3 100644 (file)
@@ -530,11 +530,8 @@ exit_done:
                        }
                        break;
                case OP_SWP:
-                       if (altera_check_stack(stack_ptr, 2, &status)) {
-                               long_tmp = stack[stack_ptr - 2];
-                               stack[stack_ptr - 2] = stack[stack_ptr - 1];
-                               stack[stack_ptr - 1] = long_tmp;
-                       }
+                       if (altera_check_stack(stack_ptr, 2, &status))
+                               swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
                        break;
                case OP_ADD:
                        if (altera_check_stack(stack_ptr, 2, &status)) {
@@ -912,34 +909,22 @@ exit_done:
                         */
 
                        /* SWP  */
-                       if (altera_check_stack(stack_ptr, 2, &status)) {
-                               long_tmp = stack[stack_ptr - 2];
-                               stack[stack_ptr - 2] = stack[stack_ptr - 1];
-                               stack[stack_ptr - 1] = long_tmp;
-                       }
+                       if (altera_check_stack(stack_ptr, 2, &status))
+                               swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
 
                        /* SWPN 7 */
                        index = 7 + 1;
-                       if (altera_check_stack(stack_ptr, index, &status)) {
-                               long_tmp = stack[stack_ptr - index];
-                               stack[stack_ptr - index] = stack[stack_ptr - 1];
-                               stack[stack_ptr - 1] = long_tmp;
-                       }
+                       if (altera_check_stack(stack_ptr, index, &status))
+                               swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
 
                        /* SWP  */
-                       if (altera_check_stack(stack_ptr, 2, &status)) {
-                               long_tmp = stack[stack_ptr - 2];
-                               stack[stack_ptr - 2] = stack[stack_ptr - 1];
-                               stack[stack_ptr - 1] = long_tmp;
-                       }
+                       if (altera_check_stack(stack_ptr, 2, &status))
+                               swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
 
                        /* SWPN 6 */
                        index = 6 + 1;
-                       if (altera_check_stack(stack_ptr, index, &status)) {
-                               long_tmp = stack[stack_ptr - index];
-                               stack[stack_ptr - index] = stack[stack_ptr - 1];
-                               stack[stack_ptr - 1] = long_tmp;
-                       }
+                       if (altera_check_stack(stack_ptr, index, &status))
+                               swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
 
                        /* DUPN 8 */
                        index = 8 + 1;
@@ -950,18 +935,12 @@ exit_done:
 
                        /* SWPN 2 */
                        index = 2 + 1;
-                       if (altera_check_stack(stack_ptr, index, &status)) {
-                               long_tmp = stack[stack_ptr - index];
-                               stack[stack_ptr - index] = stack[stack_ptr - 1];
-                               stack[stack_ptr - 1] = long_tmp;
-                       }
+                       if (altera_check_stack(stack_ptr, index, &status))
+                               swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
 
                        /* SWP  */
-                       if (altera_check_stack(stack_ptr, 2, &status)) {
-                               long_tmp = stack[stack_ptr - 2];
-                               stack[stack_ptr - 2] = stack[stack_ptr - 1];
-                               stack[stack_ptr - 1] = long_tmp;
-                       }
+                       if (altera_check_stack(stack_ptr, 2, &status))
+                               swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);
 
                        /* DUPN 6 */
                        index = 6 + 1;
@@ -1075,11 +1054,8 @@ exit_done:
                         * to swap with top element
                         */
                        index = (args[0]) + 1;
-                       if (altera_check_stack(stack_ptr, index, &status)) {
-                               long_tmp = stack[stack_ptr - index];
-                               stack[stack_ptr - index] = stack[stack_ptr - 1];
-                               stack[stack_ptr - 1] = long_tmp;
-                       }
+                       if (altera_check_stack(stack_ptr, index, &status))
+                               swap(stack[stack_ptr - index], stack[stack_ptr - 1]);
                        break;
                case OP_DUPN:
                        /*
index 066b9ef..3c08150 100644 (file)
@@ -757,20 +757,19 @@ static struct bcm_vk_wkent *bcm_vk_dequeue_pending(struct bcm_vk *vk,
                                                   u16 q_num,
                                                   u16 msg_id)
 {
-       bool found = false;
-       struct bcm_vk_wkent *entry;
+       struct bcm_vk_wkent *entry = NULL, *iter;
 
        spin_lock(&chan->pendq_lock);
-       list_for_each_entry(entry, &chan->pendq[q_num], node) {
-               if (get_msg_id(&entry->to_v_msg[0]) == msg_id) {
-                       list_del(&entry->node);
-                       found = true;
+       list_for_each_entry(iter, &chan->pendq[q_num], node) {
+               if (get_msg_id(&iter->to_v_msg[0]) == msg_id) {
+                       list_del(&iter->node);
+                       entry = iter;
                        bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
                        break;
                }
        }
        spin_unlock(&chan->pendq_lock);
-       return ((found) ? entry : NULL);
+       return entry;
 }
 
 s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk)
@@ -1010,16 +1009,14 @@ ssize_t bcm_vk_read(struct file *p_file,
                                         miscdev);
        struct device *dev = &vk->pdev->dev;
        struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
-       struct bcm_vk_wkent *entry = NULL;
+       struct bcm_vk_wkent *entry = NULL, *iter;
        u32 q_num;
        u32 rsp_length;
-       bool found = false;
 
        if (!bcm_vk_drv_access_ok(vk))
                return -EPERM;
 
        dev_dbg(dev, "Buf count %zu\n", count);
-       found = false;
 
        /*
         * search through the pendq on the to_h chan, and return only those
@@ -1028,13 +1025,13 @@ ssize_t bcm_vk_read(struct file *p_file,
         */
        spin_lock(&chan->pendq_lock);
        for (q_num = 0; q_num < chan->q_nr; q_num++) {
-               list_for_each_entry(entry, &chan->pendq[q_num], node) {
-                       if (entry->ctx->idx == ctx->idx) {
+               list_for_each_entry(iter, &chan->pendq[q_num], node) {
+                       if (iter->ctx->idx == ctx->idx) {
                                if (count >=
-                                   (entry->to_h_blks * VK_MSGQ_BLK_SIZE)) {
-                                       list_del(&entry->node);
+                                   (iter->to_h_blks * VK_MSGQ_BLK_SIZE)) {
+                                       list_del(&iter->node);
                                        atomic_dec(&ctx->pend_cnt);
-                                       found = true;
+                                       entry = iter;
                                } else {
                                        /* buffer not big enough */
                                        rc = -EMSGSIZE;
@@ -1046,7 +1043,7 @@ ssize_t bcm_vk_read(struct file *p_file,
 read_loop_exit:
        spin_unlock(&chan->pendq_lock);
 
-       if (found) {
+       if (entry) {
                /* retrieve the passed down msg_id */
                set_msg_id(&entry->to_h_msg[0], entry->usr_msg_id);
                rsp_length = entry->to_h_blks * VK_MSGQ_BLK_SIZE;
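
The bcm_vk changes above (and the fastrpc ones further down) apply the same list-iterator hardening pattern: the list_for_each_entry() loop variable is never used after the loop, because on a full traversal it ends up pointing at an address computed from the list head, not at a real element. The match is instead copied into a separate pointer that stays NULL unless something was found. A self-contained sketch of the pattern with hypothetical types:

	#include <linux/list.h>

	struct item {
		int id;
		struct list_head node;
	};

	static struct item *find_item(struct list_head *head, int id)
	{
		struct item *found = NULL, *iter;

		list_for_each_entry(iter, head, node) {
			if (iter->id == id) {
				found = iter;	/* capture the match explicitly */
				break;
			}
		}
		/* 'iter' is meaningless here; only 'found' may be dereferenced */
		return found;
	}
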
index 3f514d7..9080f9f 100644 (file)
@@ -317,12 +317,15 @@ static int alcor_pci_probe(struct pci_dev *pdev,
        ret = mfd_add_devices(&pdev->dev, priv->id, alcor_pci_cells,
                        ARRAY_SIZE(alcor_pci_cells), NULL, 0, NULL);
        if (ret < 0)
-               goto error_release_regions;
+               goto error_clear_drvdata;
 
        alcor_pci_aspm_ctrl(priv, 0);
 
        return 0;
 
+error_clear_drvdata:
+       pci_clear_master(pdev);
+       pci_set_drvdata(pdev, NULL);
 error_release_regions:
        pci_release_regions(pdev);
 error_free_ida:
@@ -343,6 +346,7 @@ static void alcor_pci_remove(struct pci_dev *pdev)
        ida_free(&alcor_pci_idr, priv->id);
 
        pci_release_regions(pdev);
+       pci_clear_master(pdev);
        pci_set_drvdata(pdev, NULL);
 }
 
index a77585a..749cc5a 100644 (file)
@@ -57,40 +57,6 @@ static void rts5261_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
                         0xFF, driving[drive_sel][2]);
 }
 
-static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr)
-{
-       struct pci_dev *pdev = pcr->pci;
-       u32 reg;
-
-       /* 0x814~0x817 */
-       pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
-       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
-
-       if (!rts5261_vendor_setting_valid(reg)) {
-               /* Not support MMC default */
-               pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
-               pcr_dbg(pcr, "skip fetch vendor setting\n");
-               return;
-       }
-
-       if (!rts5261_reg_check_mmc_support(reg))
-               pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
-
-       /* TO do: need to add rtd3 function */
-       pcr->rtd3_en = rts5261_reg_to_rtd3(reg);
-
-       if (rts5261_reg_check_reverse_socket(reg))
-               pcr->flags |= PCR_REVERSE_SOCKET;
-
-       /* 0x724~0x727 */
-       pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
-       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
-
-       pcr->aspm_en = rts5261_reg_to_aspm(reg);
-       pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(reg);
-       pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(reg);
-}
-
 static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
 {
        /* Set relink_time to 0 */
@@ -391,11 +357,11 @@ static void rts5261_process_ocp(struct rtsx_pcr *pcr)
 
 }
 
-static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
+static void rts5261_init_from_hw(struct rtsx_pcr *pcr)
 {
        struct pci_dev *pdev = pcr->pci;
-       int retval;
-       u32 lval, i;
+       u32 lval1, lval2, i;
+       u16 setting_reg1, setting_reg2;
        u8 valid, efuse_valid, tmp;
 
        rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
@@ -418,26 +384,70 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
        efuse_valid = ((tmp & 0x0C) >> 2);
        pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid);
 
-       if (efuse_valid == 0) {
-               retval = pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval);
-               if (retval != 0)
-                       pcr_dbg(pcr, "read 0x814 DW fail\n");
-               pcr_dbg(pcr, "DW from 0x814: 0x%x\n", lval);
-               /* 0x816 */
-               valid = (u8)((lval >> 16) & 0x03);
-               pcr_dbg(pcr, "0x816: %d\n", valid);
-       }
+       pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval2);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, lval2);
+       /* 0x816 */
+       valid = (u8)((lval2 >> 16) & 0x03);
+
        rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
                REG_EFUSE_POR, 0);
        pcr_dbg(pcr, "Disable efuse por!\n");
 
-       pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval);
-       lval = lval & 0x00FFFFFF;
-       retval = pci_write_config_dword(pdev, PCR_SETTING_REG2, lval);
-       if (retval != 0)
-               pcr_dbg(pcr, "write config fail\n");
+       if (efuse_valid == 2 || efuse_valid == 3) {
+               if (valid == 3) {
+                       /* Bypass efuse */
+                       setting_reg1 = PCR_SETTING_REG1;
+                       setting_reg2 = PCR_SETTING_REG2;
+               } else {
+                       /* Use efuse data */
+                       setting_reg1 = PCR_SETTING_REG4;
+                       setting_reg2 = PCR_SETTING_REG5;
+               }
+       } else if (efuse_valid == 0) {
+               // default
+               setting_reg1 = PCR_SETTING_REG1;
+               setting_reg2 = PCR_SETTING_REG2;
+       }
+
+       pci_read_config_dword(pdev, setting_reg2, &lval2);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg2, lval2);
+
+       if (!rts5261_vendor_setting_valid(lval2)) {
+               /* Not support MMC default */
+               pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
+               pcr_dbg(pcr, "skip fetch vendor setting\n");
+               return;
+       }
+
+       if (!rts5261_reg_check_mmc_support(lval2))
+               pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
 
-       return retval;
+       pcr->rtd3_en = rts5261_reg_to_rtd3(lval2);
+
+       if (rts5261_reg_check_reverse_socket(lval2))
+               pcr->flags |= PCR_REVERSE_SOCKET;
+
+       pci_read_config_dword(pdev, setting_reg1, &lval1);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg1, lval1);
+
+       pcr->aspm_en = rts5261_reg_to_aspm(lval1);
+       pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(lval1);
+       pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(lval1);
+
+       if (setting_reg1 == PCR_SETTING_REG1) {
+               /* store setting */
+               rtsx_pci_write_register(pcr, 0xFF0C, 0xFF, (u8)(lval1 & 0xFF));
+               rtsx_pci_write_register(pcr, 0xFF0D, 0xFF, (u8)((lval1 >> 8) & 0xFF));
+               rtsx_pci_write_register(pcr, 0xFF0E, 0xFF, (u8)((lval1 >> 16) & 0xFF));
+               rtsx_pci_write_register(pcr, 0xFF0F, 0xFF, (u8)((lval1 >> 24) & 0xFF));
+               rtsx_pci_write_register(pcr, 0xFF10, 0xFF, (u8)(lval2 & 0xFF));
+               rtsx_pci_write_register(pcr, 0xFF11, 0xFF, (u8)((lval2 >> 8) & 0xFF));
+               rtsx_pci_write_register(pcr, 0xFF12, 0xFF, (u8)((lval2 >> 16) & 0xFF));
+
+               pci_write_config_dword(pdev, PCR_SETTING_REG4, lval1);
+               lval2 = lval2 & 0x00FFFFFF;
+               pci_write_config_dword(pdev, PCR_SETTING_REG5, lval2);
+       }
 }
 
 static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
@@ -636,7 +646,6 @@ static void rts5261_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
 }
 
 static const struct pcr_ops rts5261_pcr_ops = {
-       .fetch_vendor_settings = rtsx5261_fetch_vendor_settings,
        .turn_on_led = rts5261_turn_on_led,
        .turn_off_led = rts5261_turn_off_led,
        .extra_init_hw = rts5261_extra_init_hw,
index 59eda55..1ef9b61 100644 (file)
@@ -667,6 +667,7 @@ static int rtsx_usb_probe(struct usb_interface *intf,
        return 0;
 
 out_init_fail:
+       usb_set_intfdata(ucr->pusb_intf, NULL);
        usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
                        ucr->iobuf_dma);
        return ret;
index d80ada8..93ebd17 100644 (file)
@@ -1606,17 +1606,18 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
                                   struct fastrpc_req_munmap *req)
 {
        struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
-       struct fastrpc_buf *buf, *b;
+       struct fastrpc_buf *buf = NULL, *iter, *b;
        struct fastrpc_munmap_req_msg req_msg;
        struct device *dev = fl->sctx->dev;
        int err;
        u32 sc;
 
        spin_lock(&fl->lock);
-       list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
-               if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
+       list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
+               if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) {
+                       buf = iter;
                        break;
-               buf = NULL;
+               }
        }
        spin_unlock(&fl->lock);
 
@@ -1747,17 +1748,18 @@ err_invoke:
 static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
 {
        struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
-       struct fastrpc_map *map = NULL, *m;
+       struct fastrpc_map *map = NULL, *iter, *m;
        struct fastrpc_mem_unmap_req_msg req_msg = { 0 };
        int err = 0;
        u32 sc;
        struct device *dev = fl->sctx->dev;
 
        spin_lock(&fl->lock);
-       list_for_each_entry_safe(map, m, &fl->maps, node) {
-               if ((req->fd < 0 || map->fd == req->fd) && (map->raddr == req->vaddr))
+       list_for_each_entry_safe(iter, m, &fl->maps, node) {
+               if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) {
+                       map = iter;
                        break;
-               map = NULL;
+               }
        }
 
        spin_unlock(&fl->lock);
index 6ebe3c7..934a3a4 100644 (file)
@@ -11,4 +11,4 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
                common/command_buffer.o common/hw_queue.o common/irq.o \
                common/sysfs.o common/hwmon.o common/memory.o \
                common/command_submission.o common/firmware_if.o \
-               common/state_dump.o
+               common/state_dump.o common/memory_mgr.o
index a507110..e13b2b3 100644 (file)
@@ -160,24 +160,6 @@ static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
        }
 }
 
-static void cb_release(struct kref *ref)
-{
-       struct hl_device *hdev;
-       struct hl_cb *cb;
-
-       cb = container_of(ref, struct hl_cb, refcount);
-       hdev = cb->hdev;
-
-       hl_debugfs_remove_cb(cb);
-
-       if (cb->is_mmu_mapped)
-               cb_unmap_mem(cb->ctx, cb);
-
-       hl_ctx_put(cb->ctx);
-
-       cb_do_release(hdev, cb);
-}
-
 static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
                                        int ctx_id, bool internal_cb)
 {
@@ -238,168 +220,175 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
        return cb;
 }
 
-int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
-                       struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
-                       bool map_cb, u64 *handle)
+struct hl_cb_mmap_mem_alloc_args {
+       struct hl_device *hdev;
+       struct hl_ctx *ctx;
+       u32 cb_size;
+       bool internal_cb;
+       bool map_cb;
+};
+
+static void hl_cb_mmap_mem_release(struct hl_mmap_mem_buf *buf)
 {
-       struct hl_cb *cb;
-       bool alloc_new_cb = true;
-       int rc, ctx_id = ctx->asid;
+       struct hl_cb *cb = buf->private;
 
-       /*
-        * Can't use generic function to check this because of special case
-        * where we create a CB as part of the reset process
-        */
-       if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
-               dev_warn_ratelimited(hdev->dev,
-                       "Device is disabled or in reset. Can't create new CBs\n");
-               rc = -EBUSY;
-               goto out_err;
-       }
+       hl_debugfs_remove_cb(cb);
 
-       if (cb_size > SZ_2M) {
-               dev_err(hdev->dev, "CB size %d must be less than %d\n",
-                       cb_size, SZ_2M);
-               rc = -EINVAL;
-               goto out_err;
-       }
+       if (cb->is_mmu_mapped)
+               cb_unmap_mem(cb->ctx, cb);
+
+       hl_ctx_put(cb->ctx);
 
-       if (!internal_cb) {
+       cb_do_release(cb->hdev, cb);
+}
+
+static int hl_cb_mmap_mem_alloc(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
+{
+       struct hl_cb_mmap_mem_alloc_args *cb_args = args;
+       struct hl_cb *cb;
+       int rc, ctx_id = cb_args->ctx->asid;
+       bool alloc_new_cb = true;
+
+       if (!cb_args->internal_cb) {
                /* Minimum allocation must be PAGE SIZE */
-               if (cb_size < PAGE_SIZE)
-                       cb_size = PAGE_SIZE;
+               if (cb_args->cb_size < PAGE_SIZE)
+                       cb_args->cb_size = PAGE_SIZE;
 
                if (ctx_id == HL_KERNEL_ASID_ID &&
-                               cb_size <= hdev->asic_prop.cb_pool_cb_size) {
+                               cb_args->cb_size <= cb_args->hdev->asic_prop.cb_pool_cb_size) {
 
-                       spin_lock(&hdev->cb_pool_lock);
-                       if (!list_empty(&hdev->cb_pool)) {
-                               cb = list_first_entry(&hdev->cb_pool,
+                       spin_lock(&cb_args->hdev->cb_pool_lock);
+                       if (!list_empty(&cb_args->hdev->cb_pool)) {
+                               cb = list_first_entry(&cb_args->hdev->cb_pool,
                                                typeof(*cb), pool_list);
                                list_del(&cb->pool_list);
-                               spin_unlock(&hdev->cb_pool_lock);
+                               spin_unlock(&cb_args->hdev->cb_pool_lock);
                                alloc_new_cb = false;
                        } else {
-                               spin_unlock(&hdev->cb_pool_lock);
-                               dev_dbg(hdev->dev, "CB pool is empty\n");
+                               spin_unlock(&cb_args->hdev->cb_pool_lock);
+                               dev_dbg(cb_args->hdev->dev, "CB pool is empty\n");
                        }
                }
        }
 
        if (alloc_new_cb) {
-               cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
-               if (!cb) {
-                       rc = -ENOMEM;
-                       goto out_err;
-               }
+               cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb);
+               if (!cb)
+                       return -ENOMEM;
        }
 
-       cb->hdev = hdev;
-       cb->ctx = ctx;
-       hl_ctx_get(hdev, cb->ctx);
+       cb->hdev = cb_args->hdev;
+       cb->ctx = cb_args->ctx;
+       cb->buf = buf;
+       cb->buf->mappable_size = cb->size;
+       cb->buf->private = cb;
+
+       hl_ctx_get(cb->ctx);
 
-       if (map_cb) {
+       if (cb_args->map_cb) {
                if (ctx_id == HL_KERNEL_ASID_ID) {
-                       dev_err(hdev->dev,
+                       dev_err(cb_args->hdev->dev,
                                "CB mapping is not supported for kernel context\n");
                        rc = -EINVAL;
                        goto release_cb;
                }
 
-               rc = cb_map_mem(ctx, cb);
+               rc = cb_map_mem(cb_args->ctx, cb);
                if (rc)
                        goto release_cb;
        }
 
-       spin_lock(&mgr->cb_lock);
-       rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
-       spin_unlock(&mgr->cb_lock);
-
-       if (rc < 0) {
-               dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
-               goto unmap_mem;
-       }
-
-       cb->id = (u64) rc;
-
-       kref_init(&cb->refcount);
-       spin_lock_init(&cb->lock);
-
-       /*
-        * idr is 32-bit so we can safely OR it with a mask that is above
-        * 32 bit
-        */
-       *handle = cb->id | HL_MMAP_TYPE_CB;
-       *handle <<= PAGE_SHIFT;
-
        hl_debugfs_add_cb(cb);
 
        return 0;
 
-unmap_mem:
-       if (cb->is_mmu_mapped)
-               cb_unmap_mem(cb->ctx, cb);
 release_cb:
        hl_ctx_put(cb->ctx);
-       cb_do_release(hdev, cb);
-out_err:
-       *handle = 0;
+       cb_do_release(cb_args->hdev, cb);
 
        return rc;
 }
 
-int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
+static int hl_cb_mmap(struct hl_mmap_mem_buf *buf,
+                                     struct vm_area_struct *vma, void *args)
 {
-       struct hl_cb *cb;
-       u32 handle;
-       int rc = 0;
+       struct hl_cb *cb = buf->private;
 
-       /*
-        * handle was given to user to do mmap, I need to shift it back to
-        * how the idr module gave it to me
-        */
-       cb_handle >>= PAGE_SHIFT;
-       handle = (u32) cb_handle;
+       return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address,
+                                       cb->bus_address, cb->size);
+}
 
-       spin_lock(&mgr->cb_lock);
+static struct hl_mmap_mem_buf_behavior cb_behavior = {
+       .topic = "CB",
+       .mem_id = HL_MMAP_TYPE_CB,
+       .alloc = hl_cb_mmap_mem_alloc,
+       .release = hl_cb_mmap_mem_release,
+       .mmap = hl_cb_mmap,
+};
 
-       cb = idr_find(&mgr->cb_handles, handle);
-       if (cb) {
-               idr_remove(&mgr->cb_handles, handle);
-               spin_unlock(&mgr->cb_lock);
-               kref_put(&cb->refcount, cb_release);
-       } else {
-               spin_unlock(&mgr->cb_lock);
-               dev_err(hdev->dev,
-                       "CB destroy failed, no match to handle 0x%x\n", handle);
-               rc = -EINVAL;
+int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
+                       struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
+                       bool map_cb, u64 *handle)
+{
+       struct hl_cb_mmap_mem_alloc_args args = {
+               .hdev = hdev,
+               .ctx = ctx,
+               .cb_size = cb_size,
+               .internal_cb = internal_cb,
+               .map_cb = map_cb,
+       };
+       struct hl_mmap_mem_buf *buf;
+       int ctx_id = ctx->asid;
+
+       if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
+               dev_warn_ratelimited(hdev->dev,
+                       "Device is disabled or in reset. Can't create new CBs\n");
+               return -EBUSY;
        }
 
-       return rc;
+       if (cb_size > SZ_2M) {
+               dev_err(hdev->dev, "CB size %d must be less than %d\n",
+                       cb_size, SZ_2M);
+               return -EINVAL;
+       }
+
+       buf = hl_mmap_mem_buf_alloc(
+               mmg, &cb_behavior,
+               ctx_id == HL_KERNEL_ASID_ID ? GFP_ATOMIC : GFP_KERNEL, &args);
+       if (!buf)
+               return -ENOMEM;
+
+       *handle = buf->handle;
+
+       return 0;
+}
+
+int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
+{
+       int rc;
+
+       rc = hl_mmap_mem_buf_put_handle(mmg, cb_handle);
+       if (rc < 0)
+               return rc; /* Invalid handle */
+
+       if (rc == 0)
+               dev_dbg(mmg->dev, "CB 0x%llx is destroyed while still in use\n", cb_handle);
+
+       return 0;
 }
 
-static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
-                       u64 cb_handle, u32 flags, u32 *usage_cnt, u64 *device_va)
+static int hl_cb_info(struct hl_mem_mgr *mmg,
+                       u64 handle, u32 flags, u32 *usage_cnt, u64 *device_va)
 {
        struct hl_vm_va_block *va_block;
        struct hl_cb *cb;
-       u32 handle;
        int rc = 0;
 
-       /* The CB handle was given to user to do mmap, so need to shift it back
-        * to the value which was allocated by the IDR module.
-        */
-       cb_handle >>= PAGE_SHIFT;
-       handle = (u32) cb_handle;
-
-       spin_lock(&mgr->cb_lock);
-
-       cb = idr_find(&mgr->cb_handles, handle);
+       cb = hl_cb_get(mmg, handle);
        if (!cb) {
-               dev_err(hdev->dev,
-                       "CB info failed, no match to handle 0x%x\n", handle);
-               rc = -EINVAL;
-               goto out;
+               dev_err(mmg->dev,
+                       "CB info failed, no match to handle 0x%llx\n", handle);
+               return -EINVAL;
        }
 
        if (flags & HL_CB_FLAGS_GET_DEVICE_VA) {
@@ -407,7 +396,7 @@ static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
                if (va_block) {
                        *device_va = va_block->start;
                } else {
-                       dev_err(hdev->dev, "CB is not mapped to the device's MMU\n");
+                       dev_err(mmg->dev, "CB is not mapped to the device's MMU\n");
                        rc = -EINVAL;
                        goto out;
                }
@@ -416,7 +405,7 @@ static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
        }
 
 out:
-       spin_unlock(&mgr->cb_lock);
+       hl_cb_put(cb);
        return rc;
 }
 
@@ -444,7 +433,7 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
                                args->in.cb_size, HL_MAX_CB_SIZE);
                        rc = -EINVAL;
                } else {
-                       rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
+                       rc = hl_cb_create(hdev, &hpriv->mem_mgr, hpriv->ctx,
                                        args->in.cb_size, false,
                                        !!(args->in.flags & HL_CB_FLAGS_MAP),
                                        &handle);
@@ -455,12 +444,12 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
                break;
 
        case HL_CB_OP_DESTROY:
-               rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
+               rc = hl_cb_destroy(&hpriv->mem_mgr,
                                        args->in.cb_handle);
                break;
 
        case HL_CB_OP_INFO:
-               rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle,
+               rc = hl_cb_info(&hpriv->mem_mgr, args->in.cb_handle,
                                args->in.flags,
                                &usage_cnt,
                                &device_va);
@@ -483,163 +472,20 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
        return rc;
 }
 
-static void cb_vm_close(struct vm_area_struct *vma)
+struct hl_cb *hl_cb_get(struct hl_mem_mgr *mmg, u64 handle)
 {
-       struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
-       long new_mmap_size;
-
-       new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);
-
-       if (new_mmap_size > 0) {
-               cb->mmap_size = new_mmap_size;
-               return;
-       }
-
-       spin_lock(&cb->lock);
-       cb->mmap = false;
-       spin_unlock(&cb->lock);
-
-       hl_cb_put(cb);
-       vma->vm_private_data = NULL;
-}
-
-static const struct vm_operations_struct cb_vm_ops = {
-       .close = cb_vm_close
-};
-
-int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
-{
-       struct hl_device *hdev = hpriv->hdev;
-       struct hl_cb *cb;
-       u32 handle, user_cb_size;
-       int rc;
-
-       /* We use the page offset to hold the idr and thus we need to clear
-        * it before doing the mmap itself
-        */
-       handle = vma->vm_pgoff;
-       vma->vm_pgoff = 0;
-
-       /* reference was taken here */
-       cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
-       if (!cb) {
-               dev_err(hdev->dev,
-                       "CB mmap failed, no match to handle 0x%x\n", handle);
-               return -EINVAL;
-       }
-
-       /* Validation check */
-       user_cb_size = vma->vm_end - vma->vm_start;
-       if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
-               dev_err(hdev->dev,
-                       "CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
-                       vma->vm_end - vma->vm_start, cb->size);
-               rc = -EINVAL;
-               goto put_cb;
-       }
-
-       if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
-                                                       user_cb_size)) {
-               dev_err(hdev->dev,
-                       "user pointer is invalid - 0x%lx\n",
-                       vma->vm_start);
-
-               rc = -EINVAL;
-               goto put_cb;
-       }
-
-       spin_lock(&cb->lock);
+       struct hl_mmap_mem_buf *buf;
 
-       if (cb->mmap) {
-               dev_err(hdev->dev,
-                       "CB mmap failed, CB already mmaped to user\n");
-               rc = -EINVAL;
-               goto release_lock;
-       }
-
-       cb->mmap = true;
-
-       spin_unlock(&cb->lock);
-
-       vma->vm_ops = &cb_vm_ops;
-
-       /*
-        * Note: We're transferring the cb reference to
-        * vma->vm_private_data here.
-        */
-
-       vma->vm_private_data = cb;
-
-       rc = hdev->asic_funcs->mmap(hdev, vma, cb->kernel_address,
-                                       cb->bus_address, cb->size);
-       if (rc) {
-               spin_lock(&cb->lock);
-               cb->mmap = false;
-               goto release_lock;
-       }
-
-       cb->mmap_size = cb->size;
-       vma->vm_pgoff = handle;
-
-       return 0;
-
-release_lock:
-       spin_unlock(&cb->lock);
-put_cb:
-       hl_cb_put(cb);
-       return rc;
-}
-
-struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
-                       u32 handle)
-{
-       struct hl_cb *cb;
-
-       spin_lock(&mgr->cb_lock);
-       cb = idr_find(&mgr->cb_handles, handle);
-
-       if (!cb) {
-               spin_unlock(&mgr->cb_lock);
-               dev_warn(hdev->dev,
-                       "CB get failed, no match to handle 0x%x\n", handle);
+       buf = hl_mmap_mem_buf_get(mmg, handle);
+       if (!buf)
                return NULL;
-       }
-
-       kref_get(&cb->refcount);
-
-       spin_unlock(&mgr->cb_lock);
-
-       return cb;
+       return buf->private;
 
 }
 
 void hl_cb_put(struct hl_cb *cb)
 {
-       kref_put(&cb->refcount, cb_release);
-}
-
-void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
-{
-       spin_lock_init(&mgr->cb_lock);
-       idr_init(&mgr->cb_handles);
-}
-
-void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
-{
-       struct hl_cb *cb;
-       struct idr *idp;
-       u32 id;
-
-       idp = &mgr->cb_handles;
-
-       idr_for_each_entry(idp, cb, id) {
-               if (kref_put(&cb->refcount, cb_release) != 1)
-                       dev_err(hdev->dev,
-                               "CB %d for CTX ID %d is still alive\n",
-                               id, cb->ctx->asid);
-       }
-
-       idr_destroy(&mgr->cb_handles);
+       hl_mmap_mem_buf_put(cb->buf);
 }
 
 struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
@@ -649,7 +495,7 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
        struct hl_cb *cb;
        int rc;
 
-       rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
+       rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, cb_size,
                                internal_cb, false, &cb_handle);
        if (rc) {
                dev_err(hdev->dev,
@@ -657,8 +503,7 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
                return NULL;
        }
 
-       cb_handle >>= PAGE_SHIFT;
-       cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
+       cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle);
        /* hl_cb_get should never fail here */
        if (!cb) {
                dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
@@ -669,7 +514,7 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
        return cb;
 
 destroy_cb:
-       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);
+       hl_cb_destroy(&hdev->kernel_mem_mgr, cb_handle);
 
        return NULL;
 }
index d93ef9f..fb30b7d 100644 (file)
@@ -407,8 +407,7 @@ static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
 
 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
 {
-       bool next_entry_found = false;
-       struct hl_cs *next, *first_cs;
+       struct hl_cs *next = NULL, *iter, *first_cs;
 
        if (!cs_needs_timeout(cs))
                return;
@@ -443,13 +442,13 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
        spin_lock(&hdev->cs_mirror_lock);
 
        /* queue TDR for next CS */
-       list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node)
-               if (cs_needs_timeout(next)) {
-                       next_entry_found = true;
+       list_for_each_entry(iter, &hdev->cs_mirror_list, mirror_node)
+               if (cs_needs_timeout(iter)) {
+                       next = iter;
                        break;
                }
 
-       if (next_entry_found && !next->tdr_active) {
+       if (next && !next->tdr_active) {
                next->tdr_active = true;
                schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
        }
@@ -736,11 +735,10 @@ static void cs_timedout(struct work_struct *work)
        hdev = cs->ctx->hdev;
 
        /* Save only the first CS timeout parameters */
-       rc = atomic_cmpxchg(&hdev->last_error.cs_write_disable, 0, 1);
+       rc = atomic_cmpxchg(&hdev->last_error.cs_timeout.write_disable, 0, 1);
        if (!rc) {
-               hdev->last_error.open_dev_timestamp = hdev->last_successful_open_ktime;
-               hdev->last_error.cs_timeout_timestamp = ktime_get();
-               hdev->last_error.cs_timeout_seq = cs->sequence;
+               hdev->last_error.cs_timeout.timestamp = ktime_get();
+               hdev->last_error.cs_timeout.seq = cs->sequence;
        }
 
        switch (cs->type) {
@@ -806,7 +804,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
        }
 
        /* increment refcnt for context */
-       hl_ctx_get(hdev, ctx);
+       hl_ctx_get(ctx);
 
        cs->ctx = ctx;
        cs->submitted = false;
@@ -958,9 +956,9 @@ wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
 
        spin_lock_irqsave(&interrupt->wait_list_lock, flags);
        list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) {
-               if (pend->ts_reg_info.ts_buff) {
+               if (pend->ts_reg_info.buf) {
                        list_del(&pend->wait_list_node);
-                       hl_ts_put(pend->ts_reg_info.ts_buff);
+                       hl_mmap_mem_buf_put(pend->ts_reg_info.buf);
                        hl_cb_put(pend->ts_reg_info.cq_cb);
                } else {
                        pend->fence.error = -EIO;
@@ -1072,17 +1070,14 @@ static int validate_queue_index(struct hl_device *hdev,
 }
 
 static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
-                                       struct hl_cb_mgr *cb_mgr,
+                                       struct hl_mem_mgr *mmg,
                                        struct hl_cs_chunk *chunk)
 {
        struct hl_cb *cb;
-       u32 cb_handle;
 
-       cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
-
-       cb = hl_cb_get(hdev, cb_mgr, cb_handle);
+       cb = hl_cb_get(mmg, chunk->cb_handle);
        if (!cb) {
-               dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
+               dev_err(hdev->dev, "CB handle 0x%llx invalid\n", chunk->cb_handle);
                return NULL;
        }
 
@@ -1344,7 +1339,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
                }
 
                if (is_kernel_allocated_cb) {
-                       cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
+                       cb = get_cb_from_cs_chunk(hdev, &hpriv->mem_mgr, chunk);
                        if (!cb) {
                                atomic64_inc(
                                        &ctx->cs_counters.validation_drop_cnt);
@@ -1772,7 +1767,7 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
         */
        job->patched_cb = job->user_cb;
        job->job_cb_size = job->user_cb_size;
-       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+       hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
 
        /* increment refcount as for external queues we get completion */
        cs_get(cs);
@@ -1834,7 +1829,7 @@ static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
 
        handle->count = count;
 
-       hl_ctx_get(hdev, hpriv->ctx);
+       hl_ctx_get(hpriv->ctx);
        handle->ctx = hpriv->ctx;
        mgr = &hpriv->ctx->sig_mgr;
 
@@ -2528,7 +2523,7 @@ static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
        if (timestamp)
                *timestamp = 0;
 
-       hl_ctx_get(hdev, ctx);
+       hl_ctx_get(ctx);
 
        fence = hl_ctx_get_fence(ctx, seq);
 
@@ -2668,7 +2663,7 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
 {
        struct multi_cs_completion *mcs_compl;
        struct hl_device *hdev = hpriv->hdev;
-       struct multi_cs_data mcs_data = {0};
+       struct multi_cs_data mcs_data = {};
        union hl_wait_cs_args *args = data;
        struct hl_ctx *ctx = hpriv->ctx;
        struct hl_fence **fence_arr;
@@ -2719,7 +2714,7 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
        mcs_data.fence_arr = fence_arr;
        mcs_data.arr_len = seq_arr_len;
 
-       hl_ctx_get(hdev, ctx);
+       hl_ctx_get(ctx);
 
        /* wait (with timeout) for the first CS to be completed */
        mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us);
@@ -2868,12 +2863,13 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
        return 0;
 }
 
-static int ts_buff_get_kernel_ts_record(struct hl_ts_buff *ts_buff,
+static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
                                        struct hl_cb *cq_cb,
                                        u64 ts_offset, u64 cq_offset, u64 target_value,
                                        spinlock_t *wait_list_lock,
                                        struct hl_user_pending_interrupt **pend)
 {
+       struct hl_ts_buff *ts_buff = buf->private;
        struct hl_user_pending_interrupt *requested_offset_record =
                                (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
                                ts_offset;
@@ -2885,7 +2881,7 @@ static int ts_buff_get_kernel_ts_record(struct hl_ts_buff *ts_buff,
 
        /* Validate ts_offset not exceeding last max */
        if (requested_offset_record > cb_last) {
-               dev_err(ts_buff->hdev->dev, "Ts offset exceeds max CB offset(0x%llx)\n",
+               dev_err(buf->mmg->dev, "Ts offset exceeds max CB offset(0x%llx)\n",
                                                                (u64)(uintptr_t)cb_last);
                return -EINVAL;
        }
@@ -2904,18 +2900,21 @@ start_over:
                        list_del(&requested_offset_record->wait_list_node);
                        spin_unlock_irqrestore(wait_list_lock, flags);
 
-                       hl_ts_put(requested_offset_record->ts_reg_info.ts_buff);
+                       hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf);
                        hl_cb_put(requested_offset_record->ts_reg_info.cq_cb);
 
-                       dev_dbg(ts_buff->hdev->dev, "ts node removed from interrupt list now can re-use\n");
+                       dev_dbg(buf->mmg->dev,
+                               "ts node removed from interrupt list, can now be reused\n");
                } else {
-                       dev_dbg(ts_buff->hdev->dev, "ts node in middle of irq handling\n");
+                       dev_dbg(buf->mmg->dev,
+                               "ts node in middle of irq handling\n");
 
                        /* irq handling in the middle give it time to finish */
                        spin_unlock_irqrestore(wait_list_lock, flags);
                        usleep_range(1, 10);
                        if (++iter_counter == MAX_TS_ITER_NUM) {
-                               dev_err(ts_buff->hdev->dev, "handling registration interrupt took too long!!\n");
+                               dev_err(buf->mmg->dev,
+                                       "handling registration interrupt took too long!!\n");
                                return -EINVAL;
                        }
 
@@ -2927,7 +2926,7 @@ start_over:
 
        /* Fill up the new registration node info */
        requested_offset_record->ts_reg_info.in_use = 1;
-       requested_offset_record->ts_reg_info.ts_buff = ts_buff;
+       requested_offset_record->ts_reg_info.buf = buf;
        requested_offset_record->ts_reg_info.cq_cb = cq_cb;
        requested_offset_record->ts_reg_info.timestamp_kernel_addr =
                        (u64 *) ts_buff->user_buff_address + ts_offset;
@@ -2937,21 +2936,20 @@ start_over:
 
        *pend = requested_offset_record;
 
-       dev_dbg(ts_buff->hdev->dev, "Found available node in TS kernel CB(0x%llx)\n",
+       dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB(0x%llx)\n",
                                                (u64)(uintptr_t)requested_offset_record);
        return 0;
 }
 
 static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
-                               struct hl_cb_mgr *cb_mgr, struct hl_ts_mgr *ts_mgr,
+                               struct hl_mem_mgr *cb_mmg, struct hl_mem_mgr *mmg,
                                u64 timeout_us, u64 cq_counters_handle, u64 cq_counters_offset,
                                u64 target_value, struct hl_user_interrupt *interrupt,
                                bool register_ts_record, u64 ts_handle, u64 ts_offset,
                                u32 *status, u64 *timestamp)
 {
-       u32 cq_patched_handle, ts_patched_handle;
        struct hl_user_pending_interrupt *pend;
-       struct hl_ts_buff *ts_buff;
+       struct hl_mmap_mem_buf *buf;
        struct hl_cb *cq_cb;
        unsigned long timeout, flags;
        long completion_rc;
@@ -2959,10 +2957,9 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
 
        timeout = hl_usecs64_to_jiffies(timeout_us);
 
-       hl_ctx_get(hdev, ctx);
+       hl_ctx_get(ctx);
 
-       cq_patched_handle = lower_32_bits(cq_counters_handle >> PAGE_SHIFT);
-       cq_cb = hl_cb_get(hdev, cb_mgr, cq_patched_handle);
+       cq_cb = hl_cb_get(cb_mmg, cq_counters_handle);
        if (!cq_cb) {
                rc = -EINVAL;
                goto put_ctx;
@@ -2971,16 +2968,14 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
        if (register_ts_record) {
                dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n",
                                        interrupt->interrupt_id, ts_offset, cq_counters_offset);
-
-               ts_patched_handle = lower_32_bits(ts_handle >> PAGE_SHIFT);
-               ts_buff = hl_ts_get(hdev, ts_mgr, ts_patched_handle);
-               if (!ts_buff) {
+               buf = hl_mmap_mem_buf_get(mmg, ts_handle);
+               if (!buf) {
                        rc = -EINVAL;
                        goto put_cq_cb;
                }
 
                /* Find first available record */
-               rc = ts_buff_get_kernel_ts_record(ts_buff, cq_cb, ts_offset,
+               rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset,
                                                cq_counters_offset, target_value,
                                                &interrupt->wait_list_lock, &pend);
                if (rc)
@@ -3087,7 +3082,7 @@ ts_registration_exit:
        return rc;
 
 put_ts_buff:
-       hl_ts_put(ts_buff);
+       hl_mmap_mem_buf_put(buf);
 put_cq_cb:
        hl_cb_put(cq_cb);
 put_ctx:
@@ -3111,7 +3106,7 @@ static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_
 
        timeout = hl_usecs64_to_jiffies(timeout_us);
 
-       hl_ctx_get(hdev, ctx);
+       hl_ctx_get(ctx);
 
        pend = kzalloc(sizeof(*pend), GFP_KERNEL);
        if (!pend) {
@@ -3249,7 +3244,7 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
                interrupt = &hdev->user_interrupt[interrupt_id - first_interrupt];
 
        if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ)
-               rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->cb_mgr, &hpriv->ts_mem_mgr,
+               rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr,
                                args->in.interrupt_timeout_us, args->in.cq_counters_handle,
                                args->in.cq_counters_offset,
                                args->in.target, interrupt,
index c6360e3..ed2cfd0 100644 (file)
@@ -262,7 +262,7 @@ err_hw_block_mem_fini:
        return rc;
 }
 
-void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
+void hl_ctx_get(struct hl_ctx *ctx)
 {
        kref_get(&ctx->refcount);
 }
@@ -284,7 +284,7 @@ struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
                 * immediately once we find him
                 */
                ctx = hpriv->ctx;
-               hl_ctx_get(hdev, ctx);
+               hl_ctx_get(ctx);
                break;
        }
 
index f184955..c6744bf 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/pci.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
+#include <linux/iommu.h>
 
 #define MMU_ADDR_BUF_SIZE      40
 #define MMU_ASID_BUF_SIZE      10
@@ -125,9 +126,9 @@ static int command_buffers_show(struct seq_file *s, void *data)
                }
                seq_printf(s,
                        "   %03llu        %d    0x%08x      %d          %d          %d\n",
-                       cb->id, cb->ctx->asid, cb->size,
-                       kref_read(&cb->refcount),
-                       cb->mmap, atomic_read(&cb->cs_cnt));
+                       cb->buf->handle, cb->ctx->asid, cb->size,
+                       kref_read(&cb->buf->refcount),
+                       atomic_read(&cb->buf->mmap), atomic_read(&cb->cs_cnt));
        }
 
        spin_unlock(&dev_entry->cb_spinlock);
@@ -369,8 +370,7 @@ static int userptr_lookup_show(struct seq_file *s, void *data)
                if (dev_entry->userptr_lookup >= userptr->addr &&
                dev_entry->userptr_lookup < userptr->addr + userptr->size) {
                        total_npages = 0;
-                       for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents,
-                                       i) {
+                       for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
                                npages = hl_get_sg_info(sg, &dma_addr);
                                sg_start = userptr->addr +
                                        total_npages * PAGE_SIZE;
@@ -538,6 +538,39 @@ static int engines_show(struct seq_file *s, void *data)
        return 0;
 }
 
+static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+       struct hl_device *hdev = entry->hdev;
+       u64 val = entry->memory_scrub_val;
+       int rc;
+
+       if (!hl_device_operational(hdev, NULL)) {
+               dev_warn_ratelimited(hdev->dev, "Can't scrub memory, device is not operational\n");
+               return -EIO;
+       }
+
+       mutex_lock(&hdev->fpriv_list_lock);
+       if (hdev->is_compute_ctx_active) {
+               mutex_unlock(&hdev->fpriv_list_lock);
+               dev_err(hdev->dev, "can't scrub dram, context exists\n");
+               return -EBUSY;
+       }
+       hdev->is_in_dram_scrub = true;
+       mutex_unlock(&hdev->fpriv_list_lock);
+
+       rc = hdev->asic_funcs->scrub_device_dram(hdev, val);
+
+       mutex_lock(&hdev->fpriv_list_lock);
+       hdev->is_in_dram_scrub = false;
+       mutex_unlock(&hdev->fpriv_list_lock);
+
+       if (rc)
+               return rc;
+       return count;
+}
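For completeness, a minimal user-space sketch of driving the new scrub knobs. It is illustrative only: the debugfs directory (/sys/kernel/debug/habanalabs/hl0) is an assumption based on the driver's existing per-device debugfs layout, and the 0x0 fill pattern is arbitrary.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int trigger_dram_scrub(const char *dbg_dir)
    {
            char path[256];
            int fd;

            /* 1) set the fill pattern via the memory_scrub_val attribute */
            snprintf(path, sizeof(path), "%s/memory_scrub_val", dbg_dir);
            fd = open(path, O_WRONLY);
            if (fd < 0)
                    return -1;
            write(fd, "0x0", 3);
            close(fd);

            /* 2) any write to memory_scrub starts the scrub with that value */
            snprintf(path, sizeof(path), "%s/memory_scrub", dbg_dir);
            fd = open(path, O_WRONLY);
            if (fd < 0)
                    return -1;
            write(fd, "1", 1);
            close(fd);
            return 0;
    }

    /* e.g. trigger_dram_scrub("/sys/kernel/debug/habanalabs/hl0"); */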
+
 static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -647,13 +680,105 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
        return rc;
 }
 
+static int hl_access_dev_mem_by_region(struct hl_device *hdev, u64 addr,
+               u64 *val, enum debugfs_access_type acc_type, bool *found)
+{
+       size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
+               sizeof(u64) : sizeof(u32);
+       struct pci_mem_region *mem_reg;
+       int i;
+
+       for (i = 0; i < PCI_REGION_NUMBER; i++) {
+               mem_reg = &hdev->pci_mem_region[i];
+               if (!mem_reg->used)
+                       continue;
+               if (addr >= mem_reg->region_base &&
+                       addr <= mem_reg->region_base + mem_reg->region_size - acc_size) {
+                       *found = true;
+                       return hdev->asic_funcs->access_dev_mem(hdev, mem_reg, i,
+                               addr, val, acc_type);
+               }
+       }
+       return 0;
+}
+
+static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
+               enum debugfs_access_type acc_type)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       u64 offset = prop->device_dma_offset_for_host_access;
+
+       switch (acc_type) {
+       case DEBUGFS_READ32:
+               *val = *(u32 *) phys_to_virt(addr - offset);
+               break;
+       case DEBUGFS_WRITE32:
+               *(u32 *) phys_to_virt(addr - offset) = *val;
+               break;
+       case DEBUGFS_READ64:
+               *val = *(u64 *) phys_to_virt(addr - offset);
+               break;
+       case DEBUGFS_WRITE64:
+               *(u64 *) phys_to_virt(addr - offset) = *val;
+               break;
+       default:
+               dev_err(hdev->dev, "hostmem access-type %d is not supported\n", acc_type);
+               break;
+       }
+}
+
+static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
+       enum debugfs_access_type acc_type)
+{
+       size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
+               sizeof(u64) : sizeof(u32);
+       u64 host_start = hdev->asic_prop.host_base_address;
+       u64 host_end = hdev->asic_prop.host_end_address;
+       bool user_address, found = false;
+       int rc;
+
+       user_address = hl_is_device_va(hdev, addr);
+       if (user_address) {
+               rc = device_va_to_pa(hdev, addr, acc_size, &addr);
+               if (rc)
+                       return rc;
+       }
+
+       rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found);
+       if (rc) {
+               dev_err(hdev->dev,
+                       "Failed reading addr %#llx from dev mem (%d)\n",
+                       addr, rc);
+               return rc;
+       }
+
+       if (found)
+               return 0;
+
+       if (!user_address || device_iommu_mapped(&hdev->pdev->dev)) {
+               rc = -EINVAL;
+               goto err;
+       }
+
+       if (addr >= host_start && addr <= host_end - acc_size) {
+               hl_access_host_mem(hdev, addr, val, acc_type);
+       } else {
+               rc = -EINVAL;
+               goto err;
+       }
+
+       return 0;
+err:
+       dev_err(hdev->dev, "invalid addr %#llx\n", addr);
+       return rc;
+}
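As an editorial note (not part of the patch), the lookup order that hl_access_mem() implements above can be summarized as:

    /*
     * 1. Device VA?  -> translate it to a physical address via device_va_to_pa().
     * 2. Address inside a used PCI memory region (CFG/SRAM/DRAM)?
     *    -> access it through asic_funcs->access_dev_mem().
     * 3. Otherwise -> direct host physical access via hl_access_host_mem(),
     *    but only for user addresses and only when the PCI device is not
     *    behind an IOMMU; everything else fails with -EINVAL.
     */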
+
 static ssize_t hl_data_read32(struct file *f, char __user *buf,
                                        size_t count, loff_t *ppos)
 {
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
-       u64 addr = entry->addr;
-       bool user_address;
+       u64 value64, addr = entry->addr;
        char tmp_buf[32];
        ssize_t rc;
        u32 val;
@@ -666,18 +791,11 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
        if (*ppos)
                return 0;
 
-       user_address = hl_is_device_va(hdev, addr);
-       if (user_address) {
-               rc = device_va_to_pa(hdev, addr, sizeof(val), &addr);
-               if (rc)
-                       return rc;
-       }
-
-       rc = hdev->asic_funcs->debugfs_read32(hdev, addr, user_address, &val);
-       if (rc) {
-               dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
+       rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_READ32);
+       if (rc)
                return rc;
-       }
+
+       val = value64; /* downcast back to 32 */
 
        sprintf(tmp_buf, "0x%08x\n", val);
        return simple_read_from_buffer(buf, count, ppos, tmp_buf,
@@ -689,8 +807,7 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
 {
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
-       u64 addr = entry->addr;
-       bool user_address;
+       u64 value64, addr = entry->addr;
        u32 value;
        ssize_t rc;
 
@@ -703,19 +820,10 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
        if (rc)
                return rc;
 
-       user_address = hl_is_device_va(hdev, addr);
-       if (user_address) {
-               rc = device_va_to_pa(hdev, addr, sizeof(value), &addr);
-               if (rc)
-                       return rc;
-       }
-
-       rc = hdev->asic_funcs->debugfs_write32(hdev, addr, user_address, value);
-       if (rc) {
-               dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
-                       value, addr);
+       value64 = value;
+       rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_WRITE32);
+       if (rc)
                return rc;
-       }
 
        return count;
 }
@@ -726,7 +834,6 @@ static ssize_t hl_data_read64(struct file *f, char __user *buf,
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u64 addr = entry->addr;
-       bool user_address;
        char tmp_buf[32];
        ssize_t rc;
        u64 val;
@@ -739,18 +846,9 @@ static ssize_t hl_data_read64(struct file *f, char __user *buf,
        if (*ppos)
                return 0;
 
-       user_address = hl_is_device_va(hdev, addr);
-       if (user_address) {
-               rc = device_va_to_pa(hdev, addr, sizeof(val), &addr);
-               if (rc)
-                       return rc;
-       }
-
-       rc = hdev->asic_funcs->debugfs_read64(hdev, addr, user_address, &val);
-       if (rc) {
-               dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
+       rc = hl_access_mem(hdev, addr, &val, DEBUGFS_READ64);
+       if (rc)
                return rc;
-       }
 
        sprintf(tmp_buf, "0x%016llx\n", val);
        return simple_read_from_buffer(buf, count, ppos, tmp_buf,
@@ -763,7 +861,6 @@ static ssize_t hl_data_write64(struct file *f, const char __user *buf,
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u64 addr = entry->addr;
-       bool user_address;
        u64 value;
        ssize_t rc;
 
@@ -776,19 +873,9 @@ static ssize_t hl_data_write64(struct file *f, const char __user *buf,
        if (rc)
                return rc;
 
-       user_address = hl_is_device_va(hdev, addr);
-       if (user_address) {
-               rc = device_va_to_pa(hdev, addr, sizeof(value), &addr);
-               if (rc)
-                       return rc;
-       }
-
-       rc = hdev->asic_funcs->debugfs_write64(hdev, addr, user_address, value);
-       if (rc) {
-               dev_err(hdev->dev, "Failed to write 0x%016llx to 0x%010llx\n",
-                       value, addr);
+       rc = hl_access_mem(hdev, addr, &value, DEBUGFS_WRITE64);
+       if (rc)
                return rc;
-       }
 
        return count;
 }
@@ -829,23 +916,67 @@ static ssize_t hl_dma_size_write(struct file *f, const char __user *buf,
        }
 
        /* Free the previous allocation, if there was any */
-       entry->blob_desc.size = 0;
-       vfree(entry->blob_desc.data);
+       entry->data_dma_blob_desc.size = 0;
+       vfree(entry->data_dma_blob_desc.data);
 
-       entry->blob_desc.data = vmalloc(size);
-       if (!entry->blob_desc.data)
+       entry->data_dma_blob_desc.data = vmalloc(size);
+       if (!entry->data_dma_blob_desc.data)
                return -ENOMEM;
 
        rc = hdev->asic_funcs->debugfs_read_dma(hdev, addr, size,
-                                               entry->blob_desc.data);
+                                               entry->data_dma_blob_desc.data);
        if (rc) {
                dev_err(hdev->dev, "Failed to DMA from 0x%010llx\n", addr);
-               vfree(entry->blob_desc.data);
-               entry->blob_desc.data = NULL;
+               vfree(entry->data_dma_blob_desc.data);
+               entry->data_dma_blob_desc.data = NULL;
+               return -EIO;
+       }
+
+       entry->data_dma_blob_desc.size = size;
+
+       return count;
+}
+
+static ssize_t hl_monitor_dump_trigger(struct file *f, const char __user *buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+       struct hl_device *hdev = entry->hdev;
+       u32 size, trig;
+       ssize_t rc;
+
+       if (hdev->reset_info.in_reset) {
+               dev_warn_ratelimited(hdev->dev, "Can't dump monitors during reset\n");
+               return 0;
+       }
+       rc = kstrtouint_from_user(buf, count, 10, &trig);
+       if (rc)
+               return rc;
+
+       if (trig != 1) {
+               dev_err(hdev->dev, "Must write 1 to trigger monitor dump\n");
+               return -EINVAL;
+       }
+
+       size = sizeof(struct cpucp_monitor_dump);
+
+       /* Free the previous allocation, if there was any */
+       entry->mon_dump_blob_desc.size = 0;
+       vfree(entry->mon_dump_blob_desc.data);
+
+       entry->mon_dump_blob_desc.data = vmalloc(size);
+       if (!entry->mon_dump_blob_desc.data)
+               return -ENOMEM;
+
+       rc = hdev->asic_funcs->get_monitor_dump(hdev, entry->mon_dump_blob_desc.data);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to dump monitors\n");
+               vfree(entry->mon_dump_blob_desc.data);
+               entry->mon_dump_blob_desc.data = NULL;
                return -EIO;
        }
 
-       entry->blob_desc.size = size;
+       entry->mon_dump_blob_desc.size = size;
 
        return count;
 }
@@ -1218,6 +1349,11 @@ static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
        return count;
 }
 
+static const struct file_operations hl_mem_scrub_fops = {
+       .owner = THIS_MODULE,
+       .write = hl_memory_scrub,
+};
+
 static const struct file_operations hl_data32b_fops = {
        .owner = THIS_MODULE,
        .read = hl_data_read32,
@@ -1235,6 +1371,11 @@ static const struct file_operations hl_dma_size_fops = {
        .write = hl_dma_size_write
 };
 
+static const struct file_operations hl_monitor_dump_fops = {
+       .owner = THIS_MODULE,
+       .write = hl_monitor_dump_trigger
+};
+
 static const struct file_operations hl_i2c_data_fops = {
        .owner = THIS_MODULE,
        .read = hl_i2c_data_read,
@@ -1350,8 +1491,10 @@ void hl_debugfs_add_device(struct hl_device *hdev)
        if (!dev_entry->entry_arr)
                return;
 
-       dev_entry->blob_desc.size = 0;
-       dev_entry->blob_desc.data = NULL;
+       dev_entry->data_dma_blob_desc.size = 0;
+       dev_entry->data_dma_blob_desc.data = NULL;
+       dev_entry->mon_dump_blob_desc.size = 0;
+       dev_entry->mon_dump_blob_desc.data = NULL;
 
        INIT_LIST_HEAD(&dev_entry->file_list);
        INIT_LIST_HEAD(&dev_entry->cb_list);
@@ -1370,6 +1513,17 @@ void hl_debugfs_add_device(struct hl_device *hdev)
        dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
                                                hl_debug_root);
 
+       debugfs_create_x64("memory_scrub_val",
+                               0644,
+                               dev_entry->root,
+                               &dev_entry->memory_scrub_val);
+
+       debugfs_create_file("memory_scrub",
+                               0200,
+                               dev_entry->root,
+                               dev_entry,
+                               &hl_mem_scrub_fops);
+
        debugfs_create_x64("addr",
                                0644,
                                dev_entry->root,
@@ -1470,7 +1624,18 @@ void hl_debugfs_add_device(struct hl_device *hdev)
        debugfs_create_blob("data_dma",
                                0400,
                                dev_entry->root,
-                               &dev_entry->blob_desc);
+                               &dev_entry->data_dma_blob_desc);
+
+       debugfs_create_file("monitor_dump_trig",
+                               0200,
+                               dev_entry->root,
+                               dev_entry,
+                               &hl_monitor_dump_fops);
+
+       debugfs_create_blob("monitor_dump",
+                               0400,
+                               dev_entry->root,
+                               &dev_entry->mon_dump_blob_desc);
 
        debugfs_create_x8("skip_reset_on_timeout",
                                0644,
@@ -1509,7 +1674,8 @@ void hl_debugfs_remove_device(struct hl_device *hdev)
 
        mutex_destroy(&entry->file_mutex);
 
-       vfree(entry->blob_desc.data);
+       vfree(entry->data_dma_blob_desc.data);
+       vfree(entry->mon_dump_blob_desc.data);
 
        for (i = 0; i < ARRAY_SIZE(entry->state_dump); ++i)
                vfree(entry->state_dump[i]);
index dc9341a..b4f14c6 100644 (file)
 
 #define HL_RESET_DELAY_USEC            10000   /* 10ms */
 
+/*
+ * hl_set_dram_bar - sets the bar to allow later access to an address
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @addr: the address the caller wants to access.
+ *
+ * @return: the old BAR base address on success, U64_MAX for failure.
+ *         The caller should set it back to the old address after use.
+ *
+ * In case the bar space does not cover the whole address space,
+ * the bar base address should be set to allow access to a given address.
+ * This function can also be called if the bar doesn't need to be set;
+ * in that case it just won't change the base.
+ */
+static uint64_t hl_set_dram_bar(struct hl_device *hdev, u64 addr)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       u64 bar_base_addr;
+
+       bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
+
+       return hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
+}
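A worked example of the masking above, assuming (purely for illustration) a 512 MB DRAM PCI BAR; the real size comes from asic_prop.dram_pci_bar_size:

    /*
     *   addr               = 0x0000000123456789
     *   dram_pci_bar_size  = 0x0000000020000000   (512 MB)
     *   bar_base_addr      = addr & ~(dram_pci_bar_size - 1)
     *                      = 0x0000000120000000
     *
     * The access then goes through the BAR at offset
     * addr - bar_base_addr = 0x03456789.
     */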
+
+
+static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
+       enum debugfs_access_type acc_type, enum pci_region region_type)
+{
+       struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
+       u64 old_base, rc;
+
+       if (region_type == PCI_REGION_DRAM) {
+               old_base = hl_set_dram_bar(hdev, addr);
+               if (old_base == U64_MAX)
+                       return -EIO;
+       }
+
+       switch (acc_type) {
+       case DEBUGFS_READ8:
+               *val = readb(hdev->pcie_bar[region->bar_id] +
+                       addr - region->region_base + region->offset_in_bar);
+               break;
+       case DEBUGFS_WRITE8:
+               writeb(*val, hdev->pcie_bar[region->bar_id] +
+                       addr - region->region_base + region->offset_in_bar);
+               break;
+       case DEBUGFS_READ32:
+               *val = readl(hdev->pcie_bar[region->bar_id] +
+                       addr - region->region_base + region->offset_in_bar);
+               break;
+       case DEBUGFS_WRITE32:
+               writel(*val, hdev->pcie_bar[region->bar_id] +
+                       addr - region->region_base + region->offset_in_bar);
+               break;
+       case DEBUGFS_READ64:
+               *val = readq(hdev->pcie_bar[region->bar_id] +
+                       addr - region->region_base + region->offset_in_bar);
+               break;
+       case DEBUGFS_WRITE64:
+               writeq(*val, hdev->pcie_bar[region->bar_id] +
+                       addr - region->region_base + region->offset_in_bar);
+               break;
+       }
+
+       if (region_type == PCI_REGION_DRAM) {
+               rc = hl_set_dram_bar(hdev, old_base);
+               if (rc == U64_MAX)
+                       return -EIO;
+       }
+
+       return 0;
+}
+
+int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       struct scatterlist *sg;
+       int rc, i;
+
+       rc = dma_map_sgtable(&hdev->pdev->dev, sgt, dir, 0);
+       if (rc)
+               return rc;
+
+       /* Shift to the device's base physical address of host memory if necessary */
+       if (prop->device_dma_offset_for_host_access)
+               for_each_sgtable_dma_sg(sgt, sg, i)
+                       sg->dma_address += prop->device_dma_offset_for_host_access;
+
+       return 0;
+}
+
+void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       struct scatterlist *sg;
+       int i;
+
+       /* Cancel the device's base physical address of host memory if necessary */
+       if (prop->device_dma_offset_for_host_access)
+               for_each_sgtable_dma_sg(sgt, sg, i)
+                       sg->dma_address -= prop->device_dma_offset_for_host_access;
+
+       dma_unmap_sgtable(&hdev->pdev->dev, sgt, dir, 0);
+}
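As a usage note, the two wrappers above are meant to be used as a pair, so that the host-access offset applied at map time is removed again before the underlying dma_unmap_sgtable() call. A minimal sketch (illustrative only; the userptr object and DMA_BIDIRECTIONAL direction are example choices):

    rc = hl_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
    if (rc)
            return rc;

    /* ... program sg_dma_address()/sg_dma_len() of each segment into the device ... */

    hl_dma_unmap_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);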
+
+/*
+ * hl_access_cfg_region - access the config region
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @addr: the address to access
+ * @val: the value to write from or read to
+ * @acc_type: the type of access (read/write 64/32)
+ */
+int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
+       enum debugfs_access_type acc_type)
+{
+       struct pci_mem_region *cfg_region = &hdev->pci_mem_region[PCI_REGION_CFG];
+       u32 val_h, val_l;
+
+       if (!IS_ALIGNED(addr, sizeof(u32))) {
+               dev_err(hdev->dev, "address %#llx not a multiple of %zu\n", addr, sizeof(u32));
+               return -EINVAL;
+       }
+
+       switch (acc_type) {
+       case DEBUGFS_READ32:
+               *val = RREG32(addr - cfg_region->region_base);
+               break;
+       case DEBUGFS_WRITE32:
+               WREG32(addr - cfg_region->region_base, *val);
+               break;
+       case DEBUGFS_READ64:
+               val_l = RREG32(addr - cfg_region->region_base);
+               val_h = RREG32(addr + sizeof(u32) - cfg_region->region_base);
+
+               *val = (((u64) val_h) << 32) | val_l;
+               break;
+       case DEBUGFS_WRITE64:
+               WREG32(addr - cfg_region->region_base, lower_32_bits(*val));
+               WREG32(addr + sizeof(u32) - cfg_region->region_base, upper_32_bits(*val));
+               break;
+       default:
+               dev_err(hdev->dev, "access type %d is not supported\n", acc_type);
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
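For the 64-bit cases above, the access is split into two naturally aligned 32-bit register operations relative to the CFG region base. A worked example (illustrative), for a 64-bit read whose address lands at register offset 0x1000:

    /*
     *   val_l = RREG32(0x1000);                  low 32 bits
     *   val_h = RREG32(0x1004);                  high 32 bits
     *   *val  = ((u64) val_h << 32) | val_l;
     *
     * The write path mirrors this with lower_32_bits()/upper_32_bits().
     */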
+
+/*
+ * hl_access_dev_mem - access device memory
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @region: the memory region the address belongs to
+ * @region_type: the type of the region the address belongs to
+ * @addr: the address to access
+ * @val: the value to write from or read to
+ * @acc_type: the type of access (r/w, 32/64)
+ */
+int hl_access_dev_mem(struct hl_device *hdev, struct pci_mem_region *region,
+               enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type)
+{
+       switch (region_type) {
+       case PCI_REGION_CFG:
+               return hl_access_cfg_region(hdev, addr, val, acc_type);
+       case PCI_REGION_SRAM:
+       case PCI_REGION_DRAM:
+               return hl_access_sram_dram_region(hdev, addr, val, acc_type,
+                       region_type);
+       default:
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
 enum hl_device_status hl_device_status(struct hl_device *hdev)
 {
        enum hl_device_status status;
@@ -107,6 +283,14 @@ static void hpriv_release(struct kref *ref)
        hdev->is_compute_ctx_active = false;
        mutex_unlock(&hdev->fpriv_list_lock);
 
+       hdev->compute_ctx_in_release = 0;
+
+       /* release the eventfd */
+       if (hpriv->notifier_event.eventfd)
+               eventfd_ctx_put(hpriv->notifier_event.eventfd);
+
+       mutex_destroy(&hpriv->notifier_event.lock);
+
        kfree(hpriv);
 }
 
@@ -146,10 +330,11 @@ static int hl_device_release(struct inode *inode, struct file *filp)
         */
        hl_release_pending_user_interrupts(hpriv->hdev);
 
-       hl_cb_mgr_fini(hdev, &hpriv->cb_mgr);
-       hl_ts_mgr_fini(hpriv->hdev, &hpriv->ts_mem_mgr);
+       hl_mem_mgr_fini(&hpriv->mem_mgr);
        hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
 
+       hdev->compute_ctx_in_release = 1;
+
        if (!hl_hpriv_put(hpriv))
                dev_notice(hdev->dev,
                        "User process closed FD but device still in use\n");
@@ -176,6 +361,11 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
        list_del(&hpriv->dev_node);
        mutex_unlock(&hdev->fpriv_ctrl_list_lock);
 out:
+       /* release the eventfd */
+       if (hpriv->notifier_event.eventfd)
+               eventfd_ctx_put(hpriv->notifier_event.eventfd);
+
+       mutex_destroy(&hpriv->notifier_event.lock);
        put_pid(hpriv->taskpid);
 
        kfree(hpriv);
@@ -204,17 +394,15 @@ static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
        }
 
        vm_pgoff = vma->vm_pgoff;
-       vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
 
        switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
-       case HL_MMAP_TYPE_CB:
-               return hl_cb_mmap(hpriv, vma);
-
        case HL_MMAP_TYPE_BLOCK:
+               vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
                return hl_hw_block_mmap(hpriv, vma);
 
+       case HL_MMAP_TYPE_CB:
        case HL_MMAP_TYPE_TS_BUFF:
-               return hl_ts_mmap(hpriv, vma);
+               return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL);
        }
 
        return -EINVAL;
@@ -424,18 +612,25 @@ static int device_early_init(struct hl_device *hdev)
                goto free_eq_wq;
        }
 
+       hdev->pf_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0);
+       if (!hdev->pf_wq) {
+               dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");
+               rc = -ENOMEM;
+               goto free_ts_free_wq;
+       }
+
        hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
                                        GFP_KERNEL);
        if (!hdev->hl_chip_info) {
                rc = -ENOMEM;
-               goto free_ts_free_wq;
+               goto free_pf_wq;
        }
 
        rc = hl_mmu_if_set_funcs(hdev);
        if (rc)
                goto free_chip_info;
 
-       hl_cb_mgr_init(&hdev->kernel_cb_mgr);
+       hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
 
        hdev->device_reset_work.wq =
                        create_singlethread_workqueue("hl_device_reset");
@@ -464,9 +659,11 @@ static int device_early_init(struct hl_device *hdev)
        return 0;
 
 free_cb_mgr:
-       hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
+       hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
 free_chip_info:
        kfree(hdev->hl_chip_info);
+free_pf_wq:
+       destroy_workqueue(hdev->pf_wq);
 free_ts_free_wq:
        destroy_workqueue(hdev->ts_free_obj_wq);
 free_eq_wq:
@@ -503,10 +700,11 @@ static void device_early_fini(struct hl_device *hdev)
 
        mutex_destroy(&hdev->clk_throttling.lock);
 
-       hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
+       hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
 
        kfree(hdev->hl_chip_info);
 
+       destroy_workqueue(hdev->pf_wq);
        destroy_workqueue(hdev->ts_free_obj_wq);
        destroy_workqueue(hdev->eq_wq);
        destroy_workqueue(hdev->device_reset_work.wq);
@@ -703,6 +901,9 @@ static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_r
        /* Go over all the queues, release all CS and their jobs */
        hl_cs_rollback_all(hdev, skip_wq_flush);
 
+       /* flush the MMU prefetch workqueue */
+       flush_workqueue(hdev->pf_wq);
+
        /* Release all pending user interrupts, each pending user interrupt
         * holds a reference to user context
         */
@@ -847,10 +1048,13 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool
 
                        put_task_struct(task);
                } else {
-                       dev_warn(hdev->dev,
-                               "Can't get task struct for PID so giving up on killing process\n");
-                       mutex_unlock(fd_lock);
-                       return -ETIME;
+                       /*
+                        * If we got here, it means that the process was killed from outside the
+                        * driver right after we started looping on fd_list and before
+                        * get_pid_task(), so we don't need to kill it.
+                        */
+                       dev_dbg(hdev->dev,
+                               "Can't get task struct for user process, assuming process was killed from outside the driver\n");
                }
        }
 
@@ -1062,9 +1266,9 @@ do_reset:
                if (hard_reset)
                        dev_info(hdev->dev, "Going to reset device\n");
                else if (reset_upon_device_release)
-                       dev_info(hdev->dev, "Going to reset device after release by user\n");
+                       dev_dbg(hdev->dev, "Going to reset device after release by user\n");
                else
-                       dev_info(hdev->dev, "Going to reset engines of inference device\n");
+                       dev_dbg(hdev->dev, "Going to reset engines of inference device\n");
        }
 
 again:
@@ -1270,7 +1474,10 @@ kill_processes:
 
        hdev->reset_info.needs_reset = false;
 
-       dev_notice(hdev->dev, "Successfully finished resetting the device\n");
+       if (hard_reset)
+               dev_info(hdev->dev, "Successfully finished resetting the device\n");
+       else
+               dev_dbg(hdev->dev, "Successfully finished resetting the device\n");
 
        if (hard_reset) {
                hdev->reset_info.hard_reset_cnt++;
@@ -1323,6 +1530,43 @@ out_err:
        return rc;
 }
 
+static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event)
+{
+       mutex_lock(&notifier_event->lock);
+       notifier_event->events_mask |= event;
+       if (notifier_event->eventfd)
+               eventfd_signal(notifier_event->eventfd, 1);
+
+       mutex_unlock(&notifier_event->lock);
+}
+
+/*
+ * hl_notifier_event_send_all - notify all user processes via eventfd
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @event: the event that occurred
+ */
+void hl_notifier_event_send_all(struct hl_device *hdev, u64 event)
+{
+       struct hl_fpriv *hpriv;
+
+       mutex_lock(&hdev->fpriv_list_lock);
+
+       list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
+               hl_notifier_event_send(&hpriv->notifier_event, event);
+
+       mutex_unlock(&hdev->fpriv_list_lock);
+
+       /* control device */
+       mutex_lock(&hdev->fpriv_ctrl_list_lock);
+
+       list_for_each_entry(hpriv, &hdev->fpriv_ctrl_list, dev_node)
+               hl_notifier_event_send(&hpriv->notifier_event, event);
+
+       mutex_unlock(&hdev->fpriv_ctrl_list_lock);
+}
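On the user side, once a process has registered an eventfd with the driver (through the notifier registration interface, which is not part of this hunk), waiting for events is a plain eventfd read. A minimal sketch, assuming efd is that registered eventfd:

    uint64_t count;

    /* Blocks until the kernel calls eventfd_signal(); the counter only says
     * "something happened" -- the accumulated events_mask bits are fetched
     * separately through the driver's query interface (not shown here). */
    if (read(efd, &count, sizeof(count)) == sizeof(count))
            handle_device_events();   /* hypothetical helper */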
+
 /*
  * hl_device_init - main initialization function for habanalabs device
  *
index 3262126..828a36a 100644 (file)
@@ -18,8 +18,9 @@
 static char *extract_fw_ver_from_str(const char *fw_str)
 {
        char *str, *fw_ver, *whitespace;
+       u32 ver_offset;
 
-       fw_ver = kmalloc(16, GFP_KERNEL);
+       fw_ver = kmalloc(VERSION_MAX_LEN, GFP_KERNEL);
        if (!fw_ver)
                return NULL;
 
@@ -29,9 +30,10 @@ static char *extract_fw_ver_from_str(const char *fw_str)
 
        /* Skip the fw- part */
        str += 3;
+       ver_offset = str - fw_str;
 
        /* Copy until the next whitespace */
-       whitespace =  strnstr(str, " ", 15);
+       whitespace =  strnstr(str, " ", VERSION_MAX_LEN - ver_offset);
        if (!whitespace)
                goto free_fw_ver;
 
@@ -819,6 +821,54 @@ out:
        return rc;
 }
 
+int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
+{
+       struct cpucp_monitor_dump *mon_dump_cpu_addr;
+       dma_addr_t mon_dump_dma_addr;
+       struct cpucp_packet pkt = {};
+       size_t data_size;
+       __le32 *src_ptr;
+       u32 *dst_ptr;
+       u64 result;
+       int i, rc;
+
+       data_size = sizeof(struct cpucp_monitor_dump);
+       mon_dump_cpu_addr = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, data_size,
+                                                                               &mon_dump_dma_addr);
+       if (!mon_dump_cpu_addr) {
+               dev_err(hdev->dev,
+                       "Failed to allocate DMA memory for CPU-CP monitor-dump packet\n");
+               return -ENOMEM;
+       }
+
+       memset(mon_dump_cpu_addr, 0, data_size);
+
+       pkt.ctl = cpu_to_le32(CPUCP_PACKET_MONITOR_DUMP_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
+       pkt.addr = cpu_to_le64(mon_dump_dma_addr);
+       pkt.data_max_size = cpu_to_le32(data_size);
+
+       rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+                                                       HL_CPUCP_MON_DUMP_TIMEOUT_USEC, &result);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
+               goto out;
+       }
+
+       /* result contains the actual size */
+       src_ptr = (__le32 *) mon_dump_cpu_addr;
+       dst_ptr = data;
+       for (i = 0; i < (data_size / sizeof(u32)); i++) {
+               *dst_ptr = le32_to_cpu(*src_ptr);
+               src_ptr++;
+               dst_ptr++;
+       }
+
+out:
+       hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
+
+       return rc;
+}
+
 int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
                struct hl_info_pci_counters *counters)
 {
@@ -1539,7 +1589,7 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
                le32_to_cpu(dyn_regs->cpu_cmd_status_to_host),
                status,
                FIELD_GET(COMMS_STATUS_STATUS_MASK, status) == expected_status,
-               hdev->fw_poll_interval_usec,
+               hdev->fw_comms_poll_interval_usec,
                timeout);
 
        if (rc) {
@@ -1909,7 +1959,7 @@ static int hl_fw_dynamic_request_descriptor(struct hl_device *hdev,
  * @fwc: the firmware component
  * @fw_version: fw component's version string
  */
-static void hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
+static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
                                        enum hl_fw_component fwc,
                                        const char *fw_version)
 {
@@ -1933,23 +1983,33 @@ static void hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
                                                VERSION_MAX_LEN);
                if (preboot_ver && preboot_ver != prop->preboot_ver) {
                        strscpy(btl_ver, prop->preboot_ver,
-                               min((int) (preboot_ver - prop->preboot_ver),
-                                                                       31));
+                               min((int) (preboot_ver - prop->preboot_ver), 31));
                        dev_info(hdev->dev, "%s\n", btl_ver);
                }
 
                preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
                if (preboot_ver) {
-                       dev_info(hdev->dev, "preboot version %s\n",
-                                                               preboot_ver);
+                       char major[8];
+                       int rc;
+
+                       dev_info(hdev->dev, "preboot version %s\n", preboot_ver);
+                       sprintf(major, "%.2s", preboot_ver);
                        kfree(preboot_ver);
+
+                       rc = kstrtou32(major, 10, &hdev->fw_major_version);
+                       if (rc) {
+                               dev_err(hdev->dev, "Error %d parsing preboot major version\n", rc);
+                               return rc;
+                       }
                }
 
                break;
        default:
                dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
-               return;
+               return -EINVAL;
        }
+
+       return 0;
 }
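A worked example of the new major-version extraction, using a hypothetical preboot string (the exact format is produced by the firmware and is only assumed here):

    /*
     *   prop->preboot_ver                    = "... hl-gaudi-0.14.10-fw-33.0.5-sec-4 (Jan 01 2022)"
     *   extract_fw_ver_from_str(preboot_ver) -> "33.0.5-sec-4"
     *   sprintf(major, "%.2s", ...)          -> "33"
     *   kstrtou32(major, 10, ...)            -> hdev->fw_major_version = 33
     */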
 
 /**
@@ -2121,9 +2181,10 @@ static int hl_fw_dynamic_load_image(struct hl_device *hdev,
                goto release_fw;
 
        /* read preboot version */
-       hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc,
+       rc = hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc,
                                fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
-
+       if (rc)
+               goto release_fw;
 
        /* update state according to boot stage */
        if (cur_fwc == FW_COMP_BOOT_FIT) {
@@ -2390,9 +2451,8 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
                        goto protocol_err;
 
                /* read preboot version */
-               hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
+               return hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
                                fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
-               return 0;
        }
 
        /* load boot fit to FW */
index 1edaf6a..b0b0f3f 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/hashtable.h>
 #include <linux/debugfs.h>
 #include <linux/rwsem.h>
+#include <linux/eventfd.h>
 #include <linux/bitfield.h>
 #include <linux/genalloc.h>
 #include <linux/sched/signal.h>
 
 #define HL_CPUCP_INFO_TIMEOUT_USEC     10000000 /* 10s */
 #define HL_CPUCP_EEPROM_TIMEOUT_USEC   10000000 /* 10s */
+#define HL_CPUCP_MON_DUMP_TIMEOUT_USEC 10000000 /* 10s */
 
 #define HL_FW_STATUS_POLL_INTERVAL_USEC                10000 /* 10ms */
+#define HL_FW_COMMS_STATUS_PLDM_POLL_INTERVAL_USEC     1000000 /* 1s */
 
 #define HL_PCI_ELBI_TIMEOUT_MSEC       10 /* 10ms */
 
@@ -394,18 +397,8 @@ enum hl_device_hw_state {
  * struct hl_mmu_properties - ASIC specific MMU address translation properties.
  * @start_addr: virtual start address of the memory region.
  * @end_addr: virtual end address of the memory region.
- * @hop0_shift: shift of hop 0 mask.
- * @hop1_shift: shift of hop 1 mask.
- * @hop2_shift: shift of hop 2 mask.
- * @hop3_shift: shift of hop 3 mask.
- * @hop4_shift: shift of hop 4 mask.
- * @hop5_shift: shift of hop 5 mask.
- * @hop0_mask: mask to get the PTE address in hop 0.
- * @hop1_mask: mask to get the PTE address in hop 1.
- * @hop2_mask: mask to get the PTE address in hop 2.
- * @hop3_mask: mask to get the PTE address in hop 3.
- * @hop4_mask: mask to get the PTE address in hop 4.
- * @hop5_mask: mask to get the PTE address in hop 5.
+ * @hop_shifts: array holds HOPs shifts.
+ * @hop_masks: array holds HOPs masks.
  * @last_mask: mask to get the bit indicating this is the last hop.
  * @pgt_size: size for page tables.
  * @page_size: default page size used to allocate memory.
@@ -418,18 +411,8 @@ enum hl_device_hw_state {
 struct hl_mmu_properties {
        u64     start_addr;
        u64     end_addr;
-       u64     hop0_shift;
-       u64     hop1_shift;
-       u64     hop2_shift;
-       u64     hop3_shift;
-       u64     hop4_shift;
-       u64     hop5_shift;
-       u64     hop0_mask;
-       u64     hop1_mask;
-       u64     hop2_mask;
-       u64     hop3_mask;
-       u64     hop4_mask;
-       u64     hop5_mask;
+       u64     hop_shifts[MMU_HOP_MAX];
+       u64     hop_masks[MMU_HOP_MAX];
        u64     last_mask;
        u64     pgt_size;
        u32     page_size;
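
With the per-hop shift/mask fields collapsed into hop_shifts[]/hop_masks[] arrays indexed by hop number, per-hop address math can be written generically. A hedged sketch of how a PTE address inside a hop could be derived from these arrays (the formula, the 8-byte PTE size and the helper name are assumptions for illustration, not the driver's exact implementation):

/* Hedged sketch: derive the PTE address inside a given hop from the arrays
 * introduced above. Not the driver's hl_mmu_get_hop_pte_phys_addr(). */
static u64 example_hop_pte_addr(struct hl_mmu_properties *mmu_prop,
				u8 hop_idx, u64 hop_addr, u64 virt_addr)
{
	u64 idx = (virt_addr & mmu_prop->hop_masks[hop_idx]) >>
		  mmu_prop->hop_shifts[hop_idx];

	return hop_addr + idx * sizeof(u64);	/* assuming 8-byte PTEs */
}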
@@ -486,8 +469,10 @@ struct hl_hints_range {
  *                  the device's MMU.
  * @dram_hints_align_mask: dram va hint addresses alignment mask which is used
  *                  for hints validity check.
- * device_dma_offset_for_host_access: the offset to add to host DMA addresses
- *                                    to enable the device to access them.
+ * @device_dma_offset_for_host_access: the offset to add to host DMA addresses
+ *                                     to enable the device to access them.
+ * @host_base_address: host physical start address for host DMA from device
+ * @host_end_address: host physical end address for host DMA from device
  * @max_freq_value: current max clk frequency.
  * @clk_pll_index: clock PLL index that specify which PLL determines the clock
  *                 we display to the user
@@ -528,6 +513,10 @@ struct hl_hints_range {
  * @fw_app_cpu_boot_dev_sts1: bitmap representation of application security
  *                            status reported by FW, bit description can be
  *                            found in CPU_BOOT_DEV_STS1
+ * @device_mem_alloc_default_page_size: may differ from dram_page_size only for ASICs for
+ *                                      which the property supports_user_set_page_size is true
+ *                                      (i.e. the DRAM supports multiple page sizes), otherwise
+ *                                      it shall be equal to dram_page_size.
  * @collective_first_sob: first sync object available for collective use
  * @collective_first_mon: first monitor available for collective use
  * @sync_stream_first_sob: first sync object available for sync stream use
@@ -568,6 +557,7 @@ struct hl_hints_range {
  * @configurable_stop_on_err: is stop-on-error option configurable via debugfs.
  * @set_max_power_on_device_init: true if need to set max power in F/W on device init.
  * @supports_user_set_page_size: true if user can set the allocation page size.
+ * @dma_mask: the dma mask to be set for this device
  */
 struct asic_fixed_properties {
        struct hw_queue_properties      *hw_queues_props;
@@ -599,6 +589,8 @@ struct asic_fixed_properties {
        u64                             cb_va_end_addr;
        u64                             dram_hints_align_mask;
        u64                             device_dma_offset_for_host_access;
+       u64                             host_base_address;
+       u64                             host_end_address;
        u64                             max_freq_value;
        u32                             clk_pll_index;
        u32                             mmu_pgt_size;
@@ -626,6 +618,7 @@ struct asic_fixed_properties {
        u32                             fw_bootfit_cpu_boot_dev_sts1;
        u32                             fw_app_cpu_boot_dev_sts0;
        u32                             fw_app_cpu_boot_dev_sts1;
+       u32                             device_mem_alloc_default_page_size;
        u16                             collective_first_sob;
        u16                             collective_first_mon;
        u16                             sync_stream_first_sob;
@@ -654,6 +647,7 @@ struct asic_fixed_properties {
        u8                              configurable_stop_on_err;
        u8                              set_max_power_on_device_init;
        u8                              supports_user_set_page_size;
+       u8                              dma_mask;
 };
 
 /**
@@ -711,85 +705,102 @@ struct hl_cs_compl {
  */
 
 /**
- * struct hl_cb_mgr - describes a Command Buffer Manager.
- * @cb_lock: protects cb_handles.
- * @cb_handles: an idr to hold all command buffer handles.
- */
-struct hl_cb_mgr {
-       spinlock_t              cb_lock;
-       struct idr              cb_handles; /* protected by cb_lock */
-};
-
-/**
- * struct hl_ts_mgr - describes the timestamp registration memory manager.
- * @ts_lock: protects ts_handles.
- * @ts_handles: an idr to hold all ts bufferes handles.
- */
-struct hl_ts_mgr {
-       spinlock_t              ts_lock;
-       struct idr              ts_handles;
-};
-
-/**
  * struct hl_ts_buff - describes a timestamp buffer.
- * @refcount: reference counter for usage of the buffer.
- * @hdev: pointer to device this buffer belongs to.
- * @mmap: true if the buff is currently mapped to user.
  * @kernel_buff_address: Holds the internal buffer's kernel virtual address.
  * @user_buff_address: Holds the user buffer's kernel virtual address.
- * @id: the buffer ID.
- * @mmap_size: Holds the buffer size that was mmaped.
  * @kernel_buff_size: Holds the internal kernel buffer size.
- * @user_buff_size: Holds the user buffer size.
  */
 struct hl_ts_buff {
-       struct kref             refcount;
-       struct hl_device        *hdev;
-       atomic_t                mmap;
        void                    *kernel_buff_address;
        void                    *user_buff_address;
-       u32                     id;
-       u32                     mmap_size;
        u32                     kernel_buff_size;
-       u32                     user_buff_size;
+};
+
+struct hl_mmap_mem_buf;
+
+/**
+ * struct hl_mem_mgr - describes unified memory manager for mappable memory chunks.
+ * @dev: back pointer to the owning device
+ * @lock: protects handles
+ * @handles: an idr holding all active handles to the memory buffers in the system.
+ */
+struct hl_mem_mgr {
+       struct device *dev;
+       spinlock_t lock;
+       struct idr handles;
+};
+
+/**
+ * struct hl_mmap_mem_buf_behavior - describes unified memory manager buffer behavior
+ * @topic: string identifier used for logging
+ * @mem_id: memory type identifier, embedded in the handle and used to identify
+ *          the memory type by handle.
+ * @alloc: callback executed on buffer allocation, shall allocate the memory,
+ *         set it under buffer private, and set mappable size.
+ * @mmap: callback executed on mmap, must map the buffer to vma
+ * @release: callback executed on release, must free the resources used by the buffer
+ */
+struct hl_mmap_mem_buf_behavior {
+       const char *topic;
+       u64 mem_id;
+
+       int (*alloc)(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args);
+       int (*mmap)(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args);
+       void (*release)(struct hl_mmap_mem_buf *buf);
+};
+
+/**
+ * struct hl_mmap_mem_buf - describes a single unified memory buffer
+ * @behavior: buffer behavior
+ * @mmg: back pointer to the unified memory manager
+ * @refcount: reference counter for buffer users
+ * @private: pointer to buffer behavior private data
+ * @mmap: atomic boolean indicating whether or not the buffer is mapped right now
+ * @real_mapped_size: the actual mapped size of the buffer; it may change at runtime
+ *                   after parts of the buffer are released.
+ * @mappable_size: the original mappable size of the buffer, which does not change after
+ *                 the allocation.
+ * @handle: the buffer id in mmg handles store
+ */
+struct hl_mmap_mem_buf {
+       struct hl_mmap_mem_buf_behavior *behavior;
+       struct hl_mem_mgr *mmg;
+       struct kref refcount;
+       void *private;
+       atomic_t mmap;
+       u64 real_mapped_size;
+       u64 mappable_size;
+       u64 handle;
 };
 
 /**
  * struct hl_cb - describes a Command Buffer.
- * @refcount: reference counter for usage of the CB.
  * @hdev: pointer to device this CB belongs to.
  * @ctx: pointer to the CB owner's context.
- * @lock: spinlock to protect mmap flows.
+ * @buf: back pointer to the parent mappable memory buffer
  * @debugfs_list: node in debugfs list of command buffers.
  * @pool_list: node in pool list of command buffers.
  * @va_block_list: list of virtual addresses blocks of the CB if it is mapped to
  *                 the device's MMU.
- * @id: the CB's ID.
  * @kernel_address: Holds the CB's kernel virtual address.
  * @bus_address: Holds the CB's DMA address.
- * @mmap_size: Holds the CB's size that was mmaped.
  * @size: holds the CB's size.
  * @cs_cnt: holds number of CS that this CB participates in.
- * @mmap: true if the CB is currently mmaped to user.
  * @is_pool: true if CB was acquired from the pool, false otherwise.
  * @is_internal: internaly allocated
  * @is_mmu_mapped: true if the CB is mapped to the device's MMU.
  */
 struct hl_cb {
-       struct kref             refcount;
        struct hl_device        *hdev;
        struct hl_ctx           *ctx;
-       spinlock_t              lock;
+       struct hl_mmap_mem_buf  *buf;
        struct list_head        debugfs_list;
        struct list_head        pool_list;
        struct list_head        va_block_list;
-       u64                     id;
        void                    *kernel_address;
        dma_addr_t              bus_address;
-       u32                     mmap_size;
        u32                     size;
        atomic_t                cs_cnt;
-       u8                      mmap;
        u8                      is_pool;
        u8                      is_internal;
        u8                      is_mmu_mapped;
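
The unified memory manager introduced above replaces the per-type CB/timestamp managers with a single handle store plus per-type hl_mmap_mem_buf_behavior callbacks. A rough sketch of how a buffer type could be wired up, assuming only the structures and the hl_mmap_mem_buf_alloc() prototype shown in this header (the callback bodies, the topic string and the mem_id value are purely hypothetical):

/* Hypothetical sketch, not driver code: a minimal behavior plugged into the
 * unified memory manager described above. */
static int example_alloc(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
	/* back the buffer with one page and record its mappable size,
	 * as the @alloc callback documentation above requires */
	buf->private = (void *)get_zeroed_page(gfp);
	if (!buf->private)
		return -ENOMEM;

	buf->mappable_size = PAGE_SIZE;
	return 0;
}

static void example_release(struct hl_mmap_mem_buf *buf)
{
	free_page((unsigned long)buf->private);
}

static struct hl_mmap_mem_buf_behavior example_behavior = {
	.topic = "EXAMPLE",
	.mem_id = 0x100,		/* hypothetical memory type id */
	.alloc = example_alloc,
	.release = example_release,	/* .mmap omitted in this sketch */
};

/* allocation then goes through the per-FD manager, e.g.:
 *	buf = hl_mmap_mem_buf_alloc(&hpriv->mem_mgr, &example_behavior,
 *				    GFP_KERNEL, NULL);
 */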
@@ -935,12 +946,12 @@ struct hl_user_interrupt {
  * struct timestamp_reg_free_node - holds the timestamp registration free objects node
  * @free_objects_node: node in the list free_obj_jobs
  * @cq_cb: pointer to cq command buffer to be freed
- * @ts_buff: pointer to timestamp buffer to be freed
+ * @buf: pointer to timestamp buffer to be freed
  */
 struct timestamp_reg_free_node {
        struct list_head        free_objects_node;
        struct hl_cb            *cq_cb;
-       struct hl_ts_buff       *ts_buff;
+       struct hl_mmap_mem_buf  *buf;
 };
 
 /* struct timestamp_reg_work_obj - holds the timestamp registration free objects job
@@ -957,8 +968,8 @@ struct timestamp_reg_work_obj {
 };
 
 /* struct timestamp_reg_info - holds the timestamp registration related data.
- * @ts_buff: pointer to the timestamp buffer which include both user/kernel buffers.
- *           relevant only when doing timestamps records registration.
+ * @buf: pointer to the timestamp buffer which includes both user/kernel buffers.
+ *       relevant only when doing timestamp records registration.
  * @cq_cb: pointer to CQ counter CB.
  * @timestamp_kernel_addr: timestamp handle address, where to set timestamp
  *                         relevant only when doing timestamps records
@@ -969,7 +980,7 @@ struct timestamp_reg_work_obj {
  *          allocating records dynamically.
  */
 struct timestamp_reg_info {
-       struct hl_ts_buff       *ts_buff;
+       struct hl_mmap_mem_buf  *buf;
        struct hl_cb            *cq_cb;
        u64                     *timestamp_kernel_addr;
        u8                      in_use;
@@ -1068,6 +1079,15 @@ enum div_select_defs {
        DIV_SEL_DIVIDED_PLL = 3,
 };
 
+enum debugfs_access_type {
+       DEBUGFS_READ8,
+       DEBUGFS_WRITE8,
+       DEBUGFS_READ32,
+       DEBUGFS_WRITE32,
+       DEBUGFS_READ64,
+       DEBUGFS_WRITE64,
+};
+
 enum pci_region {
        PCI_REGION_CFG,
        PCI_REGION_SRAM,
@@ -1229,6 +1249,7 @@ struct fw_load_mgr {
  *                           its implementation is not trivial when the driver
  *                           is loaded in simulation mode (not upstreamed).
  * @scrub_device_mem: Scrub device memory given an address and size
+ * @scrub_device_dram: Scrub the dram memory of the device.
  * @get_int_queue_base: get the internal queue base address.
  * @test_queues: run simple test on all queues for sanity check.
  * @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
@@ -1236,18 +1257,14 @@ struct fw_load_mgr {
  * @asic_dma_pool_free: free small DMA allocation from pool.
  * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
  * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
- * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
+ * @hl_dma_unmap_sgtable: DMA unmap scatter-gather table.
  * @cs_parser: parse Command Submission.
- * @asic_dma_map_sg: DMA map scatter-gather list.
+ * @asic_dma_map_sgtable: DMA map scatter-gather table.
  * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
  * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
  * @update_eq_ci: update event queue CI.
  * @context_switch: called upon ASID context switch.
  * @restore_phase_topology: clear all SOBs amd MONs.
- * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM/Host memory.
- * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM/Host memory.
- * @debugfs_read64: debug interface for reading u64 from DRAM/SRAM/Host memory.
- * @debugfs_write64: debug interface for writing u64 to DRAM/SRAM/Host memory.
  * @debugfs_read_dma: debug interface for reading up to 2MB from the device's
  *                    internal memory via DMA engine.
  * @add_device_attr: add ASIC specific device attributes.
@@ -1257,8 +1274,8 @@ struct fw_load_mgr {
  * @write_pte: write MMU page table entry to DRAM.
  * @mmu_invalidate_cache: flush MMU STLB host/DRAM cache, either with soft
  *                        (L1 only) or hard (L0 & L1) flush.
- * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
- *                              ASID-VA-size mask.
+ * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with ASID-VA-size mask.
+ * @mmu_prefetch_cache_range: pre-fetch specific MMU STLB cache lines with ASID-VA-size mask.
  * @send_heartbeat: send is-alive packet to CPU-CP and verify response.
  * @debug_coresight: perform certain actions on Coresight for debugging.
  * @is_device_idle: return true if device is idle, false otherwise.
@@ -1267,6 +1284,7 @@ struct fw_load_mgr {
  * @hw_queues_unlock: release H/W queues lock.
  * @get_pci_id: retrieve PCI ID.
  * @get_eeprom_data: retrieve EEPROM data from F/W.
+ * @get_monitor_dump: retrieve monitor registers dump from F/W.
  * @send_cpu_message: send message to F/W. If the message is timedout, the
  *                    driver will eventually reset the device. The timeout can
  *                    be determined by the calling function or it can be 0 and
@@ -1289,8 +1307,6 @@ struct fw_load_mgr {
  * @gen_wait_cb: Generate a wait CB.
  * @reset_sob: Reset a SOB.
  * @reset_sob_group: Reset SOB group
- * @set_dma_mask_from_fw: set the DMA mask in the driver according to the
- *                        firmware configuration
  * @get_device_time: Get the device time.
  * @collective_wait_init_cs: Generate collective master/slave packets
  *                           and place them in the relevant cs jobs
@@ -1319,6 +1335,9 @@ struct fw_load_mgr {
  * @get_stream_master_qid_arr: get pointer to stream masters QID array
  * @is_valid_dram_page_size: return true if page size is supported in device
  *                           memory allocation, otherwise false.
+ * @get_valid_dram_page_orders: get valid device memory allocation page orders
+ * @access_dev_mem: access device memory
+ * @set_dram_bar_base: set the base of the DRAM BAR
  */
 struct hl_asic_funcs {
        int (*early_init)(struct hl_device *hdev);
@@ -1342,6 +1361,7 @@ struct hl_asic_funcs {
        void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
                                        void *cpu_addr, dma_addr_t dma_handle);
        int (*scrub_device_mem)(struct hl_device *hdev, u64 addr, u64 size);
+       int (*scrub_device_dram)(struct hl_device *hdev, u64 val);
        void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
                                dma_addr_t *dma_handle, u16 *queue_len);
        int (*test_queues)(struct hl_device *hdev);
@@ -1353,12 +1373,11 @@ struct hl_asic_funcs {
                                size_t size, dma_addr_t *dma_handle);
        void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
                                size_t size, void *vaddr);
-       void (*hl_dma_unmap_sg)(struct hl_device *hdev,
-                               struct scatterlist *sgl, int nents,
+       void (*hl_dma_unmap_sgtable)(struct hl_device *hdev,
+                               struct sg_table *sgt,
                                enum dma_data_direction dir);
        int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
-       int (*asic_dma_map_sg)(struct hl_device *hdev,
-                               struct scatterlist *sgl, int nents,
+       int (*asic_dma_map_sgtable)(struct hl_device *hdev, struct sg_table *sgt,
                                enum dma_data_direction dir);
        u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
                                        struct sg_table *sgt);
@@ -1369,14 +1388,6 @@ struct hl_asic_funcs {
        void (*update_eq_ci)(struct hl_device *hdev, u32 val);
        int (*context_switch)(struct hl_device *hdev, u32 asid);
        void (*restore_phase_topology)(struct hl_device *hdev);
-       int (*debugfs_read32)(struct hl_device *hdev, u64 addr,
-                               bool user_address, u32 *val);
-       int (*debugfs_write32)(struct hl_device *hdev, u64 addr,
-                               bool user_address, u32 val);
-       int (*debugfs_read64)(struct hl_device *hdev, u64 addr,
-                               bool user_address, u64 *val);
-       int (*debugfs_write64)(struct hl_device *hdev, u64 addr,
-                               bool user_address, u64 val);
        int (*debugfs_read_dma)(struct hl_device *hdev, u64 addr, u32 size,
                                void *blob_addr);
        void (*add_device_attr)(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp,
@@ -1391,6 +1402,7 @@ struct hl_asic_funcs {
                                        u32 flags);
        int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
                                u32 flags, u32 asid, u64 va, u64 size);
+       int (*mmu_prefetch_cache_range)(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size);
        int (*send_heartbeat)(struct hl_device *hdev);
        int (*debug_coresight)(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
        bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr,
@@ -1399,8 +1411,8 @@ struct hl_asic_funcs {
        void (*hw_queues_lock)(struct hl_device *hdev);
        void (*hw_queues_unlock)(struct hl_device *hdev);
        u32 (*get_pci_id)(struct hl_device *hdev);
-       int (*get_eeprom_data)(struct hl_device *hdev, void *data,
-                               size_t max_size);
+       int (*get_eeprom_data)(struct hl_device *hdev, void *data, size_t max_size);
+       int (*get_monitor_dump)(struct hl_device *hdev, void *data);
        int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
                                u16 len, u32 timeout, u64 *result);
        int (*pci_bars_map)(struct hl_device *hdev);
@@ -1421,7 +1433,6 @@ struct hl_asic_funcs {
                        struct hl_gen_wait_properties *prop);
        void (*reset_sob)(struct hl_device *hdev, void *data);
        void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group);
-       void (*set_dma_mask_from_fw)(struct hl_device *hdev);
        u64 (*get_device_time)(struct hl_device *hdev);
        int (*collective_wait_init_cs)(struct hl_cs *cs);
        int (*collective_wait_create_jobs)(struct hl_device *hdev,
@@ -1445,6 +1456,12 @@ struct hl_asic_funcs {
        void (*set_pci_memory_regions)(struct hl_device *hdev);
        u32* (*get_stream_master_qid_arr)(void);
        bool (*is_valid_dram_page_size)(u32 page_size);
+       int (*mmu_get_real_page_size)(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
+                                       u32 page_size, u32 *real_page_size, bool is_dram_addr);
+       void (*get_valid_dram_page_orders)(struct hl_info_dev_memalloc_page_sizes *info);
+       int (*access_dev_mem)(struct hl_device *hdev, struct pci_mem_region *region,
+               enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type);
+       u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
 };
 
 
@@ -1915,6 +1932,18 @@ struct hl_debug_params {
        bool enable;
 };
 
+/**
+ * struct hl_notifier_event - holds the notifier data structure
+ * @eventfd: the event file descriptor to raise the notifications
+ * @lock: mutex lock to protect the notifier data flows
+ * @events_mask: indicates the bitmap events
+ */
+struct hl_notifier_event {
+       struct eventfd_ctx      *eventfd;
+       struct mutex            lock;
+       u64                     events_mask;
+};
+
 /*
  * FILE PRIVATE STRUCTURE
  */
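
struct hl_notifier_event above pairs an eventfd context with a 64-bit events bitmap and a protecting mutex. A hedged sketch of how the send side of a notifier of this shape could look (the actual driver entry point is hl_notifier_event_send_all(), declared further down; the body here is an assumption based only on the field documentation above):

/* Hedged sketch only: how a per-FD notifier of this shape could be signalled.
 * This is not the driver's hl_notifier_event_send_all() implementation. */
static void example_notify(struct hl_notifier_event *notifier, u64 event)
{
	mutex_lock(&notifier->lock);

	notifier->events_mask |= event;			/* record the event bit */

	if (notifier->eventfd)
		eventfd_signal(notifier->eventfd, 1);	/* wake poll()ers */

	mutex_unlock(&notifier->lock);
}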
@@ -1926,25 +1955,25 @@ struct hl_debug_params {
  * @taskpid: current process ID.
  * @ctx: current executing context. TODO: remove for multiple ctx per process
  * @ctx_mgr: context manager to handle multiple context for this FD.
- * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
- * @ts_mem_mgr: timestamp registration manager for alloc/free/map timestamp buffers.
+ * @mem_mgr: manager descriptor for memory exportable via mmap
+ * @notifier_event: notifier eventfd towards user process
  * @debugfs_list: list of relevant ASIC debugfs.
  * @dev_node: node in the device list of file private data
  * @refcount: number of related contexts.
  * @restore_phase_mutex: lock for context switch and restore phase.
  */
 struct hl_fpriv {
-       struct hl_device        *hdev;
-       struct file             *filp;
-       struct pid              *taskpid;
-       struct hl_ctx           *ctx;
-       struct hl_ctx_mgr       ctx_mgr;
-       struct hl_cb_mgr        cb_mgr;
-       struct hl_ts_mgr        ts_mem_mgr;
-       struct list_head        debugfs_list;
-       struct list_head        dev_node;
-       struct kref             refcount;
-       struct mutex            restore_phase_mutex;
+       struct hl_device                *hdev;
+       struct file                     *filp;
+       struct pid                      *taskpid;
+       struct hl_ctx                   *ctx;
+       struct hl_ctx_mgr               ctx_mgr;
+       struct hl_mem_mgr               mem_mgr;
+       struct hl_notifier_event        notifier_event;
+       struct list_head                debugfs_list;
+       struct list_head                dev_node;
+       struct kref                     refcount;
+       struct mutex                    restore_phase_mutex;
 };
 
 
@@ -1992,12 +2021,14 @@ struct hl_debugfs_entry {
  * @userptr_spinlock: protects userptr_list.
  * @ctx_mem_hash_list: list of available contexts with MMU mappings.
  * @ctx_mem_hash_spinlock: protects cb_list.
- * @blob_desc: descriptor of blob
+ * @data_dma_blob_desc: data DMA descriptor of blob.
+ * @mon_dump_blob_desc: monitor dump descriptor of blob.
  * @state_dump: data of the system states in case of a bad cs.
  * @state_dump_sem: protects state_dump.
  * @addr: next address to read/write from/to in read/write32.
  * @mmu_addr: next virtual address to translate to physical address in mmu_show.
  * @userptr_lookup: the target user ptr to look up for on demand.
+ * @memory_scrub_val: the value to which the dram will be scrubbed using cb scrub_device_dram
  * @mmu_asid: ASID to use while translating in mmu_show.
  * @state_dump_head: index of the latest state dump
  * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
@@ -2021,12 +2052,14 @@ struct hl_dbg_device_entry {
        spinlock_t                      userptr_spinlock;
        struct list_head                ctx_mem_hash_list;
        spinlock_t                      ctx_mem_hash_spinlock;
-       struct debugfs_blob_wrapper     blob_desc;
+       struct debugfs_blob_wrapper     data_dma_blob_desc;
+       struct debugfs_blob_wrapper     mon_dump_blob_desc;
        char                            *state_dump[HL_STATE_DUMP_HIST_LEN];
        struct rw_semaphore             state_dump_sem;
        u64                             addr;
        u64                             mmu_addr;
        u64                             userptr_lookup;
+       u64                             memory_scrub_val;
        u32                             mmu_asid;
        u32                             state_dump_head;
        u8                              i2c_bus;
@@ -2442,6 +2475,24 @@ struct hl_mmu_funcs {
 };
 
 /**
+ * struct hl_prefetch_work - prefetch work structure handler
+ * @pf_work: actual work struct.
+ * @ctx: compute context.
+ * @va: virtual address to pre-fetch.
+ * @size: pre-fetch size.
+ * @flags: operation flags.
+ * @asid: ASID for maintenance operation.
+ */
+struct hl_prefetch_work {
+       struct work_struct      pf_work;
+       struct hl_ctx           *ctx;
+       u64                     va;
+       u64                     size;
+       u32                     flags;
+       u32                     asid;
+};
+
+/*
  * number of user contexts allowed to call wait_for_multi_cs ioctl in
  * parallel
  */
@@ -2517,37 +2568,50 @@ struct hl_clk_throttle {
 };
 
 /**
- * struct last_error_session_info - info about last session in which CS timeout or
- *                                    razwi error occurred.
- * @open_dev_timestamp: device open timestamp.
- * @cs_timeout_timestamp: CS timeout timestamp.
- * @razwi_timestamp: razwi timestamp.
- * @cs_write_disable: if set writing to CS parameters in the structure is disabled so the
- *                    first (root cause) CS timeout will not be overwritten.
- * @razwi_write_disable: if set writing to razwi parameters in the structure is disabled so the
- *                       first (root cause) razwi will not be overwritten.
- * @cs_timeout_seq: CS timeout sequence number.
- * @razwi_addr: address that caused razwi.
- * @razwi_engine_id_1: engine id of the razwi initiator, if it was initiated by engine that does
- *                     not have engine id it will be set to U16_MAX.
- * @razwi_engine_id_2: second engine id of razwi initiator. Might happen that razwi have 2 possible
- *                     engines which one them caused the razwi. In that case, it will contain the
- *                     second possible engine id, otherwise it will be set to U16_MAX.
- * @razwi_non_engine_initiator: in case the initiator of the razwi does not have engine id.
- * @razwi_type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX.
+ * struct cs_timeout_info - info of last CS timeout occurred.
+ * @timestamp: CS timeout timestamp.
+ * @write_disable: if set, writing to CS parameters in the structure is disabled so
+ *                 the first (root cause) CS timeout will not be overwritten.
+ * @seq: CS timeout sequence number.
+ */
+struct cs_timeout_info {
+       ktime_t         timestamp;
+       atomic_t        write_disable;
+       u64             seq;
+};
+
+/**
+ * struct razwi_info - info about last razwi error occurred.
+ * @timestamp: razwi timestamp.
+ * @write_disable: if set, writing to razwi parameters in the structure is disabled so the
+ *                 first (root cause) razwi will not be overwritten.
+ * @addr: address that caused the razwi.
+ * @engine_id_1: engine id of the razwi initiator; if it was initiated by an engine that does
+ *               not have an engine id, it will be set to U16_MAX.
+ * @engine_id_2: second engine id of the razwi initiator. A razwi might have 2 possible
+ *               engines, one of which caused it. In that case, this will contain the
+ *               second possible engine id, otherwise it will be set to U16_MAX.
+ * @non_engine_initiator: in case the initiator of the razwi does not have engine id.
+ * @type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX.
+ */
+struct razwi_info {
+       ktime_t         timestamp;
+       atomic_t        write_disable;
+       u64             addr;
+       u16             engine_id_1;
+       u16             engine_id_2;
+       u8              non_engine_initiator;
+       u8              type;
+};
+
+/**
+ * struct last_error_session_info - info about errors that occurred in the last session.
+ * @cs_timeout: CS timeout error last information.
+ * @razwi: razwi last information.
  */
 struct last_error_session_info {
-       ktime_t         open_dev_timestamp;
-       ktime_t         cs_timeout_timestamp;
-       ktime_t         razwi_timestamp;
-       atomic_t        cs_write_disable;
-       atomic_t        razwi_write_disable;
-       u64             cs_timeout_seq;
-       u64             razwi_addr;
-       u16             razwi_engine_id_1;
-       u16             razwi_engine_id_2;
-       u8              razwi_non_engine_initiator;
-       u8              razwi_type;
+       struct  cs_timeout_info cs_timeout;
+       struct  razwi_info      razwi;
 };
 
 /**
@@ -2614,11 +2678,12 @@ struct hl_reset_info {
  *         context.
  * @eq_wq: work queue of event queue for executing work in process context.
  * @ts_free_obj_wq: work queue for timestamp registration objects release.
+ * @pf_wq: work queue for MMU pre-fetch operations.
  * @kernel_ctx: Kernel driver context structure.
  * @kernel_queues: array of hl_hw_queue.
  * @cs_mirror_list: CS mirror list for TDR.
  * @cs_mirror_lock: protects cs_mirror_list.
- * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs.
+ * @kernel_mem_mgr: memory manager for memory buffers with lifespan of driver.
  * @event_queue: event queue for IRQ from CPU-CP.
  * @dma_pool: DMA pool for small allocations.
  * @cpu_accessible_dma_mem: Host <-> CPU-CP shared memory CPU address.
@@ -2656,9 +2721,10 @@ struct hl_reset_info {
  * @state_dump_specs: constants and dictionaries needed to dump system state.
  * @multi_cs_completion: array of multi-CS completion.
  * @clk_throttling: holds information about current/previous clock throttling events
- * @reset_info: holds current device reset information.
  * @last_error: holds information about last session in which CS timeout or razwi error occurred.
+ * @reset_info: holds current device reset information.
  * @stream_master_qid_arr: pointer to array with QIDs of master streams.
+ * @fw_major_version: major version of the currently loaded preboot
  * @dram_used_mem: current DRAM memory consumption.
  * @timeout_jiffies: device CS timeout value.
  * @max_power: the max power of the device, as configured by the sysadmin. This
@@ -2678,6 +2744,9 @@ struct hl_reset_info {
  *                                  session.
  * @open_counter: number of successful device open operations.
  * @fw_poll_interval_usec: FW status poll interval in usec.
+ *                         used for CPU boot status polling
+ * @fw_comms_poll_interval_usec: FW comms/protocol poll interval in usec.
+ *                                  used for COMMS protocol commands (COMMS_STS_*)
  * @card_type: Various ASICs have several card types. This indicates the card
  *             type of the current device.
  * @major: habanalabs kernel driver major.
@@ -2686,6 +2755,7 @@ struct hl_reset_info {
  * @id_control: minor of the control device
  * @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
  *                    addresses.
+ * @is_in_dram_scrub: true if a dram scrub operation is ongoing.
  * @disabled: is device disabled.
  * @late_init_done: is late init stage was done during initialization.
  * @hwmon_initialized: is H/W monitor sensors was initialized.
@@ -2699,7 +2769,6 @@ struct hl_reset_info {
  *                   huge pages.
  * @init_done: is the initialization of the device done.
  * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
- * @dma_mask: the dma mask that was set for this device
  * @in_debug: whether the device is in a state where the profiling/tracing infrastructure
  *            can be used. This indication is needed because in some ASICs we need to do
  *            specific operations to enable that infrastructure.
@@ -2721,6 +2790,8 @@ struct hl_reset_info {
  *                        cases where Linux was not loaded to device CPU
  * @supports_wait_for_multi_cs: true if wait for multi CS is supported
  * @is_compute_ctx_active: Whether there is an active compute context executing.
+ * @compute_ctx_in_release: true if the current compute context is being released.
+ * @supports_mmu_prefetch: true if prefetch is supported, otherwise false.
  */
 struct hl_device {
        struct pci_dev                  *pdev;
@@ -2742,11 +2813,12 @@ struct hl_device {
        struct workqueue_struct         **cq_wq;
        struct workqueue_struct         *eq_wq;
        struct workqueue_struct         *ts_free_obj_wq;
+       struct workqueue_struct         *pf_wq;
        struct hl_ctx                   *kernel_ctx;
        struct hl_hw_queue              *kernel_queues;
        struct list_head                cs_mirror_list;
        spinlock_t                      cs_mirror_lock;
-       struct hl_cb_mgr                kernel_cb_mgr;
+       struct hl_mem_mgr               kernel_mem_mgr;
        struct hl_eq                    event_queue;
        struct dma_pool                 *dma_pool;
        void                            *cpu_accessible_dma_mem;
@@ -2797,6 +2869,7 @@ struct hl_device {
        struct hl_reset_info            reset_info;
 
        u32                             *stream_master_qid_arr;
+       u32                             fw_major_version;
        atomic64_t                      dram_used_mem;
        u64                             timeout_jiffies;
        u64                             max_power;
@@ -2807,12 +2880,15 @@ struct hl_device {
        u64                             open_counter;
        u64                             fw_poll_interval_usec;
        ktime_t                         last_successful_open_ktime;
+       u64                             fw_comms_poll_interval_usec;
+
        enum cpucp_card_types           card_type;
        u32                             major;
        u32                             high_pll;
        u16                             id;
        u16                             id_control;
        u16                             cpu_pci_msb_addr;
+       u8                              is_in_dram_scrub;
        u8                              disabled;
        u8                              late_init_done;
        u8                              hwmon_initialized;
@@ -2823,7 +2899,6 @@ struct hl_device {
        u8                              pmmu_huge_range;
        u8                              init_done;
        u8                              device_cpu_disabled;
-       u8                              dma_mask;
        u8                              in_debug;
        u8                              cdev_sysfs_created;
        u8                              stop_on_err;
@@ -2839,6 +2914,8 @@ struct hl_device {
        u8                              supports_wait_for_multi_cs;
        u8                              stream_master_qid_arr_size;
        u8                              is_compute_ctx_active;
+       u8                              compute_ctx_in_release;
+       u8                              supports_mmu_prefetch;
 
        /* Parameters for bring-up */
        u64                             nic_ports_mask;
@@ -2971,6 +3048,14 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
        return ((address <= range_end_address) && (range_start_address <= end_address));
 }
 
+uint64_t hl_set_dram_bar_default(struct hl_device *hdev, u64 addr);
+int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir);
+void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
+                               enum dma_data_direction dir);
+int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
+       enum debugfs_access_type acc_type);
+int hl_access_dev_mem(struct hl_device *hdev, struct pci_mem_region *region,
+               enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type);
 int hl_device_open(struct inode *inode, struct file *filp);
 int hl_device_open_ctrl(struct inode *inode, struct file *filp);
 bool hl_device_operational(struct hl_device *hdev,
@@ -3013,7 +3098,7 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv);
 void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx);
 int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
 void hl_ctx_do_release(struct kref *ref);
-void hl_ctx_get(struct hl_device *hdev,        struct hl_ctx *ctx);
+void hl_ctx_get(struct hl_ctx *ctx);
 int hl_ctx_put(struct hl_ctx *ctx);
 struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev);
 struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
@@ -3034,23 +3119,21 @@ int hl_device_utilization(struct hl_device *hdev, u32 *utilization);
 int hl_build_hwmon_channel_info(struct hl_device *hdev,
                struct cpucp_sensor *sensors_arr);
 
+void hl_notifier_event_send_all(struct hl_device *hdev, u64 event);
+
 int hl_sysfs_init(struct hl_device *hdev);
 void hl_sysfs_fini(struct hl_device *hdev);
 
 int hl_hwmon_init(struct hl_device *hdev);
 void hl_hwmon_fini(struct hl_device *hdev);
 
-int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
+int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
                        struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
                        bool map_cb, u64 *handle);
-int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
-int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
+int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle);
 int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
-struct hl_cb *hl_cb_get(struct hl_device *hdev,        struct hl_cb_mgr *mgr,
-                       u32 handle);
+struct hl_cb *hl_cb_get(struct hl_mem_mgr *mmg, u64 handle);
 void hl_cb_put(struct hl_cb *cb);
-void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
-void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
 struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
                                        bool internal_cb);
 int hl_cb_pool_init(struct hl_device *hdev);
@@ -3104,6 +3187,8 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx);
 void hl_mmu_ctx_fini(struct hl_ctx *ctx);
 int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
                u32 page_size, bool flush_pte);
+int hl_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
+                               u32 page_size, u32 *real_page_size, bool is_dram_addr);
 int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
                bool flush_pte);
 int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
@@ -3112,6 +3197,7 @@ int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size);
 int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags);
 int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
                                        u32 flags, u32 asid, u64 va, u64 size);
+int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size);
 u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte);
 u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
                                        u8 hop_idx, u64 hop_addr, u64 virt_addr);
@@ -3149,6 +3235,7 @@ int hl_fw_cpucp_handshake(struct hl_device *hdev,
                                u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
                                u32 boot_err1_reg);
 int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
+int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data);
 int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
                struct hl_info_pci_counters *counters);
 int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
@@ -3224,11 +3311,19 @@ __printf(4, 5) int hl_snprintf_resize(char **buf, size_t *size, size_t *offset,
                                        const char *format, ...);
 char *hl_format_as_binary(char *buf, size_t buf_len, u32 n);
 const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
-void hl_ts_mgr_init(struct hl_ts_mgr *mgr);
-void hl_ts_mgr_fini(struct hl_device *hdev, struct hl_ts_mgr *mgr);
-int hl_ts_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
-struct hl_ts_buff *hl_ts_get(struct hl_device *hdev, struct hl_ts_mgr *mgr, u32 handle);
-void hl_ts_put(struct hl_ts_buff *buff);
+
+void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg);
+void hl_mem_mgr_fini(struct hl_mem_mgr *mmg);
+int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
+                   void *args);
+struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg,
+                                                  u64 handle);
+int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle);
+int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf);
+struct hl_mmap_mem_buf *
+hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
+                     struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
+                     void *args);
 
 #ifdef CONFIG_DEBUG_FS
 
index ca404ed..37edb69 100644 (file)
@@ -134,13 +134,14 @@ int hl_device_open(struct inode *inode, struct file *filp)
        hpriv->hdev = hdev;
        filp->private_data = hpriv;
        hpriv->filp = filp;
+
+       mutex_init(&hpriv->notifier_event.lock);
        mutex_init(&hpriv->restore_phase_mutex);
        kref_init(&hpriv->refcount);
        nonseekable_open(inode, filp);
 
-       hl_cb_mgr_init(&hpriv->cb_mgr);
        hl_ctx_mgr_init(&hpriv->ctx_mgr);
-       hl_ts_mgr_init(&hpriv->ts_mem_mgr);
+       hl_mem_mgr_init(hpriv->hdev->dev, &hpriv->mem_mgr);
 
        hpriv->taskpid = get_task_pid(current, PIDTYPE_PID);
 
@@ -150,7 +151,28 @@ int hl_device_open(struct inode *inode, struct file *filp)
                dev_err_ratelimited(hdev->dev,
                        "Can't open %s because it is %s\n",
                        dev_name(hdev->dev), hdev->status[status]);
-               rc = -EPERM;
+
+               if (status == HL_DEVICE_STATUS_IN_RESET)
+                       rc = -EAGAIN;
+               else
+                       rc = -EPERM;
+
+               goto out_err;
+       }
+
+       if (hdev->is_in_dram_scrub) {
+               dev_dbg_ratelimited(hdev->dev,
+                       "Can't open %s during dram scrub\n",
+                       dev_name(hdev->dev));
+               rc = -EAGAIN;
+               goto out_err;
+       }
+
+       if (hdev->compute_ctx_in_release) {
+               dev_dbg_ratelimited(hdev->dev,
+                       "Can't open %s because another user is still releasing it\n",
+                       dev_name(hdev->dev));
+               rc = -EAGAIN;
                goto out_err;
        }
 
@@ -173,8 +195,8 @@ int hl_device_open(struct inode *inode, struct file *filp)
 
        hl_debugfs_add_file(hpriv);
 
-       atomic_set(&hdev->last_error.cs_write_disable, 0);
-       atomic_set(&hdev->last_error.razwi_write_disable, 0);
+       atomic_set(&hdev->last_error.cs_timeout.write_disable, 0);
+       atomic_set(&hdev->last_error.razwi.write_disable, 0);
 
        hdev->open_counter++;
        hdev->last_successful_open_jif = jiffies;
@@ -184,11 +206,11 @@ int hl_device_open(struct inode *inode, struct file *filp)
 
 out_err:
        mutex_unlock(&hdev->fpriv_list_lock);
-       hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
-       hl_ts_mgr_fini(hpriv->hdev, &hpriv->ts_mem_mgr);
+       hl_mem_mgr_fini(&hpriv->mem_mgr);
        hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
        filp->private_data = NULL;
        mutex_destroy(&hpriv->restore_phase_mutex);
+       mutex_destroy(&hpriv->notifier_event.lock);
        put_pid(hpriv->taskpid);
 
        kfree(hpriv);
@@ -222,9 +244,11 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
        hpriv->hdev = hdev;
        filp->private_data = hpriv;
        hpriv->filp = filp;
+
+       mutex_init(&hpriv->notifier_event.lock);
        nonseekable_open(inode, filp);
 
-       hpriv->taskpid = find_get_pid(current->pid);
+       hpriv->taskpid = get_task_pid(current, PIDTYPE_PID);
 
        mutex_lock(&hdev->fpriv_ctrl_list_lock);
 
@@ -288,6 +312,7 @@ static int fixup_device_params(struct hl_device *hdev)
        hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type);
 
        hdev->fw_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
+       hdev->fw_comms_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
 
        hdev->stop_on_err = true;
        hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
@@ -296,9 +321,6 @@ static int fixup_device_params(struct hl_device *hdev)
        /* Enable only after the initialization of the device */
        hdev->disabled = true;
 
-       /* Set default DMA mask to 32 bits */
-       hdev->dma_mask = 32;
-
        return 0;
 }
 
index c13a3c2..c7864d6 100644 (file)
@@ -76,6 +76,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
        if (hw_ip.dram_size > PAGE_SIZE)
                hw_ip.dram_enabled = 1;
        hw_ip.dram_page_size = prop->dram_page_size;
+       hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size;
        hw_ip.num_of_events = prop->num_of_events;
 
        memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
@@ -115,6 +116,23 @@ static int hw_events_info(struct hl_device *hdev, bool aggregate,
        return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
 }
 
+static int events_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+       u32 max_size = args->return_size;
+       u64 events_mask;
+       void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+       if ((max_size < sizeof(u64)) || (!out))
+               return -EINVAL;
+
+       mutex_lock(&hpriv->notifier_event.lock);
+       events_mask = hpriv->notifier_event.events_mask;
+       hpriv->notifier_event.events_mask = 0;
+       mutex_unlock(&hpriv->notifier_event.lock);
+
+       return copy_to_user(out, &events_mask, sizeof(u64)) ? -EFAULT : 0;
+}
+
 static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
 {
        struct hl_device *hdev = hpriv->hdev;
@@ -497,6 +515,8 @@ static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
        open_stats_info.last_open_period_ms = jiffies64_to_msecs(
                hdev->last_open_session_duration_jif);
        open_stats_info.open_counter = hdev->open_counter;
+       open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active;
+       open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release;
 
        return copy_to_user(out, &open_stats_info,
                min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
@@ -549,7 +569,7 @@ static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *a
        if ((!max_size) || (!out))
                return -EINVAL;
 
-       info.timestamp = ktime_to_ns(hdev->last_error.open_dev_timestamp);
+       info.timestamp = ktime_to_ns(hdev->last_successful_open_ktime);
 
        return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
 }
@@ -564,8 +584,8 @@ static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
        if ((!max_size) || (!out))
                return -EINVAL;
 
-       info.seq = hdev->last_error.cs_timeout_seq;
-       info.timestamp = ktime_to_ns(hdev->last_error.cs_timeout_timestamp);
+       info.seq = hdev->last_error.cs_timeout.seq;
+       info.timestamp = ktime_to_ns(hdev->last_error.cs_timeout.timestamp);
 
        return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
 }
@@ -580,16 +600,74 @@ static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
        if ((!max_size) || (!out))
                return -EINVAL;
 
-       info.timestamp = ktime_to_ns(hdev->last_error.razwi_timestamp);
-       info.addr = hdev->last_error.razwi_addr;
-       info.engine_id_1 = hdev->last_error.razwi_engine_id_1;
-       info.engine_id_2 = hdev->last_error.razwi_engine_id_2;
-       info.no_engine_id = hdev->last_error.razwi_non_engine_initiator;
-       info.error_type = hdev->last_error.razwi_type;
+       info.timestamp = ktime_to_ns(hdev->last_error.razwi.timestamp);
+       info.addr = hdev->last_error.razwi.addr;
+       info.engine_id_1 = hdev->last_error.razwi.engine_id_1;
+       info.engine_id_2 = hdev->last_error.razwi.engine_id_2;
+       info.no_engine_id = hdev->last_error.razwi.non_engine_initiator;
+       info.error_type = hdev->last_error.razwi.type;
+
+       return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
+}
+
+static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+       void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+       struct hl_info_dev_memalloc_page_sizes info = {0};
+       struct hl_device *hdev = hpriv->hdev;
+       u32 max_size = args->return_size;
+
+       if ((!max_size) || (!out))
+               return -EINVAL;
+
+       /*
+        * Future ASICs that support multiple DRAM page sizes will support only "power of 2"
+        * pages (unlike some older ASICs that already support multiple page sizes).
+        * For this reason, for all ASICs that do not support multiple page sizes, the function
+        * will return an empty bitmask indicating that multiple page sizes are not supported.
+        */
+       hdev->asic_funcs->get_valid_dram_page_orders(&info);
 
        return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
 }
 
+static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+       int rc;
+
+       /* check if there is already an eventfd registered for this process */
+       mutex_lock(&hpriv->notifier_event.lock);
+       if (hpriv->notifier_event.eventfd) {
+               mutex_unlock(&hpriv->notifier_event.lock);
+               return -EINVAL;
+       }
+
+       hpriv->notifier_event.eventfd = eventfd_ctx_fdget(args->eventfd);
+       if (IS_ERR(hpriv->notifier_event.eventfd)) {
+               rc = PTR_ERR(hpriv->notifier_event.eventfd);
+               hpriv->notifier_event.eventfd = NULL;
+               mutex_unlock(&hpriv->notifier_event.lock);
+               return rc;
+       }
+
+       mutex_unlock(&hpriv->notifier_event.lock);
+       return 0;
+}
+
+static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+       mutex_lock(&hpriv->notifier_event.lock);
+       if (!hpriv->notifier_event.eventfd) {
+               mutex_unlock(&hpriv->notifier_event.lock);
+               return -EINVAL;
+       }
+
+       eventfd_ctx_put(hpriv->notifier_event.eventfd);
+       hpriv->notifier_event.eventfd = NULL;
+       mutex_unlock(&hpriv->notifier_event.lock);
+       return 0;
+}
+
 static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
                                struct device *dev)
 {
@@ -640,6 +718,12 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
        case HL_INFO_RAZWI_EVENT:
                return razwi_info(hpriv, args);
 
+       case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES:
+               return dev_mem_alloc_page_sizes_info(hpriv, args);
+
+       case HL_INFO_GET_EVENTS:
+               return events_info(hpriv, args);
+
        default:
                break;
        }
@@ -690,6 +774,12 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
        case HL_INFO_DRAM_PENDING_ROWS:
                return dram_pending_rows_info(hpriv, args);
 
+       case HL_INFO_REGISTER_EVENTFD:
+               return eventfd_register(hpriv, args);
+
+       case HL_INFO_UNREGISTER_EVENTFD:
+               return eventfd_unregister(hpriv, args);
+
        default:
                dev_err(dev, "Invalid request %d\n", args->op);
                rc = -EINVAL;
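
The new eventfd_register()/eventfd_unregister() handlers and the HL_INFO_GET_EVENTS query above give user space an asynchronous notification path: register an eventfd, wait on it, then read-and-clear the events mask. A hedged userspace sketch of that flow, assuming the HL_IOCTL_INFO ioctl and the struct hl_info_args layout from the driver's uapi header (field names follow the handlers above; error handling is trimmed):

/* Hedged sketch: register an eventfd, wait for a notification, then fetch the
 * events mask via HL_INFO_GET_EVENTS. Assumes the uapi definitions from
 * <uapi/misc/habanalabs.h>; error handling is omitted for brevity. */
#include <poll.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

#include <misc/habanalabs.h>	/* HL_IOCTL_INFO, struct hl_info_args, HL_INFO_* */

static uint64_t wait_for_events(int dev_fd)
{
	struct hl_info_args args;
	uint64_t events_mask = 0, efd_val;
	int efd = eventfd(0, 0);

	/* register the eventfd with the driver */
	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_REGISTER_EVENTFD;
	args.eventfd = efd;
	ioctl(dev_fd, HL_IOCTL_INFO, &args);

	/* block until the driver signals the eventfd */
	struct pollfd pfd = { .fd = efd, .events = POLLIN };
	poll(&pfd, 1, -1);
	read(efd, &efd_val, sizeof(efd_val));	/* drain the eventfd counter */

	/* read-and-clear the accumulated events bitmap */
	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_GET_EVENTS;
	args.return_pointer = (uintptr_t)&events_mask;
	args.return_size = sizeof(events_mask);
	ioctl(dev_fd, HL_IOCTL_INFO, &args);

	return events_mask;
}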
index e2bc128..8500e15 100644 (file)
@@ -152,11 +152,11 @@ static void hl_ts_free_objects(struct work_struct *work)
        struct hl_device *hdev = job->hdev;
 
        list_for_each_entry_safe(free_obj, temp_free_obj, free_list_head, free_objects_node) {
-               dev_dbg(hdev->dev, "About to put refcount to ts_buff (%p) cq_cb(%p)\n",
-                                       free_obj->ts_buff,
+               dev_dbg(hdev->dev, "About to put refcount to buf (%p) cq_cb(%p)\n",
+                                       free_obj->buf,
                                        free_obj->cq_cb);
 
-               hl_ts_put(free_obj->ts_buff);
+               hl_mmap_mem_buf_put(free_obj->buf);
                hl_cb_put(free_obj->cq_cb);
                kfree(free_obj);
        }
@@ -210,7 +210,7 @@ static int handle_registration_node(struct hl_device *hdev, struct hl_user_pendi
        /* Putting the refcount for ts_buff and cq_cb objects will be handled
         * in workqueue context, just add job to free_list.
         */
-       free_node->ts_buff = pend->ts_reg_info.ts_buff;
+       free_node->buf = pend->ts_reg_info.buf;
        free_node->cq_cb = pend->ts_reg_info.cq_cb;
        list_add(&free_node->free_objects_node, *free_list);
 
@@ -244,7 +244,7 @@ static void handle_user_cq(struct hl_device *hdev,
        list_for_each_entry_safe(pend, temp_pend, &user_cq->wait_list_head, wait_list_node) {
                if ((pend->cq_kernel_addr && *(pend->cq_kernel_addr) >= pend->cq_target_value) ||
                                !pend->cq_kernel_addr) {
-                       if (pend->ts_reg_info.ts_buff) {
+                       if (pend->ts_reg_info.buf) {
                                if (!reg_node_handle_fail) {
                                        rc = handle_registration_node(hdev, pend,
                                                                        &ts_reg_free_list_head);
@@ -282,10 +282,6 @@ irqreturn_t hl_irq_handler_user_cq(int irq, void *arg)
        struct hl_user_interrupt *user_cq = arg;
        struct hl_device *hdev = user_cq->hdev;
 
-       dev_dbg(hdev->dev,
-               "got user completion interrupt id %u",
-               user_cq->interrupt_id);
-
        /* Handle user cq interrupts registered on all interrupts */
        handle_user_cq(hdev, &hdev->common_user_interrupt);
 
index a13506d..663dd7e 100644 (file)
@@ -41,7 +41,7 @@ static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u
                        return -EINVAL;
                }
        } else {
-               psize = hdev->asic_prop.dram_page_size;
+               psize = prop->device_mem_alloc_default_page_size;
        }
 
        *page_size = psize;
@@ -117,7 +117,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
                        paddr = gen_pool_alloc(vm->dram_pg_pool, total_size);
                if (!paddr) {
                        dev_err(hdev->dev,
-                               "failed to allocate %llu contiguous pages with total size of %llu\n",
+                               "Cannot allocate %llu contiguous pages with total size of %llu\n",
                                num_pgs, total_size);
                        return -ENOMEM;
                }
@@ -156,9 +156,10 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
                        else
                                phys_pg_pack->pages[i] = gen_pool_alloc(vm->dram_pg_pool,
                                                                        page_size);
+
                        if (!phys_pg_pack->pages[i]) {
                                dev_err(hdev->dev,
-                                       "Failed to allocate device memory (out of memory)\n");
+                                       "Cannot allocate device memory (out of memory)\n");
                                rc = -ENOMEM;
                                goto page_err;
                        }
@@ -237,19 +238,18 @@ static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
                goto pin_err;
        }
 
-       rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
-                                       userptr->sgt->nents, DMA_BIDIRECTIONAL);
-       if (rc) {
-               dev_err(hdev->dev, "failed to map sgt with DMA region\n");
-               goto dma_map_err;
-       }
-
        userptr->dma_mapped = true;
        userptr->dir = DMA_BIDIRECTIONAL;
        userptr->vm_type = VM_TYPE_USERPTR;
 
        *p_userptr = userptr;
 
+       rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
+       if (rc) {
+               dev_err(hdev->dev, "failed to map sgt with DMA region\n");
+               goto dma_map_err;
+       }
+
        return 0;
 
 dma_map_err:
@@ -900,7 +900,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
         * consecutive block.
         */
        total_npages = 0;
-       for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
+       for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
                npages = hl_get_sg_info(sg, &dma_addr);
 
                total_npages += npages;
@@ -929,7 +929,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
        phys_pg_pack->total_size = total_npages * page_size;
 
        j = 0;
-       for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
+       for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
                npages = hl_get_sg_info(sg, &dma_addr);
 
                /* align down to physical page size and save the offset */
@@ -1102,21 +1102,24 @@ static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
  *   map a device virtual block to this pages and return the start address of
  *   this block.
  */
-static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
-               u64 *device_addr)
+static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device_addr)
 {
-       struct hl_device *hdev = ctx->hdev;
-       struct hl_vm *vm = &hdev->vm;
        struct hl_vm_phys_pg_pack *phys_pg_pack;
+       enum hl_va_range_type va_range_type = 0;
+       struct hl_device *hdev = ctx->hdev;
        struct hl_userptr *userptr = NULL;
+       u32 handle = 0, va_block_align;
        struct hl_vm_hash_node *hnode;
+       struct hl_vm *vm = &hdev->vm;
        struct hl_va_range *va_range;
-       enum vm_type *vm_type;
+       bool is_userptr, do_prefetch;
        u64 ret_vaddr, hint_addr;
-       u32 handle = 0, va_block_align;
+       enum vm_type *vm_type;
        int rc;
-       bool is_userptr = args->flags & HL_MEM_USERPTR;
-       enum hl_va_range_type va_range_type = 0;
+
+       /* set map flags */
+       is_userptr = args->flags & HL_MEM_USERPTR;
+       do_prefetch = hdev->supports_mmu_prefetch && (args->flags & HL_MEM_PREFETCH);
 
        /* Assume failure */
        *device_addr = 0;
@@ -1241,19 +1244,27 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
 
        rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
        if (rc) {
-               mutex_unlock(&ctx->mmu_lock);
-               dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
-                               handle);
+               dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
                goto map_err;
        }
 
        rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
                                ctx->asid, ret_vaddr, phys_pg_pack->total_size);
+       if (rc)
+               goto map_err;
 
        mutex_unlock(&ctx->mmu_lock);
 
-       if (rc)
-               goto map_err;
+       /*
+        * prefetch is done upon user's request. it is performed in a WQ and so can
+        * be done outside the MMU lock; the operation itself takes the MMU lock internally
+        */
+       if (do_prefetch) {
+               rc = hl_mmu_prefetch_cache_range(ctx, *vm_type, ctx->asid, ret_vaddr,
+                                                       phys_pg_pack->total_size);
+               if (rc)
+                       goto map_err;
+       }
 
        ret_vaddr += phys_pg_pack->offset;
 
@@ -1272,6 +1283,8 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
        return rc;
 
 map_err:
+       mutex_unlock(&ctx->mmu_lock);
+
        if (add_va_block(hdev, va_range, ret_vaddr,
                                ret_vaddr + phys_pg_pack->total_size - 1))
                dev_warn(hdev->dev,
@@ -1509,7 +1522,7 @@ int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
        vma->vm_ops = &hw_block_vm_ops;
        vma->vm_private_data = lnode;
 
-       hl_ctx_get(hdev, ctx);
+       hl_ctx_get(ctx);
 
        rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
        if (rc) {
@@ -1819,7 +1832,7 @@ static int export_dmabuf_common(struct hl_ctx *ctx,
        }
 
        hl_dmabuf->ctx = ctx;
-       hl_ctx_get(hdev, hl_dmabuf->ctx);
+       hl_ctx_get(hl_dmabuf->ctx);
 
        *dmabuf_fd = fd;
 
@@ -2076,164 +2089,34 @@ out:
        return rc;
 }
 
-static void ts_buff_release(struct kref *ref)
-{
-       struct hl_ts_buff *buff;
-
-       buff = container_of(ref, struct hl_ts_buff, refcount);
-
-       vfree(buff->kernel_buff_address);
-       vfree(buff->user_buff_address);
-       kfree(buff);
-}
-
-struct hl_ts_buff *hl_ts_get(struct hl_device *hdev, struct hl_ts_mgr *mgr,
-                                       u32 handle)
-{
-       struct hl_ts_buff *buff;
-
-       spin_lock(&mgr->ts_lock);
-       buff = idr_find(&mgr->ts_handles, handle);
-       if (!buff) {
-               spin_unlock(&mgr->ts_lock);
-               dev_warn(hdev->dev,
-                       "TS buff get failed, no match to handle 0x%x\n", handle);
-               return NULL;
-       }
-       kref_get(&buff->refcount);
-       spin_unlock(&mgr->ts_lock);
-
-       return buff;
-}
-
-void hl_ts_put(struct hl_ts_buff *buff)
+static void ts_buff_release(struct hl_mmap_mem_buf *buf)
 {
-       kref_put(&buff->refcount, ts_buff_release);
-}
-
-static void buff_vm_close(struct vm_area_struct *vma)
-{
-       struct hl_ts_buff *buff = (struct hl_ts_buff *) vma->vm_private_data;
-       long new_mmap_size;
-
-       new_mmap_size = buff->mmap_size - (vma->vm_end - vma->vm_start);
+       struct hl_ts_buff *ts_buff = buf->private;
 
-       if (new_mmap_size > 0) {
-               buff->mmap_size = new_mmap_size;
-               return;
-       }
-
-       atomic_set(&buff->mmap, 0);
-       hl_ts_put(buff);
-       vma->vm_private_data = NULL;
+       vfree(ts_buff->kernel_buff_address);
+       vfree(ts_buff->user_buff_address);
+       kfree(ts_buff);
 }
 
-static const struct vm_operations_struct ts_buff_vm_ops = {
-       .close = buff_vm_close
-};
-
-int hl_ts_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
+static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args)
 {
-       struct hl_device *hdev = hpriv->hdev;
-       struct hl_ts_buff *buff;
-       u32 handle, user_buff_size;
-       int rc;
-
-       /* We use the page offset to hold the idr and thus we need to clear
-        * it before doing the mmap itself
-        */
-       handle = vma->vm_pgoff;
-       vma->vm_pgoff = 0;
-
-       buff = hl_ts_get(hdev, &hpriv->ts_mem_mgr, handle);
-       if (!buff) {
-               dev_err(hdev->dev,
-                       "TS buff mmap failed, no match to handle 0x%x\n", handle);
-               return -EINVAL;
-       }
-
-       /* Validation check */
-       user_buff_size = vma->vm_end - vma->vm_start;
-       if (user_buff_size != ALIGN(buff->user_buff_size, PAGE_SIZE)) {
-               dev_err(hdev->dev,
-                       "TS buff mmap failed, mmap size 0x%x != 0x%x buff size\n",
-                       user_buff_size, ALIGN(buff->user_buff_size, PAGE_SIZE));
-               rc = -EINVAL;
-               goto put_buff;
-       }
-
-#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
-       if (!access_ok(VERIFY_WRITE,
-               (void __user *) (uintptr_t) vma->vm_start, user_buff_size)) {
-#else
-       if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
-                                               user_buff_size)) {
-#endif
-               dev_err(hdev->dev,
-                       "user pointer is invalid - 0x%lx\n",
-                       vma->vm_start);
-
-               rc = -EINVAL;
-               goto put_buff;
-       }
+       struct hl_ts_buff *ts_buff = buf->private;
 
-       if (atomic_cmpxchg(&buff->mmap, 0, 1)) {
-               dev_err(hdev->dev, "TS buff memory mmap failed, already mmaped to user\n");
-               rc = -EINVAL;
-               goto put_buff;
-       }
-
-       vma->vm_ops = &ts_buff_vm_ops;
-       vma->vm_private_data = buff;
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE;
-       rc = remap_vmalloc_range(vma, buff->user_buff_address, 0);
-       if (rc) {
-               atomic_set(&buff->mmap, 0);
-               goto put_buff;
-       }
-
-       buff->mmap_size = buff->user_buff_size;
-       vma->vm_pgoff = handle;
-
-       return 0;
-
-put_buff:
-       hl_ts_put(buff);
-       return rc;
-}
-
-void hl_ts_mgr_init(struct hl_ts_mgr *mgr)
-{
-       spin_lock_init(&mgr->ts_lock);
-       idr_init(&mgr->ts_handles);
+       return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0);
 }
 
-void hl_ts_mgr_fini(struct hl_device *hdev, struct hl_ts_mgr *mgr)
-{
-       struct hl_ts_buff *buff;
-       struct idr *idp;
-       u32 id;
-
-       idp = &mgr->ts_handles;
-
-       idr_for_each_entry(idp, buff, id) {
-               if (kref_put(&buff->refcount, ts_buff_release) != 1)
-                       dev_err(hdev->dev, "TS buff handle %d for CTX is still alive\n",
-                                                       id);
-       }
-
-       idr_destroy(&mgr->ts_handles);
-}
-
-static struct hl_ts_buff *hl_ts_alloc_buff(struct hl_device *hdev, u32 num_elements)
+static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
 {
        struct hl_ts_buff *ts_buff = NULL;
-       u32 size;
+       u32 size, num_elements;
        void *p;
 
+       num_elements = *(u32 *)args;
+
        ts_buff = kzalloc(sizeof(*ts_buff), GFP_KERNEL);
        if (!ts_buff)
-               return NULL;
+               return -ENOMEM;
 
        /* Allocate the user buffer */
        size = num_elements * sizeof(u64);
@@ -2242,7 +2125,7 @@ static struct hl_ts_buff *hl_ts_alloc_buff(struct hl_device *hdev, u32 num_eleme
                goto free_mem;
 
        ts_buff->user_buff_address = p;
-       ts_buff->user_buff_size = size;
+       buf->mappable_size = size;
 
        /* Allocate the internal kernel buffer */
        size = num_elements * sizeof(struct hl_user_pending_interrupt);
@@ -2253,15 +2136,25 @@ static struct hl_ts_buff *hl_ts_alloc_buff(struct hl_device *hdev, u32 num_eleme
        ts_buff->kernel_buff_address = p;
        ts_buff->kernel_buff_size = size;
 
-       return ts_buff;
+       buf->private = ts_buff;
+
+       return 0;
 
 free_user_buff:
        vfree(ts_buff->user_buff_address);
 free_mem:
        kfree(ts_buff);
-       return NULL;
+       return -ENOMEM;
 }
 
+static struct hl_mmap_mem_buf_behavior hl_ts_behavior = {
+       .topic = "TS",
+       .mem_id = HL_MMAP_TYPE_TS_BUFF,
+       .mmap = hl_ts_mmap,
+       .alloc = hl_ts_alloc_buf,
+       .release = ts_buff_release,
+};
+
 /**
  * allocate_timestamps_buffers() - allocate timestamps buffers
  * This function will allocate ts buffer that will later on be mapped to the user
@@ -2278,54 +2171,22 @@ free_mem:
  */
 static int allocate_timestamps_buffers(struct hl_fpriv *hpriv, struct hl_mem_in *args, u64 *handle)
 {
-       struct hl_ts_mgr *ts_mgr = &hpriv->ts_mem_mgr;
-       struct hl_device *hdev = hpriv->hdev;
-       struct hl_ts_buff *ts_buff;
-       int rc = 0;
+       struct hl_mem_mgr *mmg = &hpriv->mem_mgr;
+       struct hl_mmap_mem_buf *buf;
 
        if (args->num_of_elements > TS_MAX_ELEMENTS_NUM) {
-               dev_err(hdev->dev, "Num of elements exceeds Max allowed number (0x%x > 0x%x)\n",
+               dev_err(mmg->dev, "Num of elements exceeds Max allowed number (0x%x > 0x%x)\n",
                                args->num_of_elements, TS_MAX_ELEMENTS_NUM);
                return -EINVAL;
        }
 
-       /* Allocate ts buffer object
-        * This object will contain two buffers one that will be mapped to the user
-        * and another internal buffer for the driver use only, which won't be mapped
-        * to the user.
-        */
-       ts_buff = hl_ts_alloc_buff(hdev, args->num_of_elements);
-       if (!ts_buff) {
-               rc = -ENOMEM;
-               goto out_err;
-       }
-
-       spin_lock(&ts_mgr->ts_lock);
-       rc = idr_alloc(&ts_mgr->ts_handles, ts_buff, 1, 0, GFP_ATOMIC);
-       spin_unlock(&ts_mgr->ts_lock);
-       if (rc < 0) {
-               dev_err(hdev->dev, "Failed to allocate IDR for a new ts buffer\n");
-               goto release_ts_buff;
-       }
-
-       ts_buff->id = rc;
-       ts_buff->hdev = hdev;
-
-       kref_init(&ts_buff->refcount);
-
-       /* idr is 32-bit so we can safely OR it with a mask that is above 32 bit */
-       *handle = (u64) ts_buff->id | HL_MMAP_TYPE_TS_BUFF;
-       *handle <<= PAGE_SHIFT;
+       buf = hl_mmap_mem_buf_alloc(mmg, &hl_ts_behavior, GFP_KERNEL, &args->num_of_elements);
+       if (!buf)
+               return -ENOMEM;
 
-       dev_dbg(hdev->dev, "Created ts buff object handle(%u)\n", ts_buff->id);
+       *handle = buf->handle;
 
        return 0;
-
-release_ts_buff:
-       kref_put(&ts_buff->refcount, ts_buff_release);
-out_err:
-       *handle = 0;
-       return rc;
 }
 
 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
@@ -2587,9 +2448,7 @@ void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
        hl_debugfs_remove_userptr(hdev, userptr);
 
        if (userptr->dma_mapped)
-               hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
-                                                       userptr->sgt->nents,
-                                                       userptr->dir);
+               hdev->asic_funcs->hl_dma_unmap_sgtable(hdev, userptr->sgt, userptr->dir);
 
        unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
        kvfree(userptr->pages);
diff --git a/drivers/misc/habanalabs/common/memory_mgr.c b/drivers/misc/habanalabs/common/memory_mgr.c
new file mode 100644 (file)
index 0000000..ea5f2bd
--- /dev/null
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+/**
+ * hl_mmap_mem_buf_get - increase the buffer refcount and return a pointer to
+ *                        the buffer descriptor.
+ *
+ * @mmg: parent unified memory manager
+ * @handle: requested buffer handle
+ *
+ * Find the buffer in the store and return a pointer to its descriptor.
+ * Increase buffer refcount. If not found - return NULL.
+ */
+struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u64 handle)
+{
+       struct hl_mmap_mem_buf *buf;
+
+       spin_lock(&mmg->lock);
+       buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
+       if (!buf) {
+               spin_unlock(&mmg->lock);
+               dev_warn(mmg->dev,
+                        "Buff get failed, no match to handle %#llx\n", handle);
+               return NULL;
+       }
+       kref_get(&buf->refcount);
+       spin_unlock(&mmg->lock);
+       return buf;
+}
+
+/**
+ * hl_mmap_mem_buf_destroy - destroy the unused buffer
+ *
+ * @buf: memory manager buffer descriptor
+ *
+ * Internal function, used as a final step of buffer release. Shall be invoked
+ * only when the buffer is no longer in use (removed from idr). Will call the
+ * release callback (if applicable), and free the memory.
+ */
+static void hl_mmap_mem_buf_destroy(struct hl_mmap_mem_buf *buf)
+{
+       if (buf->behavior->release)
+               buf->behavior->release(buf);
+
+       kfree(buf);
+}
+
+/**
+ * hl_mmap_mem_buf_release - release buffer
+ *
+ * @kref: kref that reached 0.
+ *
+ * Internal function, used as a kref release callback, when the last user of
+ * the buffer is released. Shall be called from an interrupt context.
+ */
+static void hl_mmap_mem_buf_release(struct kref *kref)
+{
+       struct hl_mmap_mem_buf *buf =
+               container_of(kref, struct hl_mmap_mem_buf, refcount);
+
+       spin_lock(&buf->mmg->lock);
+       idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
+       spin_unlock(&buf->mmg->lock);
+
+       hl_mmap_mem_buf_destroy(buf);
+}
+
+/**
+ * hl_mmap_mem_buf_remove_idr_locked - remove handle from idr
+ *
+ * @kref: kref that reached 0.
+ *
+ * Internal function, used for kref put by handle. Assumes mmg lock is taken.
+ * Will remove the buffer from idr, without destroying it.
+ */
+static void hl_mmap_mem_buf_remove_idr_locked(struct kref *kref)
+{
+       struct hl_mmap_mem_buf *buf =
+               container_of(kref, struct hl_mmap_mem_buf, refcount);
+
+       idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
+}
+
+/**
+ * hl_mmap_mem_buf_put - decrease the reference to the buffer
+ *
+ * @buf: memory manager buffer descriptor
+ *
+ * Decrease the reference to the buffer, and release it if it was the last one.
+ * Shall be called from an interrupt context.
+ */
+int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf)
+{
+       return kref_put(&buf->refcount, hl_mmap_mem_buf_release);
+}
+
+/**
+ * hl_mmap_mem_buf_put_handle - decrease the reference to the buffer with the
+ *                              given handle.
+ *
+ * @mmg: parent unified memory manager
+ * @handle: requested buffer handle
+ *
+ * Decrease the reference to the buffer, and release it if it was the last one.
+ * Shall not be called from an interrupt context. Return -EINVAL if handle was
+ * not found, else return the put outcome (0 or 1).
+ */
+int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle)
+{
+       struct hl_mmap_mem_buf *buf;
+
+       spin_lock(&mmg->lock);
+       buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
+       if (!buf) {
+               spin_unlock(&mmg->lock);
+               dev_dbg(mmg->dev,
+                        "Buff put failed, no match to handle %#llx\n", handle);
+               return -EINVAL;
+       }
+
+       if (kref_put(&buf->refcount, hl_mmap_mem_buf_remove_idr_locked)) {
+               spin_unlock(&mmg->lock);
+               hl_mmap_mem_buf_destroy(buf);
+               return 1;
+       }
+
+       spin_unlock(&mmg->lock);
+       return 0;
+}
+
+/**
+ * hl_mmap_mem_buf_alloc - allocate a new mappable buffer
+ *
+ * @mmg: parent unified memory manager
+ * @behavior: behavior object describing this buffer polymorphic behavior
+ * @gfp: gfp flags to use for the memory allocations
+ * @args: additional args passed to behavior->alloc
+ *
+ * Allocate and register a new memory buffer inside the given memory manager.
+ * Return the pointer to the new buffer on success or NULL on failure.
+ */
+struct hl_mmap_mem_buf *
+hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
+                     struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
+                     void *args)
+{
+       struct hl_mmap_mem_buf *buf;
+       int rc;
+
+       buf = kzalloc(sizeof(*buf), gfp);
+       if (!buf)
+               return NULL;
+
+       spin_lock(&mmg->lock);
+       rc = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC);
+       spin_unlock(&mmg->lock);
+       if (rc < 0) {
+               dev_err(mmg->dev,
+                       "%s: Failed to allocate IDR for a new buffer, rc=%d\n",
+                       behavior->topic, rc);
+               goto free_buf;
+       }
+
+       buf->mmg = mmg;
+       buf->behavior = behavior;
+       buf->handle = (((u64)rc | buf->behavior->mem_id) << PAGE_SHIFT);
+       kref_init(&buf->refcount);
+
+       rc = buf->behavior->alloc(buf, gfp, args);
+       if (rc) {
+               dev_err(mmg->dev, "%s: Failure in buffer alloc callback %d\n",
+                       behavior->topic, rc);
+               goto remove_idr;
+       }
+
+       return buf;
+
+remove_idr:
+       spin_lock(&mmg->lock);
+       idr_remove(&mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
+       spin_unlock(&mmg->lock);
+free_buf:
+       kfree(buf);
+       return NULL;
+}
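
For illustration, a minimal sketch of how a buffer type plugs into this manager, modeled on the TS buffer conversion earlier in this patch. The my_buf_* names, the payload struct and the create helper are hypothetical; only the hl_mmap_mem_buf_behavior callbacks, hl_mmap_mem_buf_alloc() and the handle encoding come from the code above, and the sketch assumes the habanalabs.h definitions added by this series.

/* Illustrative sketch only - not part of the patch. A hypothetical buffer
 * type built on the unified memory manager; a real user would define its own
 * HL_MMAP_TYPE_* bits instead of reusing HL_MMAP_TYPE_TS_BUFF as a placeholder.
 */
struct my_buf_payload {
        void *kernel_va;                        /* driver-side backing storage */
};

static int my_buf_alloc(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
        u32 size = *(u32 *)args;                /* caller passes the size via args */
        struct my_buf_payload *p;

        p = kzalloc(sizeof(*p), gfp);
        if (!p)
                return -ENOMEM;

        p->kernel_va = vzalloc(size);
        if (!p->kernel_va) {
                kfree(p);
                return -ENOMEM;
        }

        buf->mappable_size = size;              /* checked against the vma size on mmap */
        buf->private = p;
        return 0;
}

static int my_buf_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args)
{
        struct my_buf_payload *p = buf->private;

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE;
        return remap_vmalloc_range(vma, p->kernel_va, 0);
}

static void my_buf_release(struct hl_mmap_mem_buf *buf)
{
        struct my_buf_payload *p = buf->private;

        vfree(p->kernel_va);
        kfree(p);
}

static struct hl_mmap_mem_buf_behavior my_buf_behavior = {
        .topic = "EXAMPLE",
        .mem_id = HL_MMAP_TYPE_TS_BUFF,         /* placeholder type bits */
        .mmap = my_buf_mmap,
        .alloc = my_buf_alloc,
        .release = my_buf_release,
};

/* The returned handle already encodes (idr id | mem_id) << PAGE_SHIFT, so it
 * can be handed to user space and later resolved by hl_mem_mgr_mmap() from
 * vma->vm_pgoff, or dropped with hl_mmap_mem_buf_put_handle().
 */
static int my_buf_create(struct hl_mem_mgr *mmg, u32 size, u64 *handle)
{
        struct hl_mmap_mem_buf *buf;

        buf = hl_mmap_mem_buf_alloc(mmg, &my_buf_behavior, GFP_KERNEL, &size);
        if (!buf)
                return -ENOMEM;

        *handle = buf->handle;
        return 0;
}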
+
+/**
+ * hl_mmap_mem_buf_vm_close - handle mmap close
+ *
+ * @vma: the vma object for which mmap was closed.
+ *
+ * Put the memory buffer if it is no longer mapped.
+ */
+static void hl_mmap_mem_buf_vm_close(struct vm_area_struct *vma)
+{
+       struct hl_mmap_mem_buf *buf =
+               (struct hl_mmap_mem_buf *)vma->vm_private_data;
+       long new_mmap_size;
+
+       new_mmap_size = buf->real_mapped_size - (vma->vm_end - vma->vm_start);
+
+       if (new_mmap_size > 0) {
+               buf->real_mapped_size = new_mmap_size;
+               return;
+       }
+
+       atomic_set(&buf->mmap, 0);
+       hl_mmap_mem_buf_put(buf);
+       vma->vm_private_data = NULL;
+}
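
To make the accounting above concrete with hypothetical numbers: if real_mapped_size is 16KB and close runs on an 8KB vma, new_mmap_size is 16KB - 8KB = 8KB, so the size is merely shrunk and the reference is kept; only when the remaining 8KB is unmapped does new_mmap_size drop to zero, at which point the mmap flag is cleared and hl_mmap_mem_buf_put() releases the reference that hl_mmap_mem_buf_get() took in hl_mem_mgr_mmap().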
+
+static const struct vm_operations_struct hl_mmap_mem_buf_vm_ops = {
+       .close = hl_mmap_mem_buf_vm_close
+};
+
+/**
+ * hl_mem_mgr_mmap - map the given buffer to the user
+ *
+ * @mmg: unified memory manager
+ * @vma: the vma object to map the buffer into.
+ * @args: additional args passed to behavior->mmap
+ *
+ * Map the buffer specified by the vma->vm_pgoff to the given vma.
+ */
+int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
+                   void *args)
+{
+       struct hl_mmap_mem_buf *buf;
+       u64 user_mem_size;
+       u64 handle;
+       int rc;
+
+       /* We use the page offset to hold the idr and thus we need to clear
+        * it before doing the mmap itself
+        */
+       handle = vma->vm_pgoff << PAGE_SHIFT;
+       vma->vm_pgoff = 0;
+
+       /* Reference was taken here */
+       buf = hl_mmap_mem_buf_get(mmg, handle);
+       if (!buf) {
+               dev_err(mmg->dev,
+                       "Memory mmap failed, no match to handle %#llx\n", handle);
+               return -EINVAL;
+       }
+
+       /* Validation check */
+       user_mem_size = vma->vm_end - vma->vm_start;
+       if (user_mem_size != ALIGN(buf->mappable_size, PAGE_SIZE)) {
+               dev_err(mmg->dev,
+                       "%s: Memory mmap failed, mmap VM size 0x%llx != 0x%llx allocated physical mem size\n",
+                       buf->behavior->topic, user_mem_size, buf->mappable_size);
+               rc = -EINVAL;
+               goto put_mem;
+       }
+
+#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
+       if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
+                      user_mem_size)) {
+#else
+       if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
+                      user_mem_size)) {
+#endif
+               dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
+                       buf->behavior->topic, vma->vm_start);
+
+               rc = -EINVAL;
+               goto put_mem;
+       }
+
+       if (atomic_cmpxchg(&buf->mmap, 0, 1)) {
+               dev_err(mmg->dev,
+                       "%s, Memory mmap failed, already mmaped to user\n",
+                       buf->behavior->topic);
+               rc = -EINVAL;
+               goto put_mem;
+       }
+
+       vma->vm_ops = &hl_mmap_mem_buf_vm_ops;
+
+       /* Note: We're transferring the memory reference to vma->vm_private_data here. */
+
+       vma->vm_private_data = buf;
+
+       rc = buf->behavior->mmap(buf, vma, args);
+       if (rc) {
+               atomic_set(&buf->mmap, 0);
+               goto put_mem;
+       }
+
+       buf->real_mapped_size = buf->mappable_size;
+       vma->vm_pgoff = handle >> PAGE_SHIFT;
+
+       return 0;
+
+put_mem:
+       hl_mmap_mem_buf_put(buf);
+       return rc;
+}
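
A sketch of how a driver mmap entry point can hand off to this function, assuming a file-private structure that embeds an hl_mem_mgr (as hpriv->mem_mgr is used by allocate_timestamps_buffers() above). The fops wiring and the struct name are hypothetical, and the driver presumably dispatches on the type bits encoded in the offset before reaching this point; the sketch skips that step.

/* Illustrative sketch only. A hypothetical .mmap hook delegating to the
 * unified memory manager; buffer lookup, size validation and the behavior's
 * mmap callback all happen inside hl_mem_mgr_mmap().
 */
struct my_fd_ctx {
        struct hl_mem_mgr mem_mgr;
};

static int my_driver_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct my_fd_ctx *ctx = filp->private_data;

        /* vma->vm_pgoff still carries the handle (shifted right by PAGE_SHIFT) */
        return hl_mem_mgr_mmap(&ctx->mem_mgr, vma, NULL);
}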
+
+/**
+ * hl_mem_mgr_init - initialize unified memory manager
+ *
+ * @dev: owner device pointer
+ * @mmg: structure to initialize
+ *
+ * Initialize an instance of unified memory manager
+ */
+void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
+{
+       mmg->dev = dev;
+       spin_lock_init(&mmg->lock);
+       idr_init(&mmg->handles);
+}
+
+/**
+ * hl_mem_mgr_fini - release unified memory manager
+ *
+ * @mmg: parent unified memory manager
+ *
+ * Release the unified memory manager. Shall be called from an interrupt context.
+ */
+void hl_mem_mgr_fini(struct hl_mem_mgr *mmg)
+{
+       struct hl_mmap_mem_buf *buf;
+       struct idr *idp;
+       const char *topic;
+       u32 id;
+
+       idp = &mmg->handles;
+
+       idr_for_each_entry(idp, buf, id) {
+               topic = buf->behavior->topic;
+               if (hl_mmap_mem_buf_put(buf) != 1)
+                       dev_err(mmg->dev,
+                               "%s: Buff handle %u for CTX is still alive\n",
+                               topic, id);
+       }
+
+       /* TODO: can it happen that some buffer is still in use at this point? */
+
+       idr_destroy(&mmg->handles);
+}
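
And the manager lifecycle, sketched for the same hypothetical per-fd context; only hl_mem_mgr_init() and hl_mem_mgr_fini() are from this file.

/* Illustrative sketch only: pair the manager with the open/close of the
 * object that owns it, e.g. a per-fd private structure.
 */
static void my_fd_ctx_open(struct my_fd_ctx *ctx, struct device *dev)
{
        hl_mem_mgr_init(dev, &ctx->mem_mgr);
}

static void my_fd_ctx_close(struct my_fd_ctx *ctx)
{
        /* complains about, and drops, any buffer handles still alive,
         * then destroys the idr
         */
        hl_mem_mgr_fini(&ctx->mem_mgr);
}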
index 810b734..f373471 100644 (file)
@@ -9,6 +9,20 @@
 
 #include "../habanalabs.h"
 
+/**
+ * hl_mmu_get_funcs() - get MMU functions structure
+ * @hdev: habanalabs device structure.
+ * @pgt_residency: page table residency.
+ * @is_dram_addr: true if we need HMMU functions
+ *
+ * @return appropriate MMU functions structure
+ */
+static struct hl_mmu_funcs *hl_mmu_get_funcs(struct hl_device *hdev, int pgt_residency,
+                                                                       bool is_dram_addr)
+{
+       return &hdev->mmu_func[pgt_residency];
+}
+
 bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -122,6 +136,53 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx)
 }
 
 /*
+ * hl_mmu_get_real_page_size - get real page size to use in map/unmap operation
+ *
+ * @hdev: pointer to device data.
+ * @mmu_prop: MMU properties.
+ * @page_size: page size
+ * @real_page_size: set here the actual page size to use for the operation
+ * @is_dram_addr: true if DRAM address, otherwise false.
+ *
+ * @return 0 on success, otherwise non 0 error code
+ *
+ * note that this is a general implementation that can fit most MMU archs. but as this is used as an
+ * MMU function:
+ * 1. it shall not be called directly - only from the mmu_func structure instance
+ * 2. each MMU may modify the implementation internally
+ */
+int hl_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
+                               u32 page_size, u32 *real_page_size, bool is_dram_addr)
+{
+       /*
+        * The H/W handles mapping of specific page sizes. Hence if the page
+        * size is bigger, we break it to sub-pages and map them separately.
+        */
+       if ((page_size % mmu_prop->page_size) == 0) {
+               *real_page_size = mmu_prop->page_size;
+               return 0;
+       }
+
+       dev_err(hdev->dev, "page size of %u is not %uKB aligned, can't map\n",
+                                               page_size, mmu_prop->page_size >> 10);
+
+       return -EFAULT;
+}
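
For example, assuming an MMU page size of 4KB: a 64KB mapping request passes the modulo check, *real_page_size is set to 4KB, and the map/unmap callers derive npages = 64KB / 4KB = 16 sub-pages to walk one by one; a 6KB request fails the check and returns -EFAULT with the "not 4KB aligned" message. ASIC-specific overrides of this callback can relax the check, e.g. for DRAM page sizes that differ from the MMU page size, as the removed inline code in hl_mmu_unmap_page()/hl_mmu_map_page() below used to do.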
+
+static struct hl_mmu_properties *hl_mmu_get_prop(struct hl_device *hdev, u32 page_size,
+                                                       bool is_dram_addr)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+       if (is_dram_addr)
+               return &prop->dmmu;
+       else if ((page_size % prop->pmmu_huge.page_size) == 0)
+               return &prop->pmmu_huge;
+
+       return &prop->pmmu;
+}
+
+/*
  * hl_mmu_unmap_page - unmaps a virtual addr
  *
  * @ctx: pointer to the context structure
@@ -142,60 +203,35 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx)
  * For optimization reasons PCI flush may be requested once after unmapping of
  * large area.
  */
-int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
-               bool flush_pte)
+int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flush_pte)
 {
        struct hl_device *hdev = ctx->hdev;
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct hl_mmu_properties *mmu_prop;
-       u64 real_virt_addr;
+       struct hl_mmu_funcs *mmu_funcs;
+       int i, pgt_residency, rc = 0;
        u32 real_page_size, npages;
-       int i, rc = 0, pgt_residency;
+       u64 real_virt_addr;
        bool is_dram_addr;
 
        if (!hdev->mmu_enable)
                return 0;
 
        is_dram_addr = hl_is_dram_va(hdev, virt_addr);
-
-       if (is_dram_addr)
-               mmu_prop = &prop->dmmu;
-       else if ((page_size % prop->pmmu_huge.page_size) == 0)
-               mmu_prop = &prop->pmmu_huge;
-       else
-               mmu_prop = &prop->pmmu;
+       mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr);
 
        pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
-       /*
-        * The H/W handles mapping of specific page sizes. Hence if the page
-        * size is bigger, we break it to sub-pages and unmap them separately.
-        */
-       if ((page_size % mmu_prop->page_size) == 0) {
-               real_page_size = mmu_prop->page_size;
-       } else {
-               /*
-                * MMU page size may differ from DRAM page size.
-                * In such case work with the DRAM page size and let the MMU
-                * scrambling routine to handle this mismatch when
-                * calculating the address to remove from the MMU page table
-                */
-               if (is_dram_addr && ((page_size % prop->dram_page_size) == 0)) {
-                       real_page_size = prop->dram_page_size;
-               } else {
-                       dev_err(hdev->dev,
-                               "page size of %u is not %uKB aligned, can't unmap\n",
-                               page_size, mmu_prop->page_size >> 10);
+       mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
 
-                       return -EFAULT;
-               }
-       }
+       rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size,
+                                                       is_dram_addr);
+       if (rc)
+               return rc;
 
        npages = page_size / real_page_size;
        real_virt_addr = virt_addr;
 
        for (i = 0 ; i < npages ; i++) {
-               rc = hdev->mmu_func[pgt_residency].unmap(ctx,
-                                               real_virt_addr, is_dram_addr);
+               rc = mmu_funcs->unmap(ctx, real_virt_addr, is_dram_addr);
                if (rc)
                        break;
 
@@ -203,7 +239,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
        }
 
        if (flush_pte)
-               hdev->mmu_func[pgt_residency].flush(ctx);
+               mmu_funcs->flush(ctx);
 
        return rc;
 }
@@ -230,15 +266,15 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
  * For optimization reasons PCI flush may be requested once after mapping of
  * large area.
  */
-int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
-               u32 page_size, bool flush_pte)
+int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
+                       bool flush_pte)
 {
+       int i, rc, pgt_residency, mapped_cnt = 0;
        struct hl_device *hdev = ctx->hdev;
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct hl_mmu_properties *mmu_prop;
        u64 real_virt_addr, real_phys_addr;
+       struct hl_mmu_funcs *mmu_funcs;
        u32 real_page_size, npages;
-       int i, rc, pgt_residency, mapped_cnt = 0;
        bool is_dram_addr;
 
 
@@ -246,40 +282,15 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
                return 0;
 
        is_dram_addr = hl_is_dram_va(hdev, virt_addr);
-
-       if (is_dram_addr)
-               mmu_prop = &prop->dmmu;
-       else if ((page_size % prop->pmmu_huge.page_size) == 0)
-               mmu_prop = &prop->pmmu_huge;
-       else
-               mmu_prop = &prop->pmmu;
+       mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr);
 
        pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
+       mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
 
-       /*
-        * The H/W handles mapping of specific page sizes. Hence if the page
-        * size is bigger, we break it to sub-pages and map them separately.
-        */
-       if ((page_size % mmu_prop->page_size) == 0) {
-               real_page_size = mmu_prop->page_size;
-       } else if (is_dram_addr && ((page_size % prop->dram_page_size) == 0) &&
-                       (prop->dram_page_size < mmu_prop->page_size)) {
-               /*
-                * MMU page size may differ from DRAM page size.
-                * In such case work with the DRAM page size and let the MMU
-                * scrambling routine handle this mismatch when calculating
-                * the address to place in the MMU page table. (in that case
-                * also make sure that the dram_page_size smaller than the
-                * mmu page size)
-                */
-               real_page_size = prop->dram_page_size;
-       } else {
-               dev_err(hdev->dev,
-                       "page size of %u is not %uKB aligned, can't map\n",
-                       page_size, mmu_prop->page_size >> 10);
-
-               return -EFAULT;
-       }
+       rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size,
+                                                       is_dram_addr);
+       if (rc)
+               return rc;
 
        /*
         * Verify that the phys and virt addresses are aligned with the
@@ -302,9 +313,8 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
        real_phys_addr = phys_addr;
 
        for (i = 0 ; i < npages ; i++) {
-               rc = hdev->mmu_func[pgt_residency].map(ctx,
-                                               real_virt_addr, real_phys_addr,
-                                               real_page_size, is_dram_addr);
+               rc = mmu_funcs->map(ctx, real_virt_addr, real_phys_addr, real_page_size,
+                                                                               is_dram_addr);
                if (rc)
                        goto err;
 
@@ -314,22 +324,21 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
        }
 
        if (flush_pte)
-               hdev->mmu_func[pgt_residency].flush(ctx);
+               mmu_funcs->flush(ctx);
 
        return 0;
 
 err:
        real_virt_addr = virt_addr;
        for (i = 0 ; i < mapped_cnt ; i++) {
-               if (hdev->mmu_func[pgt_residency].unmap(ctx,
-                                               real_virt_addr, is_dram_addr))
+               if (mmu_funcs->unmap(ctx, real_virt_addr, is_dram_addr))
                        dev_warn_ratelimited(hdev->dev,
                                "failed to unmap va: 0x%llx\n", real_virt_addr);
 
                real_virt_addr += real_page_size;
        }
 
-       hdev->mmu_func[pgt_residency].flush(ctx);
+       mmu_funcs->flush(ctx);
 
        return rc;
 }
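
As a worked example of the flush_pte optimization mentioned in the comments above, a hypothetical caller can map a physically contiguous region page by page and request the PTE flush only once, on the last page. Only hl_mmu_map_page() and hl_mmu_unmap_page() are real functions from this file; the helper itself is made up and assumes size is a multiple of page_size.

/* Illustrative sketch only: map 'size' bytes at 'virt_addr' -> 'phys_addr'
 * using fixed-size pages, flushing PTEs only after the last one is written.
 */
static int example_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
                                  u64 phys_addr, u32 size, u32 page_size)
{
        u32 off;
        int rc;

        for (off = 0 ; off < size ; off += page_size) {
                bool flush_pte = (off + page_size) >= size;     /* only on the last page */

                rc = hl_mmu_map_page(ctx, virt_addr + off, phys_addr + off,
                                        page_size, flush_pte);
                if (rc)
                        goto unmap;
        }

        return 0;

unmap:
        /* roll back whatever was mapped so far, flushing on the final unmap */
        for (; off >= page_size ; off -= page_size)
                hl_mmu_unmap_page(ctx, virt_addr + off - page_size, page_size,
                                        (off - page_size) == 0);

        return rc;
}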
@@ -480,11 +489,9 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
                                                struct hl_mmu_hop_info *hops,
                                                u64 *phys_addr)
 {
-       struct hl_device *hdev = ctx->hdev;
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
        u64 offset_mask, addr_mask, hop_shift, tmp_phys_addr;
-       u32 hop0_shift_off;
-       void *p;
+       struct hl_mmu_properties *mmu_prop;
 
        /* last hop holds the phys address and flags */
        if (hops->unscrambled_paddr)
@@ -493,11 +500,11 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
                tmp_phys_addr = hops->hop_info[hops->used_hops - 1].hop_pte_val;
 
        if (hops->range_type == HL_VA_RANGE_TYPE_HOST_HUGE)
-               p = &prop->pmmu_huge;
+               mmu_prop = &prop->pmmu_huge;
        else if (hops->range_type == HL_VA_RANGE_TYPE_HOST)
-               p = &prop->pmmu;
+               mmu_prop = &prop->pmmu;
        else /* HL_VA_RANGE_TYPE_DRAM */
-               p = &prop->dmmu;
+               mmu_prop = &prop->dmmu;
 
        if ((hops->range_type == HL_VA_RANGE_TYPE_DRAM) &&
                        !is_power_of_2(prop->dram_page_size)) {
@@ -508,7 +515,7 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
                /*
                 * Bit arithmetics cannot be used for non power of two page
                 * sizes. In addition, since bit arithmetics is not used,
-                * we cannot ignore dram base. All that shall be considerd.
+                * we cannot ignore dram base. All that shall be considered.
                 */
 
                dram_page_size = prop->dram_page_size;
@@ -526,10 +533,7 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
                 * structure in order to determine the right masks
                 * for the page offset.
                 */
-               hop0_shift_off = offsetof(struct hl_mmu_properties, hop0_shift);
-               p = (char *)p + hop0_shift_off;
-               p = (char *)p + ((hops->used_hops - 1) * sizeof(u64));
-               hop_shift = *(u64 *)p;
+               hop_shift = mmu_prop->hop_shifts[hops->used_hops - 1];
                offset_mask = (1ull << hop_shift) - 1;
                addr_mask = ~(offset_mask);
                *phys_addr = (tmp_phys_addr & addr_mask) |
@@ -557,40 +561,39 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
                        struct hl_mmu_hop_info *hops)
 {
        struct hl_device *hdev = ctx->hdev;
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       struct asic_fixed_properties *prop;
        struct hl_mmu_properties *mmu_prop;
-       int rc;
+       struct hl_mmu_funcs *mmu_funcs;
+       int pgt_residency, rc;
        bool is_dram_addr;
 
        if (!hdev->mmu_enable)
                return -EOPNOTSUPP;
 
+       prop = &hdev->asic_prop;
        hops->scrambled_vaddr = virt_addr;      /* assume no scrambling */
 
        is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
-                                               prop->dmmu.start_addr,
-                                               prop->dmmu.end_addr);
+                                                               prop->dmmu.start_addr,
+                                                               prop->dmmu.end_addr);
 
-       /* host-residency is the same in PMMU and HPMMU, use one of them */
+       /* host-residency is the same in PMMU and PMMU huge, no need to distinguish here */
        mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+       pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
+       mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
 
        mutex_lock(&ctx->mmu_lock);
-
-       if (mmu_prop->host_resident)
-               rc = hdev->mmu_func[MMU_HR_PGT].get_tlb_info(ctx,
-                                                       virt_addr, hops);
-       else
-               rc = hdev->mmu_func[MMU_DR_PGT].get_tlb_info(ctx,
-                                                       virt_addr, hops);
-
+       rc = mmu_funcs->get_tlb_info(ctx, virt_addr, hops);
        mutex_unlock(&ctx->mmu_lock);
 
+       if (rc)
+               return rc;
+
        /* add page offset to physical address */
        if (hops->unscrambled_paddr)
-               hl_mmu_pa_page_with_offset(ctx, virt_addr, hops,
-                                       &hops->unscrambled_paddr);
+               hl_mmu_pa_page_with_offset(ctx, virt_addr, hops, &hops->unscrambled_paddr);
 
-       return rc;
+       return 0;
 }
 
 int hl_mmu_if_set_funcs(struct hl_device *hdev)
@@ -662,6 +665,55 @@ int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
        return rc;
 }
 
+static void hl_mmu_prefetch_work_function(struct work_struct *work)
+{
+       struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, pf_work);
+       struct hl_ctx *ctx = pfw->ctx;
+
+       if (!hl_device_operational(ctx->hdev, NULL))
+               goto put_ctx;
+
+       mutex_lock(&ctx->mmu_lock);
+
+       ctx->hdev->asic_funcs->mmu_prefetch_cache_range(ctx, pfw->flags, pfw->asid,
+                                                               pfw->va, pfw->size);
+
+       mutex_unlock(&ctx->mmu_lock);
+
+put_ctx:
+       /*
+        * context was taken in the common mmu prefetch function- see comment there about
+        * context handling.
+        */
+       hl_ctx_put(ctx);
+       kfree(pfw);
+}
+
+int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size)
+{
+       struct hl_prefetch_work *handle_pf_work;
+
+       handle_pf_work = kmalloc(sizeof(*handle_pf_work), GFP_KERNEL);
+       if (!handle_pf_work)
+               return -ENOMEM;
+
+       INIT_WORK(&handle_pf_work->pf_work, hl_mmu_prefetch_work_function);
+       handle_pf_work->ctx = ctx;
+       handle_pf_work->va = va;
+       handle_pf_work->size = size;
+       handle_pf_work->flags = flags;
+       handle_pf_work->asid = asid;
+
+       /*
+        * as actual prefetch is done in a WQ we must get the context (and put it
+        * at the end of the work function)
+        */
+       hl_ctx_get(ctx);
+       queue_work(ctx->hdev->pf_wq, &handle_pf_work->pf_work);
+
+       return 0;
+}
+
 u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
 {
        return (curr_pte & PAGE_PRESENT_MASK) ? (curr_pte & HOP_PHYS_ADDR_MASK) : ULLONG_MAX;
@@ -670,6 +722,7 @@ u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
 /**
  * hl_mmu_get_hop_pte_phys_addr() - extract PTE address from HOP
  * @ctx: pointer to the context structure to initialize.
+ * @mmu_prop: MMU properties.
  * @hop_idx: HOP index.
  * @hop_addr: HOP address.
 * @virt_addr: virtual address for the translation.
@@ -686,33 +739,8 @@ u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *m
                return U64_MAX;
        }
 
-       /* currently max number of HOPs is 6 */
-       switch (hop_idx) {
-       case 0:
-               mask = mmu_prop->hop0_mask;
-               shift = mmu_prop->hop0_shift;
-               break;
-       case 1:
-               mask = mmu_prop->hop1_mask;
-               shift = mmu_prop->hop1_shift;
-               break;
-       case 2:
-               mask = mmu_prop->hop2_mask;
-               shift = mmu_prop->hop2_shift;
-               break;
-       case 3:
-               mask = mmu_prop->hop3_mask;
-               shift = mmu_prop->hop3_shift;
-               break;
-       case 4:
-               mask = mmu_prop->hop4_mask;
-               shift = mmu_prop->hop4_shift;
-               break;
-       default:
-               mask = mmu_prop->hop5_mask;
-               shift = mmu_prop->hop5_shift;
-               break;
-       }
+       shift = mmu_prop->hop_shifts[hop_idx];
+       mask = mmu_prop->hop_masks[hop_idx];
 
        return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
 }
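
To put numbers on the hop_shifts/hop_masks lookup that replaces the old switch: with hypothetical values hop_shifts[2] = 30, hop_masks[2] covering bits 38..30, and an 8-byte mmu_pte_size, a virtual address whose bits 38..30 hold the index 5 gives ((virt_addr & mask) >> shift) = 5, so the PTE sits at hop_addr + 8 * 5 = hop_addr + 0x28, exactly the offset the removed per-hop hop2_mask/hop2_shift pair would have produced.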
index d03786d..e2d91a6 100644 (file)
@@ -10,6 +10,8 @@
 
 #include <linux/slab.h>
 
+#define MMU_V1_MAX_HOPS        (MMU_HOP4 + 1)
+
 static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
 
 static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
@@ -170,51 +172,15 @@ static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
        return num_of_ptes_left;
 }
 
-static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
-                                       u64 virt_addr, u64 mask, u64 shift)
-{
-       return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
-                       ((virt_addr & mask) >> shift);
-}
-
-static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
-                                       struct hl_mmu_properties *mmu_prop,
-                                       u64 hop_addr, u64 vaddr)
-{
-       return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask,
-                                       mmu_prop->hop0_shift);
-}
-
-static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
-                                       struct hl_mmu_properties *mmu_prop,
-                                       u64 hop_addr, u64 vaddr)
-{
-       return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask,
-                                       mmu_prop->hop1_shift);
-}
-
-static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
-                                       struct hl_mmu_properties *mmu_prop,
-                                       u64 hop_addr, u64 vaddr)
+static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
+                                       u64 *hop_addr_arr, u64 virt_addr, enum mmu_hop_num hop_idx)
 {
-       return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask,
-                                       mmu_prop->hop2_shift);
-}
+       u64 mask, shift;
 
-static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
-                                       struct hl_mmu_properties *mmu_prop,
-                                       u64 hop_addr, u64 vaddr)
-{
-       return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask,
-                                       mmu_prop->hop3_shift);
-}
-
-static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
-                                       struct hl_mmu_properties *mmu_prop,
-                                       u64 hop_addr, u64 vaddr)
-{
-       return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask,
-                                       mmu_prop->hop4_shift);
+       mask = mmu_prop->hop_masks[hop_idx];
+       shift = mmu_prop->hop_shifts[hop_idx];
+       return hop_addr_arr[hop_idx] +
+                       ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
 }
 
 static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
@@ -516,74 +482,50 @@ static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx)
        }
 }
 
-static int _hl_mmu_v1_unmap(struct hl_ctx *ctx,
+static int hl_mmu_v1_unmap(struct hl_ctx *ctx,
                                u64 virt_addr, bool is_dram_addr)
 {
+       u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct hl_mmu_properties *mmu_prop;
-       u64 hop0_addr = 0, hop0_pte_addr = 0,
-               hop1_addr = 0, hop1_pte_addr = 0,
-               hop2_addr = 0, hop2_pte_addr = 0,
-               hop3_addr = 0, hop3_pte_addr = 0,
-               hop4_addr = 0, hop4_pte_addr = 0,
-               curr_pte;
        bool is_huge, clear_hop3 = true;
+       int hop_idx;
 
        /* shifts and masks are the same in PMMU and HPMMU, use one of them */
        mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
 
-       hop0_addr = get_hop0_addr(ctx);
-       hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
-
-       curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
-
-       hop1_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
-
-       if (hop1_addr == ULLONG_MAX)
-               goto not_mapped;
-
-       hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
-
-       curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
-
-       hop2_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
-
-       if (hop2_addr == ULLONG_MAX)
-               goto not_mapped;
-
-       hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
-
-       curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
-
-       hop3_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
-
-       if (hop3_addr == ULLONG_MAX)
-               goto not_mapped;
+       for (hop_idx = MMU_HOP0; hop_idx < MMU_HOP4; hop_idx++) {
+               if (hop_idx == MMU_HOP0) {
+                       hop_addr[hop_idx] = get_hop0_addr(ctx);
+               } else {
+                       hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+                       if (hop_addr[hop_idx] == ULLONG_MAX)
+                               goto not_mapped;
+               }
 
-       hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
+               hop_pte_addr[hop_idx] =
+                               get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
 
-       curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
+               curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
+       }
 
        is_huge = curr_pte & mmu_prop->last_mask;
 
        if (is_dram_addr && !is_huge) {
-               dev_err(hdev->dev,
-                               "DRAM unmapping should use huge pages only\n");
+               dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
                return -EFAULT;
        }
 
        if (!is_huge) {
-               hop4_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
-
-               if (hop4_addr == ULLONG_MAX)
+               hop_idx = MMU_HOP4;
+               hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+               if (hop_addr[hop_idx] == ULLONG_MAX)
                        goto not_mapped;
 
-               hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
-                                                       virt_addr);
-
-               curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
-
+               hop_pte_addr[hop_idx] =
+                               get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
+               curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
                clear_hop3 = false;
        }
 
@@ -605,39 +547,33 @@ static int _hl_mmu_v1_unmap(struct hl_ctx *ctx,
                        goto not_mapped;
                }
 
-               write_final_pte(ctx, hop3_pte_addr, default_pte);
-               put_pte(ctx, hop3_addr);
+               hop_idx = MMU_HOP3;
+               write_final_pte(ctx, hop_pte_addr[hop_idx], default_pte);
+               put_pte(ctx, hop_addr[hop_idx]);
        } else {
                if (!(curr_pte & PAGE_PRESENT_MASK))
                        goto not_mapped;
 
-               if (hop4_addr)
-                       clear_pte(ctx, hop4_pte_addr);
+               if (hop_addr[MMU_HOP4])
+                       clear_pte(ctx, hop_pte_addr[MMU_HOP4]);
                else
-                       clear_pte(ctx, hop3_pte_addr);
+                       clear_pte(ctx, hop_pte_addr[MMU_HOP3]);
 
-               if (hop4_addr && !put_pte(ctx, hop4_addr))
+               if (hop_addr[MMU_HOP4] && !put_pte(ctx, hop_addr[MMU_HOP4]))
                        clear_hop3 = true;
 
                if (!clear_hop3)
                        goto mapped;
 
-               clear_pte(ctx, hop3_pte_addr);
+               for (hop_idx = MMU_HOP3; hop_idx >= 0; hop_idx--) {
+                       clear_pte(ctx, hop_pte_addr[hop_idx]);
 
-               if (put_pte(ctx, hop3_addr))
-                       goto mapped;
+                       if (hop_idx == MMU_HOP0)
+                               break;
 
-               clear_pte(ctx, hop2_pte_addr);
-
-               if (put_pte(ctx, hop2_addr))
-                       goto mapped;
-
-               clear_pte(ctx, hop1_pte_addr);
-
-               if (put_pte(ctx, hop1_addr))
-                       goto mapped;
-
-               clear_pte(ctx, hop0_pte_addr);
+                       if (put_pte(ctx, hop_addr[hop_idx]))
+                               goto mapped;
+               }
        }
 
 mapped:
@@ -650,21 +586,15 @@ not_mapped:
        return -EINVAL;
 }
 
-static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+static int hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
                        u32 page_size, bool is_dram_addr)
 {
+       u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct hl_mmu_properties *mmu_prop;
-       u64 hop0_addr = 0, hop0_pte_addr = 0,
-               hop1_addr = 0, hop1_pte_addr = 0,
-               hop2_addr = 0, hop2_pte_addr = 0,
-               hop3_addr = 0, hop3_pte_addr = 0,
-               hop4_addr = 0, hop4_pte_addr = 0,
-               curr_pte = 0;
-       bool hop1_new = false, hop2_new = false, hop3_new = false,
-               hop4_new = false, is_huge;
-       int rc = -ENOMEM;
+       bool is_huge, hop_new[MMU_V1_MAX_HOPS] = {false};
+       int num_hops, hop_idx, prev_hop, rc = -ENOMEM;
 
        /*
         * This mapping function can map a page or a huge page. For huge page
@@ -684,39 +614,21 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
                is_huge = false;
        }
 
-       hop0_addr = get_hop0_addr(ctx);
-       hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
-       curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
-
-       hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
-       if (hop1_addr == ULLONG_MAX)
-               goto err;
-
-       hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
-       curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
-
-       hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
-       if (hop2_addr == ULLONG_MAX)
-               goto err;
-
-       hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
-       curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
+       num_hops = is_huge ? (MMU_V1_MAX_HOPS - 1) : MMU_V1_MAX_HOPS;
 
-       hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
-       if (hop3_addr == ULLONG_MAX)
-               goto err;
-
-       hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
-       curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
-
-       if (!is_huge) {
-               hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
-               if (hop4_addr == ULLONG_MAX)
-                       goto err;
+       for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++) {
+               if (hop_idx == MMU_HOP0) {
+                       hop_addr[hop_idx] = get_hop0_addr(ctx);
+               } else {
+                       hop_addr[hop_idx] =
+                                       get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[hop_idx]);
+                       if (hop_addr[hop_idx] == ULLONG_MAX)
+                               goto err;
+               }
 
-               hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
-                                                       virt_addr);
-               curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
+               hop_pte_addr[hop_idx] =
+                               get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
+               curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
        }
 
        if (hdev->dram_default_page_mapping && is_dram_addr) {
@@ -732,30 +644,22 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
                        goto err;
                }
 
-               if (hop1_new || hop2_new || hop3_new || hop4_new) {
-                       dev_err(hdev->dev,
-                               "DRAM mapping should not allocate more hops\n");
-                       rc = -EFAULT;
-                       goto err;
+               for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
+                       if (hop_new[hop_idx]) {
+                               dev_err(hdev->dev, "DRAM mapping should not allocate more hops\n");
+                               rc = -EFAULT;
+                               goto err;
+                       }
                }
        } else if (curr_pte & PAGE_PRESENT_MASK) {
                dev_err(hdev->dev,
                        "mapping already exists for virt_addr 0x%llx\n",
                                virt_addr);
 
-               dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
-                       *(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr);
-               dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
-                       *(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr);
-               dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
-                       *(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr);
-               dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
-                       *(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr);
-
-               if (!is_huge)
-                       dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
-                               *(u64 *) (uintptr_t) hop4_pte_addr,
-                               hop4_pte_addr);
+               for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++)
+                       dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n", hop_idx,
+                                       *(u64 *) (uintptr_t) hop_pte_addr[hop_idx],
+                                       hop_pte_addr[hop_idx]);
 
                rc = -EINVAL;
                goto err;
@@ -764,53 +668,28 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
        curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
                        | PAGE_PRESENT_MASK;
 
-       if (is_huge)
-               write_final_pte(ctx, hop3_pte_addr, curr_pte);
-       else
-               write_final_pte(ctx, hop4_pte_addr, curr_pte);
+       write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte);
 
-       if (hop1_new) {
-               curr_pte =
-                       (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
-               write_pte(ctx, hop0_pte_addr, curr_pte);
-       }
-       if (hop2_new) {
-               curr_pte =
-                       (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
-               write_pte(ctx, hop1_pte_addr, curr_pte);
-               get_pte(ctx, hop1_addr);
-       }
-       if (hop3_new) {
-               curr_pte =
-                       (hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
-               write_pte(ctx, hop2_pte_addr, curr_pte);
-               get_pte(ctx, hop2_addr);
-       }
+       for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
+               prev_hop = hop_idx - 1;
 
-       if (!is_huge) {
-               if (hop4_new) {
-                       curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) |
-                                       PAGE_PRESENT_MASK;
-                       write_pte(ctx, hop3_pte_addr, curr_pte);
-                       get_pte(ctx, hop3_addr);
+               if (hop_new[hop_idx]) {
+                       curr_pte = (hop_addr[hop_idx] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+                       write_pte(ctx, hop_pte_addr[prev_hop], curr_pte);
+                       if (hop_idx != MMU_HOP1)
+                               get_pte(ctx, hop_addr[prev_hop]);
                }
-
-               get_pte(ctx, hop4_addr);
-       } else {
-               get_pte(ctx, hop3_addr);
        }
 
+       get_pte(ctx, hop_addr[num_hops - 1]);
+
        return 0;
 
 err:
-       if (hop4_new)
-               free_hop(ctx, hop4_addr);
-       if (hop3_new)
-               free_hop(ctx, hop3_addr);
-       if (hop2_new)
-               free_hop(ctx, hop2_addr);
-       if (hop1_new)
-               free_hop(ctx, hop1_addr);
+       for (hop_idx = num_hops; hop_idx > MMU_HOP0; hop_idx--) {
+               if (hop_new[hop_idx])
+                       free_hop(ctx, hop_addr[hop_idx]);
+       }
 
        return rc;
 }
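
The refactor above replaces five sets of hopN_addr/hopN_pte_addr/hopN_new locals with arrays indexed by hop level, so allocation, linking and error unwinding each become one loop. A minimal standalone sketch of that pattern, with hypothetical helpers (alloc_table(), link_child(), free_table()) standing in for get_alloc_next_hop_addr(), write_pte()/get_pte() and free_hop(); only the control flow mirrors the hunk:

	/* Hypothetical helpers -- illustrative, not the driver's functions. */
	u64 alloc_table(bool *is_new);
	void link_child(u64 parent, u64 child);
	void free_table(u64 table);

	static int walk_and_link(u64 root, int num_levels)
	{
		u64 addr[MMU_V1_MAX_HOPS] = { root };
		bool is_new[MMU_V1_MAX_HOPS] = { false };
		int i;

		/* Allocate every missing level below the root. */
		for (i = 1; i < num_levels; i++) {
			addr[i] = alloc_table(&is_new[i]);
			if (addr[i] == ULLONG_MAX)
				goto err;
		}

		/* Link each newly allocated level into its parent. */
		for (i = 1; i < num_levels; i++)
			if (is_new[i])
				link_child(addr[i - 1], addr[i]);

		return 0;

	err:
		/* Undo only what this call allocated, deepest level first. */
		for (i = num_levels - 1; i > 0; i--)
			if (is_new[i])
				free_table(addr[i]);
		return -ENOMEM;
	}
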
@@ -928,8 +807,8 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
        mmu->fini = hl_mmu_v1_fini;
        mmu->ctx_init = hl_mmu_v1_ctx_init;
        mmu->ctx_fini = hl_mmu_v1_ctx_fini;
-       mmu->map = _hl_mmu_v1_map;
-       mmu->unmap = _hl_mmu_v1_unmap;
+       mmu->map = hl_mmu_v1_map;
+       mmu->unmap = hl_mmu_v1_unmap;
        mmu->flush = flush;
        mmu->swap_out = hl_mmu_v1_swap_out;
        mmu->swap_in = hl_mmu_v1_swap_in;
index bb9ce22..610acd4 100644 (file)
@@ -392,6 +392,7 @@ enum pci_region hl_get_pci_memory_region(struct hl_device *hdev, u64 addr)
  */
 int hl_pci_init(struct hl_device *hdev)
 {
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct pci_dev *pdev = hdev->pdev;
        int rc;
 
@@ -419,17 +420,14 @@ int hl_pci_init(struct hl_device *hdev)
        }
 
        /* Driver must sleep in order for FW to finish the iATU configuration */
-       if (hdev->asic_prop.iatu_done_by_fw) {
+       if (hdev->asic_prop.iatu_done_by_fw)
                usleep_range(2000, 3000);
-               hdev->asic_funcs->set_dma_mask_from_fw(hdev);
-       }
 
-       rc = dma_set_mask_and_coherent(&pdev->dev,
-                                       DMA_BIT_MASK(hdev->dma_mask));
+       rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(prop->dma_mask));
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to set dma mask to %d bits, error %d\n",
-                       hdev->dma_mask, rc);
+                       prop->dma_mask, rc);
                goto unmap_pci_bars;
        }
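
With set_dma_mask_from_fw() gone, the DMA mask comes straight from the fixed properties filled in by the ASIC code (48 bits for both Gaudi and Goya in the hunks below). A minimal sketch of that flow, using the same standard kernel APIs as the hunk; the helper name is illustrative:

	/* Sketch: derive the DMA mask from asic_fixed_properties instead of a
	 * per-ASIC callback that runs after iATU setup. */
	static int hl_set_dma_mask_sketch(struct pci_dev *pdev,
					  struct asic_fixed_properties *prop)
	{
		int rc;

		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(prop->dma_mask));
		if (rc)
			dev_err(&pdev->dev, "Failed to set %u-bit DMA mask, error %d\n",
				prop->dma_mask, rc);

		return rc;
	}
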
 
index 21c2b67..fba3222 100644 (file)
@@ -95,7 +95,7 @@
 
 #define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE  3
 
-#define GAUDI_ARB_WDT_TIMEOUT          0x1000000
+#define GAUDI_ARB_WDT_TIMEOUT          0xEE6b27FF /* 8 seconds */
 
 #define GAUDI_CLK_GATE_DEBUGFS_MASK    (\
                BIT(GAUDI_ENGINE_ID_MME_0) |\
@@ -557,6 +557,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
        }
 
        prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
+       prop->host_base_address = HOST_PHYS_BASE;
+       prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE;
        prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
        prop->collective_first_sob = 0;
        prop->collective_first_mon = 0;
@@ -595,18 +597,19 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
        prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
        prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
        prop->dram_page_size = PAGE_SIZE_2MB;
+       prop->device_mem_alloc_default_page_size = prop->dram_page_size;
        prop->dram_supports_virtual_memory = false;
 
-       prop->pmmu.hop0_shift = MMU_V1_1_HOP0_SHIFT;
-       prop->pmmu.hop1_shift = MMU_V1_1_HOP1_SHIFT;
-       prop->pmmu.hop2_shift = MMU_V1_1_HOP2_SHIFT;
-       prop->pmmu.hop3_shift = MMU_V1_1_HOP3_SHIFT;
-       prop->pmmu.hop4_shift = MMU_V1_1_HOP4_SHIFT;
-       prop->pmmu.hop0_mask = MMU_V1_1_HOP0_MASK;
-       prop->pmmu.hop1_mask = MMU_V1_1_HOP1_MASK;
-       prop->pmmu.hop2_mask = MMU_V1_1_HOP2_MASK;
-       prop->pmmu.hop3_mask = MMU_V1_1_HOP3_MASK;
-       prop->pmmu.hop4_mask = MMU_V1_1_HOP4_MASK;
+       prop->pmmu.hop_shifts[MMU_HOP0] = MMU_V1_1_HOP0_SHIFT;
+       prop->pmmu.hop_shifts[MMU_HOP1] = MMU_V1_1_HOP1_SHIFT;
+       prop->pmmu.hop_shifts[MMU_HOP2] = MMU_V1_1_HOP2_SHIFT;
+       prop->pmmu.hop_shifts[MMU_HOP3] = MMU_V1_1_HOP3_SHIFT;
+       prop->pmmu.hop_shifts[MMU_HOP4] = MMU_V1_1_HOP4_SHIFT;
+       prop->pmmu.hop_masks[MMU_HOP0] = MMU_V1_1_HOP0_MASK;
+       prop->pmmu.hop_masks[MMU_HOP1] = MMU_V1_1_HOP1_MASK;
+       prop->pmmu.hop_masks[MMU_HOP2] = MMU_V1_1_HOP2_MASK;
+       prop->pmmu.hop_masks[MMU_HOP3] = MMU_V1_1_HOP3_MASK;
+       prop->pmmu.hop_masks[MMU_HOP4] = MMU_V1_1_HOP4_MASK;
        prop->pmmu.start_addr = VA_HOST_SPACE_START;
        prop->pmmu.end_addr =
                        (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1;
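
With the per-hop shift and mask constants folded into hop_shifts[]/hop_masks[], a single helper can compute any hop's PTE address instead of five near-identical get_hopN_pte_addr() variants. A hedged sketch of that computation (the driver's real get_hop_pte_addr() may differ in detail; the 8-byte PTE size is an assumption here):

	static inline u64 hop_pte_addr_sketch(const struct hl_mmu_properties *mmu_prop,
					      const u64 *hop_addr, u64 virt_addr, int hop_idx)
	{
		/* Index of this hop's entry within its table... */
		u64 idx = (virt_addr & mmu_prop->hop_masks[hop_idx]) >>
			  mmu_prop->hop_shifts[hop_idx];

		/* ...times one 64-bit PTE per slot, offset from the table base. */
		return hop_addr[hop_idx] + idx * sizeof(u64);
	}
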
@@ -673,6 +676,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
 
        prop->set_max_power_on_device_init = true;
 
+       prop->dma_mask = 48;
+
        return 0;
 }
 
@@ -754,8 +759,6 @@ static int gaudi_init_iatu(struct hl_device *hdev)
        if (rc)
                goto done;
 
-       hdev->asic_funcs->set_dma_mask_from_fw(hdev);
-
        /* Outbound Region 0 - Point to Host */
        outbound_region.addr = HOST_PHYS_BASE;
        outbound_region.size = HOST_PHYS_SIZE;
@@ -1008,7 +1011,7 @@ free_job:
 
 release_cb:
        hl_cb_put(cb);
-       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+       hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
 
        return rc;
 }
@@ -1470,7 +1473,7 @@ static int gaudi_collective_wait_create_job(struct hl_device *hdev,
                job->patched_cb = NULL;
 
        job->job_cb_size = job->user_cb_size;
-       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+       hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
 
        /* increment refcount as for external queues we get completion */
        if (hw_queue_prop->type == QUEUE_TYPE_EXT)
@@ -2808,9 +2811,8 @@ static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
                WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
                                QM_ARB_ERR_MSG_EN_MASK);
 
-               /* Increase ARB WDT to support streams architecture */
-               WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset,
-                               GAUDI_ARB_WDT_TIMEOUT);
+               /* Set timeout to maximum */
+               WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset, GAUDI_ARB_WDT_TIMEOUT);
 
                WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
                                QMAN_EXTERNAL_MAKE_TRUSTED);
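
The new constant is easy to sanity-check against its comment, assuming the arbitration watchdog counts QMAN clock cycles at roughly 500 MHz (the clock rate is not part of this hunk):

	/* 0xEE6b27FF == 3,999,999,999 ticks;
	 * 3,999,999,999 / 500,000,000 ticks per second ~= 8 seconds,
	 * matching the "8 seconds" comment on GAUDI_ARB_WDT_TIMEOUT. */
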
@@ -2987,9 +2989,8 @@ static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id,
                WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
                                QM_ARB_ERR_MSG_EN_MASK);
 
-               /* Increase ARB WDT to support streams architecture */
-               WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset,
-                               GAUDI_ARB_WDT_TIMEOUT);
+               /* Set timeout to maximum */
+               WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset, GAUDI_ARB_WDT_TIMEOUT);
 
                WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0);
                WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
@@ -3124,9 +3125,8 @@ static void gaudi_init_mme_qman(struct hl_device *hdev, u32 mme_offset,
                WREG32(mmMME0_QM_ARB_ERR_MSG_EN + mme_offset,
                                QM_ARB_ERR_MSG_EN_MASK);
 
-               /* Increase ARB WDT to support streams architecture */
-               WREG32(mmMME0_QM_ARB_SLV_CHOISE_WDT + mme_offset,
-                               GAUDI_ARB_WDT_TIMEOUT);
+               /* Set timeout to maximum */
+               WREG32(mmMME0_QM_ARB_SLV_CHOISE_WDT + mme_offset, GAUDI_ARB_WDT_TIMEOUT);
 
                WREG32(mmMME0_QM_GLBL_CFG1 + mme_offset, 0);
                WREG32(mmMME0_QM_GLBL_PROT + mme_offset,
@@ -3258,9 +3258,8 @@ static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset,
                WREG32(mmTPC0_QM_ARB_ERR_MSG_EN + tpc_offset,
                                QM_ARB_ERR_MSG_EN_MASK);
 
-               /* Increase ARB WDT to support streams architecture */
-               WREG32(mmTPC0_QM_ARB_SLV_CHOISE_WDT + tpc_offset,
-                               GAUDI_ARB_WDT_TIMEOUT);
+               /* Set timeout to maximum */
+               WREG32(mmTPC0_QM_ARB_SLV_CHOISE_WDT + tpc_offset, GAUDI_ARB_WDT_TIMEOUT);
 
                WREG32(mmTPC0_QM_GLBL_CFG1 + tpc_offset, 0);
                WREG32(mmTPC0_QM_GLBL_PROT + tpc_offset,
@@ -3409,9 +3408,8 @@ static void gaudi_init_nic_qman(struct hl_device *hdev, u32 nic_offset,
                WREG32(mmNIC0_QM0_ARB_ERR_MSG_EN + nic_offset,
                                QM_ARB_ERR_MSG_EN_MASK);
 
-               /* Increase ARB WDT to support streams architecture */
-               WREG32(mmNIC0_QM0_ARB_SLV_CHOISE_WDT + nic_offset,
-                               GAUDI_ARB_WDT_TIMEOUT);
+               /* Set timeout to maximum */
+               WREG32(mmNIC0_QM0_ARB_SLV_CHOISE_WDT + nic_offset, GAUDI_ARB_WDT_TIMEOUT);
 
                WREG32(mmNIC0_QM0_GLBL_CFG1 + nic_offset, 0);
                WREG32(mmNIC0_QM0_GLBL_PROT + nic_offset,
@@ -3792,9 +3790,6 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_
 {
        u32 wait_timeout_ms;
 
-       dev_info(hdev->dev,
-               "Halting compute engines and disabling interrupts\n");
-
        if (hdev->pldm)
                wait_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
        else
@@ -4212,7 +4207,7 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset
        }
 
        if (fw_reset) {
-               dev_info(hdev->dev,
+               dev_dbg(hdev->dev,
                        "Firmware performs HARD reset, going to wait %dms\n",
                        reset_timeout_ms);
 
@@ -4304,11 +4299,11 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset
                WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
                        1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
 
-               dev_info(hdev->dev,
+               dev_dbg(hdev->dev,
                        "Issued HARD reset command, going to wait %dms\n",
                        reset_timeout_ms);
        } else {
-               dev_info(hdev->dev,
+               dev_dbg(hdev->dev,
                        "Firmware performs HARD reset, going to wait %dms\n",
                        reset_timeout_ms);
        }
@@ -4745,12 +4740,11 @@ static void gaudi_dma_free_coherent(struct hl_device *hdev, size_t size,
        dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
 }
 
-static int gaudi_hbm_scrubbing(struct hl_device *hdev)
+static int gaudi_scrub_device_dram(struct hl_device *hdev, u64 val)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        u64  cur_addr = DRAM_BASE_ADDR_USER;
-       u32 val;
-       u32 chunk_size;
+       u32 chunk_size, busy;
        int rc, dma_id;
 
        while (cur_addr < prop->dram_end_address) {
@@ -4764,8 +4758,10 @@ static int gaudi_hbm_scrubbing(struct hl_device *hdev)
                                "Doing HBM scrubbing for 0x%09llx - 0x%09llx\n",
                                cur_addr, cur_addr + chunk_size);
 
-                       WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0xdeadbeaf);
-                       WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0xdeadbeaf);
+                       WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset,
+                                       lower_32_bits(val));
+                       WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset,
+                                       upper_32_bits(val));
                        WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset,
                                                lower_32_bits(cur_addr));
                        WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset,
@@ -4788,8 +4784,8 @@ static int gaudi_hbm_scrubbing(struct hl_device *hdev)
                        rc = hl_poll_timeout(
                                hdev,
                                mmDMA0_CORE_STS0 + dma_offset,
-                               val,
-                               ((val & DMA0_CORE_STS0_BUSY_MASK) == 0),
+                               busy,
+                               ((busy & DMA0_CORE_STS0_BUSY_MASK) == 0),
                                1000,
                                HBM_SCRUBBING_TIMEOUT_US);
 
@@ -4843,7 +4839,7 @@ static int gaudi_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size)
                }
 
                /* Scrub HBM using all DMA channels in parallel */
-               rc = gaudi_hbm_scrubbing(hdev);
+               rc = gaudi_scrub_device_dram(hdev, 0xdeadbeaf);
                if (rc)
                        dev_err(hdev->dev,
                                "Failed to clear HBM in mem scrub all\n");
@@ -5038,37 +5034,7 @@ static void gaudi_cpu_accessible_dma_pool_free(struct hl_device *hdev,
        hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
 }
 
-static int gaudi_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
-                       int nents, enum dma_data_direction dir)
-{
-       struct scatterlist *sg;
-       int i;
-
-       if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
-               return -ENOMEM;
-
-       /* Shift to the device's base physical address of host memory */
-       for_each_sg(sgl, sg, nents, i)
-               sg->dma_address += HOST_PHYS_BASE;
-
-       return 0;
-}
-
-static void gaudi_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
-                       int nents, enum dma_data_direction dir)
-{
-       struct scatterlist *sg;
-       int i;
-
-       /* Cancel the device's base physical address of host memory */
-       for_each_sg(sgl, sg, nents, i)
-               sg->dma_address -= HOST_PHYS_BASE;
-
-       dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
-}
-
-static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev,
-                                       struct sg_table *sgt)
+static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
 {
        struct scatterlist *sg, *sg_next_iter;
        u32 count, dma_desc_cnt;
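
The per-ASIC gaudi_dma_map_sg()/gaudi_dma_unmap_sg() pair is replaced in the function table further down by the common hl_dma_map_sgtable()/hl_dma_unmap_sgtable() helpers. A plausible shape for the map side, inferred from what the removed code did (the exact common implementation is not shown in this diff):

	static int dma_map_sgtable_sketch(struct hl_device *hdev, struct sg_table *sgt,
					  enum dma_data_direction dir)
	{
		struct asic_fixed_properties *prop = &hdev->asic_prop;
		struct scatterlist *sg;
		int rc, i;

		rc = dma_map_sgtable(&hdev->pdev->dev, sgt, dir, 0);
		if (rc)
			return rc;

		/* Shift to the device's view of host physical memory, as the
		 * removed per-ASIC versions did with HOST_PHYS_BASE. */
		if (prop->device_dma_offset_for_host_access)
			for_each_sgtable_dma_sg(sgt, sg, i)
				sg->dma_address += prop->device_dma_offset_for_host_access;

		return 0;
	}
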
@@ -5077,8 +5043,7 @@ static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev,
 
        dma_desc_cnt = 0;
 
-       for_each_sg(sgt->sgl, sg, sgt->nents, count) {
-
+       for_each_sgtable_dma_sg(sgt, sg, count) {
                len = sg_dma_len(sg);
                addr = sg_dma_address(sg);
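
for_each_sgtable_dma_sg() iterates the DMA-mapped entries of the table, which is exactly what the open-coded for_each_sg(sgt->sgl, sg, sgt->nents, count) did; in include/linux/scatterlist.h the macro expands to essentially:

	#define for_each_sgtable_dma_sg(sgt, sg, i)	\
		for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)

so the change is about making the intent explicit, not about changing behavior.
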
 
@@ -5132,8 +5097,7 @@ static int gaudi_pin_memory_before_cs(struct hl_device *hdev,
 
        list_add_tail(&userptr->job_node, parser->job_userptr_list);
 
-       rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
-                                       userptr->sgt->nents, dir);
+       rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, dir);
        if (rc) {
                dev_err(hdev->dev, "failed to map sgt with DMA region\n");
                goto unpin_memory;
@@ -5408,7 +5372,7 @@ static int gaudi_patch_dma_packet(struct hl_device *hdev,
        sgt = userptr->sgt;
        dma_desc_cnt = 0;
 
-       for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+       for_each_sgtable_dma_sg(sgt, sg, count) {
                len = sg_dma_len(sg);
                dma_addr = sg_dma_address(sg);
 
@@ -5562,7 +5526,7 @@ static int gaudi_patch_cb(struct hl_device *hdev,
 static int gaudi_parse_cb_mmu(struct hl_device *hdev,
                struct hl_cs_parser *parser)
 {
-       u64 patched_cb_handle;
+       u64 handle;
        u32 patched_cb_size;
        struct hl_cb *user_cb;
        int rc;
@@ -5578,9 +5542,9 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
        else
                parser->patched_cb_size = parser->user_cb_size;
 
-       rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+       rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
                                parser->patched_cb_size, false, false,
-                               &patched_cb_handle);
+                               &handle);
 
        if (rc) {
                dev_err(hdev->dev,
@@ -5589,13 +5553,10 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
                return rc;
        }
 
-       patched_cb_handle >>= PAGE_SHIFT;
-       parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
-                               (u32) patched_cb_handle);
+       parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
        /* hl_cb_get should never fail */
        if (!parser->patched_cb) {
-               dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
-                       (u32) patched_cb_handle);
+               dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
                rc = -EFAULT;
                goto out;
        }
@@ -5635,8 +5596,7 @@ out:
         * cb_put will release it, but here we want to remove it from the
         * idr
         */
-       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
-                                       patched_cb_handle << PAGE_SHIFT);
+       hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
 
        return rc;
 }
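
The same lifecycle repeats in every CB hunk: the unified memory manager hands back a full u64 handle, so the old "<< PAGE_SHIFT / >> PAGE_SHIFT" packing disappears. Condensed from the calls above, with error handling trimmed and size standing in for the parser's patched_cb_size:

	u64 handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
			  size, false, false, &handle);	/* allocate, get a handle */
	cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);		/* take a reference */
	/* ... fill and submit the command buffer ... */
	hl_cb_put(cb);						/* drop the reference */
	hl_cb_destroy(&hdev->kernel_mem_mgr, handle);		/* remove from the manager */
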
@@ -5644,7 +5604,7 @@ out:
 static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
                struct hl_cs_parser *parser)
 {
-       u64 patched_cb_handle;
+       u64 handle;
        int rc;
 
        rc = gaudi_validate_cb(hdev, parser, false);
@@ -5652,22 +5612,19 @@ static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
        if (rc)
                goto free_userptr;
 
-       rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+       rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
                                parser->patched_cb_size, false, false,
-                               &patched_cb_handle);
+                               &handle);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to allocate patched CB for DMA CS %d\n", rc);
                goto free_userptr;
        }
 
-       patched_cb_handle >>= PAGE_SHIFT;
-       parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
-                               (u32) patched_cb_handle);
+       parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
        /* hl_cb_get should never fail here */
        if (!parser->patched_cb) {
-               dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
-                               (u32) patched_cb_handle);
+               dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
                rc = -EFAULT;
                goto out;
        }
@@ -5684,8 +5641,7 @@ out:
         * cb_put will release it, but here we want to remove it from the
         * idr
         */
-       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
-                               patched_cb_handle << PAGE_SHIFT);
+       hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
 
 free_userptr:
        if (rc)
@@ -5798,7 +5754,6 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
        struct hl_cs_job *job;
        u32 cb_size, ctl, err_cause;
        struct hl_cb *cb;
-       u64 id;
        int rc;
 
        cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
@@ -5865,9 +5820,8 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
        }
 
 release_cb:
-       id = cb->id;
        hl_cb_put(cb);
-       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, id << PAGE_SHIFT);
+       hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
 
        return rc;
 }
@@ -5930,7 +5884,7 @@ static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base,
 
 release_cb:
        hl_cb_put(cb);
-       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+       hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
 
        return rc;
 }
@@ -6101,184 +6055,6 @@ static void gaudi_restore_phase_topology(struct hl_device *hdev)
 
 }
 
-static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr,
-                       bool user_address, u32 *val)
-{
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u64 hbm_bar_addr, host_phys_end;
-       int rc = 0;
-
-       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
-       if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
-
-               *val = RREG32(addr - CFG_BASE);
-
-       } else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
-
-               *val = readl(hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));
-
-       } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
-
-               u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
-               hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
-
-               if (hbm_bar_addr != U64_MAX) {
-                       *val = readl(hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
-                       hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
-               }
-
-               if (hbm_bar_addr == U64_MAX)
-                       rc = -EIO;
-
-       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
-                       user_address && !iommu_present(&pci_bus_type)) {
-
-               *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
-       } else {
-               rc = -EFAULT;
-       }
-
-       return rc;
-}
-
-static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr,
-                       bool user_address, u32 val)
-{
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u64 hbm_bar_addr, host_phys_end;
-       int rc = 0;
-
-       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
-       if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
-
-               WREG32(addr - CFG_BASE, val);
-
-       } else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
-
-               writel(val, hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));
-
-       } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
-
-               u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
-               hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
-
-               if (hbm_bar_addr != U64_MAX) {
-                       writel(val, hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
-                       hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
-               }
-
-               if (hbm_bar_addr == U64_MAX)
-                       rc = -EIO;
-
-       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
-                       user_address && !iommu_present(&pci_bus_type)) {
-
-               *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
-       } else {
-               rc = -EFAULT;
-       }
-
-       return rc;
-}
-
-static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr,
-                               bool user_address, u64 *val)
-{
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u64 hbm_bar_addr, host_phys_end;
-       int rc = 0;
-
-       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
-       if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
-
-               u32 val_l = RREG32(addr - CFG_BASE);
-               u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
-
-               *val = (((u64) val_h) << 32) | val_l;
-
-       } else if ((addr >= SRAM_BASE_ADDR) &&
-                       (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
-
-               *val = readq(hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));
-
-       } else if (addr <= DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
-
-               u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
-               hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
-
-               if (hbm_bar_addr != U64_MAX) {
-                       *val = readq(hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
-                       hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
-               }
-
-               if (hbm_bar_addr == U64_MAX)
-                       rc = -EIO;
-
-       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
-                       user_address && !iommu_present(&pci_bus_type)) {
-
-               *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
-       } else {
-               rc = -EFAULT;
-       }
-
-       return rc;
-}
-
-static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr,
-                               bool user_address, u64 val)
-{
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u64 hbm_bar_addr, host_phys_end;
-       int rc = 0;
-
-       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
-       if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
-
-               WREG32(addr - CFG_BASE, lower_32_bits(val));
-               WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val));
-
-       } else if ((addr >= SRAM_BASE_ADDR) &&
-                       (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
-
-               writeq(val, hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));
-
-       } else if (addr <= DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
-
-               u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
-               hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
-
-               if (hbm_bar_addr != U64_MAX) {
-                       writeq(val, hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
-                       hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
-               }
-
-               if (hbm_bar_addr == U64_MAX)
-                       rc = -EIO;
-
-       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
-                       user_address && !iommu_present(&pci_bus_type)) {
-
-               *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
-       } else {
-               rc = -EFAULT;
-       }
-
-       return rc;
-}
-
 static int gaudi_dma_core_transfer(struct hl_device *hdev, int dma_id, u64 addr,
                                        u32 size_to_dma, dma_addr_t dma_addr)
 {
@@ -7628,19 +7404,18 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
                gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, &razwi_type);
 
                /* In case it's the first razwi, save its parameters*/
-               rc = atomic_cmpxchg(&hdev->last_error.razwi_write_disable, 0, 1);
+               rc = atomic_cmpxchg(&hdev->last_error.razwi.write_disable, 0, 1);
                if (!rc) {
-                       hdev->last_error.open_dev_timestamp = hdev->last_successful_open_ktime;
-                       hdev->last_error.razwi_timestamp = ktime_get();
-                       hdev->last_error.razwi_addr = razwi_addr;
-                       hdev->last_error.razwi_engine_id_1 = engine_id_1;
-                       hdev->last_error.razwi_engine_id_2 = engine_id_2;
+                       hdev->last_error.razwi.timestamp = ktime_get();
+                       hdev->last_error.razwi.addr = razwi_addr;
+                       hdev->last_error.razwi.engine_id_1 = engine_id_1;
+                       hdev->last_error.razwi.engine_id_2 = engine_id_2;
                        /*
                         * If first engine id holds non valid value the razwi initiator
                         * does not have engine id
                         */
-                       hdev->last_error.razwi_non_engine_initiator = (engine_id_1 == U16_MAX);
-                       hdev->last_error.razwi_type = razwi_type;
+                       hdev->last_error.razwi.non_engine_initiator = (engine_id_1 == U16_MAX);
+                       hdev->last_error.razwi.type = razwi_type;
 
                }
        }
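
Grouping the razwi fields into hdev->last_error.razwi keeps the "record only the first occurrence" idiom intact: atomic_cmpxchg() on write_disable lets exactly one reporter fill the snapshot. A minimal standalone sketch of that idiom (struct and function names are illustrative):

	struct first_err_snapshot {
		atomic_t write_disable;	/* 0 = slot free, non-zero = already captured */
		ktime_t timestamp;
		u64 addr;
	};

	static void record_first_err(struct first_err_snapshot *s, u64 addr)
	{
		/* Only the caller that observes 0 wins the slot. */
		if (atomic_cmpxchg(&s->write_disable, 0, 1))
			return;

		s->timestamp = ktime_get();
		s->addr = addr;
	}
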
@@ -8103,7 +7878,6 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
        case GAUDI_EVENT_MMU_PAGE_FAULT:
        case GAUDI_EVENT_MMU_WR_PERM:
        case GAUDI_EVENT_RAZWI_OR_ADC:
-       case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
        case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
        case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
                fallthrough;
@@ -8123,6 +7897,19 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
                hl_fw_unmask_irq(hdev, event_type);
                break;
 
+       case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
+               gaudi_print_irq_info(hdev, event_type, true);
+               gaudi_handle_qman_err(hdev, event_type);
+               hl_fw_unmask_irq(hdev, event_type);
+
+               /* On a TPC QM event, also notify about a possible TPC assertion.
+                * There is no dedicated assertion event yet, so the FW raises a QM
+                * event and the SW upper layer inspects an internal mapped area to
+                * tell a TPC assertion apart from a plain TPC QM error.
+                */
+               hl_notifier_event_send_all(hdev, HL_NOTIFIER_EVENT_TPC_ASSERT);
+               break;
+
        case GAUDI_EVENT_RAZWI_OR_ADC_SW:
                gaudi_print_irq_info(hdev, event_type, true);
                goto reset_device;
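
hl_notifier_event_send_all() fans the HL_NOTIFIER_EVENT_TPC_ASSERT bit out to every open user context. One plausible shape for such a broadcast, shown only as a heavily hedged sketch (everything except eventfd_signal() and the mutex API is illustrative, not the driver's code):

	struct notifier_sketch {
		struct mutex lock;
		struct eventfd_ctx *eventfd;	/* registered by the user via ioctl */
		u64 pending_events;
	};

	static void notifier_send_sketch(struct notifier_sketch *n, u64 event_mask)
	{
		mutex_lock(&n->lock);
		if (n->eventfd) {
			n->pending_events |= event_mask;	/* user reads the mask back later */
			eventfd_signal(n->eventfd, 1);
		}
		mutex_unlock(&n->lock);
	}
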
@@ -8328,8 +8115,6 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
 
        set_default_power_values(hdev);
 
-       hdev->max_power = prop->max_power_default;
-
        return 0;
 }
 
@@ -8501,6 +8286,16 @@ static int gaudi_get_eeprom_data(struct hl_device *hdev, void *data,
        return hl_fw_get_eeprom_data(hdev, data, max_size);
 }
 
+static int gaudi_get_monitor_dump(struct hl_device *hdev, void *data)
+{
+       struct gaudi_device *gaudi = hdev->asic_specific;
+
+       if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
+               return 0;
+
+       return hl_fw_get_monitor_dump(hdev, data);
+}
+
 /*
  * this function should be used only during initialization and/or after reset,
  * when there are no active users.
@@ -9066,11 +8861,6 @@ static void gaudi_reset_sob(struct hl_device *hdev, void *data)
        kref_init(&hw_sob->kref);
 }
 
-static void gaudi_set_dma_mask_from_fw(struct hl_device *hdev)
-{
-       hdev->dma_mask = 48;
-}
-
 static u64 gaudi_get_device_time(struct hl_device *hdev)
 {
        u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
@@ -9132,7 +8922,7 @@ static int gaudi_add_sync_to_engine_map_entry(
         */
        if (reg_value == 0 || reg_value == 0xffffffff)
                return 0;
-       reg_value -= (u32)CFG_BASE;
+       reg_value -= lower_32_bits(CFG_BASE);
 
        /* create a new hash entry */
        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
@@ -9377,6 +9167,12 @@ static u32 *gaudi_get_stream_master_qid_arr(void)
        return gaudi_stream_master;
 }
 
+static void gaudi_get_valid_dram_page_orders(struct hl_info_dev_memalloc_page_sizes *info)
+{
+       /* set 0 since multiple pages are not supported */
+       info->page_order_bitmask = 0;
+}
+
 static ssize_t infineon_ver_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct hl_device *hdev = dev_get_drvdata(dev);
@@ -9418,24 +9214,21 @@ static const struct hl_asic_funcs gaudi_funcs = {
        .asic_dma_alloc_coherent = gaudi_dma_alloc_coherent,
        .asic_dma_free_coherent = gaudi_dma_free_coherent,
        .scrub_device_mem = gaudi_scrub_device_mem,
+       .scrub_device_dram = gaudi_scrub_device_dram,
        .get_int_queue_base = gaudi_get_int_queue_base,
        .test_queues = gaudi_test_queues,
        .asic_dma_pool_zalloc = gaudi_dma_pool_zalloc,
        .asic_dma_pool_free = gaudi_dma_pool_free,
        .cpu_accessible_dma_pool_alloc = gaudi_cpu_accessible_dma_pool_alloc,
        .cpu_accessible_dma_pool_free = gaudi_cpu_accessible_dma_pool_free,
-       .hl_dma_unmap_sg = gaudi_dma_unmap_sg,
+       .hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
        .cs_parser = gaudi_cs_parser,
-       .asic_dma_map_sg = gaudi_dma_map_sg,
+       .asic_dma_map_sgtable = hl_dma_map_sgtable,
        .get_dma_desc_list_size = gaudi_get_dma_desc_list_size,
        .add_end_of_cb_packets = gaudi_add_end_of_cb_packets,
        .update_eq_ci = gaudi_update_eq_ci,
        .context_switch = gaudi_context_switch,
        .restore_phase_topology = gaudi_restore_phase_topology,
-       .debugfs_read32 = gaudi_debugfs_read32,
-       .debugfs_write32 = gaudi_debugfs_write32,
-       .debugfs_read64 = gaudi_debugfs_read64,
-       .debugfs_write64 = gaudi_debugfs_write64,
        .debugfs_read_dma = gaudi_debugfs_read_dma,
        .add_device_attr = gaudi_add_device_attr,
        .handle_eqe = gaudi_handle_eqe,
@@ -9444,6 +9237,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
        .write_pte = gaudi_write_pte,
        .mmu_invalidate_cache = gaudi_mmu_invalidate_cache,
        .mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range,
+       .mmu_prefetch_cache_range = NULL,
        .send_heartbeat = gaudi_send_heartbeat,
        .debug_coresight = gaudi_debug_coresight,
        .is_device_idle = gaudi_is_device_idle,
@@ -9452,6 +9246,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
        .hw_queues_unlock = gaudi_hw_queues_unlock,
        .get_pci_id = gaudi_get_pci_id,
        .get_eeprom_data = gaudi_get_eeprom_data,
+       .get_monitor_dump = gaudi_get_monitor_dump,
        .send_cpu_message = gaudi_send_cpu_message,
        .pci_bars_map = gaudi_pci_bars_map,
        .init_iatu = gaudi_init_iatu,
@@ -9469,7 +9264,6 @@ static const struct hl_asic_funcs gaudi_funcs = {
        .gen_wait_cb = gaudi_gen_wait_cb,
        .reset_sob = gaudi_reset_sob,
        .reset_sob_group = gaudi_reset_sob_group,
-       .set_dma_mask_from_fw = gaudi_set_dma_mask_from_fw,
        .get_device_time = gaudi_get_device_time,
        .collective_wait_init_cs = gaudi_collective_wait_init_cs,
        .collective_wait_create_jobs = gaudi_collective_wait_create_jobs,
@@ -9486,7 +9280,11 @@ static const struct hl_asic_funcs gaudi_funcs = {
        .get_sob_addr = gaudi_get_sob_addr,
        .set_pci_memory_regions = gaudi_set_pci_memory_regions,
        .get_stream_master_qid_arr = gaudi_get_stream_master_qid_arr,
-       .is_valid_dram_page_size = NULL
+       .is_valid_dram_page_size = NULL,
+       .mmu_get_real_page_size = hl_mmu_get_real_page_size,
+       .get_valid_dram_page_orders = gaudi_get_valid_dram_page_orders,
+       .access_dev_mem = hl_access_dev_mem,
+       .set_dram_bar_base = gaudi_set_hbm_bar_base,
 };
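
The four per-ASIC debugfs_read32/write32/read64/write64 helpers removed above collapse into the generic .access_dev_mem = hl_access_dev_mem entry plus the new .set_dram_bar_base hook. The DRAM branch that every removed helper duplicated looks roughly like this; the sketch is built from the removed code, not from the common helper itself:

	static int read32_via_dram_bar(struct hl_device *hdev, u64 addr, u32 *val)
	{
		struct asic_fixed_properties *prop = &hdev->asic_prop;
		u64 bar_base = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));
		u64 old_base;

		/* Slide the DRAM BAR so that addr falls inside the window. */
		old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base);
		if (old_base == U64_MAX)
			return -EIO;

		*val = readl(hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base));

		/* Restore the previous window. */
		if (hdev->asic_funcs->set_dram_bar_base(hdev, old_base) == U64_MAX)
			return -EIO;

		return 0;
	}
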
 
 /**
index 54de7c5..4fbcf3f 100644 (file)
 #define MME_QMAN_LENGTH                        1024
 #define MME_QMAN_SIZE_IN_BYTES         (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
 
-#define HBM_DMA_QMAN_LENGTH            1024
+#define HBM_DMA_QMAN_LENGTH            4096
 #define HBM_DMA_QMAN_SIZE_IN_BYTES     \
                                (HBM_DMA_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
 
 #define TPC_QMAN_LENGTH                        1024
 #define TPC_QMAN_SIZE_IN_BYTES         (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
 
-#define NIC_QMAN_LENGTH                        1024
+#define NIC_QMAN_LENGTH                        4096
 #define NIC_QMAN_SIZE_IN_BYTES         (NIC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
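
Assuming QMAN_PQ_ENTRY_SIZE is 16 bytes (it is defined elsewhere in this header, not in this hunk), the change quadruples the HBM-DMA and NIC queue sizes:

	/* before: 1024 entries * 16 B = 16 KiB per queue
	 * after:  4096 entries * 16 B = 64 KiB per queue */
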
 
 
index ec9358b..4cde505 100644 (file)
@@ -390,6 +390,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
        }
 
        prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
+       prop->host_base_address = HOST_PHYS_BASE;
+       prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE;
        prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
 
        prop->dram_base_address = DRAM_PHYS_BASE;
@@ -413,18 +415,19 @@ int goya_set_fixed_properties(struct hl_device *hdev)
        prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
        prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
        prop->dram_page_size = PAGE_SIZE_2MB;
+       prop->device_mem_alloc_default_page_size = prop->dram_page_size;
        prop->dram_supports_virtual_memory = true;
 
-       prop->dmmu.hop0_shift = MMU_V1_0_HOP0_SHIFT;
-       prop->dmmu.hop1_shift = MMU_V1_0_HOP1_SHIFT;
-       prop->dmmu.hop2_shift = MMU_V1_0_HOP2_SHIFT;
-       prop->dmmu.hop3_shift = MMU_V1_0_HOP3_SHIFT;
-       prop->dmmu.hop4_shift = MMU_V1_0_HOP4_SHIFT;
-       prop->dmmu.hop0_mask = MMU_V1_0_HOP0_MASK;
-       prop->dmmu.hop1_mask = MMU_V1_0_HOP1_MASK;
-       prop->dmmu.hop2_mask = MMU_V1_0_HOP2_MASK;
-       prop->dmmu.hop3_mask = MMU_V1_0_HOP3_MASK;
-       prop->dmmu.hop4_mask = MMU_V1_0_HOP4_MASK;
+       prop->dmmu.hop_shifts[MMU_HOP0] = MMU_V1_0_HOP0_SHIFT;
+       prop->dmmu.hop_shifts[MMU_HOP1] = MMU_V1_0_HOP1_SHIFT;
+       prop->dmmu.hop_shifts[MMU_HOP2] = MMU_V1_0_HOP2_SHIFT;
+       prop->dmmu.hop_shifts[MMU_HOP3] = MMU_V1_0_HOP3_SHIFT;
+       prop->dmmu.hop_shifts[MMU_HOP4] = MMU_V1_0_HOP4_SHIFT;
+       prop->dmmu.hop_masks[MMU_HOP0] = MMU_V1_0_HOP0_MASK;
+       prop->dmmu.hop_masks[MMU_HOP1] = MMU_V1_0_HOP1_MASK;
+       prop->dmmu.hop_masks[MMU_HOP2] = MMU_V1_0_HOP2_MASK;
+       prop->dmmu.hop_masks[MMU_HOP3] = MMU_V1_0_HOP3_MASK;
+       prop->dmmu.hop_masks[MMU_HOP4] = MMU_V1_0_HOP4_MASK;
        prop->dmmu.start_addr = VA_DDR_SPACE_START;
        prop->dmmu.end_addr = VA_DDR_SPACE_END;
        prop->dmmu.page_size = PAGE_SIZE_2MB;
@@ -487,6 +490,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
 
        prop->set_max_power_on_device_init = true;
 
+       prop->dma_mask = 48;
+
        return 0;
 }
 
@@ -574,8 +579,6 @@ static int goya_init_iatu(struct hl_device *hdev)
        if (rc)
                goto done;
 
-       hdev->asic_funcs->set_dma_mask_from_fw(hdev);
-
        /* Outbound Region 0 - Point to Host  */
        outbound_region.addr = HOST_PHYS_BASE;
        outbound_region.size = HOST_PHYS_SIZE;
@@ -2479,9 +2482,6 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_r
 {
        u32 wait_timeout_ms;
 
-       dev_info(hdev->dev,
-               "Halting compute engines and disabling interrupts\n");
-
        if (hdev->pldm)
                wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
        else
@@ -2825,12 +2825,12 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
                goya_set_pll_refclk(hdev);
 
                WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
-               dev_info(hdev->dev,
+               dev_dbg(hdev->dev,
                        "Issued HARD reset command, going to wait %dms\n",
                        reset_timeout_ms);
        } else {
                WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
-               dev_info(hdev->dev,
+               dev_dbg(hdev->dev,
                        "Issued SOFT reset command, going to wait %dms\n",
                        reset_timeout_ms);
        }
@@ -3311,35 +3311,6 @@ void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
        hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
 }
 
-static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
-                               int nents, enum dma_data_direction dir)
-{
-       struct scatterlist *sg;
-       int i;
-
-       if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
-               return -ENOMEM;
-
-       /* Shift to the device's base physical address of host memory */
-       for_each_sg(sgl, sg, nents, i)
-               sg->dma_address += HOST_PHYS_BASE;
-
-       return 0;
-}
-
-static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
-                               int nents, enum dma_data_direction dir)
-{
-       struct scatterlist *sg;
-       int i;
-
-       /* Cancel the device's base physical address of host memory */
-       for_each_sg(sgl, sg, nents, i)
-               sg->dma_address -= HOST_PHYS_BASE;
-
-       dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
-}
-
 u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
 {
        struct scatterlist *sg, *sg_next_iter;
@@ -3349,8 +3320,7 @@ u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
 
        dma_desc_cnt = 0;
 
-       for_each_sg(sgt->sgl, sg, sgt->nents, count) {
-
+       for_each_sgtable_dma_sg(sgt, sg, count) {
                len = sg_dma_len(sg);
                addr = sg_dma_address(sg);
 
@@ -3404,8 +3374,7 @@ static int goya_pin_memory_before_cs(struct hl_device *hdev,
 
        list_add_tail(&userptr->job_node, parser->job_userptr_list);
 
-       rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
-                                       userptr->sgt->nents, dir);
+       rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, dir);
        if (rc) {
                dev_err(hdev->dev, "failed to map sgt with DMA region\n");
                goto unpin_memory;
@@ -3869,7 +3838,7 @@ static int goya_patch_dma_packet(struct hl_device *hdev,
        sgt = userptr->sgt;
        dma_desc_cnt = 0;
 
-       for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+       for_each_sgtable_dma_sg(sgt, sg, count) {
                len = sg_dma_len(sg);
                dma_addr = sg_dma_address(sg);
 
@@ -4032,7 +4001,7 @@ static int goya_patch_cb(struct hl_device *hdev,
 static int goya_parse_cb_mmu(struct hl_device *hdev,
                struct hl_cs_parser *parser)
 {
-       u64 patched_cb_handle;
+       u64 handle;
        u32 patched_cb_size;
        struct hl_cb *user_cb;
        int rc;
@@ -4045,9 +4014,9 @@ static int goya_parse_cb_mmu(struct hl_device *hdev,
        parser->patched_cb_size = parser->user_cb_size +
                        sizeof(struct packet_msg_prot) * 2;
 
-       rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+       rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
                                parser->patched_cb_size, false, false,
-                               &patched_cb_handle);
+                               &handle);
 
        if (rc) {
                dev_err(hdev->dev,
@@ -4056,13 +4025,10 @@ static int goya_parse_cb_mmu(struct hl_device *hdev,
                return rc;
        }
 
-       patched_cb_handle >>= PAGE_SHIFT;
-       parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
-                               (u32) patched_cb_handle);
+       parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
        /* hl_cb_get should never fail here */
        if (!parser->patched_cb) {
-               dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
-                       (u32) patched_cb_handle);
+               dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
                rc = -EFAULT;
                goto out;
        }
@@ -4102,8 +4068,7 @@ out:
         * cb_put will release it, but here we want to remove it from the
         * idr
         */
-       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
-                                       patched_cb_handle << PAGE_SHIFT);
+       hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
 
        return rc;
 }
@@ -4111,7 +4076,7 @@ out:
 static int goya_parse_cb_no_mmu(struct hl_device *hdev,
                                struct hl_cs_parser *parser)
 {
-       u64 patched_cb_handle;
+       u64 handle;
        int rc;
 
        rc = goya_validate_cb(hdev, parser, false);
@@ -4119,22 +4084,19 @@ static int goya_parse_cb_no_mmu(struct hl_device *hdev,
        if (rc)
                goto free_userptr;
 
-       rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+       rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
                                parser->patched_cb_size, false, false,
-                               &patched_cb_handle);
+                               &handle);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to allocate patched CB for DMA CS %d\n", rc);
                goto free_userptr;
        }
 
-       patched_cb_handle >>= PAGE_SHIFT;
-       parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
-                               (u32) patched_cb_handle);
+       parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
        /* hl_cb_get should never fail here */
        if (!parser->patched_cb) {
-               dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
-                       (u32) patched_cb_handle);
+               dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
                rc = -EFAULT;
                goto out;
        }
@@ -4151,8 +4113,7 @@ out:
         * cb_put will release it, but here we want to remove it from the
         * idr
         */
-       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
-                               patched_cb_handle << PAGE_SHIFT);
+       hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
 
 free_userptr:
        if (rc)
@@ -4259,224 +4220,7 @@ static void goya_clear_sm_regs(struct hl_device *hdev)
        i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
 }
 
-/*
- * goya_debugfs_read32 - read a 32bit value from a given device or a host mapped
- *                       address.
- *
- * @hdev:      pointer to hl_device structure
- * @addr:      device or host mapped address
- * @val:       returned value
- *
- * In case of DDR address that is not mapped into the default aperture that
- * the DDR bar exposes, the function will configure the iATU so that the DDR
- * bar will be positioned at a base address that allows reading from the
- * required address. Configuring the iATU during normal operation can
- * lead to undefined behavior and therefore, should be done with extreme care
- *
- */
-static int goya_debugfs_read32(struct hl_device *hdev, u64 addr,
-                       bool user_address, u32 *val)
-{
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u64 ddr_bar_addr, host_phys_end;
-       int rc = 0;
-
-       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
-       if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
-               *val = RREG32(addr - CFG_BASE);
-
-       } else if ((addr >= SRAM_BASE_ADDR) &&
-                       (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
-
-               *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
-                               (addr - SRAM_BASE_ADDR));
-
-       } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
-
-               u64 bar_base_addr = DRAM_PHYS_BASE +
-                               (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
-               ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
-               if (ddr_bar_addr != U64_MAX) {
-                       *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
-                                               (addr - bar_base_addr));
-
-                       ddr_bar_addr = goya_set_ddr_bar_base(hdev,
-                                                       ddr_bar_addr);
-               }
-               if (ddr_bar_addr == U64_MAX)
-                       rc = -EIO;
-
-       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
-                       user_address && !iommu_present(&pci_bus_type)) {
-               *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
-       } else {
-               rc = -EFAULT;
-       }
-
-       return rc;
-}
-
-/*
- * goya_debugfs_write32 - write a 32bit value to a given device or a host mapped
- *                        address.
- *
- * @hdev:      pointer to hl_device structure
- * @addr:      device or host mapped address
- * @val:       returned value
- *
- * In case of DDR address that is not mapped into the default aperture that
- * the DDR bar exposes, the function will configure the iATU so that the DDR
- * bar will be positioned at a base address that allows writing to the
- * required address. Configuring the iATU during normal operation can
- * lead to undefined behavior and therefore, should be done with extreme care
- *
- */
-static int goya_debugfs_write32(struct hl_device *hdev, u64 addr,
-                       bool user_address, u32 val)
-{
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u64 ddr_bar_addr, host_phys_end;
-       int rc = 0;
-
-       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
-       if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
-               WREG32(addr - CFG_BASE, val);
-
-       } else if ((addr >= SRAM_BASE_ADDR) &&
-                       (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
-
-               writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
-                                       (addr - SRAM_BASE_ADDR));
-
-       } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
-
-               u64 bar_base_addr = DRAM_PHYS_BASE +
-                               (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
-               ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
-               if (ddr_bar_addr != U64_MAX) {
-                       writel(val, hdev->pcie_bar[DDR_BAR_ID] +
-                                               (addr - bar_base_addr));
-
-                       ddr_bar_addr = goya_set_ddr_bar_base(hdev,
-                                                       ddr_bar_addr);
-               }
-               if (ddr_bar_addr == U64_MAX)
-                       rc = -EIO;
-
-       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
-                       user_address && !iommu_present(&pci_bus_type)) {
-               *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
-       } else {
-               rc = -EFAULT;
-       }
-
-       return rc;
-}
-
-static int goya_debugfs_read64(struct hl_device *hdev, u64 addr,
-                       bool user_address, u64 *val)
-{
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u64 ddr_bar_addr, host_phys_end;
-       int rc = 0;
-
-       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
-       if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
-               u32 val_l = RREG32(addr - CFG_BASE);
-               u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
-
-               *val = (((u64) val_h) << 32) | val_l;
-
-       } else if ((addr >= SRAM_BASE_ADDR) &&
-                       (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
-
-               *val = readq(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
-                               (addr - SRAM_BASE_ADDR));
-
-       } else if (addr <=
-                  DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
-
-               u64 bar_base_addr = DRAM_PHYS_BASE +
-                               (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
-               ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
-               if (ddr_bar_addr != U64_MAX) {
-                       *val = readq(hdev->pcie_bar[DDR_BAR_ID] +
-                                               (addr - bar_base_addr));
-
-                       ddr_bar_addr = goya_set_ddr_bar_base(hdev,
-                                                       ddr_bar_addr);
-               }
-               if (ddr_bar_addr == U64_MAX)
-                       rc = -EIO;
-
-       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
-                       user_address && !iommu_present(&pci_bus_type)) {
-               *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
-       } else {
-               rc = -EFAULT;
-       }
-
-       return rc;
-}
-
-static int goya_debugfs_write64(struct hl_device *hdev, u64 addr,
-                               bool user_address, u64 val)
-{
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u64 ddr_bar_addr, host_phys_end;
-       int rc = 0;
-
-       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
-
-       if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
-               WREG32(addr - CFG_BASE, lower_32_bits(val));
-               WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val));
-
-       } else if ((addr >= SRAM_BASE_ADDR) &&
-                       (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
-
-               writeq(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
-                                       (addr - SRAM_BASE_ADDR));
-
-       } else if (addr <=
-                  DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
-
-               u64 bar_base_addr = DRAM_PHYS_BASE +
-                               (addr & ~(prop->dram_pci_bar_size - 0x1ull));
-
-               ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
-               if (ddr_bar_addr != U64_MAX) {
-                       writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
-                                               (addr - bar_base_addr));
-
-                       ddr_bar_addr = goya_set_ddr_bar_base(hdev,
-                                                       ddr_bar_addr);
-               }
-               if (ddr_bar_addr == U64_MAX)
-                       rc = -EIO;
-
-       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
-                       user_address && !iommu_present(&pci_bus_type)) {
-               *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
-       } else {
-               rc = -EFAULT;
-       }
-
-       return rc;
-}
-
-static int goya_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
-                               void *blob_addr)
+static int goya_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, void *blob_addr)
 {
        dev_err(hdev->dev, "Reading via DMA is not implemented yet\n");
        return -EPERM;
@@ -5101,7 +4845,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
 
 release_cb:
        hl_cb_put(cb);
-       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+       hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
 
        return rc;
 }
@@ -5561,11 +5305,6 @@ static void goya_reset_sob_group(struct hl_device *hdev, u16 sob_group)
 
 }
 
-static void goya_set_dma_mask_from_fw(struct hl_device *hdev)
-{
-       hdev->dma_mask = 48;
-}
-
 u64 goya_get_device_time(struct hl_device *hdev)
 {
        u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
@@ -5678,6 +5417,22 @@ static u32 *goya_get_stream_master_qid_arr(void)
        return NULL;
 }
 
+static void goya_get_valid_dram_page_orders(struct hl_info_dev_memalloc_page_sizes *info)
+{
+       /* set 0 since multiple pages are not supported */
+       info->page_order_bitmask = 0;
+}
+
+static int goya_get_monitor_dump(struct hl_device *hdev, void *data)
+{
+       return -EOPNOTSUPP;
+}
+
+static int goya_scrub_device_dram(struct hl_device *hdev, u64 val)
+{
+       return -EOPNOTSUPP;
+}
+
 static const struct hl_asic_funcs goya_funcs = {
        .early_init = goya_early_init,
        .early_fini = goya_early_fini,
@@ -5696,24 +5451,21 @@ static const struct hl_asic_funcs goya_funcs = {
        .asic_dma_alloc_coherent = goya_dma_alloc_coherent,
        .asic_dma_free_coherent = goya_dma_free_coherent,
        .scrub_device_mem = goya_scrub_device_mem,
+       .scrub_device_dram = goya_scrub_device_dram,
        .get_int_queue_base = goya_get_int_queue_base,
        .test_queues = goya_test_queues,
        .asic_dma_pool_zalloc = goya_dma_pool_zalloc,
        .asic_dma_pool_free = goya_dma_pool_free,
        .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
        .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
-       .hl_dma_unmap_sg = goya_dma_unmap_sg,
+       .hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
        .cs_parser = goya_cs_parser,
-       .asic_dma_map_sg = goya_dma_map_sg,
+       .asic_dma_map_sgtable = hl_dma_map_sgtable,
        .get_dma_desc_list_size = goya_get_dma_desc_list_size,
        .add_end_of_cb_packets = goya_add_end_of_cb_packets,
        .update_eq_ci = goya_update_eq_ci,
        .context_switch = goya_context_switch,
        .restore_phase_topology = goya_restore_phase_topology,
-       .debugfs_read32 = goya_debugfs_read32,
-       .debugfs_write32 = goya_debugfs_write32,
-       .debugfs_read64 = goya_debugfs_read64,
-       .debugfs_write64 = goya_debugfs_write64,
        .debugfs_read_dma = goya_debugfs_read_dma,
        .add_device_attr = goya_add_device_attr,
        .handle_eqe = goya_handle_eqe,
@@ -5722,6 +5474,7 @@ static const struct hl_asic_funcs goya_funcs = {
        .write_pte = goya_write_pte,
        .mmu_invalidate_cache = goya_mmu_invalidate_cache,
        .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
+       .mmu_prefetch_cache_range = NULL,
        .send_heartbeat = goya_send_heartbeat,
        .debug_coresight = goya_debug_coresight,
        .is_device_idle = goya_is_device_idle,
@@ -5730,6 +5483,7 @@ static const struct hl_asic_funcs goya_funcs = {
        .hw_queues_unlock = goya_hw_queues_unlock,
        .get_pci_id = goya_get_pci_id,
        .get_eeprom_data = goya_get_eeprom_data,
+       .get_monitor_dump = goya_get_monitor_dump,
        .send_cpu_message = goya_send_cpu_message,
        .pci_bars_map = goya_pci_bars_map,
        .init_iatu = goya_init_iatu,
@@ -5747,7 +5501,6 @@ static const struct hl_asic_funcs goya_funcs = {
        .gen_wait_cb = goya_gen_wait_cb,
        .reset_sob = goya_reset_sob,
        .reset_sob_group = goya_reset_sob_group,
-       .set_dma_mask_from_fw = goya_set_dma_mask_from_fw,
        .get_device_time = goya_get_device_time,
        .collective_wait_init_cs = goya_collective_wait_init_cs,
        .collective_wait_create_jobs = goya_collective_wait_create_jobs,
@@ -5764,7 +5517,11 @@ static const struct hl_asic_funcs goya_funcs = {
        .get_sob_addr = &goya_get_sob_addr,
        .set_pci_memory_regions = goya_set_pci_memory_regions,
        .get_stream_master_qid_arr = goya_get_stream_master_qid_arr,
-       .is_valid_dram_page_size = NULL
+       .is_valid_dram_page_size = NULL,
+       .mmu_get_real_page_size = hl_mmu_get_real_page_size,
+       .get_valid_dram_page_orders = goya_get_valid_dram_page_orders,
+       .access_dev_mem = hl_access_dev_mem,
+       .set_dram_bar_base = goya_set_ddr_bar_base,
 };
 
 /*
index 65668da..38e44b6 100644 (file)
@@ -389,6 +389,14 @@ enum pq_init_status {
  *
  * CPUCP_PACKET_ENGINE_CORE_ASID_SET -
  *       Packet to perform engine core ASID configuration
+ *
+ * CPUCP_PACKET_MONITOR_DUMP_GET -
+ *       Get a dump of the monitor registers from the CpuCP kernel.
+ *       The CPU will put the register dump in a buffer allocated by the driver,
+ *       whose address is passed via the CpuCP packet. In addition, the host's driver
+ *       passes the max size it allows the CpuCP to write to the structure, to prevent
+ *       data corruption in case of mismatched driver/FW versions.
+ *       Relevant only to Gaudi.
  */
 
 enum cpucp_packet_id {
@@ -439,6 +447,11 @@ enum cpucp_packet_id {
        CPUCP_PACKET_POWER_SET,                 /* internal */
        CPUCP_PACKET_RESERVED,                  /* not used */
        CPUCP_PACKET_ENGINE_CORE_ASID_SET,      /* internal */
+       CPUCP_PACKET_RESERVED2,                 /* not used */
+       CPUCP_PACKET_RESERVED3,                 /* not used */
+       CPUCP_PACKET_RESERVED4,                 /* not used */
+       CPUCP_PACKET_RESERVED5,                 /* not used */
+       CPUCP_PACKET_MONITOR_DUMP_GET,          /* debugfs */
 };
 
 #define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
@@ -555,6 +568,12 @@ struct cpucp_array_data_packet {
        __le32 data[];
 };
 
+enum cpucp_led_index {
+       CPUCP_LED0_INDEX = 0,
+       CPUCP_LED1_INDEX,
+       CPUCP_LED2_INDEX
+};
+
 enum cpucp_packet_rc {
        cpucp_packet_success,
        cpucp_packet_invalid,
@@ -576,7 +595,10 @@ enum cpucp_temp_type {
        cpucp_temp_offset = 19,
        cpucp_temp_lowest = 21,
        cpucp_temp_highest = 22,
-       cpucp_temp_reset_history = 23
+       cpucp_temp_reset_history = 23,
+       cpucp_temp_warn = 24,
+       cpucp_temp_max_crit = 25,
+       cpucp_temp_max_warn = 26,
 };
 
 enum cpucp_in_attributes {
@@ -686,6 +708,7 @@ enum pll_index {
 enum rl_index {
        TPC_RL = 0,
        MME_RL,
+       EDMA_RL,
 };
 
 enum pvt_index {
@@ -820,6 +843,7 @@ enum cpucp_serdes_type {
        TYPE_2_SERDES_TYPE,
        HLS1_SERDES_TYPE,
        HLS1H_SERDES_TYPE,
+       HLS2_SERDES_TYPE,
        UNKNOWN_SERDES_TYPE,
        MAX_NUM_SERDES_TYPE = UNKNOWN_SERDES_TYPE
 };
@@ -833,9 +857,28 @@ struct cpucp_nic_info {
        __u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN];
        __le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN];
        __le16 serdes_type; /* enum cpucp_serdes_type */
+       __le16 tx_swap_map[CPUCP_MAX_NICS];
        __u8 reserved[6];
 };
 
+#define PAGE_DISCARD_MAX       64
+
+struct page_discard_info {
+       __u8 num_entries;
+       __u8 reserved[7];
+       __le32 mmu_page_idx[PAGE_DISCARD_MAX];
+};
+
+/*
+ * struct ser_val - the SER (symbol error rate) value is represented by "integer * 10 ^ -exp".
+ * @integer: the integer part of the SER value;
+ * @exp: the exponent part of the SER value.
+ */
+struct ser_val {
+       __le16 integer;
+       __le16 exp;
+};
+
 /*
  * struct cpucp_nic_status - describes the status of a NIC port.
  * @port: NIC port index.
@@ -889,4 +932,29 @@ struct cpucp_hbm_row_replaced_rows_info {
        struct cpucp_hbm_row_info replaced_rows[CPUCP_HBM_ROW_REPLACE_MAX];
 };
 
+/*
+ * struct dcore_monitor_regs_data - DCORE monitor regs data.
+ * The structure follows the sync manager block layout. Relevant only to Gaudi.
+ * @mon_pay_addrl: array of payload address low bits.
+ * @mon_pay_addrh: array of payload address high bits.
+ * @mon_pay_data: array of payload data.
+ * @mon_arm: array of monitor arm.
+ * @mon_status: array of monitor status.
+ */
+struct dcore_monitor_regs_data {
+       __le32 mon_pay_addrl[512];
+       __le32 mon_pay_addrh[512];
+       __le32 mon_pay_data[512];
+       __le32 mon_arm[512];
+       __le32 mon_status[512];
+};
+
+/* contains SM data for each SYNC_MNGR (relevant only to Gaudi) */
+struct cpucp_monitor_dump {
+       struct dcore_monitor_regs_data sync_mngr_w_s;
+       struct dcore_monitor_regs_data sync_mngr_e_s;
+       struct dcore_monitor_regs_data sync_mngr_w_n;
+       struct dcore_monitor_regs_data sync_mngr_e_n;
+};
+
 #endif /* CPUCP_IF_H */
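
Two quick notes on the additions above, before the next file. First, sizes: each struct dcore_monitor_regs_data is 5 arrays x 512 x 4 bytes = 10240 bytes, so a full struct cpucp_monitor_dump comes to 40960 bytes, which is why the host's driver passes the maximum size it will accept along with the buffer address. Second, the ser_val encoding ("integer * 10 ^ -exp") is just a scaled integer; the sketch below shows one way a host could decode it. The helper name and the choice of micro-units are illustrative assumptions, not part of the habanalabs driver.

        /*
         * Illustrative only: decode a struct ser_val into micro-units
         * (SER * 1e6) for printing.  Hypothetical helper, not a driver
         * function.
         */
        static u64 ser_val_to_micro(const struct ser_val *ser)
        {
                u64 scaled = le16_to_cpu(ser->integer) * 1000000ULL;
                u32 exp = le16_to_cpu(ser->exp);

                while (exp--)
                        scaled /= 10;   /* apply the 10^-exp factor */

                return scaled;
        }
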
index 758f246..cae8ac8 100644 (file)
 
 #define MMU_CONFIG_TIMEOUT_USEC                2000 /* 2 ms */
 
+enum mmu_hop_num {
+       MMU_HOP0,
+       MMU_HOP1,
+       MMU_HOP2,
+       MMU_HOP3,
+       MMU_HOP4,
+       MMU_HOP5,
+       MMU_HOP_MAX,
+};
+
 #endif /* INCLUDE_MMU_GENERAL_H_ */
index f21854a..009239a 100644 (file)
@@ -68,40 +68,40 @@ void __init lkdtm_bugs_init(int *recur_param)
                recur_count = *recur_param;
 }
 
-void lkdtm_PANIC(void)
+static void lkdtm_PANIC(void)
 {
        panic("dumptest");
 }
 
-void lkdtm_BUG(void)
+static void lkdtm_BUG(void)
 {
        BUG();
 }
 
 static int warn_counter;
 
-void lkdtm_WARNING(void)
+static void lkdtm_WARNING(void)
 {
        WARN_ON(++warn_counter);
 }
 
-void lkdtm_WARNING_MESSAGE(void)
+static void lkdtm_WARNING_MESSAGE(void)
 {
        WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
 }
 
-void lkdtm_EXCEPTION(void)
+static void lkdtm_EXCEPTION(void)
 {
        *((volatile int *) 0) = 0;
 }
 
-void lkdtm_LOOP(void)
+static void lkdtm_LOOP(void)
 {
        for (;;)
                ;
 }
 
-void lkdtm_EXHAUST_STACK(void)
+static void lkdtm_EXHAUST_STACK(void)
 {
        pr_info("Calling function with %lu frame size to depth %d ...\n",
                REC_STACK_SIZE, recur_count);
@@ -115,7 +115,7 @@ static noinline void __lkdtm_CORRUPT_STACK(void *stack)
 }
 
 /* This should trip the stack canary, not corrupt the return address. */
-noinline void lkdtm_CORRUPT_STACK(void)
+static noinline void lkdtm_CORRUPT_STACK(void)
 {
        /* Use default char array length that triggers stack protection. */
        char data[8] __aligned(sizeof(void *));
@@ -125,7 +125,7 @@ noinline void lkdtm_CORRUPT_STACK(void)
 }
 
 /* Same as above but will only get a canary with -fstack-protector-strong */
-noinline void lkdtm_CORRUPT_STACK_STRONG(void)
+static noinline void lkdtm_CORRUPT_STACK_STRONG(void)
 {
        union {
                unsigned short shorts[4];
@@ -139,7 +139,7 @@ noinline void lkdtm_CORRUPT_STACK_STRONG(void)
 static pid_t stack_pid;
 static unsigned long stack_addr;
 
-void lkdtm_REPORT_STACK(void)
+static void lkdtm_REPORT_STACK(void)
 {
        volatile uintptr_t magic;
        pid_t pid = task_pid_nr(current);
@@ -222,7 +222,7 @@ static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
        }
 }
 
-void lkdtm_REPORT_STACK_CANARY(void)
+static void lkdtm_REPORT_STACK_CANARY(void)
 {
        /* Use default char array length that triggers stack protection. */
        char data[8] __aligned(sizeof(void *)) = { };
@@ -230,7 +230,7 @@ void lkdtm_REPORT_STACK_CANARY(void)
        __lkdtm_REPORT_STACK_CANARY((void *)&data);
 }
 
-void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
+static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
 {
        static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
        u32 *p;
@@ -245,21 +245,21 @@ void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
                pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
 }
 
-void lkdtm_SOFTLOCKUP(void)
+static void lkdtm_SOFTLOCKUP(void)
 {
        preempt_disable();
        for (;;)
                cpu_relax();
 }
 
-void lkdtm_HARDLOCKUP(void)
+static void lkdtm_HARDLOCKUP(void)
 {
        local_irq_disable();
        for (;;)
                cpu_relax();
 }
 
-void lkdtm_SPINLOCKUP(void)
+static void lkdtm_SPINLOCKUP(void)
 {
        /* Must be called twice to trigger. */
        spin_lock(&lock_me_up);
@@ -267,7 +267,7 @@ void lkdtm_SPINLOCKUP(void)
        __release(&lock_me_up);
 }
 
-void lkdtm_HUNG_TASK(void)
+static void lkdtm_HUNG_TASK(void)
 {
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule();
@@ -276,7 +276,7 @@ void lkdtm_HUNG_TASK(void)
 volatile unsigned int huge = INT_MAX - 2;
 volatile unsigned int ignored;
 
-void lkdtm_OVERFLOW_SIGNED(void)
+static void lkdtm_OVERFLOW_SIGNED(void)
 {
        int value;
 
@@ -291,7 +291,7 @@ void lkdtm_OVERFLOW_SIGNED(void)
 }
 
 
-void lkdtm_OVERFLOW_UNSIGNED(void)
+static void lkdtm_OVERFLOW_UNSIGNED(void)
 {
        unsigned int value;
 
@@ -319,7 +319,7 @@ struct array_bounds {
        int three;
 };
 
-void lkdtm_ARRAY_BOUNDS(void)
+static void lkdtm_ARRAY_BOUNDS(void)
 {
        struct array_bounds_flex_array *not_checked;
        struct array_bounds *checked;
@@ -327,6 +327,11 @@ void lkdtm_ARRAY_BOUNDS(void)
 
        not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
        checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
+       if (!not_checked || !checked) {
+               kfree(not_checked);
+               kfree(checked);
+               return;
+       }
 
        pr_info("Array access within bounds ...\n");
        /* For both, touch all bytes in the actual member size. */
@@ -346,10 +351,13 @@ void lkdtm_ARRAY_BOUNDS(void)
        kfree(not_checked);
        kfree(checked);
        pr_err("FAIL: survived array bounds overflow!\n");
-       pr_expected_config(CONFIG_UBSAN_BOUNDS);
+       if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
+               pr_expected_config(CONFIG_UBSAN_TRAP);
+       else
+               pr_expected_config(CONFIG_UBSAN_BOUNDS);
 }
 
-void lkdtm_CORRUPT_LIST_ADD(void)
+static void lkdtm_CORRUPT_LIST_ADD(void)
 {
        /*
         * Initially, an empty list via LIST_HEAD:
@@ -389,7 +397,7 @@ void lkdtm_CORRUPT_LIST_ADD(void)
        }
 }
 
-void lkdtm_CORRUPT_LIST_DEL(void)
+static void lkdtm_CORRUPT_LIST_DEL(void)
 {
        LIST_HEAD(test_head);
        struct lkdtm_list item;
@@ -417,7 +425,7 @@ void lkdtm_CORRUPT_LIST_DEL(void)
 }
 
 /* Test that VMAP_STACK is actually allocating with a leading guard page */
-void lkdtm_STACK_GUARD_PAGE_LEADING(void)
+static void lkdtm_STACK_GUARD_PAGE_LEADING(void)
 {
        const unsigned char *stack = task_stack_page(current);
        const unsigned char *ptr = stack - 1;
@@ -431,7 +439,7 @@ void lkdtm_STACK_GUARD_PAGE_LEADING(void)
 }
 
 /* Test that VMAP_STACK is actually allocating with a trailing guard page */
-void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
+static void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
 {
        const unsigned char *stack = task_stack_page(current);
        const unsigned char *ptr = stack + THREAD_SIZE;
@@ -444,7 +452,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
        pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
 }
 
-void lkdtm_UNSET_SMEP(void)
+static void lkdtm_UNSET_SMEP(void)
 {
 #if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
 #define MOV_CR4_DEPTH  64
@@ -510,7 +518,7 @@ void lkdtm_UNSET_SMEP(void)
 #endif
 }
 
-void lkdtm_DOUBLE_FAULT(void)
+static void lkdtm_DOUBLE_FAULT(void)
 {
 #if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
        /*
@@ -558,7 +566,7 @@ static noinline void change_pac_parameters(void)
 }
 #endif
 
-noinline void lkdtm_CORRUPT_PAC(void)
+static noinline void lkdtm_CORRUPT_PAC(void)
 {
 #ifdef CONFIG_ARM64
 #define CORRUPT_PAC_ITERATE    10
@@ -586,3 +594,37 @@ noinline void lkdtm_CORRUPT_PAC(void)
        pr_err("XFAIL: this test is arm64-only\n");
 #endif
 }
+
+static struct crashtype crashtypes[] = {
+       CRASHTYPE(PANIC),
+       CRASHTYPE(BUG),
+       CRASHTYPE(WARNING),
+       CRASHTYPE(WARNING_MESSAGE),
+       CRASHTYPE(EXCEPTION),
+       CRASHTYPE(LOOP),
+       CRASHTYPE(EXHAUST_STACK),
+       CRASHTYPE(CORRUPT_STACK),
+       CRASHTYPE(CORRUPT_STACK_STRONG),
+       CRASHTYPE(REPORT_STACK),
+       CRASHTYPE(REPORT_STACK_CANARY),
+       CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
+       CRASHTYPE(SOFTLOCKUP),
+       CRASHTYPE(HARDLOCKUP),
+       CRASHTYPE(SPINLOCKUP),
+       CRASHTYPE(HUNG_TASK),
+       CRASHTYPE(OVERFLOW_SIGNED),
+       CRASHTYPE(OVERFLOW_UNSIGNED),
+       CRASHTYPE(ARRAY_BOUNDS),
+       CRASHTYPE(CORRUPT_LIST_ADD),
+       CRASHTYPE(CORRUPT_LIST_DEL),
+       CRASHTYPE(STACK_GUARD_PAGE_LEADING),
+       CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
+       CRASHTYPE(UNSET_SMEP),
+       CRASHTYPE(DOUBLE_FAULT),
+       CRASHTYPE(CORRUPT_PAC),
+};
+
+struct crashtype_category bugs_crashtypes = {
+       .crashtypes = crashtypes,
+       .len        = ARRAY_SIZE(crashtypes),
+};
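
For orientation, the block above is the whole of the new per-category contract: the handlers become static, and each lkdtm source file exports a single crashtype_category built from a local crashtypes[] table via CRASHTYPE(). A hedged sketch of what a brand-new category file would look like under this scheme follows; the file name, test name, and exported symbol are invented for illustration.

        /* Hypothetical lkdtm/example.c, mirroring the bugs.c pattern above. */
        #include "lkdtm.h"

        static void lkdtm_EXAMPLE_NOOP(void)
        {
                pr_info("example crashtype invoked\n");
        }

        static struct crashtype crashtypes[] = {
                CRASHTYPE(EXAMPLE_NOOP),
        };

        struct crashtype_category example_crashtypes = {
                .crashtypes = crashtypes,
                .len        = ARRAY_SIZE(crashtypes),
        };

The exported symbol would then be declared in lkdtm.h and appended to crashtype_categories[] in core.c, exactly as the later hunks in this diff do for the real categories.
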
index c9aedde..666a7f4 100644 (file)
@@ -3,6 +3,7 @@
  * This is for all the tests relating directly to Control Flow Integrity.
  */
 #include "lkdtm.h"
+#include <asm/page.h>
 
 static int called_count;
 
@@ -22,7 +23,7 @@ static noinline int lkdtm_increment_int(int *counter)
 /*
  * This tries to call an indirect function with a mismatched prototype.
  */
-void lkdtm_CFI_FORWARD_PROTO(void)
+static void lkdtm_CFI_FORWARD_PROTO(void)
 {
        /*
         * Matches lkdtm_increment_void()'s prototype, but not
@@ -41,3 +42,145 @@ void lkdtm_CFI_FORWARD_PROTO(void)
        pr_err("FAIL: survived mismatched prototype function call!\n");
        pr_expected_config(CONFIG_CFI_CLANG);
 }
+
+/*
+ * This can stay local to LKDTM, as there should not be a production reason
+ * to disable PAC && SCS.
+ */
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+# ifdef CONFIG_ARM64_BTI_KERNEL
+#  define __no_pac             "branch-protection=bti"
+# else
+#  define __no_pac             "branch-protection=none"
+# endif
+# define __no_ret_protection   __noscs __attribute__((__target__(__no_pac)))
+#else
+# define __no_ret_protection   __noscs
+#endif
+
+#define no_pac_addr(addr)      \
+       ((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET))
+
+/* The ultimate ROP gadget. */
+static noinline __no_ret_protection
+void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr)
+{
+       /* Use of volatile is to make sure final write isn't seen as a dead store. */
+       unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
+
+       /* Make sure we've found the right place on the stack before writing it. */
+       if (no_pac_addr(*ret_addr) == expected)
+               *ret_addr = (addr);
+       else
+               /* Check architecture, stack layout, or compiler behavior... */
+               pr_warn("Eek: return address mismatch! %px != %px\n",
+                       *ret_addr, addr);
+}
+
+static noinline
+void set_return_addr(unsigned long *expected, unsigned long *addr)
+{
+       /* Use of volatile is to make sure final write isn't seen as a dead store. */
+       unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
+
+       /* Make sure we've found the right place on the stack before writing it. */
+       if (no_pac_addr(*ret_addr) == expected)
+               *ret_addr = (addr);
+       else
+               /* Check architecture, stack layout, or compiler behavior... */
+               pr_warn("Eek: return address mismatch! %px != %px\n",
+                       *ret_addr, addr);
+}
+
+static volatile int force_check;
+
+static void lkdtm_CFI_BACKWARD(void)
+{
+       /* Use calculated gotos to keep labels addressable. */
+       void *labels[] = {0, &&normal, &&redirected, &&check_normal, &&check_redirected};
+
+       pr_info("Attempting unchecked stack return address redirection ...\n");
+
+       /* Always false */
+       if (force_check) {
+               /*
+                * Prepare to call with NULLs to avoid parameters being treated as
+                * constants in -O2.
+                */
+               set_return_addr_unchecked(NULL, NULL);
+               set_return_addr(NULL, NULL);
+               if (force_check)
+                       goto *labels[1];
+               if (force_check)
+                       goto *labels[2];
+               if (force_check)
+                       goto *labels[3];
+               if (force_check)
+                       goto *labels[4];
+               return;
+       }
+
+       /*
+        * Use fallthrough switch case to keep basic block ordering between
+        * set_return_addr*() and the label after it.
+        */
+       switch (force_check) {
+       case 0:
+               set_return_addr_unchecked(&&normal, &&redirected);
+               fallthrough;
+       case 1:
+normal:
+               /* Always true */
+               if (!force_check) {
+                       pr_err("FAIL: stack return address manipulation failed!\n");
+                       /* If we can't redirect "normally", we can't test mitigations. */
+                       return;
+               }
+               break;
+       default:
+redirected:
+               pr_info("ok: redirected stack return address.\n");
+               break;
+       }
+
+       pr_info("Attempting checked stack return address redirection ...\n");
+
+       switch (force_check) {
+       case 0:
+               set_return_addr(&&check_normal, &&check_redirected);
+               fallthrough;
+       case 1:
+check_normal:
+               /* Always true */
+               if (!force_check) {
+                       pr_info("ok: control flow unchanged.\n");
+                       return;
+               }
+
+check_redirected:
+               pr_err("FAIL: stack return address was redirected!\n");
+               break;
+       }
+
+       if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
+               pr_expected_config(CONFIG_ARM64_PTR_AUTH_KERNEL);
+               return;
+       }
+       if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) {
+               pr_expected_config(CONFIG_SHADOW_CALL_STACK);
+               return;
+       }
+       pr_warn("This is probably expected, since this %s was built *without* %s=y nor %s=y\n",
+               lkdtm_kernel_info,
+               "CONFIG_ARM64_PTR_AUTH_KERNEL", "CONFIG_SHADOW_CALL_STACK");
+}
+
+static struct crashtype crashtypes[] = {
+       CRASHTYPE(CFI_FORWARD_PROTO),
+       CRASHTYPE(CFI_BACKWARD),
+};
+
+struct crashtype_category cfi_crashtypes = {
+       .crashtypes = crashtypes,
+       .len        = ARRAY_SIZE(crashtypes),
+};
index e2228b6..b4712ff 100644 (file)
@@ -86,109 +86,21 @@ static struct crashpoint crashpoints[] = {
 #endif
 };
 
-
-/* Crash types. */
-struct crashtype {
-       const char *name;
-       void (*func)(void);
-};
-
-#define CRASHTYPE(_name)                       \
-       {                                       \
-               .name = __stringify(_name),     \
-               .func = lkdtm_ ## _name,        \
-       }
-
-/* Define the possible types of crashes that can be triggered. */
-static const struct crashtype crashtypes[] = {
-       CRASHTYPE(PANIC),
-       CRASHTYPE(BUG),
-       CRASHTYPE(WARNING),
-       CRASHTYPE(WARNING_MESSAGE),
-       CRASHTYPE(EXCEPTION),
-       CRASHTYPE(LOOP),
-       CRASHTYPE(EXHAUST_STACK),
-       CRASHTYPE(CORRUPT_STACK),
-       CRASHTYPE(CORRUPT_STACK_STRONG),
-       CRASHTYPE(REPORT_STACK),
-       CRASHTYPE(REPORT_STACK_CANARY),
-       CRASHTYPE(CORRUPT_LIST_ADD),
-       CRASHTYPE(CORRUPT_LIST_DEL),
-       CRASHTYPE(STACK_GUARD_PAGE_LEADING),
-       CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
-       CRASHTYPE(UNSET_SMEP),
-       CRASHTYPE(CORRUPT_PAC),
-       CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
-       CRASHTYPE(SLAB_LINEAR_OVERFLOW),
-       CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
-       CRASHTYPE(WRITE_AFTER_FREE),
-       CRASHTYPE(READ_AFTER_FREE),
-       CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
-       CRASHTYPE(READ_BUDDY_AFTER_FREE),
-       CRASHTYPE(SLAB_INIT_ON_ALLOC),
-       CRASHTYPE(BUDDY_INIT_ON_ALLOC),
-       CRASHTYPE(SLAB_FREE_DOUBLE),
-       CRASHTYPE(SLAB_FREE_CROSS),
-       CRASHTYPE(SLAB_FREE_PAGE),
-       CRASHTYPE(SOFTLOCKUP),
-       CRASHTYPE(HARDLOCKUP),
-       CRASHTYPE(SPINLOCKUP),
-       CRASHTYPE(HUNG_TASK),
-       CRASHTYPE(OVERFLOW_SIGNED),
-       CRASHTYPE(OVERFLOW_UNSIGNED),
-       CRASHTYPE(ARRAY_BOUNDS),
-       CRASHTYPE(EXEC_DATA),
-       CRASHTYPE(EXEC_STACK),
-       CRASHTYPE(EXEC_KMALLOC),
-       CRASHTYPE(EXEC_VMALLOC),
-       CRASHTYPE(EXEC_RODATA),
-       CRASHTYPE(EXEC_USERSPACE),
-       CRASHTYPE(EXEC_NULL),
-       CRASHTYPE(ACCESS_USERSPACE),
-       CRASHTYPE(ACCESS_NULL),
-       CRASHTYPE(WRITE_RO),
-       CRASHTYPE(WRITE_RO_AFTER_INIT),
-       CRASHTYPE(WRITE_KERN),
-       CRASHTYPE(WRITE_OPD),
-       CRASHTYPE(REFCOUNT_INC_OVERFLOW),
-       CRASHTYPE(REFCOUNT_ADD_OVERFLOW),
-       CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW),
-       CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW),
-       CRASHTYPE(REFCOUNT_DEC_ZERO),
-       CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
-       CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
-       CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
-       CRASHTYPE(REFCOUNT_INC_ZERO),
-       CRASHTYPE(REFCOUNT_ADD_ZERO),
-       CRASHTYPE(REFCOUNT_INC_SATURATED),
-       CRASHTYPE(REFCOUNT_DEC_SATURATED),
-       CRASHTYPE(REFCOUNT_ADD_SATURATED),
-       CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED),
-       CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED),
-       CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED),
-       CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED),
-       CRASHTYPE(REFCOUNT_TIMING),
-       CRASHTYPE(ATOMIC_TIMING),
-       CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
-       CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
-       CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO),
-       CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM),
-       CRASHTYPE(USERCOPY_STACK_FRAME_TO),
-       CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
-       CRASHTYPE(USERCOPY_STACK_BEYOND),
-       CRASHTYPE(USERCOPY_KERNEL),
-       CRASHTYPE(STACKLEAK_ERASING),
-       CRASHTYPE(CFI_FORWARD_PROTO),
-       CRASHTYPE(FORTIFIED_OBJECT),
-       CRASHTYPE(FORTIFIED_SUBOBJECT),
-       CRASHTYPE(FORTIFIED_STRSCPY),
-       CRASHTYPE(DOUBLE_FAULT),
+/* List of possible types for crashes that can be triggered. */
+static const struct crashtype_category *crashtype_categories[] = {
+       &bugs_crashtypes,
+       &heap_crashtypes,
+       &perms_crashtypes,
+       &refcount_crashtypes,
+       &usercopy_crashtypes,
+       &stackleak_crashtypes,
+       &cfi_crashtypes,
+       &fortify_crashtypes,
 #ifdef CONFIG_PPC_64S_HASH_MMU
-       CRASHTYPE(PPC_SLB_MULTIHIT),
+       &powerpc_crashtypes,
 #endif
 };
 
-
 /* Global kprobe entry and crashtype. */
 static struct kprobe *lkdtm_kprobe;
 static struct crashpoint *lkdtm_crashpoint;
@@ -223,11 +135,16 @@ char *lkdtm_kernel_info;
 /* Return the crashtype number or NULL if the name is invalid */
 static const struct crashtype *find_crashtype(const char *name)
 {
-       int i;
+       int cat, idx;
+
+       for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) {
+               for (idx = 0; idx < crashtype_categories[cat]->len; idx++) {
+                       struct crashtype *crashtype;
 
-       for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
-               if (!strcmp(name, crashtypes[i].name))
-                       return &crashtypes[i];
+                       crashtype = &crashtype_categories[cat]->crashtypes[idx];
+                       if (!strcmp(name, crashtype->name))
+                               return crashtype;
+               }
        }
 
        return NULL;
@@ -347,17 +264,24 @@ static ssize_t lkdtm_debugfs_entry(struct file *f,
 static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
                size_t count, loff_t *off)
 {
+       int n, cat, idx;
+       ssize_t out;
        char *buf;
-       int i, n, out;
 
        buf = (char *)__get_free_page(GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
 
        n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n");
-       for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
-               n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
-                             crashtypes[i].name);
+
+       for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) {
+               for (idx = 0; idx < crashtype_categories[cat]->len; idx++) {
+                       struct crashtype *crashtype;
+
+                       crashtype = &crashtype_categories[cat]->crashtypes[idx];
+                       n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
+                                     crashtype->name);
+               }
        }
        buf[n] = '\0';
 
index ab33bb5..080293f 100644 (file)
@@ -10,7 +10,7 @@
 
 static volatile int fortify_scratch_space;
 
-void lkdtm_FORTIFIED_OBJECT(void)
+static void lkdtm_FORTIFIED_OBJECT(void)
 {
        struct target {
                char a[10];
@@ -31,7 +31,7 @@ void lkdtm_FORTIFIED_OBJECT(void)
        pr_expected_config(CONFIG_FORTIFY_SOURCE);
 }
 
-void lkdtm_FORTIFIED_SUBOBJECT(void)
+static void lkdtm_FORTIFIED_SUBOBJECT(void)
 {
        struct target {
                char a[10];
@@ -67,7 +67,7 @@ void lkdtm_FORTIFIED_SUBOBJECT(void)
  * strscpy and generate a panic because there is a write overflow (i.e. src
  * length is greater than dst length).
  */
-void lkdtm_FORTIFIED_STRSCPY(void)
+static void lkdtm_FORTIFIED_STRSCPY(void)
 {
        char *src;
        char dst[5];
@@ -134,3 +134,14 @@ void lkdtm_FORTIFIED_STRSCPY(void)
 
        kfree(src);
 }
+
+static struct crashtype crashtypes[] = {
+       CRASHTYPE(FORTIFIED_OBJECT),
+       CRASHTYPE(FORTIFIED_SUBOBJECT),
+       CRASHTYPE(FORTIFIED_STRSCPY),
+};
+
+struct crashtype_category fortify_crashtypes = {
+       .crashtypes = crashtypes,
+       .len        = ARRAY_SIZE(crashtypes),
+};
index 8a92f5a..6251607 100644 (file)
@@ -22,8 +22,11 @@ static volatile int __offset = 1;
 /*
  * If there aren't guard pages, it's likely that a consecutive allocation will
  * let us overflow into the second allocation without overwriting something real.
+ *
+ * This should always be caught because there is an unconditional unmapped
+ * page after vmap allocations.
  */
-void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
+static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
 {
        char *one, *two;
 
@@ -41,8 +44,11 @@ void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
  * This tries to stay within the next largest power-of-2 kmalloc cache
  * to avoid actually overwriting anything important if it's not detected
  * correctly.
+ *
+ * This should get caught by memory tagging, by KASan, or by using
+ * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
  */
-void lkdtm_SLAB_LINEAR_OVERFLOW(void)
+static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
 {
        size_t len = 1020;
        u32 *data = kmalloc(len, GFP_KERNEL);
@@ -50,11 +56,12 @@ void lkdtm_SLAB_LINEAR_OVERFLOW(void)
                return;
 
        pr_info("Attempting slab linear overflow ...\n");
+       OPTIMIZER_HIDE_VAR(data);
        data[1024 / sizeof(u32)] = 0x12345678;
        kfree(data);
 }
 
-void lkdtm_WRITE_AFTER_FREE(void)
+static void lkdtm_WRITE_AFTER_FREE(void)
 {
        int *base, *again;
        size_t len = 1024;
@@ -80,7 +87,7 @@ void lkdtm_WRITE_AFTER_FREE(void)
                pr_info("Hmm, didn't get the same memory range.\n");
 }
 
-void lkdtm_READ_AFTER_FREE(void)
+static void lkdtm_READ_AFTER_FREE(void)
 {
        int *base, *val, saw;
        size_t len = 1024;
@@ -124,7 +131,7 @@ void lkdtm_READ_AFTER_FREE(void)
        kfree(val);
 }
 
-void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
+static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
 {
        unsigned long p = __get_free_page(GFP_KERNEL);
        if (!p) {
@@ -144,7 +151,7 @@ void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
        schedule();
 }
 
-void lkdtm_READ_BUDDY_AFTER_FREE(void)
+static void lkdtm_READ_BUDDY_AFTER_FREE(void)
 {
        unsigned long p = __get_free_page(GFP_KERNEL);
        int saw, *val;
@@ -181,7 +188,7 @@ void lkdtm_READ_BUDDY_AFTER_FREE(void)
        kfree(val);
 }
 
-void lkdtm_SLAB_INIT_ON_ALLOC(void)
+static void lkdtm_SLAB_INIT_ON_ALLOC(void)
 {
        u8 *first;
        u8 *val;
@@ -213,7 +220,7 @@ void lkdtm_SLAB_INIT_ON_ALLOC(void)
        kfree(val);
 }
 
-void lkdtm_BUDDY_INIT_ON_ALLOC(void)
+static void lkdtm_BUDDY_INIT_ON_ALLOC(void)
 {
        u8 *first;
        u8 *val;
@@ -246,7 +253,7 @@ void lkdtm_BUDDY_INIT_ON_ALLOC(void)
        free_page((unsigned long)val);
 }
 
-void lkdtm_SLAB_FREE_DOUBLE(void)
+static void lkdtm_SLAB_FREE_DOUBLE(void)
 {
        int *val;
 
@@ -263,7 +270,7 @@ void lkdtm_SLAB_FREE_DOUBLE(void)
        kmem_cache_free(double_free_cache, val);
 }
 
-void lkdtm_SLAB_FREE_CROSS(void)
+static void lkdtm_SLAB_FREE_CROSS(void)
 {
        int *val;
 
@@ -279,7 +286,7 @@ void lkdtm_SLAB_FREE_CROSS(void)
        kmem_cache_free(b_cache, val);
 }
 
-void lkdtm_SLAB_FREE_PAGE(void)
+static void lkdtm_SLAB_FREE_PAGE(void)
 {
        unsigned long p = __get_free_page(GFP_KERNEL);
 
@@ -313,3 +320,22 @@ void __exit lkdtm_heap_exit(void)
        kmem_cache_destroy(a_cache);
        kmem_cache_destroy(b_cache);
 }
+
+static struct crashtype crashtypes[] = {
+       CRASHTYPE(SLAB_LINEAR_OVERFLOW),
+       CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
+       CRASHTYPE(WRITE_AFTER_FREE),
+       CRASHTYPE(READ_AFTER_FREE),
+       CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
+       CRASHTYPE(READ_BUDDY_AFTER_FREE),
+       CRASHTYPE(SLAB_INIT_ON_ALLOC),
+       CRASHTYPE(BUDDY_INIT_ON_ALLOC),
+       CRASHTYPE(SLAB_FREE_DOUBLE),
+       CRASHTYPE(SLAB_FREE_CROSS),
+       CRASHTYPE(SLAB_FREE_PAGE),
+};
+
+struct crashtype_category heap_crashtypes = {
+       .crashtypes = crashtypes,
+       .len        = ARRAY_SIZE(crashtypes),
+};
index 305fc2e..015e048 100644 (file)
@@ -9,19 +9,19 @@
 extern char *lkdtm_kernel_info;
 
 #define pr_expected_config(kconfig)                            \
-{                                                              \
+do {                                                           \
        if (IS_ENABLED(kconfig))                                \
                pr_err("Unexpected! This %s was built with " #kconfig "=y\n", \
                        lkdtm_kernel_info);                     \
        else                                                    \
                pr_warn("This is probably expected, since this %s was built *without* " #kconfig "=y\n", \
                        lkdtm_kernel_info);                     \
-}
+} while (0)
 
 #ifndef MODULE
 int lkdtm_check_bool_cmdline(const char *param);
 #define pr_expected_config_param(kconfig, param)               \
-{                                                              \
+do {                                                           \
        if (IS_ENABLED(kconfig)) {                              \
                switch (lkdtm_check_bool_cmdline(param)) {      \
                case 0:                                         \
@@ -52,119 +52,49 @@ int lkdtm_check_bool_cmdline(const char *param);
                        break;                                  \
                }                                               \
        }                                                       \
-}
+} while (0)
 #else
 #define pr_expected_config_param(kconfig, param) pr_expected_config(kconfig)
 #endif
 
-/* bugs.c */
+/* Crash types. */
+struct crashtype {
+       const char *name;
+       void (*func)(void);
+};
+
+#define CRASHTYPE(_name)                       \
+       {                                       \
+               .name = __stringify(_name),     \
+               .func = lkdtm_ ## _name,        \
+       }
+
+/* Category's collection of crashtypes. */
+struct crashtype_category {
+       struct crashtype *crashtypes;
+       size_t len;
+};
+
+/* Each category's crashtypes list. */
+extern struct crashtype_category bugs_crashtypes;
+extern struct crashtype_category heap_crashtypes;
+extern struct crashtype_category perms_crashtypes;
+extern struct crashtype_category refcount_crashtypes;
+extern struct crashtype_category usercopy_crashtypes;
+extern struct crashtype_category stackleak_crashtypes;
+extern struct crashtype_category cfi_crashtypes;
+extern struct crashtype_category fortify_crashtypes;
+extern struct crashtype_category powerpc_crashtypes;
+
+/* Each category's init/exit routines. */
 void __init lkdtm_bugs_init(int *recur_param);
-void lkdtm_PANIC(void);
-void lkdtm_BUG(void);
-void lkdtm_WARNING(void);
-void lkdtm_WARNING_MESSAGE(void);
-void lkdtm_EXCEPTION(void);
-void lkdtm_LOOP(void);
-void lkdtm_EXHAUST_STACK(void);
-void lkdtm_CORRUPT_STACK(void);
-void lkdtm_CORRUPT_STACK_STRONG(void);
-void lkdtm_REPORT_STACK(void);
-void lkdtm_REPORT_STACK_CANARY(void);
-void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
-void lkdtm_SOFTLOCKUP(void);
-void lkdtm_HARDLOCKUP(void);
-void lkdtm_SPINLOCKUP(void);
-void lkdtm_HUNG_TASK(void);
-void lkdtm_OVERFLOW_SIGNED(void);
-void lkdtm_OVERFLOW_UNSIGNED(void);
-void lkdtm_ARRAY_BOUNDS(void);
-void lkdtm_CORRUPT_LIST_ADD(void);
-void lkdtm_CORRUPT_LIST_DEL(void);
-void lkdtm_STACK_GUARD_PAGE_LEADING(void);
-void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
-void lkdtm_UNSET_SMEP(void);
-void lkdtm_DOUBLE_FAULT(void);
-void lkdtm_CORRUPT_PAC(void);
-
-/* heap.c */
 void __init lkdtm_heap_init(void);
 void __exit lkdtm_heap_exit(void);
-void lkdtm_VMALLOC_LINEAR_OVERFLOW(void);
-void lkdtm_SLAB_LINEAR_OVERFLOW(void);
-void lkdtm_WRITE_AFTER_FREE(void);
-void lkdtm_READ_AFTER_FREE(void);
-void lkdtm_WRITE_BUDDY_AFTER_FREE(void);
-void lkdtm_READ_BUDDY_AFTER_FREE(void);
-void lkdtm_SLAB_INIT_ON_ALLOC(void);
-void lkdtm_BUDDY_INIT_ON_ALLOC(void);
-void lkdtm_SLAB_FREE_DOUBLE(void);
-void lkdtm_SLAB_FREE_CROSS(void);
-void lkdtm_SLAB_FREE_PAGE(void);
-
-/* perms.c */
 void __init lkdtm_perms_init(void);
-void lkdtm_WRITE_RO(void);
-void lkdtm_WRITE_RO_AFTER_INIT(void);
-void lkdtm_WRITE_KERN(void);
-void lkdtm_WRITE_OPD(void);
-void lkdtm_EXEC_DATA(void);
-void lkdtm_EXEC_STACK(void);
-void lkdtm_EXEC_KMALLOC(void);
-void lkdtm_EXEC_VMALLOC(void);
-void lkdtm_EXEC_RODATA(void);
-void lkdtm_EXEC_USERSPACE(void);
-void lkdtm_EXEC_NULL(void);
-void lkdtm_ACCESS_USERSPACE(void);
-void lkdtm_ACCESS_NULL(void);
-
-/* refcount.c */
-void lkdtm_REFCOUNT_INC_OVERFLOW(void);
-void lkdtm_REFCOUNT_ADD_OVERFLOW(void);
-void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void);
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void);
-void lkdtm_REFCOUNT_DEC_ZERO(void);
-void lkdtm_REFCOUNT_DEC_NEGATIVE(void);
-void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void);
-void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void);
-void lkdtm_REFCOUNT_INC_ZERO(void);
-void lkdtm_REFCOUNT_ADD_ZERO(void);
-void lkdtm_REFCOUNT_INC_SATURATED(void);
-void lkdtm_REFCOUNT_DEC_SATURATED(void);
-void lkdtm_REFCOUNT_ADD_SATURATED(void);
-void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void);
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void);
-void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void);
-void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void);
-void lkdtm_REFCOUNT_TIMING(void);
-void lkdtm_ATOMIC_TIMING(void);
-
-/* rodata.c */
-void lkdtm_rodata_do_nothing(void);
-
-/* usercopy.c */
 void __init lkdtm_usercopy_init(void);
 void __exit lkdtm_usercopy_exit(void);
-void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
-void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
-void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void);
-void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void);
-void lkdtm_USERCOPY_STACK_FRAME_TO(void);
-void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
-void lkdtm_USERCOPY_STACK_BEYOND(void);
-void lkdtm_USERCOPY_KERNEL(void);
-
-/* stackleak.c */
-void lkdtm_STACKLEAK_ERASING(void);
-
-/* cfi.c */
-void lkdtm_CFI_FORWARD_PROTO(void);
 
-/* fortify.c */
-void lkdtm_FORTIFIED_OBJECT(void);
-void lkdtm_FORTIFIED_SUBOBJECT(void);
-void lkdtm_FORTIFIED_STRSCPY(void);
-
-/* powerpc.c */
-void lkdtm_PPC_SLB_MULTIHIT(void);
+/* Special declaration for function-in-rodata. */
+void lkdtm_rodata_do_nothing(void);
 
 #endif
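
One C detail from the hunk above deserves a remark: pr_expected_config() and pr_expected_config_param() were rewrapped from bare { ... } blocks into do { ... } while (0). The idiom exists so the macro behaves as a single statement after an if, where a bare block plus the caller's semicolon would orphan a following else. A minimal illustration, with invented macro names rather than kernel code:

        #define REPORT_BARE(x)  { pr_warn("saw %d\n", (x)); }
        #define REPORT_SAFE(x)  do { pr_warn("saw %d\n", (x)); } while (0)

        static void example(int failed)
        {
                if (failed)
                        REPORT_SAFE(failed);    /* one statement; "else" still binds */
                else
                        pr_info("ok\n");

                /*
                 * Using REPORT_BARE(failed); in the same spot would expand to
                 * "{ ... };" -- the extra ";" ends the if, and the "else"
                 * below it becomes a syntax error.
                 */
        }
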
index 2c6aba3..b93404d 100644 (file)
@@ -103,7 +103,7 @@ static void execute_user_location(void *dst)
        pr_err("FAIL: func returned\n");
 }
 
-void lkdtm_WRITE_RO(void)
+static void lkdtm_WRITE_RO(void)
 {
        /* Explicitly cast away "const" for the test and make volatile. */
        volatile unsigned long *ptr = (unsigned long *)&rodata;
@@ -113,7 +113,7 @@ void lkdtm_WRITE_RO(void)
        pr_err("FAIL: survived bad write\n");
 }
 
-void lkdtm_WRITE_RO_AFTER_INIT(void)
+static void lkdtm_WRITE_RO_AFTER_INIT(void)
 {
        volatile unsigned long *ptr = &ro_after_init;
 
@@ -132,7 +132,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
        pr_err("FAIL: survived bad write\n");
 }
 
-void lkdtm_WRITE_KERN(void)
+static void lkdtm_WRITE_KERN(void)
 {
        size_t size;
        volatile unsigned char *ptr;
@@ -149,7 +149,7 @@ void lkdtm_WRITE_KERN(void)
        do_overwritten();
 }
 
-void lkdtm_WRITE_OPD(void)
+static void lkdtm_WRITE_OPD(void)
 {
        size_t size = sizeof(func_desc_t);
        void (*func)(void) = do_nothing;
@@ -166,38 +166,38 @@ void lkdtm_WRITE_OPD(void)
        func();
 }
 
-void lkdtm_EXEC_DATA(void)
+static void lkdtm_EXEC_DATA(void)
 {
        execute_location(data_area, CODE_WRITE);
 }
 
-void lkdtm_EXEC_STACK(void)
+static void lkdtm_EXEC_STACK(void)
 {
        u8 stack_area[EXEC_SIZE];
        execute_location(stack_area, CODE_WRITE);
 }
 
-void lkdtm_EXEC_KMALLOC(void)
+static void lkdtm_EXEC_KMALLOC(void)
 {
        u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
        execute_location(kmalloc_area, CODE_WRITE);
        kfree(kmalloc_area);
 }
 
-void lkdtm_EXEC_VMALLOC(void)
+static void lkdtm_EXEC_VMALLOC(void)
 {
        u32 *vmalloc_area = vmalloc(EXEC_SIZE);
        execute_location(vmalloc_area, CODE_WRITE);
        vfree(vmalloc_area);
 }
 
-void lkdtm_EXEC_RODATA(void)
+static void lkdtm_EXEC_RODATA(void)
 {
        execute_location(dereference_function_descriptor(lkdtm_rodata_do_nothing),
                         CODE_AS_IS);
 }
 
-void lkdtm_EXEC_USERSPACE(void)
+static void lkdtm_EXEC_USERSPACE(void)
 {
        unsigned long user_addr;
 
@@ -212,12 +212,12 @@ void lkdtm_EXEC_USERSPACE(void)
        vm_munmap(user_addr, PAGE_SIZE);
 }
 
-void lkdtm_EXEC_NULL(void)
+static void lkdtm_EXEC_NULL(void)
 {
        execute_location(NULL, CODE_AS_IS);
 }
 
-void lkdtm_ACCESS_USERSPACE(void)
+static void lkdtm_ACCESS_USERSPACE(void)
 {
        unsigned long user_addr, tmp = 0;
        unsigned long *ptr;
@@ -250,7 +250,7 @@ void lkdtm_ACCESS_USERSPACE(void)
        vm_munmap(user_addr, PAGE_SIZE);
 }
 
-void lkdtm_ACCESS_NULL(void)
+static void lkdtm_ACCESS_NULL(void)
 {
        unsigned long tmp;
        volatile unsigned long *ptr = (unsigned long *)NULL;
@@ -270,3 +270,24 @@ void __init lkdtm_perms_init(void)
        /* Make sure we can write to __ro_after_init values during __init */
        ro_after_init |= 0xAA;
 }
+
+static struct crashtype crashtypes[] = {
+       CRASHTYPE(WRITE_RO),
+       CRASHTYPE(WRITE_RO_AFTER_INIT),
+       CRASHTYPE(WRITE_KERN),
+       CRASHTYPE(WRITE_OPD),
+       CRASHTYPE(EXEC_DATA),
+       CRASHTYPE(EXEC_STACK),
+       CRASHTYPE(EXEC_KMALLOC),
+       CRASHTYPE(EXEC_VMALLOC),
+       CRASHTYPE(EXEC_RODATA),
+       CRASHTYPE(EXEC_USERSPACE),
+       CRASHTYPE(EXEC_NULL),
+       CRASHTYPE(ACCESS_USERSPACE),
+       CRASHTYPE(ACCESS_NULL),
+};
+
+struct crashtype_category perms_crashtypes = {
+       .crashtypes = crashtypes,
+       .len        = ARRAY_SIZE(crashtypes),
+};
index 077c9f9..be38544 100644 (file)
@@ -100,7 +100,7 @@ static void insert_dup_slb_entry_0(void)
        preempt_enable();
 }
 
-void lkdtm_PPC_SLB_MULTIHIT(void)
+static void lkdtm_PPC_SLB_MULTIHIT(void)
 {
        if (!radix_enabled()) {
                pr_info("Injecting SLB multihit errors\n");
@@ -118,3 +118,12 @@ void lkdtm_PPC_SLB_MULTIHIT(void)
                pr_err("XFAIL: This test is for ppc64 and with hash mode MMU only\n");
        }
 }
+
+static struct crashtype crashtypes[] = {
+       CRASHTYPE(PPC_SLB_MULTIHIT),
+};
+
+struct crashtype_category powerpc_crashtypes = {
+       .crashtypes = crashtypes,
+       .len        = ARRAY_SIZE(crashtypes),
+};
index de7c5ab..5cd488f 100644 (file)
@@ -24,7 +24,7 @@ static void overflow_check(refcount_t *ref)
  * A refcount_inc() above the maximum value of the refcount implementation,
  * should at least saturate, and at most also WARN.
  */
-void lkdtm_REFCOUNT_INC_OVERFLOW(void)
+static void lkdtm_REFCOUNT_INC_OVERFLOW(void)
 {
        refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
 
@@ -40,7 +40,7 @@ void lkdtm_REFCOUNT_INC_OVERFLOW(void)
 }
 
 /* refcount_add() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
+static void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
 {
        refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
 
@@ -58,7 +58,7 @@ void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
 }
 
 /* refcount_inc_not_zero() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
+static void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
 {
        refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
 
@@ -70,7 +70,7 @@ void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
 }
 
 /* refcount_add_not_zero() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
+static void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
 {
        refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
 
@@ -103,7 +103,7 @@ static void check_zero(refcount_t *ref)
  * zero it should either saturate (when inc-from-zero isn't protected)
  * or stay at zero (when inc-from-zero is protected) and should WARN for both.
  */
-void lkdtm_REFCOUNT_DEC_ZERO(void)
+static void lkdtm_REFCOUNT_DEC_ZERO(void)
 {
        refcount_t zero = REFCOUNT_INIT(2);
 
@@ -142,7 +142,7 @@ static void check_negative(refcount_t *ref, int start)
 }
 
 /* A refcount_dec() going negative should saturate and may WARN. */
-void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
+static void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
 {
        refcount_t neg = REFCOUNT_INIT(0);
 
@@ -156,7 +156,7 @@ void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
  * A refcount_dec_and_test() should act like refcount_dec() above when
  * going negative.
  */
-void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
+static void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
 {
        refcount_t neg = REFCOUNT_INIT(0);
 
@@ -171,7 +171,7 @@ void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
  * A refcount_sub_and_test() should act like refcount_dec_and_test()
  * above when going negative.
  */
-void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
+static void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
 {
        refcount_t neg = REFCOUNT_INIT(3);
 
@@ -203,7 +203,7 @@ static void check_from_zero(refcount_t *ref)
 /*
  * A refcount_inc() from zero should pin to zero or saturate and may WARN.
  */
-void lkdtm_REFCOUNT_INC_ZERO(void)
+static void lkdtm_REFCOUNT_INC_ZERO(void)
 {
        refcount_t zero = REFCOUNT_INIT(0);
 
@@ -228,7 +228,7 @@ void lkdtm_REFCOUNT_INC_ZERO(void)
  * A refcount_add() should act like refcount_inc() above when starting
  * at zero.
  */
-void lkdtm_REFCOUNT_ADD_ZERO(void)
+static void lkdtm_REFCOUNT_ADD_ZERO(void)
 {
        refcount_t zero = REFCOUNT_INIT(0);
 
@@ -267,7 +267,7 @@ static void check_saturated(refcount_t *ref)
  * A refcount_inc() from a saturated value should at most warn about
  * being saturated already.
  */
-void lkdtm_REFCOUNT_INC_SATURATED(void)
+static void lkdtm_REFCOUNT_INC_SATURATED(void)
 {
        refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
 
@@ -278,7 +278,7 @@ void lkdtm_REFCOUNT_INC_SATURATED(void)
 }
 
 /* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_DEC_SATURATED(void)
+static void lkdtm_REFCOUNT_DEC_SATURATED(void)
 {
        refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
 
@@ -289,7 +289,7 @@ void lkdtm_REFCOUNT_DEC_SATURATED(void)
 }
 
 /* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_ADD_SATURATED(void)
+static void lkdtm_REFCOUNT_ADD_SATURATED(void)
 {
        refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
 
@@ -300,7 +300,7 @@ void lkdtm_REFCOUNT_ADD_SATURATED(void)
 }
 
 /* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
+static void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
 {
        refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
 
@@ -312,7 +312,7 @@ void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
 }
 
 /* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
+static void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
 {
        refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
 
@@ -324,7 +324,7 @@ void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
 }
 
 /* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
+static void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
 {
        refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
 
@@ -336,7 +336,7 @@ void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
 }
 
 /* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
+static void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
 {
        refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
 
@@ -348,7 +348,7 @@ void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
 }
 
 /* Used to time the existing atomic_t when used for reference counting */
-void lkdtm_ATOMIC_TIMING(void)
+static void lkdtm_ATOMIC_TIMING(void)
 {
        unsigned int i;
        atomic_t count = ATOMIC_INIT(1);
@@ -373,7 +373,7 @@ void lkdtm_ATOMIC_TIMING(void)
  *    cd /sys/kernel/debug/provoke-crash
  *    perf stat -B -- cat <(echo REFCOUNT_TIMING) > DIRECT
  */
-void lkdtm_REFCOUNT_TIMING(void)
+static void lkdtm_REFCOUNT_TIMING(void)
 {
        unsigned int i;
        refcount_t count = REFCOUNT_INIT(1);
@@ -390,3 +390,30 @@ void lkdtm_REFCOUNT_TIMING(void)
        else
                pr_info("refcount timing: done\n");
 }
+
+static struct crashtype crashtypes[] = {
+       CRASHTYPE(REFCOUNT_INC_OVERFLOW),
+       CRASHTYPE(REFCOUNT_ADD_OVERFLOW),
+       CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW),
+       CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW),
+       CRASHTYPE(REFCOUNT_DEC_ZERO),
+       CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
+       CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
+       CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
+       CRASHTYPE(REFCOUNT_INC_ZERO),
+       CRASHTYPE(REFCOUNT_ADD_ZERO),
+       CRASHTYPE(REFCOUNT_INC_SATURATED),
+       CRASHTYPE(REFCOUNT_DEC_SATURATED),
+       CRASHTYPE(REFCOUNT_ADD_SATURATED),
+       CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED),
+       CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED),
+       CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED),
+       CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED),
+       CRASHTYPE(ATOMIC_TIMING),
+       CRASHTYPE(REFCOUNT_TIMING),
+};
+
+struct crashtype_category refcount_crashtypes = {
+       .crashtypes = crashtypes,
+       .len        = ARRAY_SIZE(crashtypes),
+};
index 82369c6..025b133 100644 (file)
@@ -115,7 +115,7 @@ out:
        }
 }
 
-void lkdtm_STACKLEAK_ERASING(void)
+static void lkdtm_STACKLEAK_ERASING(void)
 {
        unsigned long flags;
 
@@ -124,7 +124,7 @@ void lkdtm_STACKLEAK_ERASING(void)
        local_irq_restore(flags);
 }
 #else /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
-void lkdtm_STACKLEAK_ERASING(void)
+static void lkdtm_STACKLEAK_ERASING(void)
 {
        if (IS_ENABLED(CONFIG_HAVE_ARCH_STACKLEAK)) {
                pr_err("XFAIL: stackleak is not enabled (CONFIG_GCC_PLUGIN_STACKLEAK=n)\n");
@@ -133,3 +133,12 @@ void lkdtm_STACKLEAK_ERASING(void)
        }
 }
 #endif /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
+
+static struct crashtype crashtypes[] = {
+       CRASHTYPE(STACKLEAK_ERASING),
+};
+
+struct crashtype_category stackleak_crashtypes = {
+       .crashtypes = crashtypes,
+       .len        = ARRAY_SIZE(crashtypes),
+};
index 9161ce7..6215ec9 100644 (file)
@@ -5,6 +5,7 @@
  */
 #include "lkdtm.h"
 #include <linux/slab.h>
+#include <linux/highmem.h>
 #include <linux/vmalloc.h>
 #include <linux/sched/task_stack.h>
 #include <linux/mman.h>
@@ -30,12 +31,12 @@ static const unsigned char test_text[] = "This is a test.\n";
  */
 static noinline unsigned char *trick_compiler(unsigned char *stack)
 {
-       return stack + 0;
+       return stack + unconst;
 }
 
 static noinline unsigned char *do_usercopy_stack_callee(int value)
 {
-       unsigned char buf[32];
+       unsigned char buf[128];
        int i;
 
        /* Exercise stack to avoid everything living in registers. */
@@ -43,7 +44,12 @@ static noinline unsigned char *do_usercopy_stack_callee(int value)
                buf[i] = value & 0xff;
        }
 
-       return trick_compiler(buf);
+       /*
+        * Put the target buffer in the middle of the stack allocation
+        * so that we don't step on future stack users regardless
+        * of stack growth direction.
+        */
+       return trick_compiler(&buf[(128/2)-32]);
 }
 
 static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
@@ -66,6 +72,12 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
                bad_stack -= sizeof(unsigned long);
        }
 
+#ifdef ARCH_HAS_CURRENT_STACK_POINTER
+       pr_info("stack     : %px\n", (void *)current_stack_pointer);
+#endif
+       pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
+       pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));
+
        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
@@ -119,7 +131,7 @@ free_user:
  * This checks for whole-object size validation with hardened usercopy,
  * with or without usercopy whitelisting.
  */
-static void do_usercopy_heap_size(bool to_user)
+static void do_usercopy_slab_size(bool to_user)
 {
        unsigned long user_addr;
        unsigned char *one, *two;
@@ -185,9 +197,9 @@ free_kernel:
 
 /*
  * This checks for the specific whitelist window within an object. If this
- * test passes, then do_usercopy_heap_size() tests will pass too.
+ * test passes, then do_usercopy_slab_size() tests will pass too.
  */
-static void do_usercopy_heap_whitelist(bool to_user)
+static void do_usercopy_slab_whitelist(bool to_user)
 {
        unsigned long user_alloc;
        unsigned char *buf = NULL;
@@ -261,42 +273,42 @@ free_alloc:
 }
 
 /* Callable tests. */
-void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
+static void lkdtm_USERCOPY_SLAB_SIZE_TO(void)
 {
-       do_usercopy_heap_size(true);
+       do_usercopy_slab_size(true);
 }
 
-void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
+static void lkdtm_USERCOPY_SLAB_SIZE_FROM(void)
 {
-       do_usercopy_heap_size(false);
+       do_usercopy_slab_size(false);
 }
 
-void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
+static void lkdtm_USERCOPY_SLAB_WHITELIST_TO(void)
 {
-       do_usercopy_heap_whitelist(true);
+       do_usercopy_slab_whitelist(true);
 }
 
-void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
+static void lkdtm_USERCOPY_SLAB_WHITELIST_FROM(void)
 {
-       do_usercopy_heap_whitelist(false);
+       do_usercopy_slab_whitelist(false);
 }
 
-void lkdtm_USERCOPY_STACK_FRAME_TO(void)
+static void lkdtm_USERCOPY_STACK_FRAME_TO(void)
 {
        do_usercopy_stack(true, true);
 }
 
-void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
+static void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
 {
        do_usercopy_stack(false, true);
 }
 
-void lkdtm_USERCOPY_STACK_BEYOND(void)
+static void lkdtm_USERCOPY_STACK_BEYOND(void)
 {
        do_usercopy_stack(true, false);
 }
 
-void lkdtm_USERCOPY_KERNEL(void)
+static void lkdtm_USERCOPY_KERNEL(void)
 {
        unsigned long user_addr;
 
@@ -330,6 +342,86 @@ free_user:
        vm_munmap(user_addr, PAGE_SIZE);
 }
 
+/*
+ * This expects "kaddr" to point to a PAGE_SIZE allocation, which means
+ * a more complete test that would include copy_from_user() would risk
+ * memory corruption. Just test copy_to_user() here, as that exercises
+ * almost exactly the same code paths.
+ */
+static void do_usercopy_page_span(const char *name, void *kaddr)
+{
+       unsigned long uaddr;
+
+       uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
+                       MAP_ANONYMOUS | MAP_PRIVATE, 0);
+       if (uaddr >= TASK_SIZE) {
+               pr_warn("Failed to allocate user memory\n");
+               return;
+       }
+
+       /* Initialize contents. */
+       memset(kaddr, 0xAA, PAGE_SIZE);
+
+       /* Bump the kaddr forward to detect a page-spanning overflow. */
+       kaddr += PAGE_SIZE / 2;
+
+       pr_info("attempting good copy_to_user() from kernel %s: %px\n",
+               name, kaddr);
+       if (copy_to_user((void __user *)uaddr, kaddr,
+                        unconst + (PAGE_SIZE / 2))) {
+               pr_err("copy_to_user() failed unexpectedly?!\n");
+               goto free_user;
+       }
+
+       pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
+               name, kaddr);
+       if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
+               pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
+               goto free_user;
+       }
+
+       pr_err("FAIL: bad copy_to_user() not detected!\n");
+       pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
+
+free_user:
+       vm_munmap(uaddr, PAGE_SIZE);
+}
+
+static void lkdtm_USERCOPY_VMALLOC(void)
+{
+       void *addr;
+
+       addr = vmalloc(PAGE_SIZE);
+       if (!addr) {
+               pr_err("vmalloc() failed!?\n");
+               return;
+       }
+       do_usercopy_page_span("vmalloc", addr);
+       vfree(addr);
+}
+
+static void lkdtm_USERCOPY_FOLIO(void)
+{
+       struct folio *folio;
+       void *addr;
+
+       /*
+        * FIXME: Folio checking currently misses 0-order allocations, so
+        * allocate and bump forward to the last page.
+        */
+       folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
+       if (!folio) {
+               pr_err("folio_alloc() failed!?\n");
+               return;
+       }
+       addr = folio_address(folio);
+       if (addr)
+               do_usercopy_page_span("folio", addr + PAGE_SIZE);
+       else
+               pr_err("folio_address() failed?!\n");
+       folio_put(folio);
+}
+
 void __init lkdtm_usercopy_init(void)
 {
        /* Prepare cache that lacks SLAB_USERCOPY flag. */
@@ -345,3 +437,21 @@ void __exit lkdtm_usercopy_exit(void)
 {
        kmem_cache_destroy(whitelist_cache);
 }
+
+static struct crashtype crashtypes[] = {
+       CRASHTYPE(USERCOPY_SLAB_SIZE_TO),
+       CRASHTYPE(USERCOPY_SLAB_SIZE_FROM),
+       CRASHTYPE(USERCOPY_SLAB_WHITELIST_TO),
+       CRASHTYPE(USERCOPY_SLAB_WHITELIST_FROM),
+       CRASHTYPE(USERCOPY_STACK_FRAME_TO),
+       CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
+       CRASHTYPE(USERCOPY_STACK_BEYOND),
+       CRASHTYPE(USERCOPY_VMALLOC),
+       CRASHTYPE(USERCOPY_FOLIO),
+       CRASHTYPE(USERCOPY_KERNEL),
+};
+
+struct crashtype_category usercopy_crashtypes = {
+       .crashtypes = crashtypes,
+       .len        = ARRAY_SIZE(crashtypes),
+};
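
The two crashtypes[] tables added above (one in the stackleak test file, one in the usercopy test file) follow the same table-driven registration shape: each file lists its test entry points in a static array and exposes a crashtype_category recording that array and its length via ARRAY_SIZE(). A minimal userspace sketch of that shape; the struct layouts and the CRASHTYPE() macro below are hypothetical stand-ins for the real definitions in lkdtm.h, not copies of them.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical stand-ins for lkdtm's struct crashtype / crashtype_category. */
struct crashtype {
        const char *name;
        void (*func)(void);
};

struct crashtype_category {
        const struct crashtype *crashtypes;
        size_t len;
};

/* One test entry point, referenced by the table below. */
static void example_test(void)
{
        puts("example test ran");
}

/* Table-entry macro mirroring the CRASHTYPE(name) pattern. */
#define CRASHTYPE(n) { .name = #n, .func = n }

static const struct crashtype crashtypes[] = {
        CRASHTYPE(example_test),
};

static const struct crashtype_category example_crashtypes = {
        .crashtypes = crashtypes,
        .len        = ARRAY_SIZE(crashtypes),
};

int main(void)
{
        /* A core driver would walk each registered category like this. */
        for (size_t i = 0; i < example_crashtypes.len; i++)
                example_crashtypes.crashtypes[i].func();
        return 0;
}

The point of the pattern is that the core only ever walks (crashtypes, len) pairs and never needs to know individual test names.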
index ec2a4fc..e889a8b 100644 (file)
@@ -784,7 +784,7 @@ static int mei_hdcp_component_match(struct device *dev, int subcomponent,
 {
        struct device *base = data;
 
-       if (strcmp(dev->driver->name, "i915") ||
+       if (!dev->driver || strcmp(dev->driver->name, "i915") ||
            subcomponent != I915_COMPONENT_HDCP)
                return 0;
 
index f7380d3..5c39457 100644 (file)
@@ -131,7 +131,7 @@ static int mei_pxp_component_match(struct device *dev, int subcomponent,
 {
        struct device *base = data;
 
-       if (strcmp(dev->driver->name, "i915") ||
+       if (!dev->driver || strcmp(dev->driver->name, "i915") ||
            subcomponent != I915_COMPONENT_PXP)
                return 0;
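
Both component-match fixes above add the same guard: dev->driver is NULL while a device is not bound to a driver, so it must be checked before strcmp() dereferences it. A tiny userspace sketch of the pattern; the struct definitions and names below are illustrative stand-ins, not the driver-core types.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for struct device / struct device_driver. */
struct device_driver { const char *name; };
struct device { struct device_driver *driver; };

/* Return 1 only when the device is bound to the driver we expect. */
static int component_match(const struct device *dev, const char *want)
{
        /* Guard first: an unbound device has dev->driver == NULL. */
        if (!dev->driver || strcmp(dev->driver->name, want))
                return 0;
        return 1;
}

int main(void)
{
        struct device_driver i915 = { .name = "i915" };
        struct device bound = { .driver = &i915 };
        struct device unbound = { .driver = NULL };

        printf("bound:   %d\n", component_match(&bound, "i915"));   /* 1 */
        printf("unbound: %d\n", component_match(&unbound, "i915")); /* 0, no crash */
        return 0;
}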
 
index 4b8f1c7..049a120 100644 (file)
@@ -34,7 +34,9 @@ pvpanic_send_event(unsigned int event)
 {
        struct pvpanic_instance *pi_cur;
 
-       spin_lock(&pvpanic_lock);
+       if (!spin_trylock(&pvpanic_lock))
+               return;
+
        list_for_each_entry(pi_cur, &pvpanic_list, list) {
                if (event & pi_cur->capability & pi_cur->events)
                        iowrite8(event, pi_cur->base);
@@ -55,9 +57,13 @@ pvpanic_panic_notify(struct notifier_block *nb, unsigned long code, void *unused
        return NOTIFY_DONE;
 }
 
+/*
+ * Call our notifier very early on panic, deferring the
+ * action taken to the hypervisor.
+ */
 static struct notifier_block pvpanic_panic_nb = {
        .notifier_call = pvpanic_panic_notify,
-       .priority = 1, /* let this called before broken drm_fb_helper() */
+       .priority = INT_MAX,
 };
 
 static void pvpanic_remove(void *param)
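
The pvpanic change above replaces spin_lock() with spin_trylock() in the event path: if pvpanic_lock is already held when a panic fires, blocking on it could wedge a path that must make progress, so the event is dropped instead. A userspace analogue of the same best-effort idea using pthread_mutex_trylock(); the function and variable names are illustrative only.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Best-effort event delivery: never block.  If the lock is contended,
 * give up rather than risk hanging a path that must make progress
 * (the kernel's panic notifier is the analogue of such a path).
 */
static void send_event_besteffort(unsigned int event)
{
        if (pthread_mutex_trylock(&event_lock) != 0) {
                fprintf(stderr, "lock busy, dropping event %u\n", event);
                return;
        }
        printf("delivered event %u\n", event);
        pthread_mutex_unlock(&event_lock);
}

int main(void)
{
        send_event_besteffort(1);           /* lock free: delivered */

        pthread_mutex_lock(&event_lock);    /* simulate a holder elsewhere */
        send_event_besteffort(2);           /* lock busy: dropped, no deadlock */
        pthread_mutex_unlock(&event_lock);
        return 0;
}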
index f1d8ba6..086ce77 100644 (file)
@@ -1452,10 +1452,10 @@ static void vmballoon_reset(struct vmballoon *b)
 
        error = vmballoon_vmci_init(b);
        if (error)
-               pr_err("failed to initialize vmci doorbell\n");
+               pr_err_once("failed to initialize vmci doorbell\n");
 
        if (vmballoon_send_guest_id(b))
-               pr_err("failed to send guest ID to the host\n");
+               pr_err_once("failed to send guest ID to the host\n");
 
 unlock:
        up_write(&b->conf_sem);
index 605794a..b6d4d7f 100644 (file)
@@ -5,7 +5,7 @@
 
 config VMWARE_VMCI
        tristate "VMware VMCI Driver"
-       depends on X86 && PCI
+       depends on (X86 || ARM64) && !CPU_BIG_ENDIAN && PCI
        help
          This is VMware's Virtual Machine Communication Interface.  It enables
          high-speed communication between host and guest in a virtual
index 6cf3e21..172696a 100644 (file)
@@ -665,9 +665,8 @@ int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
 int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
 {
        struct vmci_ctx *context;
-       struct vmci_handle_list *notifier, *tmp;
+       struct vmci_handle_list *notifier = NULL, *iter, *tmp;
        struct vmci_handle handle;
-       bool found = false;
 
        context = vmci_ctx_get(context_id);
        if (!context)
@@ -676,23 +675,23 @@ int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
        handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
 
        spin_lock(&context->lock);
-       list_for_each_entry_safe(notifier, tmp,
+       list_for_each_entry_safe(iter, tmp,
                                 &context->notifier_list, node) {
-               if (vmci_handle_is_equal(notifier->handle, handle)) {
-                       list_del_rcu(&notifier->node);
+               if (vmci_handle_is_equal(iter->handle, handle)) {
+                       list_del_rcu(&iter->node);
                        context->n_notifiers--;
-                       found = true;
+                       notifier = iter;
                        break;
                }
        }
        spin_unlock(&context->lock);
 
-       if (found)
+       if (notifier)
                kvfree_rcu(notifier);
 
        vmci_ctx_put(context);
 
-       return found ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
+       return notifier ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
 }
 
 static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context,
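
The vmci_ctx_remove_notification() rework above is an instance of a wider cleanup pattern: after a list_for_each_entry_safe() loop that terminates without hitting the break, the cursor no longer points at a real entry, so the element to act on is captured in a dedicated pointer ('notifier') while 'iter' stays a pure loop variable. A small userspace sketch of the same discipline over a plain singly linked list, with hypothetical names.

#include <stdio.h>

struct node {
        int handle;
        struct node *next;
};

/* Search the list; hand back the match via a dedicated result pointer. */
static struct node *find_and_unlink(struct node **head, int handle)
{
        struct node *found = NULL;   /* the pointer used after the loop */
        struct node **pp = head;

        for (struct node *iter = *head; iter; iter = iter->next) {
                if (iter->handle == handle) {
                        *pp = iter->next;  /* unlink the entry */
                        found = iter;      /* remember it explicitly */
                        break;
                }
                pp = &iter->next;
        }
        /* 'iter' is out of scope here; only 'found' is used afterwards. */
        return found;
}

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct node *head = &a;

        printf("%s\n", find_and_unlink(&head, 2) ? "found and unlinked" : "not found");
        printf("%s\n", find_and_unlink(&head, 9) ? "found" : "not found");
        return 0;
}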
index 57a6157..aa7b05d 100644 (file)
@@ -614,6 +614,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
        }
 
        if (!mmio_base) {
+               if (IS_ENABLED(CONFIG_ARM64)) {
+                       dev_err(&pdev->dev, "MMIO base is invalid\n");
+                       return -ENXIO;
+               }
                error = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
                if (error) {
                        dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
index 94ebf7f..8f2de18 100644 (file)
@@ -2577,6 +2577,12 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
        if (result < VMCI_SUCCESS)
                return result;
 
+       /*
+        * This virt_wmb() ensures that data written to the queue
+        * is observable before the new producer_tail is.
+        */
+       virt_wmb();
+
        vmci_q_header_add_producer_tail(produce_q->q_header, written,
                                        produce_q_size);
        return written;
@@ -2620,6 +2626,12 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
        if (buf_ready < VMCI_SUCCESS)
                return (ssize_t) buf_ready;
 
+       /*
+        * This virt_rmb() ensures that data from the queue will be read
+        * after we have determined how much is ready to be consumed.
+        */
+       virt_rmb();
+
        read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
        head = vmci_q_header_consumer_head(produce_q->q_header);
        if (likely(head + read < consume_q_size)) {
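
The two barriers added above enforce the usual producer/consumer ordering: the producer must make the queue data visible before publishing the new producer tail, and the consumer must read the tail (learning how much is ready) before touching the data. A userspace sketch of that pairing with C11 fences standing in for virt_wmb()/virt_rmb(); in real use produce() and consume() would run on different threads or CPUs, which the single-threaded demo below omits for brevity.

#include <stdatomic.h>
#include <stdio.h>

#define QSIZE 16

static unsigned char data[QSIZE];
static atomic_uint tail;   /* count of items produced, published by the producer */

static void produce(unsigned char byte)
{
        unsigned int t = atomic_load_explicit(&tail, memory_order_relaxed);

        data[t % QSIZE] = byte;
        /* Like virt_wmb(): data must be visible before the new tail is. */
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&tail, t + 1, memory_order_relaxed);
}

static int consume(unsigned int head, unsigned char *out)
{
        unsigned int t = atomic_load_explicit(&tail, memory_order_relaxed);

        if (head == t)
                return 0;               /* nothing ready */
        /* Like virt_rmb(): learn how much is ready before reading data. */
        atomic_thread_fence(memory_order_acquire);
        *out = data[head % QSIZE];
        return 1;
}

int main(void)
{
        unsigned char byte;

        produce(0x42);
        if (consume(0, &byte))
                printf("consumed 0x%02x\n", byte);
        return 0;
}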
index af6c3c3..d614497 100644 (file)
@@ -508,7 +508,7 @@ config MMC_OMAP_HS
 
 config MMC_WBSD
        tristate "Winbond W83L51xD SD/MMC Card Interface support"
-       depends on ISA_DMA_API && !M68K
+       depends on ISA_DMA_API
        help
          This selects the Winbond(R) W83L51xD Secure digital and
          Multimedia card Interface.
index 316393c..0db9490 100644 (file)
 #include <linux/gfp.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/soc/pxa/cpu.h>
 
 #include <linux/sizes.h>
 
-#include <mach/hardware.h>
 #include <linux/platform_data/mmc-pxamci.h>
 
 #include "pxamci.h"
index 7d96758..1749dbb 100644 (file)
@@ -16,8 +16,6 @@
 #include <linux/mtd/partitions.h>
 
 #include <asm/io.h>
-#include <mach/hardware.h>
-
 #include <asm/mach/flash.h>
 
 #define CACHELINESIZE  32
index 28f55f9..0ee4522 100644 (file)
@@ -97,6 +97,33 @@ out:
        return e;
 }
 
+/*
+ * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
+ * @ubi: UBI device description object
+ * @is_wl_pool: whether UBI is filling wear leveling pool
+ *
+ * This helper function checks whether there are enough free PEBs (after
+ * deducting the PEBs used by fastmap) to fill fm_pool and fm_wl_pool; the
+ * rule applies once at least one free PEB has been filled into fm_wl_pool.
+ * For the wear-leveling pool, UBI must also reserve free PEBs for bad-PEB
+ * handling, because there may not be enough free PEBs left for user volumes
+ * after new bad PEBs are produced.
+ */
+static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
+{
+       int fm_used = 0;        // fastmap non anchor pebs.
+       int beb_rsvd_pebs;
+
+       if (!ubi->free.rb_node)
+               return false;
+
+       beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
+       if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
+               fm_used = ubi->fm_size / ubi->leb_size - 1;
+
+       return ubi->free_count - beb_rsvd_pebs > fm_used;
+}
+
 /**
  * ubi_refill_pools - refills all fastmap PEB pools.
  * @ubi: UBI device description object
@@ -120,21 +147,17 @@ void ubi_refill_pools(struct ubi_device *ubi)
                wl_tree_add(ubi->fm_anchor, &ubi->free);
                ubi->free_count++;
        }
-       if (ubi->fm_next_anchor) {
-               wl_tree_add(ubi->fm_next_anchor, &ubi->free);
-               ubi->free_count++;
-       }
 
-       /* All available PEBs are in ubi->free, now is the time to get
+       /*
+        * All available PEBs are in ubi->free, now is the time to get
         * the best anchor PEBs.
         */
        ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
-       ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
 
        for (;;) {
                enough = 0;
                if (pool->size < pool->max_size) {
-                       if (!ubi->free.rb_node)
+                       if (!has_enough_free_count(ubi, false))
                                break;
 
                        e = wl_get_wle(ubi);
@@ -147,8 +170,7 @@ void ubi_refill_pools(struct ubi_device *ubi)
                        enough++;
 
                if (wl_pool->size < wl_pool->max_size) {
-                       if (!ubi->free.rb_node ||
-                          (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+                       if (!has_enough_free_count(ubi, true))
                                break;
 
                        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
@@ -253,6 +275,58 @@ out:
        return ret;
 }
 
+/**
+ * next_peb_for_wl - returns the next PEB to be used internally by the
+ * WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
+{
+       struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+       int pnum;
+
+       if (pool->used == pool->size)
+               return NULL;
+
+       pnum = pool->pebs[pool->used];
+       return ubi->lookuptbl[pnum];
+}
+
+/**
+ * need_wear_leveling - checks whether to trigger wear-leveling work.
+ * UBI fetches free PEBs from wl_pool, so check free PEBs in both 'wl_pool'
+ * and 'ubi->free', because a free PEB in the 'ubi->free' tree may be moved
+ * into 'wl_pool' by ubi_refill_pools().
+ *
+ * @ubi: UBI device description object
+ */
+static bool need_wear_leveling(struct ubi_device *ubi)
+{
+       int ec;
+       struct ubi_wl_entry *e;
+
+       if (!ubi->used.rb_node)
+               return false;
+
+       e = next_peb_for_wl(ubi);
+       if (!e) {
+               if (!ubi->free.rb_node)
+                       return false;
+               e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+               ec = e->ec;
+       } else {
+               ec = e->ec;
+               if (ubi->free.rb_node) {
+                       e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+                       ec = max(ec, e->ec);
+               }
+       }
+       e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+
+       return ec - e->ec >= UBI_WL_THRESHOLD;
+}
+
 /* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
  *
  * @ubi: UBI device description object
@@ -286,20 +360,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
 {
        struct ubi_work *wrk;
+       struct ubi_wl_entry *anchor;
 
        spin_lock(&ubi->wl_lock);
 
-       /* Do we have a next anchor? */
-       if (!ubi->fm_next_anchor) {
-               ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
-               if (!ubi->fm_next_anchor)
-                       /* Tell wear leveling to produce a new anchor PEB */
-                       ubi->fm_do_produce_anchor = 1;
+       /* Do we already have an anchor? */
+       if (ubi->fm_anchor) {
+               spin_unlock(&ubi->wl_lock);
+               return 0;
        }
 
-       /* Do wear leveling to get a new anchor PEB or check the
-        * existing next anchor candidate.
-        */
+       /* See if we can find an anchor PEB on the list of free PEBs */
+       anchor = ubi_wl_get_fm_peb(ubi, 1);
+       if (anchor) {
+               ubi->fm_anchor = anchor;
+               spin_unlock(&ubi->wl_lock);
+               return 0;
+       }
+
+       ubi->fm_do_produce_anchor = 1;
+       /* No luck, trigger wear leveling to produce a new anchor PEB. */
        if (ubi->wl_scheduled) {
                spin_unlock(&ubi->wl_lock);
                return 0;
@@ -381,11 +461,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
                ubi->fm_anchor = NULL;
        }
 
-       if (ubi->fm_next_anchor) {
-               return_unused_peb(ubi, ubi->fm_next_anchor);
-               ubi->fm_next_anchor = NULL;
-       }
-
        if (ubi->fm) {
                for (i = 0; i < ubi->fm->used_blocks; i++)
                        kfree(ubi->fm->e[i]);
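
The new has_enough_free_count() and need_wear_leveling() helpers above both boil down to small arithmetic checks: keep refilling a pool only while the free PEBs left after reservations exceed what fastmap itself consumes, and schedule wear leveling only once the erase-count spread reaches UBI_WL_THRESHOLD. A standalone sketch that makes the arithmetic concrete with made-up numbers; the constant and field names below are illustrative, not UBI's.

#include <stdbool.h>
#include <stdio.h>

#define WL_THRESHOLD 4096   /* illustrative stand-in for UBI_WL_THRESHOLD */

/* Enough free PEBs left for the pools after reservations? */
static bool has_enough_free(int free_count, int beb_reserved, int fm_used)
{
        return free_count - beb_reserved > fm_used;
}

/*
 * Trigger wear leveling when a candidate free PEB's erase count is far
 * above the lowest erase count found in the used tree.
 */
static bool need_wear_leveling(int candidate_ec, int lowest_used_ec)
{
        return candidate_ec - lowest_used_ec >= WL_THRESHOLD;
}

int main(void)
{
        /* 40 free PEBs, 20 reserved for bad-block handling, fastmap uses 15. */
        printf("refill wl_pool: %d\n", has_enough_free(40, 20, 15)); /* 1 */
        printf("refill wl_pool: %d\n", has_enough_free(30, 20, 15)); /* 0 */

        printf("wear level:     %d\n", need_wear_leveling(10000, 5000)); /* 1 */
        printf("wear level:     %d\n", need_wear_leveling(5100, 5000));  /* 0 */
        return 0;
}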
index 6b5f1ff..6e95c4b 100644 (file)
@@ -1230,17 +1230,6 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
                fm_pos += sizeof(*fec);
                ubi_assert(fm_pos <= ubi->fm_size);
        }
-       if (ubi->fm_next_anchor) {
-               fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
-
-               fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
-               set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
-               fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
-
-               free_peb_count++;
-               fm_pos += sizeof(*fec);
-               ubi_assert(fm_pos <= ubi->fm_size);
-       }
        fmh->free_peb_count = cpu_to_be32(free_peb_count);
 
        ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
index 7c083ad..078112e 100644 (file)
@@ -489,8 +489,7 @@ struct ubi_debug_info {
  * @fm_work: fastmap work queue
  * @fm_work_scheduled: non-zero if fastmap work was scheduled
  * @fast_attach: non-zero if UBI was attached by fastmap
- * @fm_anchor: The new anchor PEB used during fastmap update
- * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
+ * @fm_anchor: The next anchor PEB to use for fastmap
  * @fm_do_produce_anchor: If true produce an anchor PEB in wl
  *
  * @used: RB-tree of used physical eraseblocks
@@ -601,7 +600,6 @@ struct ubi_device {
        int fm_work_scheduled;
        int fast_attach;
        struct ubi_wl_entry *fm_anchor;
-       struct ubi_wl_entry *fm_next_anchor;
        int fm_do_produce_anchor;
 
        /* Wear-leveling sub-system's stuff */
index 1bc7b3a..6ea95ad 100644 (file)
@@ -309,7 +309,6 @@ out_mapping:
        ubi->volumes[vol_id] = NULL;
        ubi->vol_count -= 1;
        spin_unlock(&ubi->volumes_lock);
-       ubi_eba_destroy_table(eba_tbl);
 out_acc:
        spin_lock(&ubi->volumes_lock);
        ubi->rsvd_pebs -= vol->reserved_pebs;
index 8455f1d..55bae06 100644 (file)
@@ -670,7 +670,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
        ubi_assert(!ubi->move_from && !ubi->move_to);
        ubi_assert(!ubi->move_to_put);
 
+#ifdef CONFIG_MTD_UBI_FASTMAP
+       if (!next_peb_for_wl(ubi) ||
+#else
        if (!ubi->free.rb_node ||
+#endif
            (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
                /*
                 * No free physical eraseblocks? Well, they must be waiting in
@@ -689,16 +693,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
        e1 = find_anchor_wl_entry(&ubi->used);
-       if (e1 && ubi->fm_next_anchor &&
-           (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+       if (e1 && ubi->fm_anchor &&
+           (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
                ubi->fm_do_produce_anchor = 1;
-               /* fm_next_anchor is no longer considered a good anchor
-                * candidate.
+               /*
+                * fm_anchor is no longer considered a good anchor.
                 * NULL assignment also prevents multiple wear level checks
                 * of this PEB.
                 */
-               wl_tree_add(ubi->fm_next_anchor, &ubi->free);
-               ubi->fm_next_anchor = NULL;
+               wl_tree_add(ubi->fm_anchor, &ubi->free);
+               ubi->fm_anchor = NULL;
                ubi->free_count++;
        }
 
@@ -1003,8 +1007,6 @@ out_cancel:
 static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
 {
        int err = 0;
-       struct ubi_wl_entry *e1;
-       struct ubi_wl_entry *e2;
        struct ubi_work *wrk;
 
        spin_lock(&ubi->wl_lock);
@@ -1017,6 +1019,13 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
         * the WL worker has to be scheduled anyway.
         */
        if (!ubi->scrub.rb_node) {
+#ifdef CONFIG_MTD_UBI_FASTMAP
+               if (!need_wear_leveling(ubi))
+                       goto out_unlock;
+#else
+               struct ubi_wl_entry *e1;
+               struct ubi_wl_entry *e2;
+
                if (!ubi->used.rb_node || !ubi->free.rb_node)
                        /* No physical eraseblocks - no deal */
                        goto out_unlock;
@@ -1032,6 +1041,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
 
                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
                        goto out_unlock;
+#endif
                dbg_wl("schedule wear-leveling");
        } else
                dbg_wl("schedule scrubbing");
@@ -1085,12 +1095,13 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
        if (!err) {
                spin_lock(&ubi->wl_lock);
 
-               if (!ubi->fm_disabled && !ubi->fm_next_anchor &&
+               if (!ubi->fm_disabled && !ubi->fm_anchor &&
                    e->pnum < UBI_FM_MAX_START) {
-                       /* Abort anchor production, if needed it will be
+                       /*
+                        * Abort anchor production; if needed, it will be
+                        * re-enabled by the wear leveling started below.
                         */
-                       ubi->fm_next_anchor = e;
+                       ubi->fm_anchor = e;
                        ubi->fm_do_produce_anchor = 0;
                } else {
                        wl_tree_add(e, &ubi->free);
index c93a532..5ebe374 100644 (file)
@@ -5,6 +5,8 @@
 static void update_fastmap_work_fn(struct work_struct *wrk);
 static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi);
+static bool need_wear_leveling(struct ubi_device *ubi);
 static void ubi_fastmap_close(struct ubi_device *ubi);
 static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
 {
index de4ea51..ebee5f0 100644 (file)
@@ -57,7 +57,7 @@ static char *type_str[] = {
        "AMT_MSG_MEMBERSHIP_QUERY",
        "AMT_MSG_MEMBERSHIP_UPDATE",
        "AMT_MSG_MULTICAST_DATA",
-       "AMT_MSG_TEARDOWM",
+       "AMT_MSG_TEARDOWN",
 };
 
 static char *action_str[] = {
@@ -2423,7 +2423,7 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
                }
        }
 
-       return false;
+       return true;
 
 report:
        iph = ip_hdr(skb);
@@ -2679,7 +2679,7 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
        amt = rcu_dereference_sk_user_data(sk);
        if (!amt) {
                err = true;
-               goto out;
+               goto drop;
        }
 
        skb->dev = amt->dev;
index 3b7baae..f85372a 100644 (file)
@@ -6159,7 +6159,9 @@ static int bond_check_params(struct bond_params *params)
                strscpy_pad(params->primary, primary, sizeof(params->primary));
 
        memcpy(params->arp_targets, arp_target, sizeof(arp_target));
+#if IS_ENABLED(CONFIG_IPV6)
        memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS);
+#endif
 
        return 0;
 }
index f427fa1..6f404f9 100644 (file)
@@ -290,11 +290,6 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
 
                        addr6 = nla_get_in6_addr(attr);
 
-                       if (ipv6_addr_type(&addr6) & IPV6_ADDR_LINKLOCAL) {
-                               NL_SET_ERR_MSG(extack, "Invalid IPv6 addr6");
-                               return -EINVAL;
-                       }
-
                        bond_opt_initextra(&newval, &addr6, sizeof(addr6));
                        err = __bond_opt_set(bond, BOND_OPT_NS_TARGETS,
                                             &newval);
index 64f7db2..1f8323a 100644 (file)
@@ -34,10 +34,8 @@ static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
 static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
 static int bond_option_arp_ip_targets_set(struct bonding *bond,
                                          const struct bond_opt_value *newval);
-#if IS_ENABLED(CONFIG_IPV6)
 static int bond_option_ns_ip6_targets_set(struct bonding *bond,
                                          const struct bond_opt_value *newval);
-#endif
 static int bond_option_arp_validate_set(struct bonding *bond,
                                        const struct bond_opt_value *newval);
 static int bond_option_arp_all_targets_set(struct bonding *bond,
@@ -299,7 +297,6 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
                .flags = BOND_OPTFLAG_RAWVAL,
                .set = bond_option_arp_ip_targets_set
        },
-#if IS_ENABLED(CONFIG_IPV6)
        [BOND_OPT_NS_TARGETS] = {
                .id = BOND_OPT_NS_TARGETS,
                .name = "ns_ip6_target",
@@ -307,7 +304,6 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
                .flags = BOND_OPTFLAG_RAWVAL,
                .set = bond_option_ns_ip6_targets_set
        },
-#endif
        [BOND_OPT_DOWNDELAY] = {
                .id = BOND_OPT_DOWNDELAY,
                .name = "downdelay",
@@ -1254,6 +1250,12 @@ static int bond_option_ns_ip6_targets_set(struct bonding *bond,
 
        return 0;
 }
+#else
+static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+                                         const struct bond_opt_value *newval)
+{
+       return -EPERM;
+}
 #endif
 
 static int bond_option_arp_validate_set(struct bonding *bond,
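
The bonding change above drops the #if IS_ENABLED(CONFIG_IPV6) guards around the option table and instead provides a stub setter when IPv6 is compiled out, so the table entry itself stays unconditional and only the implementation varies. A compact userspace sketch of that pattern with a hypothetical feature switch.

#include <stdio.h>

/* Flip this to 1 to simulate building with the feature enabled. */
#define CONFIG_FEATURE_X 0

struct option_value { int v; };

#if CONFIG_FEATURE_X
static int option_x_set(struct option_value *val)
{
        printf("feature X set to %d\n", val->v);
        return 0;
}
#else
/* Stub keeps the option table unconditional; callers just get an error. */
static int option_x_set(struct option_value *val)
{
        (void)val;
        return -1;   /* the real patch returns -EPERM here */
}
#endif

/* The table no longer needs its own #ifdef around this entry. */
struct option {
        const char *name;
        int (*set)(struct option_value *val);
};

static const struct option options[] = {
        { "option_x", option_x_set },
};

int main(void)
{
        struct option_value val = { 42 };

        printf("%s -> %d\n", options[0].name, options[0].set(&val));
        return 0;
}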
index cfe37be..43be458 100644 (file)
@@ -129,6 +129,21 @@ static void bond_info_show_master(struct seq_file *seq)
                        printed = 1;
                }
                seq_printf(seq, "\n");
+
+#if IS_ENABLED(CONFIG_IPV6)
+               printed = 0;
+               seq_printf(seq, "NS IPv6 target/s (xx::xx form):");
+
+               for (i = 0; (i < BOND_MAX_NS_TARGETS); i++) {
+                       if (ipv6_addr_any(&bond->params.ns_targets[i]))
+                               break;
+                       if (printed)
+                               seq_printf(seq, ",");
+                       seq_printf(seq, " %pI6c", &bond->params.ns_targets[i]);
+                       printed = 1;
+               }
+               seq_printf(seq, "\n");
+#endif
        }
 
        if (BOND_MODE(bond) == BOND_MODE_8023AD) {
index fbb32aa..48cf344 100644 (file)
@@ -1603,12 +1603,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
                return 0;
        }
 
-       if (bitmap_weight(free_bins, dev->num_arl_bins) == 0)
-               return -ENOSPC;
-
        *idx = find_first_bit(free_bins, dev->num_arl_bins);
-
-       return -ENOENT;
+       return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
 }
 
 static int b53_arl_op(struct b53_device *dev, int op, int port,
index 5d2c57a..0b49d24 100644 (file)
@@ -3960,6 +3960,7 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
         */
        child = of_get_child_by_name(np, "mdio");
        err = mv88e6xxx_mdio_register(chip, child, false);
+       of_node_put(child);
        if (err)
                return err;
 
index 3272aca..47fc8e6 100644 (file)
@@ -2180,13 +2180,9 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
        if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
                return -EOPNOTSUPP;
 
-       /* All filters are already in use, we cannot match more rules */
-       if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
-           RXCHK_BRCM_TAG_MAX)
-               return -ENOSPC;
-
        index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
        if (index >= RXCHK_BRCM_TAG_MAX)
+               /* All filters are already in use, we cannot match more rules */
                return -ENOSPC;
 
        /* Location is the classification ID, and index is the position
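
Both bitmap fixes above (b53 and bcm_sysport) lean on the same contract: the find_first_bit()/find_first_zero_bit() family returns the bitmap size when nothing matches, so one call can both pick a slot and detect the "full" case, which makes the earlier bitmap_weight() pre-check redundant. A userspace sketch of that contract over a single word; the helper below is a simplified stand-in for the kernel API, not its implementation.

#include <stdio.h>

/* Simplified stand-in for find_first_zero_bit() over a single word. */
static unsigned int find_first_zero(unsigned long word, unsigned int nbits)
{
        for (unsigned int i = 0; i < nbits; i++)
                if (!(word & (1UL << i)))
                        return i;
        return nbits;   /* "not found" is signalled as >= nbits */
}

int main(void)
{
        unsigned long filters = 0x7;   /* bits 0..2 already in use */
        unsigned int nbits = 4;
        unsigned int idx = find_first_zero(filters, nbits);

        if (idx >= nbits)
                puts("all filters in use (-ENOSPC)");
        else
                printf("free slot at index %u\n", idx);   /* prints 3 */

        filters = 0xf;                 /* now completely full */
        idx = find_first_zero(filters, nbits);
        printf("%s\n", idx >= nbits ? "full" : "free slot found");
        return 0;
}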
index c78883c..4563457 100644 (file)
@@ -1,32 +1,7 @@
-/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
  * Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *     * Neither the name of Freescale Semiconductor nor the
- *      names of its contributors may be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index daf894a..35b8cea 100644 (file)
@@ -1,31 +1,6 @@
-/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *     * Neither the name of Freescale Semiconductor nor the
- *      names of its contributors may be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
  */
 
 #ifndef __DPAA_H
index ee62d25..4fee74c 100644 (file)
@@ -1,32 +1,6 @@
-/* Copyright 2008-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *     * Neither the name of Freescale Semiconductor nor the
- *      names of its contributors may be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
  */
 
 #include <linux/init.h>
index 409c1dc..889f89d 100644 (file)
@@ -1,32 +1,6 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *     * Neither the name of Freescale Semiconductor nor the
- *      names of its contributors may be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
  */
 
 #undef TRACE_SYSTEM
index 5750f9a..73f0788 100644 (file)
@@ -1,32 +1,6 @@
-/* Copyright 2008-2016 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *     * Neither the name of Freescale Semiconductor nor the
- *      names of its contributors may be used to endorse or promote products
- *      derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index 15f37c5..dafb26f 100644 (file)
@@ -69,7 +69,7 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
        return 0;
 
 err_mdiobus_reg:
-       pci_release_mem_regions(pdev);
+       pci_release_region(pdev, 0);
 err_pci_mem_reg:
        pci_disable_device(pdev);
 err_pci_enable:
@@ -88,7 +88,7 @@ static void enetc_pci_mdio_remove(struct pci_dev *pdev)
        mdiobus_unregister(bus);
        mdio_priv = bus->priv;
        iounmap(mdio_priv->hw->port);
-       pci_release_mem_regions(pdev);
+       pci_release_region(pdev, 0);
        pci_disable_device(pdev);
 }
 
index 46f4396..9183d48 100644 (file)
@@ -47,8 +47,3 @@ ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
 ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
 ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
 ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o
-
-# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
-ifndef KBUILD_EXTRA_WARN
-CFLAGS_ice_switch.o += -Wno-array-bounds
-endif
index b25e27c..05cb9dd 100644 (file)
@@ -601,12 +601,30 @@ struct ice_aqc_sw_rules {
        __le32 addr_low;
 };
 
+/* Add switch rule response:
+ * The content of the return buffer is the same as the input buffer. The status
+ * field and LUT index are updated as part of the response
+ */
+struct ice_aqc_sw_rules_elem_hdr {
+       __le16 type; /* Switch rule type, one of T_... */
+#define ICE_AQC_SW_RULES_T_LKUP_RX             0x0
+#define ICE_AQC_SW_RULES_T_LKUP_TX             0x1
+#define ICE_AQC_SW_RULES_T_LG_ACT              0x2
+#define ICE_AQC_SW_RULES_T_VSI_LIST_SET                0x3
+#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR      0x4
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET      0x5
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR    0x6
+       __le16 status;
+} __packed __aligned(sizeof(__le16));
+
 /* Add/Update/Get/Remove lookup Rx/Tx command/response entry
  * This structure describes the lookup rules and associated actions. "index"
  * is returned as part of a response to a successful Add command, and can be
  * used to identify the rule for Update/Get/Remove commands.
  */
 struct ice_sw_rule_lkup_rx_tx {
+       struct ice_aqc_sw_rules_elem_hdr hdr;
+
        __le16 recipe_id;
 #define ICE_SW_RECIPE_LOGICAL_PORT_FWD         10
        /* Source port for LOOKUP_RX and source VSI in case of LOOKUP_TX */
@@ -683,14 +701,16 @@ struct ice_sw_rule_lkup_rx_tx {
         * lookup-type
         */
        __le16 hdr_len;
-       u8 hdr[];
-};
+       u8 hdr_data[];
+} __packed __aligned(sizeof(__le16));
 
 /* Add/Update/Remove large action command/response entry
  * "index" is returned as part of a response to a successful Add command, and
  * can be used to identify the action for Update/Get/Remove commands.
  */
 struct ice_sw_rule_lg_act {
+       struct ice_aqc_sw_rules_elem_hdr hdr;
+
        __le16 index; /* Index in large action table */
        __le16 size;
        /* Max number of large actions */
@@ -744,45 +764,19 @@ struct ice_sw_rule_lg_act {
 #define ICE_LG_ACT_STAT_COUNT_S                3
 #define ICE_LG_ACT_STAT_COUNT_M                (0x7F << ICE_LG_ACT_STAT_COUNT_S)
        __le32 act[]; /* array of size for actions */
-};
+} __packed __aligned(sizeof(__le16));
 
 /* Add/Update/Remove VSI list command/response entry
  * "index" is returned as part of a response to a successful Add command, and
  * can be used to identify the VSI list for Update/Get/Remove commands.
  */
 struct ice_sw_rule_vsi_list {
+       struct ice_aqc_sw_rules_elem_hdr hdr;
+
        __le16 index; /* Index of VSI/Prune list */
        __le16 number_vsi;
        __le16 vsi[]; /* Array of number_vsi VSI numbers */
-};
-
-/* Query VSI list command/response entry */
-struct ice_sw_rule_vsi_list_query {
-       __le16 index;
-       DECLARE_BITMAP(vsi_list, ICE_MAX_VSI);
-} __packed;
-
-/* Add switch rule response:
- * Content of return buffer is same as the input buffer. The status field and
- * LUT index are updated as part of the response
- */
-struct ice_aqc_sw_rules_elem {
-       __le16 type; /* Switch rule type, one of T_... */
-#define ICE_AQC_SW_RULES_T_LKUP_RX             0x0
-#define ICE_AQC_SW_RULES_T_LKUP_TX             0x1
-#define ICE_AQC_SW_RULES_T_LG_ACT              0x2
-#define ICE_AQC_SW_RULES_T_VSI_LIST_SET                0x3
-#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR      0x4
-#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET      0x5
-#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR    0x6
-       __le16 status;
-       union {
-               struct ice_sw_rule_lkup_rx_tx lkup_tx_rx;
-               struct ice_sw_rule_lg_act lg_act;
-               struct ice_sw_rule_vsi_list vsi_list;
-               struct ice_sw_rule_vsi_list_query vsi_list_query;
-       } __packed pdata;
-};
+} __packed __aligned(sizeof(__le16));
 
 /* Query PFC Mode (direct 0x0302)
  * Set PFC Mode (direct 0x0303)
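
The ice_adminq_cmd.h rework above replaces the single ice_aqc_sw_rules_elem (a header plus a union of every rule variant) with a shared ice_aqc_sw_rules_elem_hdr embedded at the start of each per-type struct that ends in a flexible array. A compact sketch of that layout style with hypothetical message types; the __packed/__aligned(sizeof(__le16)) annotations that pin down the real wire format are intentionally left out here.

#include <stdint.h>
#include <stdio.h>

/* Shared header embedded at the start of every message variant. */
struct msg_hdr {
        uint16_t type;
        uint16_t status;
};

/* One variant: fixed fields followed by a flexible payload. */
struct msg_lookup {
        struct msg_hdr hdr;
        uint16_t recipe_id;
        uint16_t hdr_len;
        uint8_t  hdr_data[];   /* flexible array member */
};

/* Another variant reusing the same header. */
struct msg_vsi_list {
        struct msg_hdr hdr;
        uint16_t index;
        uint16_t number_vsi;
        uint16_t vsi[];        /* flexible array member */
};

int main(void)
{
        /* Any variant can be inspected through its leading header. */
        struct msg_lookup lk = { .hdr = { .type = 0, .status = 0 } };
        struct msg_hdr *h = &lk.hdr;

        printf("type=%u status=%u\n", h->type, h->status);
        printf("fixed part of msg_lookup: %zu bytes\n", sizeof(struct msg_lookup));
        return 0;
}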
index 9f0a4df..8d8f3ee 100644 (file)
@@ -1282,18 +1282,13 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
        ICE_PKT_PROFILE(tcp, 0),
 };
 
-#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
-       (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
-        (DUMMY_ETH_HDR_LEN * \
-         sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
-#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
-       (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
-#define ICE_SW_RULE_LG_ACT_SIZE(n) \
-       (offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
-        ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
-#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
-       (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
-        ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
+#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l)       struct_size((s), hdr_data, (l))
+#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s)      \
+       ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
+#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s)       \
+       ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
+#define ICE_SW_RULE_LG_ACT_SIZE(s, n)          struct_size((s), act, (n))
+#define ICE_SW_RULE_VSI_LIST_SIZE(s, n)                struct_size((s), vsi, (n))
 
 /* this is a recipe to profile association bitmap */
 static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
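
The new ICE_SW_RULE_*_SIZE() macros above all reduce to struct_size(), the kernel helper that computes the size of a flexible-array struct as its fixed part plus n trailing elements, with overflow checking, replacing the old hand-rolled offsetof() arithmetic. A userspace sketch of the same computation; struct_size() itself is kernel-only, so the STRUCT_SIZE() macro below is a simplified stand-in without the overflow checks.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rule {
        uint16_t recipe_id;
        uint16_t hdr_len;
        uint8_t  hdr_data[];   /* flexible array member */
};

/* Simplified stand-in for the kernel's struct_size(ptr, member, n). */
#define STRUCT_SIZE(ptr, member, n) \
        (sizeof(*(ptr)) + (n) * sizeof((ptr)->member[0]))

int main(void)
{
        const size_t payload = 14;   /* e.g. a dummy Ethernet header */
        struct rule *r;

        /* sizeof() does not evaluate its operand, so using 'r' here is fine. */
        r = calloc(1, STRUCT_SIZE(r, hdr_data, payload));
        if (!r)
                return 1;

        r->hdr_len = payload;
        memset(r->hdr_data, 0xab, payload);   /* fits: we sized for it */

        printf("allocated %zu bytes for %zu payload bytes\n",
               STRUCT_SIZE(r, hdr_data, payload), payload);
        free(r);
        return 0;
}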
@@ -2376,7 +2371,8 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
  */
 static void
 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
-                struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
+                struct ice_sw_rule_lkup_rx_tx *s_rule,
+                enum ice_adminq_opc opc)
 {
        u16 vlan_id = ICE_MAX_VLAN_ID + 1;
        u16 vlan_tpid = ETH_P_8021Q;
@@ -2388,15 +2384,14 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
        u8 q_rgn;
 
        if (opc == ice_aqc_opc_remove_sw_rules) {
-               s_rule->pdata.lkup_tx_rx.act = 0;
-               s_rule->pdata.lkup_tx_rx.index =
-                       cpu_to_le16(f_info->fltr_rule_id);
-               s_rule->pdata.lkup_tx_rx.hdr_len = 0;
+               s_rule->act = 0;
+               s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
+               s_rule->hdr_len = 0;
                return;
        }
 
        eth_hdr_sz = sizeof(dummy_eth_header);
-       eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
+       eth_hdr = s_rule->hdr_data;
 
        /* initialize the ether header with a dummy header */
        memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
@@ -2481,14 +2476,14 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
                break;
        }
 
-       s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
+       s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
                cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
                cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
 
        /* Recipe set depending on lookup type */
-       s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
-       s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
-       s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+       s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
+       s_rule->src = cpu_to_le16(f_info->src);
+       s_rule->act = cpu_to_le32(act);
 
        if (daddr)
                ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
@@ -2502,7 +2497,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
 
        /* Create the switch rule with the final dummy Ethernet header */
        if (opc != ice_aqc_opc_update_sw_rules)
-               s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
+               s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
 }
 
 /**
@@ -2519,7 +2514,8 @@ static int
 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
                   u16 sw_marker, u16 l_id)
 {
-       struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
+       struct ice_sw_rule_lkup_rx_tx *rx_tx;
+       struct ice_sw_rule_lg_act *lg_act;
        /* For software marker we need 3 large actions
         * 1. FWD action: FWD TO VSI or VSI LIST
         * 2. GENERIC VALUE action to hold the profile ID
@@ -2540,18 +2536,18 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
         *    1. Large Action
         *    2. Look up Tx Rx
         */
-       lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
-       rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
+       lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
+       rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
        lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
        if (!lg_act)
                return -ENOMEM;
 
-       rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
+       rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);
 
        /* Fill in the first switch rule i.e. large action */
-       lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
-       lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
-       lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);
+       lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
+       lg_act->index = cpu_to_le16(l_id);
+       lg_act->size = cpu_to_le16(num_lg_acts);
 
        /* First action VSI forwarding or VSI list forwarding depending on how
         * many VSIs
@@ -2563,13 +2559,13 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
        act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
        if (m_ent->vsi_count > 1)
                act |= ICE_LG_ACT_VSI_LIST;
-       lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);
+       lg_act->act[0] = cpu_to_le32(act);
 
        /* Second action descriptor type */
        act = ICE_LG_ACT_GENERIC;
 
        act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
-       lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
+       lg_act->act[1] = cpu_to_le32(act);
 
        act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
               ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
@@ -2579,24 +2575,22 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
        act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
                ICE_LG_ACT_GENERIC_VALUE_M;
 
-       lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
+       lg_act->act[2] = cpu_to_le32(act);
 
        /* call the fill switch rule to fill the lookup Tx Rx structure */
        ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
                         ice_aqc_opc_update_sw_rules);
 
        /* Update the action to point to the large action ID */
-       rx_tx->pdata.lkup_tx_rx.act =
-               cpu_to_le32(ICE_SINGLE_ACT_PTR |
-                           ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
-                            ICE_SINGLE_ACT_PTR_VAL_M));
+       rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
+                                ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
+                                 ICE_SINGLE_ACT_PTR_VAL_M));
 
        /* Use the filter rule ID of the previously created rule with single
         * act. Once the update happens, hardware will treat this as large
         * action
         */
-       rx_tx->pdata.lkup_tx_rx.index =
-               cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
+       rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
 
        status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
                                 ice_aqc_opc_update_sw_rules, NULL);
@@ -2658,7 +2652,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
                         u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
                         enum ice_sw_lkup_type lkup_type)
 {
-       struct ice_aqc_sw_rules_elem *s_rule;
+       struct ice_sw_rule_vsi_list *s_rule;
        u16 s_rule_size;
        u16 rule_type;
        int status;
@@ -2681,7 +2675,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
        else
                return -EINVAL;
 
-       s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
+       s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
        s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
        if (!s_rule)
                return -ENOMEM;
@@ -2691,13 +2685,13 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
                        goto exit;
                }
                /* AQ call requires hw_vsi_id(s) */
-               s_rule->pdata.vsi_list.vsi[i] =
+               s_rule->vsi[i] =
                        cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
        }
 
-       s_rule->type = cpu_to_le16(rule_type);
-       s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
-       s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
+       s_rule->hdr.type = cpu_to_le16(rule_type);
+       s_rule->number_vsi = cpu_to_le16(num_vsi);
+       s_rule->index = cpu_to_le16(vsi_list_id);
 
        status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
 
@@ -2745,13 +2739,14 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw,
                        struct ice_fltr_list_entry *f_entry)
 {
        struct ice_fltr_mgmt_list_entry *fm_entry;
-       struct ice_aqc_sw_rules_elem *s_rule;
+       struct ice_sw_rule_lkup_rx_tx *s_rule;
        enum ice_sw_lkup_type l_type;
        struct ice_sw_recipe *recp;
        int status;
 
        s_rule = devm_kzalloc(ice_hw_to_dev(hw),
-                             ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
+                             ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
+                             GFP_KERNEL);
        if (!s_rule)
                return -ENOMEM;
        fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
@@ -2772,17 +2767,16 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw,
        ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
                         ice_aqc_opc_add_sw_rules);
 
-       status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
+       status = ice_aq_sw_rules(hw, s_rule,
+                                ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
                                 ice_aqc_opc_add_sw_rules, NULL);
        if (status) {
                devm_kfree(ice_hw_to_dev(hw), fm_entry);
                goto ice_create_pkt_fwd_rule_exit;
        }
 
-       f_entry->fltr_info.fltr_rule_id =
-               le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
-       fm_entry->fltr_info.fltr_rule_id =
-               le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+       f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
+       fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
 
        /* The book keeping entries will get removed when base driver
         * calls remove filter AQ command
@@ -2807,20 +2801,22 @@ ice_create_pkt_fwd_rule_exit:
 static int
 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
 {
-       struct ice_aqc_sw_rules_elem *s_rule;
+       struct ice_sw_rule_lkup_rx_tx *s_rule;
        int status;
 
        s_rule = devm_kzalloc(ice_hw_to_dev(hw),
-                             ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
+                             ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
+                             GFP_KERNEL);
        if (!s_rule)
                return -ENOMEM;
 
        ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
 
-       s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
+       s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
 
        /* Update switch rule with new rule set to forward VSI list */
-       status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
+       status = ice_aq_sw_rules(hw, s_rule,
+                                ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
                                 ice_aqc_opc_update_sw_rules, NULL);
 
        devm_kfree(ice_hw_to_dev(hw), s_rule);
@@ -3104,17 +3100,17 @@ static int
 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
                         enum ice_sw_lkup_type lkup_type)
 {
-       struct ice_aqc_sw_rules_elem *s_rule;
+       struct ice_sw_rule_vsi_list *s_rule;
        u16 s_rule_size;
        int status;
 
-       s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
+       s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
        s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
        if (!s_rule)
                return -ENOMEM;
 
-       s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
-       s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
+       s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
+       s_rule->index = cpu_to_le16(vsi_list_id);
 
        /* Free the vsi_list resource that we allocated. It is assumed that the
         * list is empty at this point.
@@ -3274,10 +3270,10 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
 
        if (remove_rule) {
                /* Remove the lookup rule */
-               struct ice_aqc_sw_rules_elem *s_rule;
+               struct ice_sw_rule_lkup_rx_tx *s_rule;
 
                s_rule = devm_kzalloc(ice_hw_to_dev(hw),
-                                     ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
+                                     ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
                                      GFP_KERNEL);
                if (!s_rule) {
                        status = -ENOMEM;
@@ -3288,8 +3284,8 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
                                 ice_aqc_opc_remove_sw_rules);
 
                status = ice_aq_sw_rules(hw, s_rule,
-                                        ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
-                                        ice_aqc_opc_remove_sw_rules, NULL);
+                                        ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
+                                        1, ice_aqc_opc_remove_sw_rules, NULL);
 
                /* Remove a book keeping from the list */
                devm_kfree(ice_hw_to_dev(hw), s_rule);
@@ -3437,7 +3433,7 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
  */
 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
 {
-       struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
+       struct ice_sw_rule_lkup_rx_tx *s_rule, *r_iter;
        struct ice_fltr_list_entry *m_list_itr;
        struct list_head *rule_head;
        u16 total_elem_left, s_rule_size;
@@ -3501,7 +3497,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
        rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
 
        /* Allocate switch rule buffer for the bulk update for unicast */
-       s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
+       s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
        s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
                              GFP_KERNEL);
        if (!s_rule) {
@@ -3517,8 +3513,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
                if (is_unicast_ether_addr(mac_addr)) {
                        ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
                                         ice_aqc_opc_add_sw_rules);
-                       r_iter = (struct ice_aqc_sw_rules_elem *)
-                               ((u8 *)r_iter + s_rule_size);
+                       r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
                }
        }
 
@@ -3527,7 +3522,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
        /* Call AQ switch rule in AQ_MAX chunk */
        for (total_elem_left = num_unicast; total_elem_left > 0;
             total_elem_left -= elem_sent) {
-               struct ice_aqc_sw_rules_elem *entry = r_iter;
+               struct ice_sw_rule_lkup_rx_tx *entry = r_iter;
 
                elem_sent = min_t(u8, total_elem_left,
                                  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
@@ -3536,7 +3531,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
                                         NULL);
                if (status)
                        goto ice_add_mac_exit;
-               r_iter = (struct ice_aqc_sw_rules_elem *)
+               r_iter = (typeof(s_rule))
                        ((u8 *)r_iter + (elem_sent * s_rule_size));
        }
 
@@ -3548,8 +3543,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
                struct ice_fltr_mgmt_list_entry *fm_entry;
 
                if (is_unicast_ether_addr(mac_addr)) {
-                       f_info->fltr_rule_id =
-                               le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
+                       f_info->fltr_rule_id = le16_to_cpu(r_iter->index);
                        f_info->fltr_act = ICE_FWD_TO_VSI;
                        /* Create an entry to track this MAC address */
                        fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
@@ -3565,8 +3559,7 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
                         */
 
                        list_add(&fm_entry->list_entry, rule_head);
-                       r_iter = (struct ice_aqc_sw_rules_elem *)
-                               ((u8 *)r_iter + s_rule_size);
+                       r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
                }
        }
 
@@ -3865,7 +3858,7 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
  */
 int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
 {
-       struct ice_aqc_sw_rules_elem *s_rule;
+       struct ice_sw_rule_lkup_rx_tx *s_rule;
        struct ice_fltr_info f_info;
        enum ice_adminq_opc opcode;
        u16 s_rule_size;
@@ -3876,8 +3869,8 @@ int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
                return -EINVAL;
        hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
 
-       s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
-               ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+       s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule) :
+                           ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
 
        s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
        if (!s_rule)
@@ -3915,7 +3908,7 @@ int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
        if (status || !(f_info.flag & ICE_FLTR_TX_RX))
                goto out;
        if (set) {
-               u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+               u16 index = le16_to_cpu(s_rule->index);
 
                if (f_info.flag & ICE_FLTR_TX) {
                        hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
@@ -5641,7 +5634,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
  */
 static int
 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
-                         struct ice_aqc_sw_rules_elem *s_rule,
+                         struct ice_sw_rule_lkup_rx_tx *s_rule,
                          const struct ice_dummy_pkt_profile *profile)
 {
        u8 *pkt;
@@ -5650,7 +5643,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
        /* Start with a packet with a pre-defined/dummy content. Then, fill
         * in the header values to be looked up or matched.
         */
-       pkt = s_rule->pdata.lkup_tx_rx.hdr;
+       pkt = s_rule->hdr_data;
 
        memcpy(pkt, profile->pkt, profile->pkt_len);
 
@@ -5740,7 +5733,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
                }
        }
 
-       s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(profile->pkt_len);
+       s_rule->hdr_len = cpu_to_le16(profile->pkt_len);
 
        return 0;
 }
@@ -5963,7 +5956,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
                 struct ice_rule_query_data *added_entry)
 {
        struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
-       struct ice_aqc_sw_rules_elem *s_rule = NULL;
+       struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
        const struct ice_dummy_pkt_profile *profile;
        u16 rid = 0, i, rule_buf_sz, vsi_handle;
        struct list_head *rule_head;
@@ -6040,7 +6033,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
                }
                return status;
        }
-       rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + profile->pkt_len;
+       rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
        s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
        if (!s_rule)
                return -ENOMEM;
@@ -6089,16 +6082,15 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
         * by caller)
         */
        if (rinfo->rx) {
-               s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
-               s_rule->pdata.lkup_tx_rx.src =
-                       cpu_to_le16(hw->port_info->lport);
+               s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+               s_rule->src = cpu_to_le16(hw->port_info->lport);
        } else {
-               s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
-               s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
+               s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+               s_rule->src = cpu_to_le16(rinfo->sw_act.src);
        }
 
-       s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
-       s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+       s_rule->recipe_id = cpu_to_le16(rid);
+       s_rule->act = cpu_to_le32(act);
 
        status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
        if (status)
@@ -6107,7 +6099,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
        if (rinfo->tun_type != ICE_NON_TUN &&
            rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
                status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
-                                                s_rule->pdata.lkup_tx_rx.hdr,
+                                                s_rule->hdr_data,
                                                 profile->offsets);
                if (status)
                        goto err_ice_add_adv_rule;
@@ -6135,8 +6127,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 
        adv_fltr->lkups_cnt = lkups_cnt;
        adv_fltr->rule_info = *rinfo;
-       adv_fltr->rule_info.fltr_rule_id =
-               le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+       adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
        sw = hw->switch_info;
        sw->recp_list[rid].adv_rule = true;
        rule_head = &sw->recp_list[rid].filt_rules;
@@ -6384,17 +6375,16 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
        }
        mutex_unlock(rule_lock);
        if (remove_rule) {
-               struct ice_aqc_sw_rules_elem *s_rule;
+               struct ice_sw_rule_lkup_rx_tx *s_rule;
                u16 rule_buf_sz;
 
-               rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+               rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
                s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
                if (!s_rule)
                        return -ENOMEM;
-               s_rule->pdata.lkup_tx_rx.act = 0;
-               s_rule->pdata.lkup_tx_rx.index =
-                       cpu_to_le16(list_elem->rule_info.fltr_rule_id);
-               s_rule->pdata.lkup_tx_rx.hdr_len = 0;
+               s_rule->act = 0;
+               s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
+               s_rule->hdr_len = 0;
                status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
                                         rule_buf_sz, 1,
                                         ice_aqc_opc_remove_sw_rules, NULL);
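
The ice hunks above replace the old union-based struct ice_aqc_sw_rules_elem with per-type rule structs (ice_sw_rule_lkup_rx_tx, ice_sw_rule_vsi_list) that end in flexible-array members, which is why the size macros now take the rule pointer as an argument. A minimal sketch of that sizing pattern, with example_rule and EXAMPLE_RULE_SIZE as illustrative names rather than the actual ice definitions:

  #include <linux/overflow.h>     /* struct_size() */
  #include <linux/types.h>

  struct example_rule {
          __le16 type;
          __le16 index;
          __le16 hdr_len;
          u8 hdr_data[];          /* variable-length packet header */
  };

  /* Total allocation size for a rule carrying @len header bytes; passing
   * the pointer keeps the macro correct if the struct layout changes.
   */
  #define EXAMPLE_RULE_SIZE(r, len)       struct_size((r), hdr_data, (len))

An allocation such as devm_kzalloc(dev, EXAMPLE_RULE_SIZE(s_rule, pkt_len), GFP_KERNEL) then sizes the flexible array in one overflow-checked expression.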
index ecac75e..eb641e5 100644 (file)
@@ -23,9 +23,6 @@
 #define ICE_PROFID_IPV6_GTPU_TEID                      46
 #define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER            70
 
-#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
-       (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
-
 /* VSI context structure for add/get/update/free operations */
 struct ice_vsi_ctx {
        u16 vsi_num;
index a79201a..a9da85e 100644 (file)
@@ -579,7 +579,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
 
        blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
        if (blkaddr < 0)
-               return blkaddr;
+               return false;
 
        /* Registers that can be accessed from PF/VF */
        if ((offset & 0xFF000) ==  CPT_AF_LFX_CTL(0) ||
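
is_valid_offset() returns bool, so the old "return blkaddr;" on a negative error code silently collapsed to true and treated the failure as a valid offset; returning false propagates the failure correctly. A standalone illustration of the pitfall (userspace C, hypothetical names):

  #include <stdbool.h>
  #include <stdio.h>

  static bool looks_valid(int err_or_index)
  {
          return err_or_index;    /* -22 (-EINVAL) converts to true */
  }

  int main(void)
  {
          printf("%d\n", looks_valid(-22));       /* prints 1, not 0 */
          return 0;
  }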
index 54f235c..2dd192b 100644 (file)
@@ -355,7 +355,7 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
 {
        struct otx2_nic *pf = netdev_priv(netdev);
 
-       if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+       if (!bitmap_empty(&pf->flow_cfg->dmacflt_bmap,
                          pf->flow_cfg->dmacflt_max_flows))
                netdev_warn(netdev,
                            "Add %pM to CGX/RPM DMAC filters list as well\n",
@@ -438,7 +438,7 @@ int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
                return 0;
 
        if (flow_cfg->nr_flows == flow_cfg->max_flows ||
-           bitmap_weight(&flow_cfg->dmacflt_bmap,
+           !bitmap_empty(&flow_cfg->dmacflt_bmap,
                          flow_cfg->dmacflt_max_flows))
                return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
        else
index fe3472e..9106c35 100644 (file)
@@ -1120,7 +1120,7 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
        struct msg_req *msg;
        int err;
 
-       if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+       if (enable && !bitmap_empty(&pf->flow_cfg->dmacflt_bmap,
                                    pf->flow_cfg->dmacflt_max_flows))
                netdev_warn(pf->netdev,
                            "CGX/RPM internal loopback might not work as DMAC filters are active\n");
index a9d4fd8..b3b3c07 100644 (file)
@@ -2212,6 +2212,9 @@ static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
        struct ethtool_rx_flow_spec *fsp =
                (struct ethtool_rx_flow_spec *)&cmd->fs;
 
+       if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
+               return -EINVAL;
+
        /* only tcp dst ipv4 is meaningful, others are meaningless */
        fsp->flow_type = TCP_V4_FLOW;
        fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
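
mtk_hwlro_get_fdir_entry() indexes mac->hwlro_ip[] with the user-controlled fsp->location, so the added check rejects out-of-range values before the array access rather than after. The pattern in isolation (hypothetical table and names):

  #include <linux/errno.h>
  #include <linux/kernel.h>       /* ARRAY_SIZE() */
  #include <linux/types.h>

  static u32 hwlro_table[10];

  /* Validate an untrusted index before it is used to read the table. */
  static int get_hwlro_entry(u32 location, u32 *out)
  {
          if (location >= ARRAY_SIZE(hwlro_table))
                  return -EINVAL;

          *out = hwlro_table[location];
          return 0;
  }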
index e10b7b0..c56d219 100644 (file)
@@ -1994,21 +1994,16 @@ static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
 
 static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
 {
-       int port, err;
+       int p, port, err;
        struct mlx4_vport_state *vp_admin;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_slave_state *slave_state =
                &priv->mfunc.master.slave_state[slave];
        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
                        &priv->dev, slave);
-       int min_port = find_first_bit(actv_ports.ports,
-                                     priv->dev.caps.num_ports) + 1;
-       int max_port = min_port - 1 +
-               bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
 
-       for (port = min_port; port <= max_port; port++) {
-               if (!test_bit(port - 1, actv_ports.ports))
-                       continue;
+       for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
+               port = p + 1;
                priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
                        priv->mfunc.master.vf_admin[slave].enable_smi[port];
                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
@@ -2063,19 +2058,13 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
 
 static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
 {
-       int port;
+       int p, port;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
                        &priv->dev, slave);
-       int min_port = find_first_bit(actv_ports.ports,
-                                     priv->dev.caps.num_ports) + 1;
-       int max_port = min_port - 1 +
-               bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
 
-
-       for (port = min_port; port <= max_port; port++) {
-               if (!test_bit(port - 1, actv_ports.ports))
-                       continue;
+       for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
+               port = p + 1;
                priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
                        MLX4_VF_SMI_DISABLED;
                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
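
Both mlx4 loops previously computed min_port/max_port from find_first_bit() and bitmap_weight() and then skipped cleared bits by hand; for_each_set_bit() walks only the set bits directly, with port = p + 1 because ports are 1-based. The shape of the conversion (illustrative callback):

  #include <linux/bitops.h>

  /* Visit every active port; bit p corresponds to 1-based port p + 1. */
  static void for_each_active_port(const unsigned long *active,
                                   unsigned int nports, void (*fn)(int port))
  {
          unsigned int p;

          for_each_set_bit(p, active, nports)
                  fn(p + 1);
  }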
index 11f7c03..0eb9d74 100644 (file)
@@ -571,18 +571,32 @@ static int _next_phys_dev(struct mlx5_core_dev *mdev,
        return 1;
 }
 
+static void *pci_get_other_drvdata(struct device *this, struct device *other)
+{
+       if (this->driver != other->driver)
+               return NULL;
+
+       return pci_get_drvdata(to_pci_dev(other));
+}
+
 static int next_phys_dev(struct device *dev, const void *data)
 {
-       struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
-       struct mlx5_core_dev *mdev = madev->mdev;
+       struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
+
+       mdev = pci_get_other_drvdata(this->device, dev);
+       if (!mdev)
+               return 0;
 
        return _next_phys_dev(mdev, data);
 }
 
 static int next_phys_dev_lag(struct device *dev, const void *data)
 {
-       struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
-       struct mlx5_core_dev *mdev = madev->mdev;
+       struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
+
+       mdev = pci_get_other_drvdata(this->device, dev);
+       if (!mdev)
+               return 0;
 
        if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
            !MLX5_CAP_GEN(mdev, lag_master) ||
@@ -596,19 +610,17 @@ static int next_phys_dev_lag(struct device *dev, const void *data)
 static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
                                               int (*match)(struct device *dev, const void *data))
 {
-       struct auxiliary_device *adev;
-       struct mlx5_adev *madev;
+       struct device *next;
 
        if (!mlx5_core_is_pf(dev))
                return NULL;
 
-       adev = auxiliary_find_device(NULL, dev, match);
-       if (!adev)
+       next = bus_find_device(&pci_bus_type, NULL, dev, match);
+       if (!next)
                return NULL;
 
-       madev = container_of(adev, struct mlx5_adev, adev);
-       put_device(&adev->dev);
-       return madev->mdev;
+       put_device(next);
+       return pci_get_drvdata(to_pci_dev(next));
 }
 
 /* Must be called with intf_mutex held */
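
mlx5_get_next_dev() no longer walks mlx5's own auxiliary devices; it iterates the PCI bus and accepts a peer only if it is bound to the same driver, at which point its drvdata is known to be a struct mlx5_core_dev. bus_find_device() returns the matched device with a reference held, hence the put_device() once the drvdata has been read. A trimmed-down sketch of the pattern (the match policy and names are illustrative):

  #include <linux/device.h>
  #include <linux/pci.h>

  /* Accept any other PCI device bound to the same driver as @data. */
  static int match_same_driver(struct device *dev, const void *data)
  {
          const struct device *self = data;

          return dev != self && dev->driver == self->driver;
  }

  static void *find_peer_drvdata(struct pci_dev *pdev)
  {
          struct device *peer;
          void *drvdata;

          peer = bus_find_device(&pci_bus_type, NULL, &pdev->dev,
                                 match_same_driver);
          if (!peer)
                  return NULL;

          drvdata = pci_get_drvdata(to_pci_dev(peer));
          put_device(peer);       /* bus_find_device() took a reference */
          return drvdata;
  }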
index 65d3c48..b6c15ef 100644 (file)
@@ -764,6 +764,7 @@ struct mlx5e_rq {
        u8                     wq_type;
        u32                    rqn;
        struct mlx5_core_dev  *mdev;
+       struct mlx5e_channel  *channel;
        u32  umr_mkey;
        struct mlx5e_dma_info  wqe_overflow;
 
@@ -1076,6 +1077,9 @@ void mlx5e_close_cq(struct mlx5e_cq *cq);
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
 
+void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c);
+void mlx5e_trigger_napi_sched(struct napi_struct *napi);
+
 int mlx5e_open_channels(struct mlx5e_priv *priv,
                        struct mlx5e_channels *chs);
 void mlx5e_close_channels(struct mlx5e_channels *chs);
index 4130a87..6e3a90a 100644 (file)
@@ -12,6 +12,7 @@ struct mlx5e_post_act;
 enum {
        MLX5E_TC_FT_LEVEL = 0,
        MLX5E_TC_TTC_FT_LEVEL,
+       MLX5E_TC_MISS_LEVEL,
 };
 
 struct mlx5e_tc_table {
@@ -20,6 +21,7 @@ struct mlx5e_tc_table {
         */
        struct mutex                    t_lock;
        struct mlx5_flow_table          *t;
+       struct mlx5_flow_table          *miss_t;
        struct mlx5_fs_chains           *chains;
        struct mlx5e_post_act           *post_act;
 
index 335b20b..047f88f 100644 (file)
@@ -736,6 +736,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
        if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
                mlx5e_ptp_rx_set_fs(c->priv);
                mlx5e_activate_rq(&c->rq);
+               mlx5e_trigger_napi_sched(&c->napi);
        }
 }
 
index 2684e9d..fc366e6 100644 (file)
@@ -123,6 +123,8 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
                xskrq->stats->recover++;
        }
 
+       mlx5e_trigger_napi_icosq(icosq->channel);
+
        mutex_unlock(&icosq->channel->icosq_recovery_lock);
 
        return 0;
@@ -166,6 +168,10 @@ static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
        clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
        mlx5e_activate_rq(rq);
        rq->stats->recover++;
+       if (rq->channel)
+               mlx5e_trigger_napi_icosq(rq->channel);
+       else
+               mlx5e_trigger_napi_sched(rq->cq.napi);
        return 0;
 out:
        clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
index bceea7a..25f51f8 100644 (file)
@@ -715,7 +715,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
                                struct mlx5_flow_attr *attr,
                                struct flow_rule *flow_rule,
                                struct mlx5e_mod_hdr_handle **mh,
-                               u8 zone_restore_id, bool nat)
+                               u8 zone_restore_id, bool nat_table, bool has_nat)
 {
        DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, MLX5_CT_MIN_MOD_ACTS);
        DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);
@@ -731,11 +731,12 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
                                     &attr->ct_attr.ct_labels_id);
        if (err)
                return -EOPNOTSUPP;
-       if (nat) {
-               err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule,
-                                                 &mod_acts);
-               if (err)
-                       goto err_mapping;
+       if (nat_table) {
+               if (has_nat) {
+                       err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule, &mod_acts);
+                       if (err)
+                               goto err_mapping;
+               }
 
                ct_state |= MLX5_CT_STATE_NAT_BIT;
        }
@@ -750,7 +751,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
        if (err)
                goto err_mapping;
 
-       if (nat) {
+       if (nat_table && has_nat) {
                attr->modify_hdr = mlx5_modify_header_alloc(ct_priv->dev, ct_priv->ns_type,
                                                            mod_acts.num_actions,
                                                            mod_acts.actions);
@@ -818,7 +819,9 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 
        err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
                                              &zone_rule->mh,
-                                             zone_restore_id, nat);
+                                             zone_restore_id,
+                                             nat,
+                                             mlx5_tc_ct_entry_has_nat(entry));
        if (err) {
                ct_dbg("Failed to create ct entry mod hdr");
                goto err_mod_hdr;
index 857840a..11f2a7f 100644 (file)
@@ -179,6 +179,7 @@ static void mlx5e_activate_trap(struct mlx5e_trap *trap)
 {
        napi_enable(&trap->napi);
        mlx5e_activate_rq(&trap->rq);
+       mlx5e_trigger_napi_sched(&trap->napi);
 }
 
 void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
index 279cd8f..2c52039 100644 (file)
@@ -117,6 +117,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
                goto err_remove_pool;
 
        mlx5e_activate_xsk(c);
+       mlx5e_trigger_napi_icosq(c);
 
        /* Don't wait for WQEs, because the newer xdpsock sample doesn't provide
         * any Fill Ring entries at the setup stage.
index 3ad7f13..98ed9ef 100644 (file)
@@ -64,6 +64,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
        rq->clock        = &mdev->clock;
        rq->icosq        = &c->icosq;
        rq->ix           = c->ix;
+       rq->channel      = c;
        rq->mdev         = mdev;
        rq->hw_mtu       = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        rq->xdpsq        = &c->rq_xdpsq;
@@ -179,10 +180,6 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
        mlx5e_reporter_icosq_resume_recovery(c);
 
        /* TX queue is created active. */
-
-       spin_lock_bh(&c->async_icosq_lock);
-       mlx5e_trigger_irq(&c->async_icosq);
-       spin_unlock_bh(&c->async_icosq_lock);
 }
 
 void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
index 05c0155..087952b 100644 (file)
@@ -475,6 +475,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
        rq->clock        = &mdev->clock;
        rq->icosq        = &c->icosq;
        rq->ix           = c->ix;
+       rq->channel      = c;
        rq->mdev         = mdev;
        rq->hw_mtu       = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        rq->xdpsq        = &c->rq_xdpsq;
@@ -1066,13 +1067,6 @@ err_free_rq:
 void mlx5e_activate_rq(struct mlx5e_rq *rq)
 {
        set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-       if (rq->icosq) {
-               mlx5e_trigger_irq(rq->icosq);
-       } else {
-               local_bh_disable();
-               napi_schedule(rq->cq.napi);
-               local_bh_enable();
-       }
 }
 
 void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -2227,6 +2221,20 @@ static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
        return 0;
 }
 
+void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c)
+{
+       spin_lock_bh(&c->async_icosq_lock);
+       mlx5e_trigger_irq(&c->async_icosq);
+       spin_unlock_bh(&c->async_icosq_lock);
+}
+
+void mlx5e_trigger_napi_sched(struct napi_struct *napi)
+{
+       local_bh_disable();
+       napi_schedule(napi);
+       local_bh_enable();
+}
+
 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                              struct mlx5e_params *params,
                              struct mlx5e_channel_param *cparam,
@@ -2308,6 +2316,8 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
 
        if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
                mlx5e_activate_xsk(c);
+
+       mlx5e_trigger_napi_icosq(c);
 }
 
 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -4559,6 +4569,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 
 unlock:
        mutex_unlock(&priv->state_lock);
+
+       /* Need to fix some features. */
+       if (!err)
+               netdev_update_features(netdev);
+
        return err;
 }
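
The en_main.c hunks above stop kicking NAPI from inside mlx5e_activate_rq() and instead add two helpers: mlx5e_trigger_napi_icosq(), which raises an IRQ on the channel's async ICOSQ under async_icosq_lock, and mlx5e_trigger_napi_sched(), which schedules the NAPI instance directly. Callers (PTP, traps, XSK, the RX reporters, channel activation) now trigger NAPI themselves once the whole channel state is consistent. The schedule-from-process-context half of that, in isolation:

  #include <linux/bottom_half.h>
  #include <linux/netdevice.h>

  /* Kick a NAPI instance from process context: napi_schedule() raises
   * NET_RX_SOFTIRQ and local_bh_enable() lets it run right away.
   */
  static void kick_napi(struct napi_struct *napi)
  {
          local_bh_disable();
          napi_schedule(napi);
          local_bh_enable();
  }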
 
index 49dea02..34bf11c 100644 (file)
@@ -4714,6 +4714,33 @@ static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
        return tc_tbl_size;
 }
 
+static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
+{
+       struct mlx5_flow_table **ft = &priv->fs.tc.miss_t;
+       struct mlx5_flow_table_attr ft_attr = {};
+       struct mlx5_flow_namespace *ns;
+       int err = 0;
+
+       ft_attr.max_fte = 1;
+       ft_attr.autogroup.max_num_groups = 1;
+       ft_attr.level = MLX5E_TC_MISS_LEVEL;
+       ft_attr.prio = 0;
+       ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
+
+       *ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+       if (IS_ERR(*ft)) {
+               err = PTR_ERR(*ft);
+               netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err);
+       }
+
+       return err;
+}
+
+static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
+{
+       mlx5_destroy_flow_table(priv->fs.tc.miss_t);
+}
+
 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 {
        struct mlx5e_tc_table *tc = &priv->fs.tc;
@@ -4746,19 +4773,23 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
        }
        tc->mapping = chains_mapping;
 
+       err = mlx5e_tc_nic_create_miss_table(priv);
+       if (err)
+               goto err_chains;
+
        if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
                attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
                        MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
        attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
        attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
        attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
-       attr.default_ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
+       attr.default_ft = priv->fs.tc.miss_t;
        attr.mapping = chains_mapping;
 
        tc->chains = mlx5_chains_create(dev, &attr);
        if (IS_ERR(tc->chains)) {
                err = PTR_ERR(tc->chains);
-               goto err_chains;
+               goto err_miss;
        }
 
        tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
@@ -4781,6 +4812,8 @@ err_reg:
        mlx5_tc_ct_clean(tc->ct);
        mlx5e_tc_post_act_destroy(tc->post_act);
        mlx5_chains_destroy(tc->chains);
+err_miss:
+       mlx5e_tc_nic_destroy_miss_table(priv);
 err_chains:
        mapping_destroy(chains_mapping);
 err_mapping:
@@ -4821,6 +4854,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
        mlx5e_tc_post_act_destroy(tc->post_act);
        mapping_destroy(tc->mapping);
        mlx5_chains_destroy(tc->chains);
+       mlx5e_tc_nic_destroy_miss_table(priv);
 }
 
 int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
index 84caffe..fdcf7f5 100644 (file)
 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
 
 #define KERNEL_NIC_TC_NUM_PRIOS  1
-#define KERNEL_NIC_TC_NUM_LEVELS 2
+#define KERNEL_NIC_TC_NUM_LEVELS 3
 
 #define ANCHOR_NUM_LEVELS 1
 #define ANCHOR_NUM_PRIOS 1
index 887ee0f..2935614 100644 (file)
@@ -87,6 +87,11 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
 enable_vfs_hca:
        num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs);
        for (vf = 0; vf < num_vfs; vf++) {
+               /* Notify the VF before its enablement to let it set
+                * some stuff.
+                */
+               blocking_notifier_call_chain(&sriov->vfs_ctx[vf].notifier,
+                                            MLX5_PF_NOTIFY_ENABLE_VF, dev);
                err = mlx5_core_enable_hca(dev, vf + 1);
                if (err) {
                        mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
@@ -127,6 +132,11 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
        for (vf = num_vfs - 1; vf >= 0; vf--) {
                if (!sriov->vfs_ctx[vf].enabled)
                        continue;
+               /* Notify the VF before its disablement to let it clean
+                * some resources.
+                */
+               blocking_notifier_call_chain(&sriov->vfs_ctx[vf].notifier,
+                                            MLX5_PF_NOTIFY_DISABLE_VF, dev);
                err = mlx5_core_disable_hca(dev, vf + 1);
                if (err) {
                        mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
@@ -257,7 +267,7 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
 {
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        struct pci_dev *pdev = dev->pdev;
-       int total_vfs;
+       int total_vfs, i;
 
        if (!mlx5_core_is_pf(dev))
                return 0;
@@ -269,6 +279,9 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
        if (!sriov->vfs_ctx)
                return -ENOMEM;
 
+       for (i = 0; i < total_vfs; i++)
+               BLOCKING_INIT_NOTIFIER_HEAD(&sriov->vfs_ctx[i].notifier);
+
        return 0;
 }
 
@@ -281,3 +294,53 @@ void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
 
        kfree(sriov->vfs_ctx);
 }
+
+/**
+ * mlx5_sriov_blocking_notifier_unregister - Unregister a VF from
+ * a notification block chain.
+ *
+ * @mdev: The mlx5 core device.
+ * @vf_id: The VF id.
+ * @nb: The notifier block to be unregistered.
+ */
+void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
+                                            int vf_id,
+                                            struct notifier_block *nb)
+{
+       struct mlx5_vf_context *vfs_ctx;
+       struct mlx5_core_sriov *sriov;
+
+       sriov = &mdev->priv.sriov;
+       if (WARN_ON(vf_id < 0 || vf_id >= sriov->num_vfs))
+               return;
+
+       vfs_ctx = &sriov->vfs_ctx[vf_id];
+       blocking_notifier_chain_unregister(&vfs_ctx->notifier, nb);
+}
+EXPORT_SYMBOL(mlx5_sriov_blocking_notifier_unregister);
+
+/**
+ * mlx5_sriov_blocking_notifier_register - Register a VF notification
+ * block chain.
+ *
+ * @mdev: The mlx5 core device.
+ * @vf_id: The VF id.
+ * @nb: The notifier block to be called upon the VF events.
+ *
+ * Returns 0 on success or an error code.
+ */
+int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
+                                         int vf_id,
+                                         struct notifier_block *nb)
+{
+       struct mlx5_vf_context *vfs_ctx;
+       struct mlx5_core_sriov *sriov;
+
+       sriov = &mdev->priv.sriov;
+       if (vf_id < 0 || vf_id >= sriov->num_vfs)
+               return -EINVAL;
+
+       vfs_ctx = &sriov->vfs_ctx[vf_id];
+       return blocking_notifier_chain_register(&vfs_ctx->notifier, nb);
+}
+EXPORT_SYMBOL(mlx5_sriov_blocking_notifier_register);
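
The new per-VF blocking notifier chain lets other code hear MLX5_PF_NOTIFY_ENABLE_VF / MLX5_PF_NOTIFY_DISABLE_VF just before the PF enables or disables a VF's HCA. A consumer would register roughly as below; the callback body is illustrative, and the declarations are assumed to live in the mlx5 driver headers:

  #include <linux/mlx5/driver.h>
  #include <linux/notifier.h>

  static int my_vf_event(struct notifier_block *nb, unsigned long event,
                         void *data)
  {
          /* @event is MLX5_PF_NOTIFY_ENABLE_VF or MLX5_PF_NOTIFY_DISABLE_VF,
           * @data is the PF's struct mlx5_core_dev.
           */
          return NOTIFY_OK;
  }

  static struct notifier_block my_vf_nb = { .notifier_call = my_vf_event };

  static int watch_vf(struct mlx5_core_dev *mdev, int vf_id)
  {
          return mlx5_sriov_blocking_notifier_register(mdev, vf_id, &my_vf_nb);
  }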
index 728f818..6a9abba 100644 (file)
@@ -44,11 +44,10 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns,
        err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
        if (err && action) {
                err = mlx5dr_action_destroy(action);
-               if (err) {
-                       action = NULL;
-                       mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
-                                     err);
-               }
+               if (err)
+                       mlx5_core_err(ns->dev,
+                                     "Failed to destroy action (%d)\n", err);
+               action = NULL;
        }
        ft->fs_dr_table.miss_action = action;
        if (old_miss_action) {
index efbddf2..af81236 100644 (file)
@@ -1164,9 +1164,14 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
                if (!phydev)
                        goto return_error;
 
-               ret = phy_connect_direct(netdev, phydev,
-                                        lan743x_phy_link_status_change,
-                                        PHY_INTERFACE_MODE_GMII);
+               if (adapter->is_pci11x1x)
+                       ret = phy_connect_direct(netdev, phydev,
+                                                lan743x_phy_link_status_change,
+                                                PHY_INTERFACE_MODE_RGMII);
+               else
+                       ret = phy_connect_direct(netdev, phydev,
+                                                lan743x_phy_link_status_change,
+                                                PHY_INTERFACE_MODE_GMII);
                if (ret)
                        goto return_error;
        }
@@ -2936,20 +2941,27 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
                        lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
                        netif_dbg(adapter, drv, adapter->netdev,
                                  "SGMII operation\n");
+                       adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45;
+                       adapter->mdiobus->read = lan743x_mdiobus_c45_read;
+                       adapter->mdiobus->write = lan743x_mdiobus_c45_write;
+                       adapter->mdiobus->name = "lan743x-mdiobus-c45";
+                       netif_dbg(adapter, drv, adapter->netdev,
+                                 "lan743x-mdiobus-c45\n");
                } else {
                        sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
                        sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
                        sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
                        lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
                        netif_dbg(adapter, drv, adapter->netdev,
-                                         "(R)GMII operation\n");
+                                 "RGMII operation\n");
+                       // Only C22 support when RGMII I/F
+                       adapter->mdiobus->probe_capabilities = MDIOBUS_C22;
+                       adapter->mdiobus->read = lan743x_mdiobus_read;
+                       adapter->mdiobus->write = lan743x_mdiobus_write;
+                       adapter->mdiobus->name = "lan743x-mdiobus";
+                       netif_dbg(adapter, drv, adapter->netdev,
+                                 "lan743x-mdiobus\n");
                }
-
-               adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45;
-               adapter->mdiobus->read = lan743x_mdiobus_c45_read;
-               adapter->mdiobus->write = lan743x_mdiobus_c45_write;
-               adapter->mdiobus->name = "lan743x-mdiobus-c45";
-               netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus-c45\n");
        } else {
                adapter->mdiobus->read = lan743x_mdiobus_read;
                adapter->mdiobus->write = lan743x_mdiobus_write;
index 6ad68b4..5784c41 100644 (file)
@@ -1120,8 +1120,13 @@ static int lan966x_probe(struct platform_device *pdev)
                lan966x->ports[p]->fwnode = fwnode_handle_get(portnp);
 
                serdes = devm_of_phy_get(lan966x->dev, to_of_node(portnp), NULL);
-               if (!IS_ERR(serdes))
-                       lan966x->ports[p]->serdes = serdes;
+               if (PTR_ERR(serdes) == -ENODEV)
+                       serdes = NULL;
+               if (IS_ERR(serdes)) {
+                       err = PTR_ERR(serdes);
+                       goto cleanup_ports;
+               }
+               lan966x->ports[p]->serdes = serdes;
 
                lan966x_port_init(lan966x->ports[p]);
        }
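
devm_of_phy_get() returns -ENODEV when the port simply has no serdes described in DT, which is legal here, so that case is mapped to a NULL serdes while any other error now fails the probe instead of being silently swallowed by the old IS_ERR() check. The optional-resource pattern in isolation (illustrative helper):

  #include <linux/err.h>
  #include <linux/phy/phy.h>

  /* Treat "not described in DT" as optional; propagate real errors. */
  static int get_optional_serdes(struct device *dev, struct device_node *np,
                                 struct phy **out)
  {
          struct phy *serdes = devm_of_phy_get(dev, np, NULL);

          if (PTR_ERR(serdes) == -ENODEV)
                  serdes = NULL;
          if (IS_ERR(serdes))
                  return PTR_ERR(serdes);

          *out = serdes;
          return 0;
  }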
index e3da9ac..e509d6d 100644 (file)
@@ -314,7 +314,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
                    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
 
        txd->dma_len_type = cpu_to_le16(dlen_type);
-       nfp_desc_set_dma_addr(txd, dma_addr);
+       nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
 
        /* starts at bit 0 */
        BUILD_BUG_ON(!(NFDK_DESC_TX_DMA_LEN_HEAD & 1));
@@ -339,7 +339,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
                        dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
 
                        txd->dma_len_type = cpu_to_le16(dlen_type);
-                       nfp_desc_set_dma_addr(txd, dma_addr);
+                       nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
 
                        dma_len -= dlen_type;
                        dma_addr += dlen_type + 1;
@@ -929,7 +929,7 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
                    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
 
        txd->dma_len_type = cpu_to_le16(dlen_type);
-       nfp_desc_set_dma_addr(txd, dma_addr);
+       nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
 
        tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
        dma_len -= tmp_dlen;
@@ -940,7 +940,7 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
                dma_len -= 1;
                dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
                txd->dma_len_type = cpu_to_le16(dlen_type);
-               nfp_desc_set_dma_addr(txd, dma_addr);
+               nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
 
                dlen_type &= NFDK_DESC_TX_DMA_LEN;
                dma_len -= dlen_type;
@@ -1332,7 +1332,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
                    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
 
        txd->dma_len_type = cpu_to_le16(dlen_type);
-       nfp_desc_set_dma_addr(txd, dma_addr);
+       nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
 
        tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
        dma_len -= tmp_dlen;
@@ -1343,7 +1343,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
                dma_len -= 1;
                dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
                txd->dma_len_type = cpu_to_le16(dlen_type);
-               nfp_desc_set_dma_addr(txd, dma_addr);
+               nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
 
                dlen_type &= NFDK_DESC_TX_DMA_LEN;
                dma_len -= dlen_type;
index c41e097..0ea51d9 100644 (file)
@@ -46,8 +46,7 @@
 struct nfp_nfdk_tx_desc {
        union {
                struct {
-                       u8 dma_addr_hi;  /* High bits of host buf address */
-                       u8 padding;  /* Must be zero */
+                       __le16 dma_addr_hi;  /* High bits of host buf address */
                        __le16 dma_len_type; /* Length to DMA for this desc */
                        __le32 dma_addr_lo;  /* Low 32bit of host buf addr */
                };
index 428783b..3dd3a92 100644 (file)
@@ -117,13 +117,22 @@ struct nfp_nfdk_tx_buf;
 /* Convenience macro for writing dma address into RX/TX descriptors */
 #define nfp_desc_set_dma_addr(desc, dma_addr)                          \
        do {                                                            \
-               __typeof(desc) __d = (desc);                            \
+               __typeof__(desc) __d = (desc);                          \
                dma_addr_t __addr = (dma_addr);                         \
                                                                        \
                __d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr));  \
                __d->dma_addr_hi = upper_32_bits(__addr) & 0xff;        \
        } while (0)
 
+#define nfp_nfdk_tx_desc_set_dma_addr(desc, dma_addr)                         \
+       do {                                                                   \
+               __typeof__(desc) __d = (desc);                                 \
+               dma_addr_t __addr = (dma_addr);                                \
+                                                                              \
+               __d->dma_addr_hi = cpu_to_le16(upper_32_bits(__addr) & 0xff);  \
+               __d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr));         \
+       } while (0)
+
 /**
  * struct nfp_net_tx_ring - TX ring structure
  * @r_vec:      Back pointer to ring vector structure
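
The NFDK TX descriptor now carries the high address bits as a little-endian 16-bit field instead of an 8-bit field plus padding, so the generic nfp_desc_set_dma_addr() (which stores dma_addr_hi as a raw byte) no longer matches the layout and a dedicated nfp_nfdk_tx_desc_set_dma_addr() converts the high bits with cpu_to_le16(). The same split written as a function rather than a macro, with an illustrative descriptor:

  #include <asm/byteorder.h>      /* cpu_to_le16(), cpu_to_le32() */
  #include <linux/kernel.h>       /* upper_32_bits(), lower_32_bits() */
  #include <linux/types.h>

  struct example_tx_desc {
          __le16 dma_addr_hi;     /* bits 32..39 of the DMA address */
          __le16 dma_len_type;
          __le32 dma_addr_lo;
  };

  static void example_set_dma_addr(struct example_tx_desc *d, dma_addr_t addr)
  {
          d->dma_addr_hi = cpu_to_le16(upper_32_bits(addr) & 0xff);
          d->dma_addr_lo = cpu_to_le32(lower_32_bits(addr));
  }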
index 61c8b45..df0afd2 100644 (file)
@@ -289,8 +289,6 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
 
        /* Init to unknowns */
        ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-       ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
-       ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
        cmd->base.port = PORT_OTHER;
        cmd->base.speed = SPEED_UNKNOWN;
        cmd->base.duplex = DUPLEX_UNKNOWN;
@@ -298,6 +296,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
        port = nfp_port_from_netdev(netdev);
        eth_port = nfp_port_get_eth_port(port);
        if (eth_port) {
+               ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
                cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
                        AUTONEG_ENABLE : AUTONEG_DISABLE;
                nfp_net_set_fec_link_mode(eth_port, cmd);
index 23b668d..69b0ede 100644 (file)
@@ -319,44 +319,27 @@ free_rdma_dev:
 void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
                        struct qed_bmap *bmap, bool check)
 {
-       int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
-       int last_line = bmap->max_count / (64 * 8);
-       int last_item = last_line * 8 +
-           DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
-       u64 *pmap = (u64 *)bmap->bitmap;
-       int line, item, offset;
-       u8 str_last_line[200] = { 0 };
-
-       if (!weight || !check)
+       unsigned int bit, weight, nbits;
+       unsigned long *b;
+
+       if (!check)
+               goto end;
+
+       weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+       if (!weight)
                goto end;
 
        DP_NOTICE(p_hwfn,
                  "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
                  bmap->name, bmap->max_count, weight);
 
-       /* print aligned non-zero lines, if any */
-       for (item = 0, line = 0; line < last_line; line++, item += 8)
-               if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
+       for (bit = 0; bit < bmap->max_count; bit += 512) {
+               b =  bmap->bitmap + BITS_TO_LONGS(bit);
+               nbits = min(bmap->max_count - bit, 512U);
+
+               if (!bitmap_empty(b, nbits))
                        DP_NOTICE(p_hwfn,
-                                 "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
-                                 line,
-                                 pmap[item],
-                                 pmap[item + 1],
-                                 pmap[item + 2],
-                                 pmap[item + 3],
-                                 pmap[item + 4],
-                                 pmap[item + 5],
-                                 pmap[item + 6], pmap[item + 7]);
-
-       /* print last unaligned non-zero line, if any */
-       if ((bmap->max_count % (64 * 8)) &&
-           (bitmap_weight((unsigned long *)&pmap[item],
-                          bmap->max_count - item * 64))) {
-               offset = sprintf(str_last_line, "line 0x%04x: ", line);
-               for (; item < last_item; item++)
-                       offset += sprintf(str_last_line + offset,
-                                         "0x%016llx ", pmap[item]);
-               DP_NOTICE(p_hwfn, "%s\n", str_last_line);
+                                 "line 0x%04x: %*pb\n", bit / 512, nbits, b);
        }
 
 end:
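
The replacement output relies on the printk %*pb extension, where the field width supplies the number of bits and the argument is the bitmap itself, so each 512-bit line collapses into a single format directive instead of eight hand-formatted %016llx fields. Minimal usage (illustrative function):

  #include <linux/bitmap.h>
  #include <linux/printk.h>

  static void dump_leaked_bits(const unsigned long *map, unsigned int nbits)
  {
          /* %*pb prints @nbits bits of @map as hex words; %*pbl would
           * print them as a range list such as "0-3,8".
           */
          pr_notice("leaked: %*pb\n", nbits, map);
  }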
index 071b4ae..134ecfc 100644 (file)
@@ -76,7 +76,7 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
         * We delay for a short while if an async destroy QP is still expected.
         * Beyond the added delay we clear the bitmap anyway.
         */
-       while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+       while (!bitmap_empty(rcid_map->bitmap, rcid_map->max_count)) {
                /* If the HW device is during recovery, all resources are
                 * immediately reset without receiving a per-cid indication
                 * from HW. In this case we don't expect the cid bitmap to be
index f4919e7..032b8c0 100644 (file)
@@ -298,6 +298,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
                efx->n_channels = 1;
                efx->n_rx_channels = 1;
                efx->n_tx_channels = 1;
+               efx->tx_channel_offset = 0;
                efx->n_xdp_channels = 0;
                efx->xdp_channel_offset = efx->n_channels;
                rc = pci_enable_msi(efx->pci_dev);
@@ -318,6 +319,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
                efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
                efx->n_rx_channels = 1;
                efx->n_tx_channels = 1;
+               efx->tx_channel_offset = 1;
                efx->n_xdp_channels = 0;
                efx->xdp_channel_offset = efx->n_channels;
                efx->legacy_irq = efx->pci_dev->irq;
@@ -954,10 +956,6 @@ int efx_set_channels(struct efx_nic *efx)
        struct efx_channel *channel;
        int rc;
 
-       efx->tx_channel_offset =
-               efx_separate_tx_channels ?
-               efx->n_channels - efx->n_tx_channels : 0;
-
        if (efx->xdp_tx_queue_count) {
                EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
 
index 318db90..723bbee 100644 (file)
@@ -1530,7 +1530,7 @@ static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
 
 static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
 {
-       return true;
+       return channel && channel->channel >= channel->efx->tx_channel_offset;
 }
 
 static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel)
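
efx_channel_has_tx_queues() used to claim that every channel owns TX queues, which is wrong once TX channels are split from RX channels; it now derives the answer from tx_channel_offset, and the offset is filled in next to the channel counts in efx_probe_interrupts() rather than being recomputed later in efx_set_channels(). (The sibling Siena driver below receives the identical fix.) The check itself is just a range comparison (illustrative standalone form):

  #include <linux/types.h>

  /* A channel carries TX queues only if its number is at or past the
   * offset where TX channels start.
   */
  static bool channel_has_tx_queues(unsigned int channel_no,
                                    unsigned int tx_channel_offset)
  {
          return channel_no >= tx_channel_offset;
  }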
index 2465cf4..017212a 100644 (file)
@@ -299,6 +299,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
                efx->n_channels = 1;
                efx->n_rx_channels = 1;
                efx->n_tx_channels = 1;
+               efx->tx_channel_offset = 0;
                efx->n_xdp_channels = 0;
                efx->xdp_channel_offset = efx->n_channels;
                rc = pci_enable_msi(efx->pci_dev);
@@ -319,6 +320,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
                efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
                efx->n_rx_channels = 1;
                efx->n_tx_channels = 1;
+               efx->tx_channel_offset = 1;
                efx->n_xdp_channels = 0;
                efx->xdp_channel_offset = efx->n_channels;
                efx->legacy_irq = efx->pci_dev->irq;
@@ -958,10 +960,6 @@ int efx_siena_set_channels(struct efx_nic *efx)
        struct efx_channel *channel;
        int rc;
 
-       efx->tx_channel_offset =
-               efx_siena_separate_tx_channels ?
-               efx->n_channels - efx->n_tx_channels : 0;
-
        if (efx->xdp_tx_queue_count) {
                EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
 
index a8f6c36..c4a97fb 100644 (file)
@@ -1529,7 +1529,7 @@ static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
 
 static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
 {
-       return true;
+       return channel && channel->channel >= channel->efx->tx_channel_offset;
 }
 
 static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel)
index 0b0be08..f9f8093 100644 (file)
@@ -1161,6 +1161,7 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0     0x7aac
 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1     0x7aad
 #define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G       0x54ac
+#define PCI_DEVICE_ID_INTEL_RPLP_SGMII1G       0x51ac
 
 static const struct pci_device_id intel_eth_pci_id_table[] = {
        { PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
@@ -1179,6 +1180,7 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
        { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
        { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
        { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) },
+       { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &tgl_sgmii1g_phy0_info) },
        {}
 };
 MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
index 3b81d4e..d1a7cf4 100644 (file)
@@ -7129,9 +7129,9 @@ int stmmac_dvr_probe(struct device *device,
                /* MDIO bus Registration */
                ret = stmmac_mdio_register(ndev);
                if (ret < 0) {
-                       dev_err(priv->device,
-                               "%s: MDIO bus (id: %d) registration failed",
-                               __func__, priv->plat->bus_id);
+                       dev_err_probe(priv->device, ret,
+                                     "%s: MDIO bus (id: %d) registration failed\n",
+                                     __func__, priv->plat->bus_id);
                        goto error_mdio_register;
                }
        }
index 9bc625f..03d3d1f 100644 (file)
@@ -482,7 +482,7 @@ int stmmac_mdio_register(struct net_device *ndev)
 
        err = of_mdiobus_register(new_bus, mdio_node);
        if (err != 0) {
-               dev_err(dev, "Cannot register the MDIO bus\n");
+               dev_err_probe(dev, err, "Cannot register the MDIO bus\n");
                goto bus_register_fail;
        }
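
Both stmmac hunks switch to dev_err_probe(), which folds the error code into the message, stays quiet for -EPROBE_DEFER (recording the deferral reason in devres instead), and returns the error, so a probe path can log and propagate in one statement. Typical shape (illustrative function):

  #include <linux/device.h>

  static int register_child_bus(struct device *dev, int err)
  {
          if (err)
                  /* Silent for -EPROBE_DEFER, logged otherwise;
                   * returns @err so it can be propagated directly.
                   */
                  return dev_err_probe(dev, err,
                                       "Cannot register the MDIO bus\n");
          return 0;
  }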
 
index 34197c6..fb92d4c 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/kmemleak.h>
 #include <linux/module.h>
@@ -1788,6 +1789,7 @@ static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
        if (IS_ERR(cpts)) {
                int ret = PTR_ERR(cpts);
 
+               of_node_put(node);
                if (ret == -EOPNOTSUPP) {
                        dev_info(dev, "cpts disabled\n");
                        return 0;
@@ -1981,7 +1983,9 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
 
        phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
 
-       phylink = phylink_create(&port->slave.phylink_config, dev->fwnode, port->slave.phy_if,
+       phylink = phylink_create(&port->slave.phylink_config,
+                                of_node_to_fwnode(port->slave.phy_node),
+                                port->slave.phy_if,
                                 &am65_cpsw_phylink_mac_ops);
        if (IS_ERR(phylink))
                return PTR_ERR(phylink);
@@ -2662,9 +2666,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
        if (!node)
                return -ENOENT;
        common->port_num = of_get_child_count(node);
+       of_node_put(node);
        if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
                return -ENOENT;
-       of_node_put(node);
 
        common->rx_flow_id_base = -1;
        init_completion(&common->tdown_complete);
index 385aa63..d3b3255 100644 (file)
@@ -1095,7 +1095,7 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
 
        ret = gsi_trans_page_add(trans, page, len, offset);
        if (ret)
-               __free_pages(page, get_order(buffer_size));
+               put_page(page);
        else
                trans->data = page;     /* transaction owns page now */
 
@@ -1418,11 +1418,8 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
        } else {
                struct page *page = trans->data;
 
-               if (page) {
-                       u32 buffer_size = endpoint->config.rx.buffer_size;
-
-                       __free_pages(page, get_order(buffer_size));
-               }
+               if (page)
+                       put_page(page);
        }
 }
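
The IPA RX buffers are allocated as multi-order compound pages, so releasing them with put_page() drops the reference the transaction holds and lets the page-reference machinery free the whole compound page, instead of recomputing an order for __free_pages(). A sketch of the allocate/release pairing (illustrative names; the real driver derives the order from its configured buffer size):

  #include <linux/gfp.h>
  #include <linux/mm.h>

  /* Allocate an order-@order RX buffer as a compound page. */
  static struct page *rx_buffer_alloc(unsigned int order)
  {
          return alloc_pages(GFP_KERNEL | __GFP_COMP, order);
  }

  /* Drop our reference; the compound page is freed once the last
   * reference (ours or the network stack's) goes away.
   */
  static void rx_buffer_free(struct page *page)
  {
          if (page)
                  put_page(page);
  }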
 
index 832f09a..817577e 100644 (file)
@@ -99,6 +99,7 @@ struct pcpu_secy_stats {
  * struct macsec_dev - private data
  * @secy: SecY config
  * @real_dev: pointer to underlying netdevice
+ * @dev_tracker: refcount tracker for @real_dev reference
  * @stats: MACsec device stats
  * @secys: linked list of SecY's on the underlying device
  * @gro_cells: pointer to the Generic Receive Offload cell
@@ -107,6 +108,7 @@ struct pcpu_secy_stats {
 struct macsec_dev {
        struct macsec_secy secy;
        struct net_device *real_dev;
+       netdevice_tracker dev_tracker;
        struct pcpu_secy_stats __percpu *stats;
        struct list_head secys;
        struct gro_cells gro_cells;
@@ -3459,6 +3461,9 @@ static int macsec_dev_init(struct net_device *dev)
        if (is_zero_ether_addr(dev->broadcast))
                memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
 
+       /* Get macsec's reference to real_dev */
+       dev_hold_track(real_dev, &macsec->dev_tracker, GFP_KERNEL);
+
        return 0;
 }
 
@@ -3704,6 +3709,8 @@ static void macsec_free_netdev(struct net_device *dev)
        free_percpu(macsec->stats);
        free_percpu(macsec->secy.tx_sc.stats);
 
+       /* Get rid of the macsec's reference to real_dev */
+       dev_put_track(macsec->real_dev, &macsec->dev_tracker);
 }
 
 static void macsec_setup(struct net_device *dev)
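
macsec now takes its own tracked reference on the underlying real_dev for the lifetime of the macsec netdev and releases it from the netdev destructor; the netdevice_tracker makes any leak of that reference attributable when CONFIG_NET_DEV_REFCNT_TRACKER is enabled. The hold/put pairing in isolation (illustrative struct):

  #include <linux/gfp.h>
  #include <linux/netdevice.h>

  struct upper_priv {
          struct net_device *lower;
          netdevice_tracker lower_tracker;
  };

  static void upper_take_lower(struct upper_priv *p, struct net_device *lower)
  {
          p->lower = lower;
          dev_hold_track(lower, &p->lower_tracker, GFP_KERNEL);
  }

  static void upper_release_lower(struct upper_priv *p)
  {
          dev_put_track(p->lower, &p->lower_tracker);
  }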
index 7392600..6a467e7 100644 (file)
@@ -433,20 +433,21 @@ static void at803x_context_restore(struct phy_device *phydev,
 static int at803x_set_wol(struct phy_device *phydev,
                          struct ethtool_wolinfo *wol)
 {
-       struct net_device *ndev = phydev->attached_dev;
-       const u8 *mac;
        int ret, irq_enabled;
-       unsigned int i;
-       static const unsigned int offsets[] = {
-               AT803X_LOC_MAC_ADDR_32_47_OFFSET,
-               AT803X_LOC_MAC_ADDR_16_31_OFFSET,
-               AT803X_LOC_MAC_ADDR_0_15_OFFSET,
-       };
-
-       if (!ndev)
-               return -ENODEV;
 
        if (wol->wolopts & WAKE_MAGIC) {
+               struct net_device *ndev = phydev->attached_dev;
+               const u8 *mac;
+               unsigned int i;
+               static const unsigned int offsets[] = {
+                       AT803X_LOC_MAC_ADDR_32_47_OFFSET,
+                       AT803X_LOC_MAC_ADDR_16_31_OFFSET,
+                       AT803X_LOC_MAC_ADDR_0_15_OFFSET,
+               };
+
+               if (!ndev)
+                       return -ENODEV;
+
                mac = (const u8 *) ndev->dev_addr;
 
                if (!is_valid_ether_addr(mac))
@@ -857,6 +858,9 @@ static int at803x_probe(struct phy_device *phydev)
        if (phydev->drv->phy_id == ATH8031_PHY_ID) {
                int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
                int mode_cfg;
+               struct ethtool_wolinfo wol = {
+                       .wolopts = 0,
+               };
 
                if (ccr < 0)
                        goto err;
@@ -872,6 +876,13 @@ static int at803x_probe(struct phy_device *phydev)
                        priv->is_fiber = true;
                        break;
                }
+
+               /* Disable WOL by default */
+               ret = at803x_set_wol(phydev, &wol);
+               if (ret < 0) {
+                       phydev_err(phydev, "failed to disable WOL on probe: %d\n", ret);
+                       goto err;
+               }
        }
 
        return 0;
index c65fb5f..03abe62 100644 (file)
@@ -180,7 +180,7 @@ static void fixed_phy_del(int phy_addr)
                        if (fp->link_gpiod)
                                gpiod_put(fp->link_gpiod);
                        kfree(fp);
-                       ida_simple_remove(&phy_fixed_ida, phy_addr);
+                       ida_free(&phy_fixed_ida, phy_addr);
                        return;
                }
        }
@@ -244,13 +244,13 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
        }
 
        /* Get the next available PHY address, up to PHY_MAX_ADDR */
-       phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
+       phy_addr = ida_alloc_max(&phy_fixed_ida, PHY_MAX_ADDR - 1, GFP_KERNEL);
        if (phy_addr < 0)
                return ERR_PTR(phy_addr);
 
        ret = fixed_phy_add_gpiod(irq, phy_addr, status, gpiod);
        if (ret < 0) {
-               ida_simple_remove(&phy_fixed_ida, phy_addr);
+               ida_free(&phy_fixed_ida, phy_addr);
                return ERR_PTR(ret);
        }
 
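The fixed_phy hunks above move from the deprecated ida_simple_*() helpers to ida_alloc_max()/ida_free(). The bounds differ: ida_simple_get() takes an exclusive upper limit while ida_alloc_max() takes an inclusive maximum, which is why PHY_MAX_ADDR becomes PHY_MAX_ADDR - 1. A minimal sketch of the new API with an illustrative IDA:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);	/* illustrative, not the driver's IDA */

static int example_id_get(void)
{
	/* smallest free ID in [0, 31], or a negative errno */
	return ida_alloc_max(&example_ida, 31, GFP_KERNEL);
}

static void example_id_put(int id)
{
	ida_free(&example_ida, id);
}
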
index cdca00c..d55f59c 100644 (file)
@@ -441,7 +441,7 @@ static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx)
         * .bind which is called before usbnet sets up dev->maxpacket
         */
        if (val != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
-           val % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+           val % usb_maxpacket(dev->udev, dev->out) == 0)
                val++;
 
        /* we might need to flush any pending tx buffers if running */
@@ -465,7 +465,7 @@ static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx)
        usbnet_update_max_qlen(dev);
 
        /* never pad more than 3 full USB packets per transfer */
-       ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out, 1),
+       ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out),
                                  CDC_NCM_MIN_TX_PKT, ctx->tx_max);
 }
 
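This hunk and the lan78xx, rndis_host, usbnet, mt76 and rt2x00 hunks below are the same mechanical update: usb_maxpacket() dropped its third is_out argument in this cycle because the direction is already encoded in the pipe. A minimal sketch of the new call (the wrapper function is purely illustrative):

#include <linux/usb.h>

static u16 example_maxpacket(struct usb_device *udev, int ep)
{
	unsigned int pipe = usb_sndbulkpipe(udev, ep);

	/* old form: usb_maxpacket(udev, pipe, usb_pipeout(pipe)); */
	return usb_maxpacket(udev, pipe);
}
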
index 636a405..3226ab3 100644 (file)
@@ -4421,7 +4421,7 @@ static int lan78xx_probe(struct usb_interface *intf,
                goto out4;
 
        period = ep_intr->desc.bInterval;
-       maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
+       maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
        buf = kmalloc(maxp, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
@@ -4439,7 +4439,7 @@ static int lan78xx_probe(struct usb_interface *intf,
                dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
        }
 
-       dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+       dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
 
        /* Reject broken descriptors. */
        if (dev->maxpacket == 0) {
index 79f8bd8..571a399 100644 (file)
@@ -1366,6 +1366,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1230, 2)}, /* Telit LE910Cx */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x1250, 0)}, /* Telit LE910Cx */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
@@ -1388,6 +1389,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)},    /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
        {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
        {QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)},    /* Cinterion MV31 RmNet */
+       {QMI_FIXED_INTF(0x1e2d, 0x00b9, 0)},    /* Cinterion MV31 RmNet based on new baseline */
        {QMI_FIXED_INTF(0x413c, 0x81a2, 8)},    /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a3, 8)},    /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a4, 8)},    /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
index 4e70dec..f79333f 100644 (file)
@@ -333,7 +333,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
        net->hard_header_len += sizeof (struct rndis_data_hdr);
        dev->hard_mtu = net->mtu + net->hard_header_len;
 
-       dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);
+       dev->maxpacket = usb_maxpacket(dev->udev, dev->out);
        if (dev->maxpacket == 0) {
                netif_dbg(dev, probe, dev->net,
                          "dev->maxpacket can't be 0\n");
index 36b24ec..1cb6dab 100644 (file)
@@ -229,7 +229,7 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf)
        pipe = usb_rcvintpipe (dev->udev,
                        dev->status->desc.bEndpointAddress
                                & USB_ENDPOINT_NUMBER_MASK);
-       maxp = usb_maxpacket (dev->udev, pipe, 0);
+       maxp = usb_maxpacket(dev->udev, pipe);
 
        /* avoid 1 msec chatter:  min 8 msec poll rate */
        period = max ((int) dev->status->desc.bInterval,
@@ -1789,7 +1789,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 
        if (!dev->rx_urb_size)
                dev->rx_urb_size = dev->hard_mtu;
-       dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
+       dev->maxpacket = usb_maxpacket(dev->udev, dev->out);
        if (dev->maxpacket == 0) {
                /* that is a broken device */
                status = -ENODEV;
index 6fc69c4..bd50f52 100644 (file)
@@ -1090,7 +1090,7 @@ struct iwl_causes_list {
        u8 addr;
 };
 
-#define CAUSE(reg, mask)                                               \
+#define IWL_CAUSE(reg, mask)                                           \
        {                                                               \
                .mask_reg = reg,                                        \
                .bit = ilog2(mask),                                     \
@@ -1101,28 +1101,28 @@ struct iwl_causes_list {
        }
 
 static const struct iwl_causes_list causes_list_common[] = {
-       CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
-       CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
-       CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
-       CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
-       CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
-       CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
-       CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
-       CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
-       CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
-       CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
-       CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
-       CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
-       CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
-       CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
+       IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
+       IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
+       IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
+       IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
+       IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
+       IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
+       IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
+       IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
+       IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
+       IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
+       IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
+       IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
+       IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
+       IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
 };
 
 static const struct iwl_causes_list causes_list_pre_bz[] = {
-       CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
+       IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
 };
 
 static const struct iwl_causes_list causes_list_bz[] = {
-       CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
+       IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
 };
 
 static void iwl_pcie_map_list(struct iwl_trans *trans,
index 4e3de68..b0b3f59 100644 (file)
@@ -1053,7 +1053,6 @@ static int lbs_set_authtype(struct lbs_private *priv,
  */
 #define LBS_ASSOC_MAX_CMD_SIZE                     \
        (sizeof(struct cmd_ds_802_11_associate)    \
-        - 512 /* cmd_ds_802_11_associate.iebuf */ \
         + LBS_MAX_SSID_TLV_SIZE                   \
         + LBS_MAX_CHANNEL_TLV_SIZE                \
         + LBS_MAX_CF_PARAM_TLV_SIZE               \
@@ -1130,8 +1129,7 @@ static int lbs_associate(struct lbs_private *priv,
        if (sme->ie && sme->ie_len)
                pos += lbs_add_wpa_tlv(pos, sme->ie, sme->ie_len);
 
-       len = (sizeof(*cmd) - sizeof(cmd->iebuf)) +
-               (u16)(pos - (u8 *) &cmd->iebuf);
+       len = sizeof(*cmd) + (u16)(pos - (u8 *) &cmd->iebuf);
        cmd->hdr.size = cpu_to_le16(len);
 
        lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_CMD", (u8 *) cmd,
index ceff4b9..a202b71 100644 (file)
@@ -528,7 +528,8 @@ struct cmd_ds_802_11_associate {
        __le16 listeninterval;
        __le16 bcnperiod;
        u8 dtimperiod;
-       u8 iebuf[512];    /* Enough for required and most optional IEs */
+       /* 512 permitted - enough for required and most optional IEs */
+       u8 iebuf[];
 } __packed;
 
 struct cmd_ds_802_11_associate_response {
@@ -537,7 +538,8 @@ struct cmd_ds_802_11_associate_response {
        __le16 capability;
        __le16 statuscode;
        __le16 aid;
-       u8 iebuf[512];
+       /* max 512 */
+       u8 iebuf[];
 } __packed;
 
 struct cmd_ds_802_11_set_wep {
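The libertas hunks above turn the fixed 512-byte iebuf arrays into flexible array members; that is why LBS_ASSOC_MAX_CMD_SIZE no longer subtracts 512 and the command length is now sizeof(*cmd) plus the IE bytes actually written. A standalone, runnable C sketch of the sizeof() behaviour (the fields are simplified, not the real command layout):

#include <stdio.h>
#include <stdint.h>

struct assoc_cmd {
	uint16_t capability;
	uint16_t listeninterval;
	uint8_t  dtimperiod;
	uint8_t  iebuf[];	/* flexible array member: contributes nothing to sizeof */
};

int main(void)
{
	size_t ie_len = 23;	/* bytes appended after the fixed header */

	/* total command length = fixed header + appended IEs */
	printf("header=%zu total=%zu\n",
	       sizeof(struct assoc_cmd), sizeof(struct assoc_cmd) + ie_len);
	return 0;
}
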
index a85e192..1bb92ca 100644 (file)
@@ -1068,7 +1068,7 @@ int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
 
        INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
 
-       usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
+       usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0));
        if (usb->data_len < 32)
                usb->data_len = 32;
 
index 74c3d8c..0827bc8 100644 (file)
@@ -586,10 +586,10 @@ static void rt2x00usb_assign_endpoint(struct data_queue *queue,
 
        if (queue->qid == QID_RX) {
                pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint);
-               queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0);
+               queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe);
        } else {
                pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint);
-               queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1);
+               queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe);
        }
 
        if (!queue->usb_maxpacket)
index 090610e..c3ae631 100644 (file)
@@ -1602,6 +1602,16 @@ free:
        return ret;
 }
 
+void rtw_fw_update_beacon_work(struct work_struct *work)
+{
+       struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+                                             update_beacon_work);
+
+       mutex_lock(&rtwdev->mutex);
+       rtw_fw_download_rsvd_page(rtwdev);
+       mutex_unlock(&rtwdev->mutex);
+}
+
 static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
                                  u32 *buf, u32 residue, u16 start_pg)
 {
index 734113f..7a37675 100644 (file)
@@ -809,6 +809,7 @@ void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev,
 void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
                           struct rtw_vif *rtwvif);
 int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev);
+void rtw_fw_update_beacon_work(struct work_struct *work);
 void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev);
 int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
                           u32 offset, u32 size, u32 *buf);
index 30903c5..4310362 100644 (file)
@@ -493,9 +493,7 @@ static int rtw_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
 {
        struct rtw_dev *rtwdev = hw->priv;
 
-       mutex_lock(&rtwdev->mutex);
-       rtw_fw_download_rsvd_page(rtwdev);
-       mutex_unlock(&rtwdev->mutex);
+       ieee80211_queue_work(hw, &rtwdev->update_beacon_work);
 
        return 0;
 }
index 14289f8..efabd5b 100644 (file)
@@ -1442,6 +1442,7 @@ void rtw_core_stop(struct rtw_dev *rtwdev)
        mutex_unlock(&rtwdev->mutex);
 
        cancel_work_sync(&rtwdev->c2h_work);
+       cancel_work_sync(&rtwdev->update_beacon_work);
        cancel_delayed_work_sync(&rtwdev->watch_dog_work);
        cancel_delayed_work_sync(&coex->bt_relink_work);
        cancel_delayed_work_sync(&coex->bt_reenable_work);
@@ -1998,6 +1999,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
        INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
        INIT_WORK(&rtwdev->ips_work, rtw_ips_work);
        INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work);
+       INIT_WORK(&rtwdev->update_beacon_work, rtw_fw_update_beacon_work);
        INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
        skb_queue_head_init(&rtwdev->c2h_queue);
        skb_queue_head_init(&rtwdev->coex.queue);
index 0baaf5a..c02be4a 100644 (file)
@@ -2008,6 +2008,7 @@ struct rtw_dev {
        struct work_struct c2h_work;
        struct work_struct ips_work;
        struct work_struct fw_recovery_work;
+       struct work_struct update_beacon_work;
 
        /* used to protect txqs list */
        spinlock_t txq_lock;
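The rtw88 hunks above stop downloading the reserved (beacon) page directly from the mac80211 .set_tim callback, where taking the driver mutex for a sleeping firmware transfer is problematic, and defer it to a work item that is queued from .set_tim and cancelled in rtw_core_stop(). A minimal sketch of that deferral pattern with hypothetical names:

#include <linux/mutex.h>
#include <linux/workqueue.h>

/* illustrative device structure, not struct rtw_dev */
struct example_dev {
	struct mutex mutex;
	struct work_struct beacon_work;
};

static void example_beacon_work(struct work_struct *work)
{
	struct example_dev *edev = container_of(work, struct example_dev,
						beacon_work);

	mutex_lock(&edev->mutex);
	/* sleeping firmware I/O happens here, outside the caller's context */
	mutex_unlock(&edev->mutex);
}

/*
 * init:      INIT_WORK(&edev->beacon_work, example_beacon_work);
 * hot path:  ieee80211_queue_work(hw, &edev->beacon_work);
 * teardown:  cancel_work_sync(&edev->beacon_work);
 */
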
index 2b92c22..d35dd94 100644 (file)
@@ -280,7 +280,7 @@ int wfx_hif_stop_scan(struct wfx_vif *wvif)
 }
 
 int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
-                struct ieee80211_channel *channel, const u8 *ssid, int ssidlen)
+                struct ieee80211_channel *channel, const u8 *ssid, int ssid_len)
 {
        int ret;
        struct wfx_hif_msg *hif;
@@ -288,8 +288,8 @@ int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
 
        WARN_ON(!conf->beacon_int);
        WARN_ON(!conf->basic_rates);
-       WARN_ON(sizeof(body->ssid) < ssidlen);
-       WARN(!conf->ibss_joined && !ssidlen, "joining an unknown BSS");
+       WARN_ON(sizeof(body->ssid) < ssid_len);
+       WARN(!conf->ibss_joined && !ssid_len, "joining an unknown BSS");
        if (!hif)
                return -ENOMEM;
        body->infrastructure_bss_mode = !conf->ibss_joined;
@@ -300,8 +300,8 @@ int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
        body->basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
        memcpy(body->bssid, conf->bssid, sizeof(body->bssid));
        if (ssid) {
-               body->ssid_length = cpu_to_le32(ssidlen);
-               memcpy(body->ssid, ssid, ssidlen);
+               body->ssid_length = cpu_to_le32(ssid_len);
+               memcpy(body->ssid, ssid, ssid_len);
        }
        wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body));
        ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
index bbfd3fa..e015bfb 100644 (file)
@@ -170,7 +170,7 @@ bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
  *
  * The PDS file is an array of Time-Length-Value structs.
  */
- int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len)
+int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len)
 {
        int ret, chunk_type, chunk_len, chunk_num = 0;
 
index e551fa2..329d7f4 100644 (file)
@@ -409,8 +409,8 @@ static void wfx_join(struct wfx_vif *wvif)
        struct ieee80211_bss_conf *conf = &vif->bss_conf;
        struct cfg80211_bss *bss = NULL;
        u8 ssid[IEEE80211_MAX_SSID_LEN];
-       const u8 *ssidie = NULL;
-       int ssidlen = 0;
+       const u8 *ssid_ie = NULL;
+       int ssid_len = 0;
        int ret;
 
        wfx_tx_lock_flush(wvif->wdev);
@@ -422,21 +422,21 @@ static void wfx_join(struct wfx_vif *wvif)
                return;
        }
 
-       rcu_read_lock(); /* protect ssidie */
+       rcu_read_lock(); /* protect ssid_ie */
        if (bss)
-               ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
-       if (ssidie) {
-               ssidlen = ssidie[1];
-               if (ssidlen > IEEE80211_MAX_SSID_LEN)
-                       ssidlen = IEEE80211_MAX_SSID_LEN;
-               memcpy(ssid, &ssidie[2], ssidlen);
+               ssid_ie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+       if (ssid_ie) {
+               ssid_len = ssid_ie[1];
+               if (ssid_len > IEEE80211_MAX_SSID_LEN)
+                       ssid_len = IEEE80211_MAX_SSID_LEN;
+               memcpy(ssid, &ssid_ie[2], ssid_len);
        }
        rcu_read_unlock();
 
        cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
 
        wvif->join_in_progress = true;
-       ret = wfx_hif_join(wvif, conf, wvif->channel, ssid, ssidlen);
+       ret = wfx_hif_join(wvif, conf, wvif->channel, ssid, ssid_len);
        if (ret) {
                ieee80211_connection_loss(vif);
                wfx_reset(wvif);
index 0f7fd15..d93814c 100644 (file)
@@ -828,7 +828,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        break;
                }
 
-               work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
+               work_to_do = XEN_RING_NR_UNCONSUMED_REQUESTS(&queue->tx);
                if (!work_to_do)
                        break;
 
index 65ab907..8c0b954 100644 (file)
@@ -1386,7 +1386,7 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
                queue->tx_skbs[i] = NULL;
                get_page(queue->grant_tx_page[i]);
                gnttab_end_foreign_access(queue->grant_tx_ref[i],
-                                         (unsigned long)page_address(queue->grant_tx_page[i]));
+                                         queue->grant_tx_page[i]);
                queue->grant_tx_page[i] = NULL;
                queue->grant_tx_ref[i] = INVALID_GRANT_REF;
                add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
@@ -1418,8 +1418,7 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
                 * foreign access is ended (which may be deferred).
                 */
                get_page(page);
-               gnttab_end_foreign_access(ref,
-                                         (unsigned long)page_address(page));
+               gnttab_end_foreign_access(ref, page);
                queue->grant_rx_ref[id] = INVALID_GRANT_REF;
 
                kfree_skb(skb);
@@ -1760,7 +1759,7 @@ static void xennet_end_access(int ref, void *page)
 {
        /* This frees the page as a side-effect */
        if (ref != INVALID_GRANT_REF)
-               gnttab_end_foreign_access(ref, (unsigned long)page);
+               gnttab_end_foreign_access(ref, virt_to_page(page));
 }
 
 static void xennet_disconnect_backend(struct netfront_info *info)
index 72f7c95..24165da 100644 (file)
@@ -1206,9 +1206,10 @@ static void nvme_keep_alive_work(struct work_struct *work)
        nvme_init_request(rq, &ctrl->ka_cmd);
 
        rq->timeout = ctrl->kato * HZ;
+       rq->end_io = nvme_keep_alive_end_io;
        rq->end_io_data = ctrl;
        rq->rq_flags |= RQF_QUIET;
-       blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
+       blk_execute_rq_nowait(rq, false);
 }
 
 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
@@ -2227,8 +2228,16 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
        ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
        ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
        ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
-       ctrl->ctrl_config |= NVME_CC_ENABLE;
+       ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+       if (ret)
+               return ret;
 
+       /* Flush write to device (required if transport is PCI) */
+       ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
+       if (ret)
+               return ret;
+
+       ctrl->ctrl_config |= NVME_CC_ENABLE;
        ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
        if (ret)
                return ret;
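Several NVMe hunks in this series (keep-alive, abort, queue deletion, uring passthrough, nvmet passthru) track a block-layer change in this cycle: the completion callback is stored in the request before submission instead of being passed to blk_execute_rq_nowait(). A minimal sketch of the new pattern, assuming the end_io callback still returns void as it does around this kernel version (names are illustrative):

#include <linux/blk-mq.h>

static void example_end_io(struct request *rq, blk_status_t error)
{
	/* act on rq->end_io_data, then release the request */
	blk_mq_free_request(rq);
}

static void example_submit(struct request *rq, void *ctx)
{
	rq->end_io = example_end_io;
	rq->end_io_data = ctx;
	blk_execute_rq_nowait(rq, false);	/* false: do not queue at the head */
}
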
index 7ae72c7..3c778bb 100644 (file)
@@ -1899,6 +1899,24 @@ nvme_fc_ctrl_ioerr_work(struct work_struct *work)
        nvme_fc_error_recovery(ctrl, "transport detected io error");
 }
 
+/*
+ * nvme_fc_io_getuuid - Routine called to get the appid field
+ * associated with request by the lldd
+ * @req:IO request from nvme fc to driver
+ * Returns: UUID if there is an appid associated with VM or
+ * NULL if the user/libvirt has not set the appid to VM
+ */
+char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req)
+{
+       struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
+       struct request *rq = op->rq;
+
+       if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq->bio)
+               return NULL;
+       return blkcg_get_fc_appid(rq->bio);
+}
+EXPORT_SYMBOL_GPL(nvme_fc_io_getuuid);
+
 static void
 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 {
index 096b1b4..a2e89db 100644 (file)
@@ -453,6 +453,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                        blk_flags);
        if (IS_ERR(req))
                return PTR_ERR(req);
+       req->end_io = nvme_uring_cmd_end_io;
        req->end_io_data = ioucmd;
 
        /* to free bio on completion, as req->bio will be null at that time */
@@ -461,7 +462,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
        pdu->meta_len = d.metadata_len;
 
-       blk_execute_rq_nowait(req, 0, nvme_uring_cmd_end_io);
+       blk_execute_rq_nowait(req, false);
        return -EIOCBQUEUED;
 }
 
index 5a98a7d..48f4f6e 100644 (file)
@@ -1438,9 +1438,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        }
        nvme_init_request(abort_req, &cmd);
 
+       abort_req->end_io = abort_endio;
        abort_req->end_io_data = NULL;
        abort_req->rq_flags |= RQF_QUIET;
-       blk_execute_rq_nowait(abort_req, false, abort_endio);
+       blk_execute_rq_nowait(abort_req, false);
 
        /*
         * The aborted req will be completed on receiving the abort req.
@@ -2485,12 +2486,15 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
                return PTR_ERR(req);
        nvme_init_request(req, &cmd);
 
+       if (opcode == nvme_admin_delete_cq)
+               req->end_io = nvme_del_cq_end;
+       else
+               req->end_io = nvme_del_queue_end;
        req->end_io_data = nvmeq;
 
        init_completion(&nvmeq->delete_done);
        req->rq_flags |= RQF_QUIET;
-       blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
-                       nvme_del_cq_end : nvme_del_queue_end);
+       blk_execute_rq_nowait(req, false);
        return 0;
 }
 
@@ -3453,6 +3457,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
        { PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD  */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+       { PCI_DEVICE(0x1e4B, 0x1001),   /* MAXIO MAP1001 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e4B, 0x1002),   /* MAXIO MAP1002 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e4B, 0x1202),   /* MAXIO MAP1202 */
index 5247c24..b1f7efa 100644 (file)
@@ -97,7 +97,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
                id->sgls |= cpu_to_le32(1 << 20);
 
        /*
-        * When passsthru controller is setup using nvme-loop transport it will
+        * When passthru controller is setup using nvme-loop transport it will
         * export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will fail in
         * the nvme/host/core.c in the nvme_init_subsystem()->nvme_active_ctrl()
         * code path with duplicate ctr subsynqn. In order to prevent that we
@@ -285,8 +285,9 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
                req->p.rq = rq;
                queue_work(nvmet_wq, &req->p.work);
        } else {
+               rq->end_io = nvmet_passthru_req_done;
                rq->end_io_data = req;
-               blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
+               blk_execute_rq_nowait(rq, false);
        }
 
        if (ns)
index 555aa77..967d008 100644 (file)
@@ -304,6 +304,7 @@ config NVMEM_LAYERSCAPE_SFP
        tristate "Layerscape SFP (Security Fuse Processor) support"
        depends on ARCH_LAYERSCAPE || COMPILE_TEST
        depends on HAS_IOMEM
+       select REGMAP_MMIO
        help
          This driver provides support to read the eFuses on Freescale
          Layerscape SoC's. For example, the vendor provides a per part
@@ -324,4 +325,16 @@ config NVMEM_SUNPLUS_OCOTP
          This driver can also be built as a module. If so, the module
          will be called nvmem-sunplus-ocotp.
 
+config NVMEM_APPLE_EFUSES
+       tristate "Apple eFuse support"
+       depends on ARCH_APPLE || COMPILE_TEST
+       default ARCH_APPLE
+       help
+         Say y here to enable support for reading eFuses on Apple SoCs
+         such as the M1. These are e.g. used to store factory programmed
+         calibration data required for the PCIe or the USB-C PHY.
+
+         This driver can also be built as a module. If so, the module will
+         be called nvmem-apple-efuses.
+
 endif
index 891958e..00e136a 100644 (file)
@@ -65,3 +65,5 @@ obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP)    += nvmem-layerscape-sfp.o
 nvmem-layerscape-sfp-y         := layerscape-sfp.o
 obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP)      += nvmem_sunplus_ocotp.o
 nvmem_sunplus_ocotp-y          := sunplus-ocotp.o
+obj-$(CONFIG_NVMEM_APPLE_EFUSES)       += nvmem-apple-efuses.o
+nvmem-apple-efuses-y           := apple-efuses.o
diff --git a/drivers/nvmem/apple-efuses.c b/drivers/nvmem/apple-efuses.c
new file mode 100644 (file)
index 0000000..9b7c871
--- /dev/null
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Apple SoC eFuse driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+
+struct apple_efuses_priv {
+       void __iomem *fuses;
+};
+
+static int apple_efuses_read(void *context, unsigned int offset, void *val,
+                            size_t bytes)
+{
+       struct apple_efuses_priv *priv = context;
+       u32 *dst = val;
+
+       while (bytes >= sizeof(u32)) {
+               *dst++ = readl_relaxed(priv->fuses + offset);
+               bytes -= sizeof(u32);
+               offset += sizeof(u32);
+       }
+
+       return 0;
+}
+
+static int apple_efuses_probe(struct platform_device *pdev)
+{
+       struct apple_efuses_priv *priv;
+       struct resource *res;
+       struct nvmem_config config = {
+               .dev = &pdev->dev,
+               .read_only = true,
+               .reg_read = apple_efuses_read,
+               .stride = sizeof(u32),
+               .word_size = sizeof(u32),
+               .name = "apple_efuses_nvmem",
+               .id = NVMEM_DEVID_AUTO,
+               .root_only = true,
+       };
+
+       priv = devm_kzalloc(config.dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->fuses = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+       if (IS_ERR(priv->fuses))
+               return PTR_ERR(priv->fuses);
+
+       config.priv = priv;
+       config.size = resource_size(res);
+
+       return PTR_ERR_OR_ZERO(devm_nvmem_register(config.dev, &config));
+}
+
+static const struct of_device_id apple_efuses_of_match[] = {
+       { .compatible = "apple,efuses", },
+       {}
+};
+
+MODULE_DEVICE_TABLE(of, apple_efuses_of_match);
+
+static struct platform_driver apple_efuses_driver = {
+       .driver = {
+               .name = "apple_efuses",
+               .of_match_table = apple_efuses_of_match,
+       },
+       .probe = apple_efuses_probe,
+};
+
+module_platform_driver(apple_efuses_driver);
+
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_LICENSE("GPL");
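The new driver above only registers a read-only nvmem provider; other drivers would read the calibration words through the generic nvmem consumer API. A hedged sketch of such a consumer (the "efuses" lookup name, the offset and the whole helper are assumptions for illustration, not something the driver defines):

#include <linux/nvmem-consumer.h>

static int example_read_fuse_word(struct device *dev, u32 *out)
{
	struct nvmem_device *nvmem;
	int ret;

	/* "efuses" is an assumed consumer-side name (e.g. via an nvmem-names
	 * DT property pointing at the provider registered above) */
	nvmem = devm_nvmem_device_get(dev, "efuses");
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	/* read one 32-bit word at offset 0, respecting the 4-byte stride */
	ret = nvmem_device_read(nvmem, 0, sizeof(*out), out);
	return ret < 0 ? ret : 0;
}
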
index a809751..dfea96c 100644 (file)
@@ -244,7 +244,7 @@ static const struct of_device_id bcm_otpc_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids);
 
-static const struct acpi_device_id bcm_otpc_acpi_ids[] = {
+static const struct acpi_device_id bcm_otpc_acpi_ids[] __maybe_unused = {
        { .id = "BRCM0700", .driver_data = (kernel_ulong_t)&otp_map },
        { .id = "BRCM0701", .driver_data = (kernel_ulong_t)&otp_map_v2 },
        { /* sentinel */ }
index 439f00b..450b927 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/nvmem-consumer.h>
 #include <linux/nvmem-provider.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
@@ -72,6 +73,7 @@ static int brcm_nvram_add_cells(struct brcm_nvram *priv, uint8_t *data,
                        return -ENOMEM;
                priv->cells[idx].offset = value - (char *)data;
                priv->cells[idx].bytes = strlen(value);
+               priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
        }
 
        return 0;
index f58d9bc..1e3c754 100644 (file)
@@ -467,6 +467,7 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
 
        cell->bit_offset = info->bit_offset;
        cell->nbits = info->nbits;
+       cell->np = info->np;
 
        if (cell->nbits)
                cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
index e591c15..e2b4245 100644 (file)
 #include <linux/nvmem-provider.h>
 #include <linux/platform_device.h>
 #include <linux/property.h>
+#include <linux/regmap.h>
 
 #define LAYERSCAPE_SFP_OTP_OFFSET      0x0200
 
 struct layerscape_sfp_priv {
-       void __iomem *base;
+       struct regmap *regmap;
 };
 
 struct layerscape_sfp_data {
        int size;
+       enum regmap_endian endian;
 };
 
 static int layerscape_sfp_read(void *context, unsigned int offset, void *val,
@@ -29,15 +31,16 @@ static int layerscape_sfp_read(void *context, unsigned int offset, void *val,
 {
        struct layerscape_sfp_priv *priv = context;
 
-       memcpy_fromio(val, priv->base + LAYERSCAPE_SFP_OTP_OFFSET + offset,
-                     bytes);
-
-       return 0;
+       return regmap_bulk_read(priv->regmap,
+                               LAYERSCAPE_SFP_OTP_OFFSET + offset, val,
+                               bytes / 4);
 }
 
 static struct nvmem_config layerscape_sfp_nvmem_config = {
        .name = "fsl-sfp",
        .reg_read = layerscape_sfp_read,
+       .word_size = 4,
+       .stride = 4,
 };
 
 static int layerscape_sfp_probe(struct platform_device *pdev)
@@ -45,16 +48,26 @@ static int layerscape_sfp_probe(struct platform_device *pdev)
        const struct layerscape_sfp_data *data;
        struct layerscape_sfp_priv *priv;
        struct nvmem_device *nvmem;
+       struct regmap_config config = { 0 };
+       void __iomem *base;
 
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
 
-       priv->base = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(priv->base))
-               return PTR_ERR(priv->base);
+       base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
 
        data = device_get_match_data(&pdev->dev);
+       config.reg_bits = 32;
+       config.reg_stride = 4;
+       config.val_bits = 32;
+       config.val_format_endian = data->endian;
+       config.max_register = LAYERSCAPE_SFP_OTP_OFFSET + data->size - 4;
+       priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, &config);
+       if (IS_ERR(priv->regmap))
+               return PTR_ERR(priv->regmap);
 
        layerscape_sfp_nvmem_config.size = data->size;
        layerscape_sfp_nvmem_config.dev = &pdev->dev;
@@ -65,11 +78,18 @@ static int layerscape_sfp_probe(struct platform_device *pdev)
        return PTR_ERR_OR_ZERO(nvmem);
 }
 
+static const struct layerscape_sfp_data ls1021a_data = {
+       .size = 0x88,
+       .endian = REGMAP_ENDIAN_BIG,
+};
+
 static const struct layerscape_sfp_data ls1028a_data = {
        .size = 0x88,
+       .endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static const struct of_device_id layerscape_sfp_dt_ids[] = {
+       { .compatible = "fsl,ls1021a-sfp", .data = &ls1021a_data },
        { .compatible = "fsl,ls1028a-sfp", .data = &ls1028a_data },
        {},
 };
index 162132c..c1e893c 100644 (file)
@@ -217,9 +217,8 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
                goto err_clk_rate_set;
        }
 
-       ret = pm_runtime_get_sync(priv->dev);
+       ret = pm_runtime_resume_and_get(priv->dev);
        if (ret < 0) {
-               pm_runtime_put_noidle(priv->dev);
                dev_err(priv->dev, "Failed to enable power-domain\n");
                goto err_reg_enable;
        }
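The qfprom hunk above replaces pm_runtime_get_sync() with pm_runtime_resume_and_get(), which already drops the usage count when the resume fails, so the explicit pm_runtime_put_noidle() in the error path goes away. A minimal sketch of the idiom (the function is hypothetical):

#include <linux/pm_runtime.h>

static int example_touch_hw(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count is already balanced on failure */

	/* ... access the now-powered hardware ... */

	pm_runtime_put(dev);
	return 0;
}
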
index 2dc59c2..52b928a 100644 (file)
@@ -71,7 +71,7 @@ struct sp_ocotp_data {
        int size;
 };
 
-const struct sp_ocotp_data  sp_otp_v0 = {
+static const struct sp_ocotp_data sp_otp_v0 = {
        .size = QAC628_OTP_SIZE,
 };
 
@@ -202,8 +202,6 @@ static int sp_ocotp_probe(struct platform_device *pdev)
                (int)QAC628_OTP_NUM_BANKS, (int)OTP_WORDS_PER_BANK,
                (int)OTP_WORD_SIZE, (int)QAC628_OTP_SIZE);
 
-       dev_info(dev, "by Sunplus (C) 2020");
-
        return 0;
 }
 
index 7404072..84063ea 100644 (file)
@@ -456,103 +456,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
 
-/**
- * dev_pm_opp_find_level_exact() - search for an exact level
- * @dev:               device for which we do this operation
- * @level:             level to search for
- *
- * Return: Searches for exact match in the opp table and returns pointer to the
- * matching opp if found, else returns ERR_PTR in case of error and should
- * be handled using IS_ERR. Error return values can be:
- * EINVAL:     for bad pointer
- * ERANGE:     no match found for search
- * ENODEV:     if device not found in list of registered devices
- *
- * The callers are required to call dev_pm_opp_put() for the returned OPP after
- * use.
- */
-struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
-                                              unsigned int level)
-{
-       struct opp_table *opp_table;
-       struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
-       opp_table = _find_opp_table(dev);
-       if (IS_ERR(opp_table)) {
-               int r = PTR_ERR(opp_table);
-
-               dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
-               return ERR_PTR(r);
-       }
-
-       mutex_lock(&opp_table->lock);
-
-       list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
-               if (temp_opp->level == level) {
-                       opp = temp_opp;
-
-                       /* Increment the reference count of OPP */
-                       dev_pm_opp_get(opp);
-                       break;
-               }
-       }
-
-       mutex_unlock(&opp_table->lock);
-       dev_pm_opp_put_opp_table(opp_table);
-
-       return opp;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
-
-/**
- * dev_pm_opp_find_level_ceil() - search for an rounded up level
- * @dev:               device for which we do this operation
- * @level:             level to search for
- *
- * Return: Searches for rounded up match in the opp table and returns pointer
- * to the  matching opp if found, else returns ERR_PTR in case of error and
- * should be handled using IS_ERR. Error return values can be:
- * EINVAL:     for bad pointer
- * ERANGE:     no match found for search
- * ENODEV:     if device not found in list of registered devices
- *
- * The callers are required to call dev_pm_opp_put() for the returned OPP after
- * use.
- */
-struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
-                                             unsigned int *level)
-{
-       struct opp_table *opp_table;
-       struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
-       opp_table = _find_opp_table(dev);
-       if (IS_ERR(opp_table)) {
-               int r = PTR_ERR(opp_table);
-
-               dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
-               return ERR_PTR(r);
-       }
-
-       mutex_lock(&opp_table->lock);
-
-       list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
-               if (temp_opp->available && temp_opp->level >= *level) {
-                       opp = temp_opp;
-                       *level = opp->level;
-
-                       /* Increment the reference count of OPP */
-                       dev_pm_opp_get(opp);
-                       break;
-               }
-       }
-
-       mutex_unlock(&opp_table->lock);
-       dev_pm_opp_put_opp_table(opp_table);
-
-       return opp;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
-
 static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
                                                   unsigned long *freq)
 {
@@ -729,6 +632,223 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
 
+/**
+ * dev_pm_opp_find_level_exact() - search for an exact level
+ * @dev:               device for which we do this operation
+ * @level:             level to search for
+ *
+ * Return: Searches for exact match in the opp table and returns pointer to the
+ * matching opp if found, else returns ERR_PTR in case of error and should
+ * be handled using IS_ERR. Error return values can be:
+ * EINVAL:     for bad pointer
+ * ERANGE:     no match found for search
+ * ENODEV:     if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+                                              unsigned int level)
+{
+       struct opp_table *opp_table;
+       struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table)) {
+               int r = PTR_ERR(opp_table);
+
+               dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
+               return ERR_PTR(r);
+       }
+
+       mutex_lock(&opp_table->lock);
+
+       list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+               if (temp_opp->level == level) {
+                       opp = temp_opp;
+
+                       /* Increment the reference count of OPP */
+                       dev_pm_opp_get(opp);
+                       break;
+               }
+       }
+
+       mutex_unlock(&opp_table->lock);
+       dev_pm_opp_put_opp_table(opp_table);
+
+       return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
+
+/**
+ * dev_pm_opp_find_level_ceil() - search for an rounded up level
+ * @dev:               device for which we do this operation
+ * @level:             level to search for
+ *
+ * Return: Searches for rounded up match in the opp table and returns pointer
+ * to the  matching opp if found, else returns ERR_PTR in case of error and
+ * should be handled using IS_ERR. Error return values can be:
+ * EINVAL:     for bad pointer
+ * ERANGE:     no match found for search
+ * ENODEV:     if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+                                             unsigned int *level)
+{
+       struct opp_table *opp_table;
+       struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table)) {
+               int r = PTR_ERR(opp_table);
+
+               dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
+               return ERR_PTR(r);
+       }
+
+       mutex_lock(&opp_table->lock);
+
+       list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+               if (temp_opp->available && temp_opp->level >= *level) {
+                       opp = temp_opp;
+                       *level = opp->level;
+
+                       /* Increment the reference count of OPP */
+                       dev_pm_opp_get(opp);
+                       break;
+               }
+       }
+
+       mutex_unlock(&opp_table->lock);
+       dev_pm_opp_put_opp_table(opp_table);
+
+       return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
+
+/**
+ * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth
+ * @dev:       device for which we do this operation
+ * @freq:      start bandwidth
+ * @index:     which bandwidth to compare, in case of OPPs with several values
+ *
+ * Search for the matching floor *available* OPP from a starting bandwidth
+ * for a device.
+ *
+ * Return: matching *opp and refreshes *bw accordingly, else returns
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL:     for bad pointer
+ * ERANGE:     no match found for search
+ * ENODEV:     if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
+                                          unsigned int *bw, int index)
+{
+       struct opp_table *opp_table;
+       struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+       if (!dev || !bw) {
+               dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
+               return ERR_PTR(-EINVAL);
+       }
+
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table))
+               return ERR_CAST(opp_table);
+
+       if (index >= opp_table->path_count)
+               return ERR_PTR(-EINVAL);
+
+       mutex_lock(&opp_table->lock);
+
+       list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+               if (temp_opp->available && temp_opp->bandwidth) {
+                       if (temp_opp->bandwidth[index].peak >= *bw) {
+                               opp = temp_opp;
+                               *bw = opp->bandwidth[index].peak;
+
+                               /* Increment the reference count of OPP */
+                               dev_pm_opp_get(opp);
+                               break;
+                       }
+               }
+       }
+
+       mutex_unlock(&opp_table->lock);
+       dev_pm_opp_put_opp_table(opp_table);
+
+       return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
+
+/**
+ * dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth
+ * @dev:       device for which we do this operation
+ * @freq:      start bandwidth
+ * @index:     which bandwidth to compare, in case of OPPs with several values
+ *
+ * Search for the matching floor *available* OPP from a starting bandwidth
+ * for a device.
+ *
+ * Return: matching *opp and refreshes *bw accordingly, else returns
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL:     for bad pointer
+ * ERANGE:     no match found for search
+ * ENODEV:     if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+                                           unsigned int *bw, int index)
+{
+       struct opp_table *opp_table;
+       struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+       if (!dev || !bw) {
+               dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
+               return ERR_PTR(-EINVAL);
+       }
+
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table))
+               return ERR_CAST(opp_table);
+
+       if (index >= opp_table->path_count)
+               return ERR_PTR(-EINVAL);
+
+       mutex_lock(&opp_table->lock);
+
+       list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+               if (temp_opp->available && temp_opp->bandwidth) {
+                       /* go to the next node, before choosing prev */
+                       if (temp_opp->bandwidth[index].peak > *bw)
+                               break;
+                       opp = temp_opp;
+               }
+       }
+
+       /* Increment the reference count of OPP */
+       if (!IS_ERR(opp))
+               dev_pm_opp_get(opp);
+       mutex_unlock(&opp_table->lock);
+       dev_pm_opp_put_opp_table(opp_table);
+
+       if (!IS_ERR(opp))
+               *bw = opp->bandwidth[index].peak;
+
+       return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);
+
 static int _set_opp_voltage(struct device *dev, struct regulator *reg,
                            struct dev_pm_opp_supply *supply)
 {
@@ -1486,9 +1606,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put);
  */
 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 {
-       struct dev_pm_opp *opp;
+       struct dev_pm_opp *opp = NULL, *iter;
        struct opp_table *opp_table;
-       bool found = false;
 
        opp_table = _find_opp_table(dev);
        if (IS_ERR(opp_table))
@@ -1496,16 +1615,16 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 
        mutex_lock(&opp_table->lock);
 
-       list_for_each_entry(opp, &opp_table->opp_list, node) {
-               if (opp->rate == freq) {
-                       found = true;
+       list_for_each_entry(iter, &opp_table->opp_list, node) {
+               if (iter->rate == freq) {
+                       opp = iter;
                        break;
                }
        }
 
        mutex_unlock(&opp_table->lock);
 
-       if (found) {
+       if (opp) {
                dev_pm_opp_put(opp);
 
                /* Drop the reference taken by dev_pm_opp_add() */
@@ -2019,10 +2138,9 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
        for (i = 0; i < count; i++) {
                reg = regulator_get_optional(dev, names[i]);
                if (IS_ERR(reg)) {
-                       ret = PTR_ERR(reg);
-                       if (ret != -EPROBE_DEFER)
-                               dev_err(dev, "%s: no regulator (%s) found: %d\n",
-                                       __func__, names[i], ret);
+                       ret = dev_err_probe(dev, PTR_ERR(reg),
+                                           "%s: no regulator (%s) found\n",
+                                           __func__, names[i]);
                        goto free_regulators;
                }
 
@@ -2168,11 +2286,8 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
        /* Find clk for the device */
        opp_table->clk = clk_get(dev, name);
        if (IS_ERR(opp_table->clk)) {
-               ret = PTR_ERR(opp_table->clk);
-               if (ret != -EPROBE_DEFER) {
-                       dev_err(dev, "%s: Couldn't find clock: %d\n", __func__,
-                               ret);
-               }
+               ret = dev_err_probe(dev, PTR_ERR(opp_table->clk),
+                                   "%s: Couldn't find clock\n", __func__);
                goto err;
        }
 
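Two of the OPP hunks above replace the open-coded "log unless the error is -EPROBE_DEFER" pattern with dev_err_probe(), which handles the deferral case and passes the error code straight through. A minimal sketch (the helper and its clock lookup are illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, NULL);

	if (IS_ERR(clk))
		/* silent for -EPROBE_DEFER (the reason is recorded instead),
		 * dev_err() otherwise; returns the error code either way */
		return dev_err_probe(dev, PTR_ERR(clk), "Couldn't find clock\n");

	*out = clk;
	return 0;
}
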
index 3fcc1f9..1b6e5c5 100644 (file)
@@ -195,14 +195,18 @@ void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
 static void opp_migrate_dentry(struct opp_device *opp_dev,
                               struct opp_table *opp_table)
 {
-       struct opp_device *new_dev;
+       struct opp_device *new_dev = NULL, *iter;
        const struct device *dev;
        struct dentry *dentry;
 
        /* Look for next opp-dev */
-       list_for_each_entry(new_dev, &opp_table->dev_list, node)
-               if (new_dev != opp_dev)
+       list_for_each_entry(iter, &opp_table->dev_list, node)
+               if (iter != opp_dev) {
+                       new_dev = iter;
                        break;
+               }
+
+       BUG_ON(!new_dev);
 
        /* new_dev is guaranteed to be valid here */
        dev = new_dev->dev;
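The opp_migrate_dentry() and dev_pm_opp_remove() hunks above follow the tree-wide rule that the list_for_each_entry() cursor must not be used after the loop: if the loop ends without a break, the cursor points at a bogus entry computed from the list head. A minimal sketch of the safe pattern (type and names are illustrative):

#include <linux/list.h>

struct item {
	struct list_head node;
	int id;
};

static struct item *find_item(struct list_head *head, int id)
{
	struct item *found = NULL, *iter;

	list_for_each_entry(iter, head, node) {
		if (iter->id == id) {
			found = iter;
			break;
		}
	}

	/* only 'found' may be used here; 'iter' is meaningless without a break */
	return found;
}
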
index 485ea98..3039492 100644 (file)
@@ -437,11 +437,11 @@ static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
 
        /* Checking only first OPP is sufficient */
        np = of_get_next_available_child(opp_np, NULL);
+       of_node_put(opp_np);
        if (!np) {
                dev_err(dev, "OPP table empty\n");
                return -EINVAL;
        }
-       of_node_put(opp_np);
 
        prop = of_find_property(np, "opp-peak-kBps", NULL);
        of_node_put(np);
index 375c0c4..e61058e 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/pci.h>
 #include <linux/pci-ecam.h>
 #include <linux/printk.h>
-#include <linux/regulator/consumer.h>
 #include <linux/reset.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
@@ -196,8 +195,6 @@ static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie,
 static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val);
 static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
 static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);
-static int brcm_pcie_linkup(struct brcm_pcie *pcie);
-static int brcm_pcie_add_bus(struct pci_bus *bus);
 
 enum {
        RGR1_SW_INIT_1,
@@ -286,14 +283,6 @@ static const struct pcie_cfg_data bcm2711_cfg = {
        .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
 };
 
-struct subdev_regulators {
-       unsigned int num_supplies;
-       struct regulator_bulk_data supplies[];
-};
-
-static int pci_subdev_regulators_add_bus(struct pci_bus *bus);
-static void pci_subdev_regulators_remove_bus(struct pci_bus *bus);
-
 struct brcm_msi {
        struct device           *dev;
        void __iomem            *base;
@@ -331,9 +320,6 @@ struct brcm_pcie {
        u32                     hw_rev;
        void                    (*perst_set)(struct brcm_pcie *pcie, u32 val);
        void                    (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
-       bool                    refusal_mode;
-       struct subdev_regulators *sr;
-       bool                    ep_wakeup_capable;
 };
 
 static inline bool is_bmips(const struct brcm_pcie *pcie)
@@ -450,99 +436,6 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
        return ssc && pll ? 0 : -EIO;
 }
 
-static void *alloc_subdev_regulators(struct device *dev)
-{
-       static const char * const supplies[] = {
-               "vpcie3v3",
-               "vpcie3v3aux",
-               "vpcie12v",
-       };
-       const size_t size = sizeof(struct subdev_regulators)
-               + sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies);
-       struct subdev_regulators *sr;
-       int i;
-
-       sr = devm_kzalloc(dev, size, GFP_KERNEL);
-       if (sr) {
-               sr->num_supplies = ARRAY_SIZE(supplies);
-               for (i = 0; i < ARRAY_SIZE(supplies); i++)
-                       sr->supplies[i].supply = supplies[i];
-       }
-
-       return sr;
-}
-
-static int pci_subdev_regulators_add_bus(struct pci_bus *bus)
-{
-       struct device *dev = &bus->dev;
-       struct subdev_regulators *sr;
-       int ret;
-
-       if (!dev->of_node || !bus->parent || !pci_is_root_bus(bus->parent))
-               return 0;
-
-       if (dev->driver_data)
-               dev_err(dev, "dev.driver_data unexpectedly non-NULL\n");
-
-       sr = alloc_subdev_regulators(dev);
-       if (!sr)
-               return -ENOMEM;
-
-       dev->driver_data = sr;
-       ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
-       if (ret)
-               return ret;
-
-       ret = regulator_bulk_enable(sr->num_supplies, sr->supplies);
-       if (ret) {
-               dev_err(dev, "failed to enable regulators for downstream device\n");
-               return ret;
-       }
-
-       return 0;
-}
-
-static int brcm_pcie_add_bus(struct pci_bus *bus)
-{
-       struct device *dev = &bus->dev;
-       struct brcm_pcie *pcie = (struct brcm_pcie *) bus->sysdata;
-       int ret;
-
-       if (!dev->of_node || !bus->parent || !pci_is_root_bus(bus->parent))
-               return 0;
-
-       ret = pci_subdev_regulators_add_bus(bus);
-       if (ret)
-               return ret;
-
-       /* Grab the regulators for suspend/resume */
-       pcie->sr = bus->dev.driver_data;
-
-       /*
-        * If we have failed linkup there is no point to return an error as
-        * currently it will cause a WARNING() from pci_alloc_child_bus().
-        * We return 0 and turn on the "refusal_mode" so that any further
-        * accesses to the pci_dev just get 0xffffffff
-        */
-       if (brcm_pcie_linkup(pcie) != 0)
-               pcie->refusal_mode = true;
-
-       return 0;
-}
-
-static void pci_subdev_regulators_remove_bus(struct pci_bus *bus)
-{
-       struct device *dev = &bus->dev;
-       struct subdev_regulators *sr = dev->driver_data;
-
-       if (!sr || !bus->parent || !pci_is_root_bus(bus->parent))
-               return;
-
-       if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
-               dev_err(dev, "failed to disable regulators for downstream device\n");
-       dev->driver_data = NULL;
-}
-
 /* Limits operation to a specific generation (1, 2, or 3) */
 static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
 {
@@ -858,18 +751,6 @@ static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
        /* Accesses to the RC go right to the RC registers if slot==0 */
        if (pci_is_root_bus(bus))
                return PCI_SLOT(devfn) ? NULL : base + where;
-       if (pcie->refusal_mode) {
-               /*
-                * At this point we do not have link.  There will be a CPU
-                * abort -- a quirk with this controller --if Linux tries
-                * to read any config-space registers besides those
-                * targeting the host bridge.  To prevent this we hijack
-                * the address to point to a safe access that will return
-                * 0xffffffff.
-                */
-               writel(0xffffffff, base + PCIE_MISC_RC_BAR2_CONFIG_HI);
-               return base + PCIE_MISC_RC_BAR2_CONFIG_HI + (where & 0x3);
-       }
 
        /* For devices, write to the config space index register */
        idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
@@ -898,8 +779,6 @@ static struct pci_ops brcm_pcie_ops = {
        .map_bus = brcm_pcie_map_conf,
        .read = pci_generic_config_read,
        .write = pci_generic_config_write,
-       .add_bus = brcm_pcie_add_bus,
-       .remove_bus = pci_subdev_regulators_remove_bus,
 };
 
 static struct pci_ops brcm_pcie_ops32 = {
@@ -1047,9 +926,16 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
 
 static int brcm_pcie_setup(struct brcm_pcie *pcie)
 {
+       struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
        u64 rc_bar2_offset, rc_bar2_size;
        void __iomem *base = pcie->base;
-       int ret, memc;
+       struct device *dev = pcie->dev;
+       struct resource_entry *entry;
+       bool ssc_good = false;
+       struct resource *res;
+       int num_out_wins = 0;
+       u16 nlw, cls, lnksta;
+       int i, ret, memc;
        u32 tmp, burst, aspm_support;
 
        /* Reset the bridge */
@@ -1139,40 +1025,6 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
        if (pcie->gen)
                brcm_pcie_set_gen(pcie, pcie->gen);
 
-       /* Don't advertise L0s capability if 'aspm-no-l0s' */
-       aspm_support = PCIE_LINK_STATE_L1;
-       if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
-               aspm_support |= PCIE_LINK_STATE_L0S;
-       tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
-       u32p_replace_bits(&tmp, aspm_support,
-               PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
-       writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
-
-       /*
-        * For config space accesses on the RC, show the right class for
-        * a PCIe-PCIe bridge (the default setting is to be EP mode).
-        */
-       tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
-       u32p_replace_bits(&tmp, 0x060400,
-                         PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
-       writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
-
-       return 0;
-}
-
-static int brcm_pcie_linkup(struct brcm_pcie *pcie)
-{
-       struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
-       struct device *dev = pcie->dev;
-       void __iomem *base = pcie->base;
-       struct resource_entry *entry;
-       struct resource *res;
-       int num_out_wins = 0;
-       u16 nlw, cls, lnksta;
-       bool ssc_good = false;
-       u32 tmp;
-       int ret, i;
-
        /* Unassert the fundamental reset */
        pcie->perst_set(pcie, 0);
 
@@ -1223,6 +1075,24 @@ static int brcm_pcie_linkup(struct brcm_pcie *pcie)
                num_out_wins++;
        }
 
+       /* Don't advertise L0s capability if 'aspm-no-l0s' */
+       aspm_support = PCIE_LINK_STATE_L1;
+       if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
+               aspm_support |= PCIE_LINK_STATE_L0S;
+       tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+       u32p_replace_bits(&tmp, aspm_support,
+               PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
+       writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+
+       /*
+        * For config space accesses on the RC, show the right class for
+        * a PCIe-PCIe bridge (the default setting is to be EP mode).
+        */
+       tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+       u32p_replace_bits(&tmp, 0x060400,
+                         PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
+       writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+
        if (pcie->ssc) {
                ret = brcm_pcie_set_ssc(pcie);
                if (ret == 0)
@@ -1351,21 +1221,9 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
        pcie->bridge_sw_init_set(pcie, 1);
 }
 
-static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
-{
-       bool *ret = data;
-
-       if (device_may_wakeup(&dev->dev)) {
-               *ret = true;
-               dev_info(&dev->dev, "disable cancelled for wake-up device\n");
-       }
-       return (int) *ret;
-}
-
 static int brcm_pcie_suspend(struct device *dev)
 {
        struct brcm_pcie *pcie = dev_get_drvdata(dev);
-       struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
        int ret;
 
        brcm_pcie_turn_off(pcie);
@@ -1383,25 +1241,6 @@ static int brcm_pcie_suspend(struct device *dev)
                return ret;
        }
 
-       if (pcie->sr) {
-               /*
-                * Now turn off the regulators, but if at least one
-                * downstream device is enabled as a wake-up source, do not
-                * turn off regulators.
-                */
-               pcie->ep_wakeup_capable = false;
-               pci_walk_bus(bridge->bus, pci_dev_may_wakeup,
-                            &pcie->ep_wakeup_capable);
-               if (!pcie->ep_wakeup_capable) {
-                       ret = regulator_bulk_disable(pcie->sr->num_supplies,
-                                                    pcie->sr->supplies);
-                       if (ret) {
-                               dev_err(dev, "Could not turn off regulators\n");
-                               reset_control_reset(pcie->rescal);
-                               return ret;
-                       }
-               }
-       }
        clk_disable_unprepare(pcie->clk);
 
        return 0;
@@ -1419,28 +1258,9 @@ static int brcm_pcie_resume(struct device *dev)
        if (ret)
                return ret;
 
-       if (pcie->sr) {
-               if (pcie->ep_wakeup_capable) {
-                       /*
-                        * We are resuming from a suspend.  In the suspend we
-                        * did not disable the power supplies, so there is
-                        * no need to enable them (and falsely increase their
-                        * usage count).
-                        */
-                       pcie->ep_wakeup_capable = false;
-               } else {
-                       ret = regulator_bulk_enable(pcie->sr->num_supplies,
-                                                   pcie->sr->supplies);
-                       if (ret) {
-                               dev_err(dev, "Could not turn on regulators\n");
-                               goto err_disable_clk;
-                       }
-               }
-       }
-
        ret = reset_control_reset(pcie->rescal);
        if (ret)
-               goto err_regulator;
+               goto err_disable_clk;
 
        ret = brcm_phy_start(pcie);
        if (ret)
@@ -1461,10 +1281,6 @@ static int brcm_pcie_resume(struct device *dev)
        if (ret)
                goto err_reset;
 
-       ret = brcm_pcie_linkup(pcie);
-       if (ret)
-               goto err_reset;
-
        if (pcie->msi)
                brcm_msi_set_regs(pcie->msi);
 
@@ -1472,9 +1288,6 @@ static int brcm_pcie_resume(struct device *dev)
 
 err_reset:
        reset_control_rearm(pcie->rescal);
-err_regulator:
-       if (pcie->sr)
-               regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
 err_disable_clk:
        clk_disable_unprepare(pcie->clk);
        return ret;
@@ -1606,17 +1419,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, pcie);
 
-       ret = pci_host_probe(bridge);
-       if (!ret && !brcm_pcie_link_up(pcie))
-               ret = -ENODEV;
-
-       if (ret) {
-               brcm_pcie_remove(pdev);
-               return ret;
-       }
-
-       return 0;
-
+       return pci_host_probe(bridge);
 fail:
        __brcm_pcie_remove(pcie);
        return ret;
@@ -1625,8 +1428,8 @@ fail:
 MODULE_DEVICE_TABLE(of, brcm_pcie_match);
 
 static const struct dev_pm_ops brcm_pcie_pm_ops = {
-       .suspend_noirq = brcm_pcie_suspend,
-       .resume_noirq = brcm_pcie_resume,
+       .suspend = brcm_pcie_suspend,
+       .resume = brcm_pcie_resume,
 };
 
 static struct platform_driver brcm_pcie_driver = {
index 6c1b813..196834e 100644 (file)
@@ -369,7 +369,6 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
 
        dev_dbg(dev, "Parsing dma-ranges property...\n");
        for_each_of_pci_range(&parser, &range) {
-               struct resource_entry *entry;
                /*
                 * If we failed translation or got a zero-sized region
                 * then skip this range
@@ -393,12 +392,7 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
                        goto failed;
                }
 
-               /* Keep the resource list sorted */
-               resource_list_for_each_entry(entry, ib_resources)
-                       if (entry->res->start > res->start)
-                               break;
-
-               pci_add_resource_offset(&entry->node, res,
+               pci_add_resource_offset(ib_resources, res,
                                        res->start - range.pci_addr);
        }
 
index 2f3b69a..49238dd 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/of_device.h>
 #include <linux/acpi.h>
 #include <linux/dma-map-ops.h>
+#include <linux/iommu.h>
 #include "pci.h"
 #include "pcie/portdrv.h"
 
@@ -1620,6 +1621,7 @@ static int pci_bus_num_vf(struct device *dev)
  */
 static int pci_dma_configure(struct device *dev)
 {
+       struct pci_driver *driver = to_pci_driver(dev->driver);
        struct device *bridge;
        int ret = 0;
 
@@ -1635,9 +1637,24 @@ static int pci_dma_configure(struct device *dev)
        }
 
        pci_put_host_bridge_device(bridge);
+
+       if (!ret && !driver->driver_managed_dma) {
+               ret = iommu_device_use_default_domain(dev);
+               if (ret)
+                       arch_teardown_dma_ops(dev);
+       }
+
        return ret;
 }
 
+static void pci_dma_cleanup(struct device *dev)
+{
+       struct pci_driver *driver = to_pci_driver(dev->driver);
+
+       if (!driver->driver_managed_dma)
+               iommu_device_unuse_default_domain(dev);
+}
+
 struct bus_type pci_bus_type = {
        .name           = "pci",
        .match          = pci_bus_match,
@@ -1651,6 +1668,7 @@ struct bus_type pci_bus_type = {
        .pm             = PCI_PM_OPS_PTR,
        .num_vf         = pci_bus_num_vf,
        .dma_configure  = pci_dma_configure,
+       .dma_cleanup    = pci_dma_cleanup,
 };
 EXPORT_SYMBOL(pci_bus_type);
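
[Reviewer note] The new dma_configure/dma_cleanup pair makes the PCI core claim the IOMMU default domain for a device before probe and release it after unbind, unless the bound driver declares that it manages DMA itself. A minimal sketch of a driver opting out, mirroring the pci-stub and portdrv hunks further down (everything except the struct pci_driver fields is illustrative):

	/* Sketch only: a driver that owns its DMA/IOMMU setup (e.g. a VFIO-style
	 * driver).  Setting driver_managed_dma makes pci_dma_configure() skip
	 * iommu_device_use_default_domain() and pci_dma_cleanup() skip the release.
	 */
	static struct pci_driver example_pci_driver = {
		.name               = "example",
		.id_table           = example_ids,	/* assumed defined elsewhere */
		.probe              = example_probe,	/* assumed defined elsewhere */
		.driver_managed_dma = true,
	};
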
 
index e408099..d1f4c1c 100644 (file)
@@ -36,6 +36,7 @@ static struct pci_driver stub_driver = {
        .name           = "pci-stub",
        .id_table       = NULL, /* only dynamic id's */
        .probe          = pci_stub_probe,
+       .driver_managed_dma = true,
 };
 
 static int __init pci_stub_init(void)
index c263ffc..fc804e0 100644 (file)
@@ -567,31 +567,11 @@ static ssize_t driver_override_store(struct device *dev,
                                     const char *buf, size_t count)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
-       char *driver_override, *old, *cp;
-
-       /* We need to keep extra room for a newline */
-       if (count >= (PAGE_SIZE - 1))
-               return -EINVAL;
-
-       driver_override = kstrndup(buf, count, GFP_KERNEL);
-       if (!driver_override)
-               return -ENOMEM;
-
-       cp = strchr(driver_override, '\n');
-       if (cp)
-               *cp = '\0';
-
-       device_lock(dev);
-       old = pdev->driver_override;
-       if (strlen(driver_override)) {
-               pdev->driver_override = driver_override;
-       } else {
-               kfree(driver_override);
-               pdev->driver_override = NULL;
-       }
-       device_unlock(dev);
+       int ret;
 
-       kfree(old);
+       ret = driver_set_override(dev, &pdev->driver_override, buf, count);
+       if (ret)
+               return ret;
 
        return count;
 }
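
[Reviewer note] driver_set_override() (roughly: int driver_set_override(struct device *dev, const char **override, const char *s, size_t len)) centralizes what the deleted lines open-coded: it bounds-checks the write, duplicates the buffer, strips a trailing newline and, under device_lock(), either installs the new override or clears it when the string is empty, freeing the previous value.
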
index 8761828..cfaf40a 100644 (file)
@@ -2967,6 +2967,8 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
                        DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
                },
+       },
+       {
                /*
                 * Downstream device is not accessible after putting a root port
                 * into D3cold and back into D0 on Elo i2.
index 4b88016..7f8788a 100644 (file)
@@ -202,6 +202,8 @@ static struct pci_driver pcie_portdriver = {
 
        .err_handler    = &pcie_portdrv_err_handler,
 
+       .driver_managed_dma = true,
+
        .driver.pm      = PCIE_PORTDRV_PM_OPS,
 };
 
index ec977f0..bf495bf 100644 (file)
@@ -151,7 +151,7 @@ config TCIC
 
 config PCMCIA_ALCHEMY_DEVBOARD
        tristate "Alchemy Db/Pb1xxx PCMCIA socket services"
-       depends on MIPS_ALCHEMY && PCMCIA
+       depends on MIPS_DB1XXX && PCMCIA
        help
          Enable this driver of you want PCMCIA support on your Alchemy
          Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200, DB1300
index c43267b..c59ddde 100644 (file)
@@ -50,18 +50,5 @@ sa1100_cs-$(CONFIG_SA1100_SIMPAD)            += sa1100_simpad.o
 
 pxa2xx-obj-$(CONFIG_MACH_MAINSTONE)            += pxa2xx_mainstone.o
 pxa2xx-obj-$(CONFIG_PXA_SHARPSL)               += pxa2xx_sharpsl.o
-pxa2xx-obj-$(CONFIG_ARCOM_PCMCIA)              += pxa2xx_viper.o
-pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA)            += pxa2xx_trizeps4.o
-pxa2xx-obj-$(CONFIG_MACH_PALMTX)               += pxa2xx_palmtx.o
-pxa2xx-obj-$(CONFIG_MACH_PALMTC)               += pxa2xx_palmtc.o
-pxa2xx-obj-$(CONFIG_MACH_PALMLD)               += pxa2xx_palmld.o
-pxa2xx-obj-$(CONFIG_MACH_E740)                 += pxa2xx_e740.o
-pxa2xx-obj-$(CONFIG_MACH_VPAC270)              += pxa2xx_vpac270.o
-pxa2xx-obj-$(CONFIG_MACH_BALLOON3)             += pxa2xx_balloon3.o
-pxa2xx-obj-$(CONFIG_MACH_COLIBRI)              += pxa2xx_colibri.o
-pxa2xx-obj-$(CONFIG_MACH_COLIBRI320)           += pxa2xx_colibri.o
-pxa2xx-obj-$(CONFIG_MACH_H4700)                        += pxa2xx_hx4700.o
-
 obj-$(CONFIG_PCMCIA_PXA2XX)                    += pxa2xx_base.o $(pxa2xx-obj-y)
-
 obj-$(CONFIG_PCMCIA_XXS1500)                   += xxs1500_ss.o
index 16f5731..bb06311 100644 (file)
@@ -327,10 +327,11 @@ static int bcm63xx_drv_pcmcia_probe(struct platform_device *pdev)
 {
        struct bcm63xx_pcmcia_socket *skt;
        struct pcmcia_socket *sock;
-       struct resource *res, *irq_res;
+       struct resource *res;
        unsigned int regmem_size = 0, iomem_size = 0;
        u32 val;
        int ret;
+       int irq;
 
        skt = kzalloc(sizeof(*skt), GFP_KERNEL);
        if (!skt)
@@ -342,9 +343,9 @@ static int bcm63xx_drv_pcmcia_probe(struct platform_device *pdev)
        /* make sure we have all resources we need */
        skt->common_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        skt->attr_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-       irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       irq = platform_get_irq(pdev, 0);
        skt->pd = pdev->dev.platform_data;
-       if (!skt->common_res || !skt->attr_res || !irq_res || !skt->pd) {
+       if (!skt->common_res || !skt->attr_res || (irq < 0) || !skt->pd) {
                ret = -EINVAL;
                goto err;
        }
@@ -380,7 +381,7 @@ static int bcm63xx_drv_pcmcia_probe(struct platform_device *pdev)
        sock->dev.parent = &pdev->dev;
        sock->features = SS_CAP_STATIC_MAP | SS_CAP_PCCARD;
        sock->io_offset = (unsigned long)skt->io_base;
-       sock->pci_irq = irq_res->start;
+       sock->pci_irq = irq;
 
 #ifdef CONFIG_CARDBUS
        sock->cb_dev = bcm63xx_cb_dev;
index d6d2f75..0ea41f1 100644 (file)
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/platform_device.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/soc/pxa/smemc.h>
 
-#include <mach/hardware.h>
-#include <mach/smemc.h>
 #include <asm/io.h>
 #include <asm/irq.h>
-#include <mach/pxa2xx-regs.h>
 #include <asm/mach-types.h>
 
 #include <pcmcia/ss.h>
@@ -113,7 +112,7 @@ static inline u_int pxa2xx_pcmcia_cmd_time(u_int mem_clk_10khz,
        return (300000 * (pcmcia_mcxx_asst + 1) / mem_clk_10khz);
 }
 
-static int pxa2xx_pcmcia_set_mcmem( int sock, int speed, int clock )
+static uint32_t pxa2xx_pcmcia_mcmem(int sock, int speed, int clock)
 {
        uint32_t val;
 
@@ -124,12 +123,10 @@ static int pxa2xx_pcmcia_set_mcmem( int sock, int speed, int clock )
                | ((pxa2xx_mcxx_hold(speed, clock)
                & MCXX_HOLD_MASK) << MCXX_HOLD_SHIFT);
 
-       __raw_writel(val, MCMEM(sock));
-
-       return 0;
+       return val;
 }
 
-static int pxa2xx_pcmcia_set_mcio( int sock, int speed, int clock )
+static int pxa2xx_pcmcia_mcio(int sock, int speed, int clock)
 {
        uint32_t val;
 
@@ -140,12 +137,11 @@ static int pxa2xx_pcmcia_set_mcio( int sock, int speed, int clock )
                | ((pxa2xx_mcxx_hold(speed, clock)
                & MCXX_HOLD_MASK) << MCXX_HOLD_SHIFT);
 
-       __raw_writel(val, MCIO(sock));
 
-       return 0;
+       return val;
 }
 
-static int pxa2xx_pcmcia_set_mcatt( int sock, int speed, int clock )
+static int pxa2xx_pcmcia_mcatt(int sock, int speed, int clock)
 {
        uint32_t val;
 
@@ -156,31 +152,26 @@ static int pxa2xx_pcmcia_set_mcatt( int sock, int speed, int clock )
                | ((pxa2xx_mcxx_hold(speed, clock)
                & MCXX_HOLD_MASK) << MCXX_HOLD_SHIFT);
 
-       __raw_writel(val, MCATT(sock));
 
-       return 0;
+       return val;
 }
 
-static int pxa2xx_pcmcia_set_mcxx(struct soc_pcmcia_socket *skt, unsigned int clk)
+static int pxa2xx_pcmcia_set_timing(struct soc_pcmcia_socket *skt)
 {
+       unsigned long clk = clk_get_rate(skt->clk) / 10000;
        struct soc_pcmcia_timing timing;
        int sock = skt->nr;
 
        soc_common_pcmcia_get_timing(skt, &timing);
 
-       pxa2xx_pcmcia_set_mcmem(sock, timing.mem, clk);
-       pxa2xx_pcmcia_set_mcatt(sock, timing.attr, clk);
-       pxa2xx_pcmcia_set_mcio(sock, timing.io, clk);
+       pxa_smemc_set_pcmcia_timing(sock,
+               pxa2xx_pcmcia_mcmem(sock, timing.mem, clk),
+               pxa2xx_pcmcia_mcatt(sock, timing.attr, clk),
+               pxa2xx_pcmcia_mcio(sock, timing.io, clk));
 
        return 0;
 }
 
-static int pxa2xx_pcmcia_set_timing(struct soc_pcmcia_socket *skt)
-{
-       unsigned long clk = clk_get_rate(skt->clk);
-       return pxa2xx_pcmcia_set_mcxx(skt, clk / 10000);
-}
-
 #ifdef CONFIG_CPU_FREQ
 
 static int
@@ -215,18 +206,13 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
 
 void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops)
 {
-       /*
-        * We have at least one socket, so set MECR:CIT
-        * (Card Is There)
-        */
-       uint32_t mecr = MECR_CIT;
+       int nr = 1;
 
-       /* Set MECR:NOS (Number Of Sockets) */
        if ((ops->first + ops->nr) > 1 ||
            machine_is_viper() || machine_is_arcom_zeus())
-               mecr |= MECR_NOS;
+               nr = 2;
 
-       __raw_writel(mecr, MECR);
+       pxa_smemc_set_pcmcia_socket(nr);
 }
 EXPORT_SYMBOL(pxa2xx_configure_sockets);
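
[Reviewer note] The conversion above keeps the timing arithmetic and only changes who touches the SMEMC registers: the computed MCMEM/MCATT/MCIO words and the socket count are handed to the platform code via pxa_smemc_set_pcmcia_timing() and pxa_smemc_set_pcmcia_socket() instead of being written with __raw_writel() from the driver. Worked example for pxa2xx_pcmcia_cmd_time() (numbers are illustrative, not from the patch): with a memory clock of about 99.5 MHz, i.e. mem_clk_10khz = 9950, and an assertion field of 2, it returns 300000 * (2 + 1) / 9950 ~= 90, i.e. roughly 90 ns of command time (three clock periods per assertion step).
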
 
index 5fdd25a..b3ba858 100644 (file)
 #include <linux/platform_device.h>
 
 #include <asm/mach-types.h>
-#include <mach/hardware.h>
 #include <asm/irq.h>
 #include <asm/hardware/scoop.h>
 
-#include "soc_common.h"
+#include <pcmcia/soc_common.h>
 
 #define        NO_KEEP_VS 0x0001
 #define SCOOP_DEV platform_scoop_config->devs
index 6b6c578..ad1141f 100644 (file)
@@ -394,7 +394,7 @@ static int do_validate_mem(struct pcmcia_socket *s,
  * do_mem_probe() checks a memory region for use by the PCMCIA subsystem.
  * To do so, the area is split up into sensible parts, and then passed
  * into the @validate() function. Only if @validate() and @fallback() fail,
- * the area is marked as unavaibale for use by the PCMCIA subsystem. The
+ * the area is marked as unavailable for use by the PCMCIA subsystem. The
  * function returns the size of the usable memory area.
  */
 static int do_mem_probe(struct pcmcia_socket *s, u_long base, u_long num,
index 29fdd17..bce664b 100644 (file)
@@ -17,7 +17,6 @@
 
 #include <pcmcia/ss.h>
 
-#include <mach/hardware.h>
 #include <asm/hardware/sa1111.h>
 #include <asm/mach-types.h>
 #include <asm/irq.h>
index 7feb8d6..f1b5160 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 
-#include <mach/hardware.h>
 #include <asm/hardware/sa1111.h>
 #include <asm/mach-types.h>
 
index 3a8c84b..61b0c89 100644 (file)
@@ -46,8 +46,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/spinlock.h>
 #include <linux/timer.h>
-
-#include <mach/hardware.h>
+#include <linux/pci.h>
 
 #include "soc_common.h"
 
@@ -784,8 +783,7 @@ void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt)
        /* should not be required; violates some lowlevel drivers */
        soc_common_pcmcia_config_skt(skt, &dead_socket);
 
-       iounmap(skt->virt_io);
-       skt->virt_io = NULL;
+       iounmap(PCI_IOBASE + skt->res_io_io.start);
        release_resource(&skt->res_attr);
        release_resource(&skt->res_mem);
        release_resource(&skt->res_io);
@@ -818,11 +816,12 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
        if (ret)
                goto out_err_4;
 
-       skt->virt_io = ioremap(skt->res_io.start, 0x10000);
-       if (skt->virt_io == NULL) {
-               ret = -ENOMEM;
+       skt->res_io_io = (struct resource)
+                DEFINE_RES_IO_NAMED(skt->nr * 0x1000 + 0x10000, 0x1000,
+                                    "PCMCIA I/O");
+       ret = pci_remap_iospace(&skt->res_io_io, skt->res_io.start);
+       if (ret)
                goto out_err_5;
-       }
 
        /*
         * We initialize default socket timing here, because
@@ -840,7 +839,7 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
        skt->socket.resource_ops = &pccard_static_ops;
        skt->socket.irq_mask = 0;
        skt->socket.map_size = PAGE_SIZE;
-       skt->socket.io_offset = (unsigned long)skt->virt_io;
+       skt->socket.io_offset = (unsigned long)skt->res_io_io.start;
 
        skt->status = soc_common_pcmcia_skt_state(skt);
 
@@ -874,7 +873,7 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
  out_err_7:
        soc_pcmcia_hw_shutdown(skt);
  out_err_6:
-       iounmap(skt->virt_io);
+       iounmap(PCI_IOBASE + skt->res_io_io.start);
  out_err_5:
        release_resource(&skt->res_attr);
  out_err_4:
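
[Reviewer note] With the DEFINE_RES_IO_NAMED() layout above, each socket now claims a fixed 4 KiB window in the PCI I/O port space at 0x10000 + nr * 0x1000: socket 0 covers ports 0x10000-0x10fff, socket 1 covers 0x11000-0x11fff. pci_remap_iospace() maps the socket's physical I/O base into that window, io_offset becomes a port-space offset rather than the kernel virtual address the removed ioremap() returned, and teardown unmaps PCI_IOBASE + res_io_io.start instead of the old skt->virt_io cookie.
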
index 222e81c..17ef05a 100644 (file)
 /* include the world */
 #include <linux/clk.h>
 #include <linux/cpufreq.h>
-#include <pcmcia/ss.h>
 #include <pcmcia/cistpl.h>
-
+#include <pcmcia/soc_common.h>
 
 struct device;
 struct gpio_desc;
 struct pcmcia_low_level;
 struct regulator;
 
-struct soc_pcmcia_regulator {
-       struct regulator        *reg;
-       bool                    on;
-};
-
-/*
- * This structure encapsulates per-socket state which we might need to
- * use when responding to a Card Services query of some kind.
- */
-struct soc_pcmcia_socket {
-       struct pcmcia_socket    socket;
-
-       /*
-        * Info from low level handler
-        */
-       unsigned int            nr;
-       struct clk              *clk;
-
-       /*
-        * Core PCMCIA state
-        */
-       const struct pcmcia_low_level *ops;
-
-       unsigned int            status;
-       socket_state_t          cs_state;
-
-       unsigned short          spd_io[MAX_IO_WIN];
-       unsigned short          spd_mem[MAX_WIN];
-       unsigned short          spd_attr[MAX_WIN];
-
-       struct resource         res_skt;
-       struct resource         res_io;
-       struct resource         res_mem;
-       struct resource         res_attr;
-       void __iomem            *virt_io;
-
-       struct {
-               int             gpio;
-               struct gpio_desc *desc;
-               unsigned int    irq;
-               const char      *name;
-       } stat[6];
-#define SOC_STAT_CD            0       /* Card detect */
-#define SOC_STAT_BVD1          1       /* BATDEAD / IOSTSCHG */
-#define SOC_STAT_BVD2          2       /* BATWARN / IOSPKR */
-#define SOC_STAT_RDY           3       /* Ready / Interrupt */
-#define SOC_STAT_VS1           4       /* Voltage sense 1 */
-#define SOC_STAT_VS2           5       /* Voltage sense 2 */
-
-       struct gpio_desc        *gpio_reset;
-       struct gpio_desc        *gpio_bus_enable;
-       struct soc_pcmcia_regulator vcc;
-       struct soc_pcmcia_regulator vpp;
-
-       unsigned int            irq_state;
-
-#ifdef CONFIG_CPU_FREQ
-       struct notifier_block   cpufreq_nb;
-#endif
-       struct timer_list       poll_timer;
-       struct list_head        node;
-       void *driver_data;
-};
-
 struct skt_dev_info {
        int nskt;
        struct soc_pcmcia_socket skt[];
 };
 
-struct pcmcia_state {
-  unsigned detect: 1,
-            ready: 1,
-             bvd1: 1,
-             bvd2: 1,
-           wrprot: 1,
-            vs_3v: 1,
-            vs_Xv: 1;
-};
-
-struct pcmcia_low_level {
-       struct module *owner;
-
-       /* first socket in system */
-       int first;
-       /* nr of sockets */
-       int nr;
-
-       int (*hw_init)(struct soc_pcmcia_socket *);
-       void (*hw_shutdown)(struct soc_pcmcia_socket *);
-
-       void (*socket_state)(struct soc_pcmcia_socket *, struct pcmcia_state *);
-       int (*configure_socket)(struct soc_pcmcia_socket *, const socket_state_t *);
-
-       /*
-        * Enable card status IRQs on (re-)initialisation.  This can
-        * be called at initialisation, power management event, or
-        * pcmcia event.
-        */
-       void (*socket_init)(struct soc_pcmcia_socket *);
-
-       /*
-        * Disable card status IRQs and PCMCIA bus on suspend.
-        */
-       void (*socket_suspend)(struct soc_pcmcia_socket *);
-
-       /*
-        * Hardware specific timing routines.
-        * If provided, the get_timing routine overrides the SOC default.
-        */
-       unsigned int (*get_timing)(struct soc_pcmcia_socket *, unsigned int, unsigned int);
-       int (*set_timing)(struct soc_pcmcia_socket *);
-       int (*show_timing)(struct soc_pcmcia_socket *, char *);
-
-#ifdef CONFIG_CPU_FREQ
-       /*
-        * CPUFREQ support.
-        */
-       int (*frequency_change)(struct soc_pcmcia_socket *, unsigned long, struct cpufreq_freqs *);
-#endif
-};
-
-
 struct soc_pcmcia_timing {
        unsigned short io;
        unsigned short mem;
index 82b63e6..300b0f2 100644 (file)
@@ -64,6 +64,7 @@ config USB_LGM_PHY
 config PHY_CAN_TRANSCEIVER
        tristate "CAN transceiver PHY"
        select GENERIC_PHY
+       select MULTIPLEXER
        help
          This option enables support for CAN transceivers as a PHY. This
          driver provides function for putting the transceivers in various
index f0bc87d..3900f16 100644 (file)
 #define SUN6I_DPHY_TX_CTL_REG          0x04
 #define SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT       BIT(28)
 
+#define SUN6I_DPHY_RX_CTL_REG          0x08
+#define SUN6I_DPHY_RX_CTL_EN_DBC       BIT(31)
+#define SUN6I_DPHY_RX_CTL_RX_CLK_FORCE BIT(24)
+#define SUN6I_DPHY_RX_CTL_RX_D3_FORCE  BIT(23)
+#define SUN6I_DPHY_RX_CTL_RX_D2_FORCE  BIT(22)
+#define SUN6I_DPHY_RX_CTL_RX_D1_FORCE  BIT(21)
+#define SUN6I_DPHY_RX_CTL_RX_D0_FORCE  BIT(20)
+
 #define SUN6I_DPHY_TX_TIME0_REG                0x10
 #define SUN6I_DPHY_TX_TIME0_HS_TRAIL(n)                (((n) & 0xff) << 24)
 #define SUN6I_DPHY_TX_TIME0_HS_PREPARE(n)      (((n) & 0xff) << 16)
 #define SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(n)      (((n) & 0xff) << 8)
 #define SUN6I_DPHY_TX_TIME4_HS_TX_ANA0(n)      ((n) & 0xff)
 
+#define SUN6I_DPHY_RX_TIME0_REG                0x30
+#define SUN6I_DPHY_RX_TIME0_HS_RX_SYNC(n)      (((n) & 0xff) << 24)
+#define SUN6I_DPHY_RX_TIME0_HS_RX_CLK_MISS(n)  (((n) & 0xff) << 16)
+#define SUN6I_DPHY_RX_TIME0_LP_RX(n)           (((n) & 0xff) << 8)
+
+#define SUN6I_DPHY_RX_TIME1_REG                0x34
+#define SUN6I_DPHY_RX_TIME1_RX_DLY(n)          (((n) & 0xfff) << 20)
+#define SUN6I_DPHY_RX_TIME1_LP_RX_ULPS_WP(n)   ((n) & 0xfffff)
+
+#define SUN6I_DPHY_RX_TIME2_REG                0x38
+#define SUN6I_DPHY_RX_TIME2_HS_RX_ANA1(n)      (((n) & 0xff) << 8)
+#define SUN6I_DPHY_RX_TIME2_HS_RX_ANA0(n)      ((n) & 0xff)
+
+#define SUN6I_DPHY_RX_TIME3_REG                0x40
+#define SUN6I_DPHY_RX_TIME3_LPRST_DLY(n)       (((n) & 0xffff) << 16)
+
 #define SUN6I_DPHY_ANA0_REG            0x4c
 #define SUN6I_DPHY_ANA0_REG_PWS                        BIT(31)
 #define SUN6I_DPHY_ANA0_REG_DMPC               BIT(28)
 #define SUN6I_DPHY_ANA0_REG_DMPD(n)            (((n) & 0xf) << 24)
 #define SUN6I_DPHY_ANA0_REG_SLV(n)             (((n) & 7) << 12)
 #define SUN6I_DPHY_ANA0_REG_DEN(n)             (((n) & 0xf) << 8)
+#define SUN6I_DPHY_ANA0_REG_SFB(n)             (((n) & 3) << 2)
 
 #define SUN6I_DPHY_ANA1_REG            0x50
 #define SUN6I_DPHY_ANA1_REG_VTTMODE            BIT(31)
 
 #define SUN6I_DPHY_DBG5_REG            0xf4
 
+enum sun6i_dphy_direction {
+       SUN6I_DPHY_DIRECTION_TX,
+       SUN6I_DPHY_DIRECTION_RX,
+};
+
 struct sun6i_dphy {
        struct clk                              *bus_clk;
        struct clk                              *mod_clk;
@@ -92,6 +122,8 @@ struct sun6i_dphy {
 
        struct phy                              *phy;
        struct phy_configure_opts_mipi_dphy     config;
+
+       enum sun6i_dphy_direction               direction;
 };
 
 static int sun6i_dphy_init(struct phy *phy)
@@ -119,9 +151,8 @@ static int sun6i_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
        return 0;
 }
 
-static int sun6i_dphy_power_on(struct phy *phy)
+static int sun6i_dphy_tx_power_on(struct sun6i_dphy *dphy)
 {
-       struct sun6i_dphy *dphy = phy_get_drvdata(phy);
        u8 lanes_mask = GENMASK(dphy->config.lanes - 1, 0);
 
        regmap_write(dphy->regs, SUN6I_DPHY_TX_CTL_REG,
@@ -211,12 +242,129 @@ static int sun6i_dphy_power_on(struct phy *phy)
        return 0;
 }
 
+static int sun6i_dphy_rx_power_on(struct sun6i_dphy *dphy)
+{
+       /* Physical clock rate is actually half of symbol rate with DDR. */
+       unsigned long mipi_symbol_rate = dphy->config.hs_clk_rate;
+       unsigned long dphy_clk_rate;
+       unsigned int rx_dly;
+       unsigned int lprst_dly;
+       u32 value;
+
+       dphy_clk_rate = clk_get_rate(dphy->mod_clk);
+       if (!dphy_clk_rate)
+               return -EINVAL;
+
+       /* Hardcoded timing parameters from the Allwinner BSP. */
+       regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME0_REG,
+                    SUN6I_DPHY_RX_TIME0_HS_RX_SYNC(255) |
+                    SUN6I_DPHY_RX_TIME0_HS_RX_CLK_MISS(255) |
+                    SUN6I_DPHY_RX_TIME0_LP_RX(255));
+
+       /*
+        * Formula from the Allwinner BSP, with hardcoded coefficients
+        * (probably internal divider/multiplier).
+        */
+       rx_dly = 8 * (unsigned int)(dphy_clk_rate / (mipi_symbol_rate / 8));
+
+       /*
+        * The Allwinner BSP has an alternative formula for LP_RX_ULPS_WP:
+        * lp_ulps_wp_cnt = lp_ulps_wp_ms * lp_clk / 1000
+        * but does not use it and hardcodes 255 instead.
+        */
+       regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME1_REG,
+                    SUN6I_DPHY_RX_TIME1_RX_DLY(rx_dly) |
+                    SUN6I_DPHY_RX_TIME1_LP_RX_ULPS_WP(255));
+
+       /* HS_RX_ANA0 value is hardcoded in the Allwinner BSP. */
+       regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME2_REG,
+                    SUN6I_DPHY_RX_TIME2_HS_RX_ANA0(4));
+
+       /*
+        * Formula from the Allwinner BSP, with hardcoded coefficients
+        * (probably internal divider/multiplier).
+        */
+       lprst_dly = 4 * (unsigned int)(dphy_clk_rate / (mipi_symbol_rate / 2));
+
+       regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME3_REG,
+                    SUN6I_DPHY_RX_TIME3_LPRST_DLY(lprst_dly));
+
+       /* Analog parameters are hardcoded in the Allwinner BSP. */
+       regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG,
+                    SUN6I_DPHY_ANA0_REG_PWS |
+                    SUN6I_DPHY_ANA0_REG_SLV(7) |
+                    SUN6I_DPHY_ANA0_REG_SFB(2));
+
+       regmap_write(dphy->regs, SUN6I_DPHY_ANA1_REG,
+                    SUN6I_DPHY_ANA1_REG_SVTT(4));
+
+       regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG,
+                    SUN6I_DPHY_ANA4_REG_DMPLVC |
+                    SUN6I_DPHY_ANA4_REG_DMPLVD(1));
+
+       regmap_write(dphy->regs, SUN6I_DPHY_ANA2_REG,
+                    SUN6I_DPHY_ANA2_REG_ENIB);
+
+       regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG,
+                    SUN6I_DPHY_ANA3_EN_LDOR |
+                    SUN6I_DPHY_ANA3_EN_LDOC |
+                    SUN6I_DPHY_ANA3_EN_LDOD);
+
+       /*
+        * Delay comes from the Allwinner BSP, likely for internal regulator
+        * ramp-up.
+        */
+       udelay(3);
+
+       value = SUN6I_DPHY_RX_CTL_EN_DBC | SUN6I_DPHY_RX_CTL_RX_CLK_FORCE;
+
+       /*
+        * Rx data lane force-enable bits are used as regular RX enable by the
+        * Allwinner BSP.
+        */
+       if (dphy->config.lanes >= 1)
+               value |= SUN6I_DPHY_RX_CTL_RX_D0_FORCE;
+       if (dphy->config.lanes >= 2)
+               value |= SUN6I_DPHY_RX_CTL_RX_D1_FORCE;
+       if (dphy->config.lanes >= 3)
+               value |= SUN6I_DPHY_RX_CTL_RX_D2_FORCE;
+       if (dphy->config.lanes == 4)
+               value |= SUN6I_DPHY_RX_CTL_RX_D3_FORCE;
+
+       regmap_write(dphy->regs, SUN6I_DPHY_RX_CTL_REG, value);
+
+       regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG,
+                    SUN6I_DPHY_GCTL_LANE_NUM(dphy->config.lanes) |
+                    SUN6I_DPHY_GCTL_EN);
+
+       return 0;
+}
+
+static int sun6i_dphy_power_on(struct phy *phy)
+{
+       struct sun6i_dphy *dphy = phy_get_drvdata(phy);
+
+       switch (dphy->direction) {
+       case SUN6I_DPHY_DIRECTION_TX:
+               return sun6i_dphy_tx_power_on(dphy);
+       case SUN6I_DPHY_DIRECTION_RX:
+               return sun6i_dphy_rx_power_on(dphy);
+       default:
+               return -EINVAL;
+       }
+}
+
 static int sun6i_dphy_power_off(struct phy *phy)
 {
        struct sun6i_dphy *dphy = phy_get_drvdata(phy);
 
-       regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA1_REG,
-                          SUN6I_DPHY_ANA1_REG_VTTMODE, 0);
+       regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG, 0);
+
+       regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG, 0);
+       regmap_write(dphy->regs, SUN6I_DPHY_ANA1_REG, 0);
+       regmap_write(dphy->regs, SUN6I_DPHY_ANA2_REG, 0);
+       regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG, 0);
+       regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG, 0);
 
        return 0;
 }
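
[Reviewer note] Worked example for the RX delay formulas above (clock values are illustrative, not taken from the patch): with a 150 MHz D-PHY module clock and an hs_clk_rate of 250000000, integer arithmetic gives rx_dly = 8 * (150000000 / (250000000 / 8)) = 8 * (150000000 / 31250000) = 8 * 4 = 32 and lprst_dly = 4 * (150000000 / (250000000 / 2)) = 4 * (150000000 / 125000000) = 4 * 1 = 4, which land in the RX_TIME1 and RX_TIME3 fields respectively. The remaining RX_TIME0/RX_TIME2 and ANA* settings are, as the comments note, constants carried over from the Allwinner BSP.
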
@@ -253,7 +401,9 @@ static int sun6i_dphy_probe(struct platform_device *pdev)
 {
        struct phy_provider *phy_provider;
        struct sun6i_dphy *dphy;
+       const char *direction;
        void __iomem *regs;
+       int ret;
 
        dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
        if (!dphy)
@@ -290,6 +440,14 @@ static int sun6i_dphy_probe(struct platform_device *pdev)
                return PTR_ERR(dphy->phy);
        }
 
+       dphy->direction = SUN6I_DPHY_DIRECTION_TX;
+
+       ret = of_property_read_string(pdev->dev.of_node, "allwinner,direction",
+                                     &direction);
+
+       if (!ret && !strncmp(direction, "rx", 2))
+               dphy->direction = SUN6I_DPHY_DIRECTION_RX;
+
        phy_set_drvdata(dphy->phy, dphy);
        phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
 
index 6b917f7..73fb99c 100644 (file)
@@ -83,6 +83,7 @@
 #define SIERRA_DFE_BIASTRIM_PREG                       0x04C
 #define SIERRA_DRVCTRL_ATTEN_PREG                      0x06A
 #define SIERRA_DRVCTRL_BOOST_PREG                      0x06F
+#define SIERRA_TX_RCVDET_OVRD_PREG                     0x072
 #define SIERRA_CLKPATHCTRL_TMR_PREG                    0x081
 #define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG               0x085
 #define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG               0x086
@@ -1684,6 +1685,66 @@ static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = {
        .num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs),
 };
 
+/*
+ * TI J721E:
+ * refclk100MHz_32b_PCIe_ln_no_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ti_ml_pcie_100_no_ssc_ln_regs[] = {
+       {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+       {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+       {0x0004, SIERRA_PSC_LN_A3_PREG},
+       {0x0004, SIERRA_PSC_LN_A4_PREG},
+       {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+       {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+       {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+       {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+       {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+       {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+       {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+       {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+       {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+       {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+       {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+       {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+       {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+       {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+       {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+       {0x0041, SIERRA_DEQ_GLUT0},
+       {0x0082, SIERRA_DEQ_GLUT1},
+       {0x00C3, SIERRA_DEQ_GLUT2},
+       {0x0145, SIERRA_DEQ_GLUT3},
+       {0x0186, SIERRA_DEQ_GLUT4},
+       {0x09E7, SIERRA_DEQ_ALUT0},
+       {0x09A6, SIERRA_DEQ_ALUT1},
+       {0x0965, SIERRA_DEQ_ALUT2},
+       {0x08E3, SIERRA_DEQ_ALUT3},
+       {0x00FA, SIERRA_DEQ_DFETAP0},
+       {0x00FA, SIERRA_DEQ_DFETAP1},
+       {0x00FA, SIERRA_DEQ_DFETAP2},
+       {0x00FA, SIERRA_DEQ_DFETAP3},
+       {0x00FA, SIERRA_DEQ_DFETAP4},
+       {0x000F, SIERRA_DEQ_PRECUR_PREG},
+       {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+       {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+       {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+       {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+       {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+       {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+       {0x002B, SIERRA_CPI_TRIM_PREG},
+       {0x0003, SIERRA_EPI_CTRL_PREG},
+       {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+       {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+       {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+       {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
+       {0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
+};
+
+static struct cdns_sierra_vals ti_ml_pcie_100_no_ssc_ln_vals = {
+       .reg_pairs = ti_ml_pcie_100_no_ssc_ln_regs,
+       .num_regs = ARRAY_SIZE(ti_ml_pcie_100_no_ssc_ln_regs),
+};
+
 /* refclk100MHz_32b_PCIe_cmn_pll_int_ssc, pcie_links_using_plllc, pipe_bw_3 */
 static const struct cdns_reg_pairs pcie_100_int_ssc_plllc_cmn_regs[] = {
        {0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
@@ -1765,6 +1826,69 @@ static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = {
        .num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs),
 };
 
+/*
+ * TI J721E:
+ * refclk100MHz_32b_PCIe_ln_int_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ti_ml_pcie_100_int_ssc_ln_regs[] = {
+       {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+       {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+       {0x0004, SIERRA_PSC_LN_A3_PREG},
+       {0x0004, SIERRA_PSC_LN_A4_PREG},
+       {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+       {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+       {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+       {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+       {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+       {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+       {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+       {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+       {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+       {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+       {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+       {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+       {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+       {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+       {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+       {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+       {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+       {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+       {0x0041, SIERRA_DEQ_GLUT0},
+       {0x0082, SIERRA_DEQ_GLUT1},
+       {0x00C3, SIERRA_DEQ_GLUT2},
+       {0x0145, SIERRA_DEQ_GLUT3},
+       {0x0186, SIERRA_DEQ_GLUT4},
+       {0x09E7, SIERRA_DEQ_ALUT0},
+       {0x09A6, SIERRA_DEQ_ALUT1},
+       {0x0965, SIERRA_DEQ_ALUT2},
+       {0x08E3, SIERRA_DEQ_ALUT3},
+       {0x00FA, SIERRA_DEQ_DFETAP0},
+       {0x00FA, SIERRA_DEQ_DFETAP1},
+       {0x00FA, SIERRA_DEQ_DFETAP2},
+       {0x00FA, SIERRA_DEQ_DFETAP3},
+       {0x00FA, SIERRA_DEQ_DFETAP4},
+       {0x000F, SIERRA_DEQ_PRECUR_PREG},
+       {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+       {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+       {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+       {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+       {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+       {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+       {0x002B, SIERRA_CPI_TRIM_PREG},
+       {0x0003, SIERRA_EPI_CTRL_PREG},
+       {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+       {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+       {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+       {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
+       {0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
+};
+
+static struct cdns_sierra_vals ti_ml_pcie_100_int_ssc_ln_vals = {
+       .reg_pairs = ti_ml_pcie_100_int_ssc_ln_regs,
+       .num_regs = ARRAY_SIZE(ti_ml_pcie_100_int_ssc_ln_regs),
+};
+
 /* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc, pcie_links_using_plllc, pipe_bw_3 */
 static const struct cdns_reg_pairs pcie_100_ext_ssc_plllc_cmn_regs[] = {
        {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
@@ -1840,6 +1964,69 @@ static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = {
        .num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs),
 };
 
+/*
+ * TI J721E:
+ * refclk100MHz_32b_PCIe_ln_ext_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ti_ml_pcie_100_ext_ssc_ln_regs[] = {
+       {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+       {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+       {0x0004, SIERRA_PSC_LN_A3_PREG},
+       {0x0004, SIERRA_PSC_LN_A4_PREG},
+       {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+       {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+       {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+       {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+       {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+       {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+       {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+       {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+       {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+       {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+       {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+       {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+       {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+       {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+       {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+       {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+       {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+       {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+       {0x0041, SIERRA_DEQ_GLUT0},
+       {0x0082, SIERRA_DEQ_GLUT1},
+       {0x00C3, SIERRA_DEQ_GLUT2},
+       {0x0145, SIERRA_DEQ_GLUT3},
+       {0x0186, SIERRA_DEQ_GLUT4},
+       {0x09E7, SIERRA_DEQ_ALUT0},
+       {0x09A6, SIERRA_DEQ_ALUT1},
+       {0x0965, SIERRA_DEQ_ALUT2},
+       {0x08E3, SIERRA_DEQ_ALUT3},
+       {0x00FA, SIERRA_DEQ_DFETAP0},
+       {0x00FA, SIERRA_DEQ_DFETAP1},
+       {0x00FA, SIERRA_DEQ_DFETAP2},
+       {0x00FA, SIERRA_DEQ_DFETAP3},
+       {0x00FA, SIERRA_DEQ_DFETAP4},
+       {0x000F, SIERRA_DEQ_PRECUR_PREG},
+       {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+       {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+       {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+       {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+       {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+       {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+       {0x002B, SIERRA_CPI_TRIM_PREG},
+       {0x0003, SIERRA_EPI_CTRL_PREG},
+       {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+       {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+       {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+       {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
+       {0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
+};
+
+static struct cdns_sierra_vals ti_ml_pcie_100_ext_ssc_ln_vals = {
+       .reg_pairs = ti_ml_pcie_100_ext_ssc_ln_regs,
+       .num_regs = ARRAY_SIZE(ti_ml_pcie_100_ext_ssc_ln_regs),
+};
+
 /* refclk100MHz_32b_PCIe_cmn_pll_no_ssc */
 static const struct cdns_reg_pairs cdns_pcie_cmn_regs_no_ssc[] = {
        {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
@@ -2299,9 +2486,9 @@ static const struct cdns_sierra_data cdns_ti_map_sierra = {
                                [INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
                        },
                        [TYPE_QSGMII] = {
-                               [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
-                               [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
-                               [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
+                               [NO_SSC] = &ti_ml_pcie_100_no_ssc_ln_vals,
+                               [EXTERNAL_SSC] = &ti_ml_pcie_100_ext_ssc_ln_vals,
+                               [INTERNAL_SSC] = &ti_ml_pcie_100_int_ssc_ln_vals,
                        },
                },
                [TYPE_USB] = {
index a95572b..e625b32 100644 (file)
@@ -4,17 +4,33 @@
  * Copyright 2019 Purism SPC
  */
 
+#include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/svc/misc.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+/* Control and Status Registers(CSR) */
+#define PHY_CTRL                       0x00
+#define  CCM_MASK                      GENMASK(7, 5)
+#define  CCM(n)                                FIELD_PREP(CCM_MASK, (n))
+#define  CCM_1_2V                      0x5
+#define  CA_MASK                       GENMASK(4, 2)
+#define  CA_3_51MA                     0x4
+#define  CA(n)                         FIELD_PREP(CA_MASK, (n))
+#define  RFB                           BIT(1)
+#define  LVDS_EN                       BIT(0)
 
 /* DPHY registers */
 #define DPHY_PD_DPHY                   0x00
 #define PWR_ON 0
 #define PWR_OFF        1
 
+#define MIN_VCO_FREQ 640000000
+#define MAX_VCO_FREQ 1500000000
+
+#define MIN_LVDS_REFCLK_FREQ 24000000
+#define MAX_LVDS_REFCLK_FREQ 150000000
+
 enum mixel_dphy_devtype {
        MIXEL_IMX8MQ,
+       MIXEL_IMX8QXP,
 };
 
 struct mixel_dphy_devdata {
@@ -65,6 +88,7 @@ struct mixel_dphy_devdata {
        u8 reg_rxlprp;
        u8 reg_rxcdrp;
        u8 reg_rxhs_settle;
+       bool is_combo;  /* MIPI DPHY and LVDS PHY combo */
 };
 
 static const struct mixel_dphy_devdata mixel_dphy_devdata[] = {
@@ -74,6 +98,10 @@ static const struct mixel_dphy_devdata mixel_dphy_devdata[] = {
                .reg_rxlprp = 0x40,
                .reg_rxcdrp = 0x44,
                .reg_rxhs_settle = 0x48,
+               .is_combo = false,
+       },
+       [MIXEL_IMX8QXP] = {
+               .is_combo = true,
        },
 };
 
@@ -95,8 +123,12 @@ struct mixel_dphy_cfg {
 struct mixel_dphy_priv {
        struct mixel_dphy_cfg cfg;
        struct regmap *regmap;
+       struct regmap *lvds_regmap;
        struct clk *phy_ref_clk;
        const struct mixel_dphy_devdata *devdata;
+       struct imx_sc_ipc *ipc_handle;
+       bool is_slave;
+       int id;
 };
 
 static const struct regmap_config mixel_dphy_regmap_config = {
@@ -317,7 +349,8 @@ static int mixel_dphy_set_pll_params(struct phy *phy)
        return 0;
 }
 
-static int mixel_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+static int
+mixel_dphy_configure_mipi_dphy(struct phy *phy, union phy_configure_opts *opts)
 {
        struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
        struct mixel_dphy_cfg cfg = { 0 };
@@ -345,15 +378,126 @@ static int mixel_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
        return 0;
 }
 
+static int
+mixel_dphy_configure_lvds_phy(struct phy *phy, union phy_configure_opts *opts)
+{
+       struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
+       struct phy_configure_opts_lvds *lvds_opts = &opts->lvds;
+       unsigned long data_rate;
+       unsigned long fvco;
+       u32 rsc;
+       u32 co;
+       int ret;
+
+       priv->is_slave = lvds_opts->is_slave;
+
+       /* LVDS interface pins */
+       regmap_write(priv->lvds_regmap, PHY_CTRL,
+                    CCM(CCM_1_2V) | CA(CA_3_51MA) | RFB);
+
+       /* enable MODE8 only for slave LVDS PHY */
+       rsc = priv->id ? IMX_SC_R_MIPI_1 : IMX_SC_R_MIPI_0;
+       ret = imx_sc_misc_set_control(priv->ipc_handle, rsc, IMX_SC_C_DUAL_MODE,
+                                     lvds_opts->is_slave);
+       if (ret) {
+               dev_err(&phy->dev, "Failed to configure MODE8: %d\n", ret);
+               return ret;
+       }
+
+       /*
+        * Choose an appropriate divider ratio to meet the requirement of
+        * PLL VCO frequency range.
+        *
+        *  -----  640MHz ~ 1500MHz   ------------      ---------------
+        * | VCO | ----------------> | CO divider | -> | LVDS data rate|
+        *  -----       FVCO          ------------      ---------------
+        *                            1/2/4/8 div     7 * differential_clk_rate
+        */
+       data_rate = 7 * lvds_opts->differential_clk_rate;
+       for (co = 1; co <= 8; co *= 2) {
+               fvco = data_rate * co;
+
+               if (fvco >= MIN_VCO_FREQ)
+                       break;
+       }
+
+       if (fvco < MIN_VCO_FREQ || fvco > MAX_VCO_FREQ) {
+               dev_err(&phy->dev, "VCO frequency %lu is out of range\n", fvco);
+               return -ERANGE;
+       }
+
+       /*
+        * CO is configurable, while CN and CM are not,
+        * as fixed ratios 1 and 7 are applied respectively.
+        */
+       phy_write(phy, __ffs(co), DPHY_CO);
+
+       /* set reference clock rate */
+       clk_set_rate(priv->phy_ref_clk, lvds_opts->differential_clk_rate);
+
+       return ret;
+}
+
+static int mixel_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+       if (!opts) {
+               dev_err(&phy->dev, "No configuration options\n");
+               return -EINVAL;
+       }
+
+       if (phy->attrs.mode == PHY_MODE_MIPI_DPHY)
+               return mixel_dphy_configure_mipi_dphy(phy, opts);
+       else if (phy->attrs.mode == PHY_MODE_LVDS)
+               return mixel_dphy_configure_lvds_phy(phy, opts);
+
+       dev_err(&phy->dev,
+               "Failed to configure PHY with invalid PHY mode: %d\n", phy->attrs.mode);
+
+       return -EINVAL;
+}
+
+static int
+mixel_dphy_validate_lvds_phy(struct phy *phy, union phy_configure_opts *opts)
+{
+       struct phy_configure_opts_lvds *lvds_cfg = &opts->lvds;
+
+       if (lvds_cfg->bits_per_lane_and_dclk_cycle != 7) {
+               dev_err(&phy->dev, "Invalid bits per LVDS data lane: %u\n",
+                       lvds_cfg->bits_per_lane_and_dclk_cycle);
+               return -EINVAL;
+       }
+
+       if (lvds_cfg->lanes != 4) {
+               dev_err(&phy->dev, "Invalid LVDS data lanes: %u\n", lvds_cfg->lanes);
+               return -EINVAL;
+       }
+
+       if (lvds_cfg->differential_clk_rate < MIN_LVDS_REFCLK_FREQ ||
+           lvds_cfg->differential_clk_rate > MAX_LVDS_REFCLK_FREQ) {
+               dev_err(&phy->dev,
+                       "Invalid LVDS differential clock rate: %lu\n",
+                       lvds_cfg->differential_clk_rate);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int mixel_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
                               union phy_configure_opts *opts)
 {
-       struct mixel_dphy_cfg cfg = { 0 };
+       if (mode == PHY_MODE_MIPI_DPHY) {
+               struct mixel_dphy_cfg mipi_dphy_cfg = { 0 };
 
-       if (mode != PHY_MODE_MIPI_DPHY)
-               return -EINVAL;
+               return mixel_dphy_config_from_opts(phy, &opts->mipi_dphy,
+                                                  &mipi_dphy_cfg);
+       } else if (mode == PHY_MODE_LVDS) {
+               return mixel_dphy_validate_lvds_phy(phy, opts);
+       }
 
-       return mixel_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
+       dev_err(&phy->dev,
+               "Failed to validate PHY with invalid PHY mode: %d\n", mode);
+       return -EINVAL;
 }
 
 static int mixel_dphy_init(struct phy *phy)
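
[Reviewer note] Worked example for the CO selection in mixel_dphy_configure_lvds_phy() above (the clock value is illustrative, not from the patch): the serial rate is 7 * differential_clk_rate, so a 74.25 MHz LVDS clock gives data_rate = 519.75 MHz. co = 1 yields fvco = 519.75 MHz, below the 640 MHz floor; co = 2 yields 1039.5 MHz, inside the 640 MHz - 1.5 GHz window, so the loop stops there and DPHY_CO is written with __ffs(2) = 1. CN and CM stay at their fixed ratios of 1 and 7, and the reference clock is simply set to the differential clock rate.
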
@@ -373,27 +517,75 @@ static int mixel_dphy_exit(struct phy *phy)
        return 0;
 }
 
-static int mixel_dphy_power_on(struct phy *phy)
+static int mixel_dphy_power_on_mipi_dphy(struct phy *phy)
 {
        struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
        u32 locked;
        int ret;
 
-       ret = clk_prepare_enable(priv->phy_ref_clk);
-       if (ret < 0)
-               return ret;
-
        phy_write(phy, PWR_ON, DPHY_PD_PLL);
        ret = regmap_read_poll_timeout(priv->regmap, DPHY_LOCK, locked,
                                       locked, PLL_LOCK_SLEEP,
                                       PLL_LOCK_TIMEOUT);
        if (ret < 0) {
                dev_err(&phy->dev, "Could not get DPHY lock (%d)!\n", ret);
-               goto clock_disable;
+               return ret;
        }
        phy_write(phy, PWR_ON, DPHY_PD_DPHY);
 
        return 0;
+}
+
+static int mixel_dphy_power_on_lvds_phy(struct phy *phy)
+{
+       struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
+       u32 locked;
+       int ret;
+
+       regmap_update_bits(priv->lvds_regmap, PHY_CTRL, LVDS_EN, LVDS_EN);
+
+       phy_write(phy, PWR_ON, DPHY_PD_DPHY);
+       phy_write(phy, PWR_ON, DPHY_PD_PLL);
+
+       /* do not wait for slave LVDS PHY being locked */
+       if (priv->is_slave)
+               return 0;
+
+       ret = regmap_read_poll_timeout(priv->regmap, DPHY_LOCK, locked,
+                                      locked, PLL_LOCK_SLEEP,
+                                      PLL_LOCK_TIMEOUT);
+       if (ret < 0) {
+               dev_err(&phy->dev, "Could not get LVDS PHY lock (%d)!\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int mixel_dphy_power_on(struct phy *phy)
+{
+       struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
+       int ret;
+
+       ret = clk_prepare_enable(priv->phy_ref_clk);
+       if (ret < 0)
+               return ret;
+
+       if (phy->attrs.mode == PHY_MODE_MIPI_DPHY) {
+               ret = mixel_dphy_power_on_mipi_dphy(phy);
+       } else if (phy->attrs.mode == PHY_MODE_LVDS) {
+               ret = mixel_dphy_power_on_lvds_phy(phy);
+       } else {
+               dev_err(&phy->dev,
+                       "Failed to power on PHY with invalid PHY mode: %d\n",
+                                                       phy->attrs.mode);
+               ret = -EINVAL;
+       }
+
+       if (ret)
+               goto clock_disable;
+
+       return 0;
 clock_disable:
        clk_disable_unprepare(priv->phy_ref_clk);
        return ret;
@@ -406,16 +598,51 @@ static int mixel_dphy_power_off(struct phy *phy)
        phy_write(phy, PWR_OFF, DPHY_PD_PLL);
        phy_write(phy, PWR_OFF, DPHY_PD_DPHY);
 
+       if (phy->attrs.mode == PHY_MODE_LVDS)
+               regmap_update_bits(priv->lvds_regmap, PHY_CTRL, LVDS_EN, 0);
+
        clk_disable_unprepare(priv->phy_ref_clk);
 
        return 0;
 }
 
+static int mixel_dphy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+       struct mixel_dphy_priv *priv = phy_get_drvdata(phy);
+       int ret;
+
+       if (priv->devdata->is_combo && mode != PHY_MODE_LVDS) {
+               dev_err(&phy->dev, "Failed to set PHY mode for combo PHY\n");
+               return -EINVAL;
+       }
+
+       if (!priv->devdata->is_combo && mode != PHY_MODE_MIPI_DPHY) {
+               dev_err(&phy->dev, "Failed to set PHY mode to MIPI DPHY\n");
+               return -EINVAL;
+       }
+
+       if (priv->devdata->is_combo) {
+               u32 rsc = priv->id ? IMX_SC_R_MIPI_1 : IMX_SC_R_MIPI_0;
+
+               ret = imx_sc_misc_set_control(priv->ipc_handle,
+                                             rsc, IMX_SC_C_MODE,
+                                             mode == PHY_MODE_LVDS);
+               if (ret) {
+                       dev_err(&phy->dev,
+                               "Failed to set PHY mode via SCU ipc: %d\n", ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
 static const struct phy_ops mixel_dphy_phy_ops = {
        .init = mixel_dphy_init,
        .exit = mixel_dphy_exit,
        .power_on = mixel_dphy_power_on,
        .power_off = mixel_dphy_power_off,
+       .set_mode = mixel_dphy_set_mode,
        .configure = mixel_dphy_configure,
        .validate = mixel_dphy_validate,
        .owner = THIS_MODULE,
@@ -424,6 +651,8 @@ static const struct phy_ops mixel_dphy_phy_ops = {
 static const struct of_device_id mixel_dphy_of_match[] = {
        { .compatible = "fsl,imx8mq-mipi-dphy",
          .data = &mixel_dphy_devdata[MIXEL_IMX8MQ] },
+       { .compatible = "fsl,imx8qxp-mipi-dphy",
+         .data = &mixel_dphy_devdata[MIXEL_IMX8QXP] },
        { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, mixel_dphy_of_match);
@@ -436,6 +665,7 @@ static int mixel_dphy_probe(struct platform_device *pdev)
        struct mixel_dphy_priv *priv;
        struct phy *phy;
        void __iomem *base;
+       int ret;
 
        if (!np)
                return -ENODEV;
@@ -467,6 +697,30 @@ static int mixel_dphy_probe(struct platform_device *pdev)
        dev_dbg(dev, "phy_ref clock rate: %lu\n",
                clk_get_rate(priv->phy_ref_clk));
 
+       if (priv->devdata->is_combo) {
+               priv->lvds_regmap =
+                       syscon_regmap_lookup_by_phandle(np, "fsl,syscon");
+               if (IS_ERR(priv->lvds_regmap)) {
+                       ret = PTR_ERR(priv->lvds_regmap);
+                       dev_err_probe(dev, ret, "Failed to get LVDS regmap\n");
+                       return ret;
+               }
+
+               priv->id = of_alias_get_id(np, "mipi_dphy");
+               if (priv->id < 0) {
+                       dev_err(dev, "Failed to get phy node alias id: %d\n",
+                               priv->id);
+                       return priv->id;
+               }
+
+               ret = imx_scu_get_handle(&priv->ipc_handle);
+               if (ret) {
+                       dev_err_probe(dev, ret,
+                                     "Failed to get SCU ipc handle\n");
+                       return ret;
+               }
+       }
+
        dev_set_drvdata(dev, priv);
 
        phy = devm_phy_create(dev, np, &mixel_dphy_phy_ops);
index f1eb03b..ad7d2ed 100644 (file)
@@ -94,15 +94,21 @@ static int imx8_pcie_phy_init(struct phy *phy)
                           IMX8MM_GPR_PCIE_CMN_RST);
        usleep_range(200, 500);
 
-       if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT) {
+       if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ||
+           pad_mode == IMX8_PCIE_REFCLK_PAD_UNUSED) {
                /* Configure the pad as input */
                val = readl(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
                writel(val & ~ANA_PLL_CLK_OUT_TO_EXT_IO_EN,
                       imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
-       } else if (pad_mode == IMX8_PCIE_REFCLK_PAD_OUTPUT) {
+       } else {
                /* Configure the PHY to output the refclock via pad */
                writel(ANA_PLL_CLK_OUT_TO_EXT_IO_EN,
                       imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
+       }
+
+       if (pad_mode == IMX8_PCIE_REFCLK_PAD_OUTPUT ||
+           pad_mode == IMX8_PCIE_REFCLK_PAD_UNUSED) {
+               /* Source clock from SoC internal PLL */
                writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
                       imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
                writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
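
The reworked hunk above folds the three reference-clock pad modes into two overlapping conditions: INPUT and UNUSED leave the pad configured as an input, while OUTPUT and UNUSED source the reference clock from the SoC-internal PLL. A small standalone sketch of that decision table; the enum here is an illustrative stand-in, not the driver's definitions:

	/* Illustrative only: mirrors the pad-mode handling in the hunk above. */
	enum example_refclk_pad_mode { EX_PAD_INPUT, EX_PAD_OUTPUT, EX_PAD_UNUSED };

	static void example_refclk_pad_summary(enum example_refclk_pad_mode mode,
					       bool *pad_is_input,
					       bool *use_internal_pll)
	{
		/* INPUT and UNUSED: the pad stays an input (clock-out driver off). */
		*pad_is_input = (mode == EX_PAD_INPUT || mode == EX_PAD_UNUSED);
		/* OUTPUT and UNUSED: the reference clock comes from the internal PLL. */
		*use_internal_pll = (mode == EX_PAD_OUTPUT || mode == EX_PAD_UNUSED);
	}
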
index 5fb4217..d4bd419 100644 (file)
@@ -120,20 +120,16 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
                return PTR_ERR(hdmi_phy->regs);
 
        ref_clk = devm_clk_get(dev, "pll_ref");
-       if (IS_ERR(ref_clk)) {
-               ret = PTR_ERR(ref_clk);
-               dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n",
-                       ret);
-               return ret;
-       }
+       if (IS_ERR(ref_clk))
+               return dev_err_probe(dev, PTR_ERR(ref_clk),
+                                    "Failed to get PLL reference clock\n");
+
        ref_clk_name = __clk_get_name(ref_clk);
 
        ret = of_property_read_string(dev->of_node, "clock-output-names",
                                      &clk_init.name);
-       if (ret < 0) {
-               dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
-               return ret;
-       }
+       if (ret < 0)
+               return dev_err_probe(dev, ret, "Failed to read clock-output-names\n");
 
        hdmi_phy->dev = dev;
        hdmi_phy->conf =
@@ -141,25 +137,19 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
        mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init);
        hdmi_phy->pll_hw.init = &clk_init;
        hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
-       if (IS_ERR(hdmi_phy->pll)) {
-               ret = PTR_ERR(hdmi_phy->pll);
-               dev_err(dev, "Failed to register PLL: %d\n", ret);
-               return ret;
-       }
+       if (IS_ERR(hdmi_phy->pll))
+               return dev_err_probe(dev, PTR_ERR(hdmi_phy->pll),
+                                   "Failed to register PLL\n");
 
        ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
                                   &hdmi_phy->ibias);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret);
-               return ret;
-       }
+       if (ret < 0)
+               return dev_err_probe(dev, ret, "Failed to get ibias\n");
 
        ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
                                   &hdmi_phy->ibias_up);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret);
-               return ret;
-       }
+       if (ret < 0)
+               return dev_err_probe(dev, ret, "Failed to get ibias_up\n");
 
        dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
        hdmi_phy->drv_imp_clk = 0x30;
@@ -168,17 +158,15 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
        hdmi_phy->drv_imp_d0 = 0x30;
 
        phy = devm_phy_create(dev, NULL, mtk_hdmi_phy_dev_get_ops(hdmi_phy));
-       if (IS_ERR(phy)) {
-               dev_err(dev, "Failed to create HDMI PHY\n");
-               return PTR_ERR(phy);
-       }
+       if (IS_ERR(phy))
+               return dev_err_probe(dev, PTR_ERR(phy), "Cannot create HDMI PHY\n");
+
        phy_set_drvdata(phy, hdmi_phy);
 
        phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-       if (IS_ERR(phy_provider)) {
-               dev_err(dev, "Failed to register HDMI PHY\n");
-               return PTR_ERR(phy_provider);
-       }
+       if (IS_ERR(phy_provider))
+               return dev_err_probe(dev, PTR_ERR(phy_provider),
+                                    "Failed to register HDMI PHY\n");
 
        if (hdmi_phy->conf->pll_default_off)
                hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy);
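
Both MediaTek PHY probe paths above are converted to dev_err_probe(), which logs the message, records it as the deferred-probe reason when the error is -EPROBE_DEFER, and returns the error code, so the log-then-return pattern collapses into a single statement. A generic sketch of the idiom; the clock name is only an example:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int example_get_refclk(struct device *dev, struct clk **out)
	{
		struct clk *clk;

		clk = devm_clk_get(dev, "ref");	/* example clock name */
		if (IS_ERR(clk))
			/* Logs (or records the defer reason) and returns the code. */
			return dev_err_probe(dev, PTR_ERR(clk),
					     "Failed to get reference clock\n");

		*out = clk;
		return 0;
	}
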
index 67b005d..2850693 100644 (file)
@@ -154,11 +154,9 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
                return PTR_ERR(mipi_tx->regs);
 
        ref_clk = devm_clk_get(dev, NULL);
-       if (IS_ERR(ref_clk)) {
-               ret = PTR_ERR(ref_clk);
-               dev_err(dev, "Failed to get reference clock: %d\n", ret);
-               return ret;
-       }
+       if (IS_ERR(ref_clk))
+               return dev_err_probe(dev, PTR_ERR(ref_clk),
+                                    "Failed to get reference clock\n");
 
        ret = of_property_read_u32(dev->of_node, "drive-strength-microamp",
                                   &mipi_tx->mipitx_drive);
@@ -178,27 +176,20 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
 
        ret = of_property_read_string(dev->of_node, "clock-output-names",
                                      &clk_init.name);
-       if (ret < 0) {
-               dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
-               return ret;
-       }
+       if (ret < 0)
+               return dev_err_probe(dev, ret, "Failed to read clock-output-names\n");
 
        clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops;
 
        mipi_tx->pll_hw.init = &clk_init;
        mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw);
-       if (IS_ERR(mipi_tx->pll)) {
-               ret = PTR_ERR(mipi_tx->pll);
-               dev_err(dev, "Failed to register PLL: %d\n", ret);
-               return ret;
-       }
+       if (IS_ERR(mipi_tx->pll))
+               return dev_err_probe(dev, PTR_ERR(mipi_tx->pll), "Failed to register PLL\n");
 
        phy = devm_phy_create(dev, NULL, &mtk_mipi_tx_ops);
-       if (IS_ERR(phy)) {
-               ret = PTR_ERR(phy);
-               dev_err(dev, "Failed to create MIPI D-PHY: %d\n", ret);
-               return ret;
-       }
+       if (IS_ERR(phy))
+               return dev_err_probe(dev, PTR_ERR(phy), "Failed to create MIPI D-PHY\n");
+
        phy_set_drvdata(phy, mipi_tx);
 
        phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
index 6f3fe37..95c6dbb 100644 (file)
@@ -10,6 +10,7 @@
 #include<linux/module.h>
 #include<linux/gpio.h>
 #include<linux/gpio/consumer.h>
+#include <linux/mux/consumer.h>
 
 struct can_transceiver_data {
        u32 flags;
@@ -21,13 +22,22 @@ struct can_transceiver_phy {
        struct phy *generic_phy;
        struct gpio_desc *standby_gpio;
        struct gpio_desc *enable_gpio;
+       struct mux_state *mux_state;
 };
 
 /* Power on function */
 static int can_transceiver_phy_power_on(struct phy *phy)
 {
        struct can_transceiver_phy *can_transceiver_phy = phy_get_drvdata(phy);
-
+       int ret;
+
+       if (can_transceiver_phy->mux_state) {
+               ret = mux_state_select(can_transceiver_phy->mux_state);
+               if (ret) {
+                       dev_err(&phy->dev, "Failed to select CAN mux: %d\n", ret);
+                       return ret;
+               }
+       }
        if (can_transceiver_phy->standby_gpio)
                gpiod_set_value_cansleep(can_transceiver_phy->standby_gpio, 0);
        if (can_transceiver_phy->enable_gpio)
@@ -45,6 +55,8 @@ static int can_transceiver_phy_power_off(struct phy *phy)
                gpiod_set_value_cansleep(can_transceiver_phy->standby_gpio, 1);
        if (can_transceiver_phy->enable_gpio)
                gpiod_set_value_cansleep(can_transceiver_phy->enable_gpio, 0);
+       if (can_transceiver_phy->mux_state)
+               mux_state_deselect(can_transceiver_phy->mux_state);
 
        return 0;
 }
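
The power_on/power_off pair above brackets the transceiver with mux_state_select() and mux_state_deselect() from the mux consumer API, so the shared lines are only routed to this transceiver while the PHY is powered. A minimal sketch of that pairing in isolation; the device and the surrounding function are hypothetical, and a "mux-states" property in the consumer's DT node is assumed:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/mux/consumer.h>

	static int example_route_and_use(struct device *dev)
	{
		struct mux_state *mstate;
		int ret;

		mstate = devm_mux_state_get(dev, NULL);
		if (IS_ERR(mstate))
			return PTR_ERR(mstate);

		ret = mux_state_select(mstate);		/* route signals to us */
		if (ret)
			return ret;

		/* ... use the hardware behind the mux ... */

		mux_state_deselect(mstate);		/* release the route */
		return 0;
	}
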
@@ -95,6 +107,16 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
        match = of_match_node(can_transceiver_phy_ids, pdev->dev.of_node);
        drvdata = match->data;
 
+       if (of_property_read_bool(dev->of_node, "mux-states")) {
+               struct mux_state *mux_state;
+
+               mux_state = devm_mux_state_get(dev, NULL);
+               if (IS_ERR(mux_state))
+                       return dev_err_probe(&pdev->dev, PTR_ERR(mux_state),
+                                            "failed to get mux\n");
+               can_transceiver_phy->mux_state = mux_state;
+       }
+
        phy = devm_phy_create(dev, dev->of_node,
                              &can_transceiver_phy_ops);
        if (IS_ERR(phy)) {
index 91e28d6..d93ddf1 100644 (file)
@@ -229,6 +229,17 @@ void phy_pm_runtime_forbid(struct phy *phy)
 }
 EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);
 
+/**
+ * phy_init - phy internal initialization before phy operation
+ * @phy: the phy returned by phy_get()
+ *
+ * Used to allow phy's driver to perform phy internal initialization,
+ * such as PLL block powering, clock initialization or anything that is
+ * required by the phy to perform the start of operation.
+ * Must be called before phy_power_on().
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
 int phy_init(struct phy *phy)
 {
        int ret;
@@ -242,6 +253,9 @@ int phy_init(struct phy *phy)
        ret = 0; /* Override possible ret == -ENOTSUPP */
 
        mutex_lock(&phy->mutex);
+       if (phy->power_count > phy->init_count)
+               dev_warn(&phy->dev, "phy_power_on was called before phy_init\n");
+
        if (phy->init_count == 0 && phy->ops->init) {
                ret = phy->ops->init(phy);
                if (ret < 0) {
@@ -258,6 +272,14 @@ out:
 }
 EXPORT_SYMBOL_GPL(phy_init);
 
+/**
+ * phy_exit - Phy internal un-initialization
+ * @phy: the phy returned by phy_get()
+ *
+ * Must be called after phy_power_off().
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
 int phy_exit(struct phy *phy)
 {
        int ret;
@@ -287,6 +309,14 @@ out:
 }
 EXPORT_SYMBOL_GPL(phy_exit);
 
+/**
+ * phy_power_on - Enable the phy and enter proper operation
+ * @phy: the phy returned by phy_get()
+ *
+ * Must be called after phy_init().
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
 int phy_power_on(struct phy *phy)
 {
        int ret = 0;
@@ -329,6 +359,14 @@ out:
 }
 EXPORT_SYMBOL_GPL(phy_power_on);
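
Taken together, the kerneldoc added above pins down the consumer-side ordering: phy_init() before phy_power_on(), and phy_power_off() before phy_exit(); the new warning in phy_init() now flags callers that power on first. A hedged sketch of a well-ordered consumer, with hypothetical names:

	#include <linux/phy/phy.h>

	static int example_phy_bringup(struct phy *phy)
	{
		int ret;

		ret = phy_init(phy);		/* internal init: PLLs, clocks */
		if (ret)
			return ret;

		ret = phy_power_on(phy);	/* enter normal operation */
		if (ret) {
			phy_exit(phy);
			return ret;
		}
		return 0;
	}

	static void example_phy_teardown(struct phy *phy)
	{
		phy_power_off(phy);		/* must precede phy_exit() */
		phy_exit(phy);
	}
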
 
+/**
+ * phy_power_off - Disable the phy.
+ * @phy: the phy returned by phy_get()
+ *
+ * Must be called before phy_exit().
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
 int phy_power_off(struct phy *phy)
 {
        int ret;
@@ -432,7 +470,7 @@ EXPORT_SYMBOL_GPL(phy_reset);
  * runtime, which are otherwise lost after host controller reset and cannot
  * be applied in phy_init() or phy_power_on().
  *
- * Returns: 0 if successful, an negative error code otherwise
+ * Return: %0 if successful, a negative error code otherwise
  */
 int phy_calibrate(struct phy *phy)
 {
@@ -458,7 +496,7 @@ EXPORT_SYMBOL_GPL(phy_calibrate);
  * on the phy. The configuration will be applied on the current phy
  * mode, that can be changed using phy_set_mode().
  *
- * Returns: 0 if successful, an negative error code otherwise
+ * Return: %0 if successful, a negative error code otherwise
  */
 int phy_configure(struct phy *phy, union phy_configure_opts *opts)
 {
@@ -492,7 +530,7 @@ EXPORT_SYMBOL_GPL(phy_configure);
  * PHY, so calling it as many times as deemed fit will have no side
  * effect.
  *
- * Returns: 0 if successful, an negative error code otherwise
+ * Return: %0 if successful, a negative error code otherwise
  */
 int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
                 union phy_configure_opts *opts)
index b144ae1..c7309e9 100644 (file)
@@ -2535,6 +2535,50 @@ static const struct qmp_phy_init_tbl sdx55_qmp_pcie_pcs_misc_tbl[] = {
        QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
 };
 
+static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_tx_tbl[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5),
+       QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82),
+       QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+       QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+       QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
+       QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+       QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0b),
+};
+
+static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_rx_tbl[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xff),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7f),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xff),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x7b),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xe4),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00),
+};
+
 static const struct qmp_phy_init_tbl sm8350_ufsphy_serdes_tbl[] = {
        QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0xd9),
        QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x11),
@@ -3177,7 +3221,7 @@ struct qmp_phy_combo_cfg {
  * @tx2: iomapped memory space for second lane's tx (in dual lane PHYs)
  * @rx2: iomapped memory space for second lane's rx (in dual lane PHYs)
  * @pcs_misc: iomapped memory space for lane's pcs_misc
- * @pipe_clk: pipe lock
+ * @pipe_clk: pipe clock
  * @index: lane index
  * @qmp: QMP phy to which this lane belongs
  * @lane_rst: lane's reset controller
@@ -4217,6 +4261,35 @@ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
        .pwrdn_delay_max        = 1005,         /* us */
 };
 
+static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
+       .type                   = PHY_TYPE_USB3,
+       .nlanes                 = 1,
+
+       .serdes_tbl             = sm8150_usb3_uniphy_serdes_tbl,
+       .serdes_tbl_num         = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+       .tx_tbl                 = sdx65_usb3_uniphy_tx_tbl,
+       .tx_tbl_num             = ARRAY_SIZE(sdx65_usb3_uniphy_tx_tbl),
+       .rx_tbl                 = sdx65_usb3_uniphy_rx_tbl,
+       .rx_tbl_num             = ARRAY_SIZE(sdx65_usb3_uniphy_rx_tbl),
+       .pcs_tbl                = sm8350_usb3_uniphy_pcs_tbl,
+       .pcs_tbl_num            = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_tbl),
+       .clk_list               = qmp_v4_sdx55_usbphy_clk_l,
+       .num_clks               = ARRAY_SIZE(qmp_v4_sdx55_usbphy_clk_l),
+       .reset_list             = msm8996_usb3phy_reset_l,
+       .num_resets             = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+       .vreg_list              = qmp_phy_vreg_l,
+       .num_vregs              = ARRAY_SIZE(qmp_phy_vreg_l),
+       .regs                   = sm8350_usb3_uniphy_regs_layout,
+
+       .start_ctrl             = SERDES_START | PCS_START,
+       .pwrdn_ctrl             = SW_PWRDN,
+       .phy_status             = PHYSTATUS,
+
+       .has_pwrdn_delay        = true,
+       .pwrdn_delay_min        = POWER_DOWN_DELAY_US_MIN,
+       .pwrdn_delay_max        = POWER_DOWN_DELAY_US_MAX,
+};
+
 static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
        .type                   = PHY_TYPE_UFS,
        .nlanes                 = 2,
@@ -5012,7 +5085,7 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
        ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
        if (ret) {
                dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
-               goto err_reg_enable;
+               goto err_unlock;
        }
 
        for (i = 0; i < cfg->num_resets; i++) {
@@ -5020,7 +5093,7 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
                if (ret) {
                        dev_err(qmp->dev, "%s reset assert failed\n",
                                cfg->reset_list[i]);
-                       goto err_rst_assert;
+                       goto err_disable_regulators;
                }
        }
 
@@ -5029,13 +5102,13 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
                if (ret) {
                        dev_err(qmp->dev, "%s reset deassert failed\n",
                                qphy->cfg->reset_list[i]);
-                       goto err_rst;
+                       goto err_assert_reset;
                }
        }
 
        ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
        if (ret)
-               goto err_rst;
+               goto err_assert_reset;
 
        if (cfg->has_phy_dp_com_ctrl) {
                qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
@@ -5077,12 +5150,12 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
 
        return 0;
 
-err_rst:
+err_assert_reset:
        while (++i < cfg->num_resets)
                reset_control_assert(qmp->resets[i]);
-err_rst_assert:
+err_disable_regulators:
        regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
-err_reg_enable:
+err_unlock:
        mutex_unlock(&qmp->phy_mutex);
 
        return ret;
@@ -5188,14 +5261,14 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
                if (ret) {
                        dev_err(qmp->dev, "lane%d reset deassert failed\n",
                                qphy->index);
-                       goto err_lane_rst;
+                       return ret;
                }
        }
 
        ret = clk_prepare_enable(qphy->pipe_clk);
        if (ret) {
                dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
-               goto err_clk_enable;
+               goto err_reset_lane;
        }
 
        /* Tx, Rx, and PCS configurations */
@@ -5246,7 +5319,7 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
 
        ret = reset_control_deassert(qmp->ufs_reset);
        if (ret)
-               goto err_lane_rst;
+               goto err_disable_pipe_clk;
 
        qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
                               cfg->pcs_misc_tbl_num);
@@ -5285,17 +5358,17 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
                                         PHY_INIT_COMPLETE_TIMEOUT);
                if (ret) {
                        dev_err(qmp->dev, "phy initialization timed-out\n");
-                       goto err_pcs_ready;
+                       goto err_disable_pipe_clk;
                }
        }
        return 0;
 
-err_pcs_ready:
+err_disable_pipe_clk:
        clk_disable_unprepare(qphy->pipe_clk);
-err_clk_enable:
+err_reset_lane:
        if (cfg->has_lane_rst)
                reset_control_assert(qphy->lane_rst);
-err_lane_rst:
+
        return ret;
 }
 
@@ -5514,7 +5587,7 @@ static int qcom_qmp_phy_reset_init(struct device *dev, const struct qmp_phy_cfg
                struct reset_control *rst;
                const char *name = cfg->reset_list[i];
 
-               rst = devm_reset_control_get(dev, name);
+               rst = devm_reset_control_get_exclusive(dev, name);
                if (IS_ERR(rst)) {
                        dev_err(dev, "failed to get %s reset\n", name);
                        return PTR_ERR(rst);
@@ -5818,6 +5891,11 @@ static const struct phy_ops qcom_qmp_pcie_ufs_ops = {
        .owner          = THIS_MODULE,
 };
 
+static void qcom_qmp_reset_control_put(void *data)
+{
+       reset_control_put(data);
+}
+
 static
 int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
                        void __iomem *serdes, const struct qmp_phy_cfg *cfg)
@@ -5890,7 +5968,7 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
         * all phys that don't need this.
         */
        snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
-       qphy->pipe_clk = of_clk_get_by_name(np, prop_name);
+       qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
        if (IS_ERR(qphy->pipe_clk)) {
                if (cfg->type == PHY_TYPE_PCIE ||
                    cfg->type == PHY_TYPE_USB3) {
@@ -5907,11 +5985,15 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
        /* Get lane reset, if any */
        if (cfg->has_lane_rst) {
                snprintf(prop_name, sizeof(prop_name), "lane%d", id);
-               qphy->lane_rst = of_reset_control_get(np, prop_name);
+               qphy->lane_rst = of_reset_control_get_exclusive(np, prop_name);
                if (IS_ERR(qphy->lane_rst)) {
                        dev_err(dev, "failed to get lane%d reset\n", id);
                        return PTR_ERR(qphy->lane_rst);
                }
+               ret = devm_add_action_or_reset(dev, qcom_qmp_reset_control_put,
+                                              qphy->lane_rst);
+               if (ret)
+                       return ret;
        }
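
Because of_reset_control_get_exclusive() is not device-managed, the hunk above registers qcom_qmp_reset_control_put() with devm_add_action_or_reset() so the lane reset is released automatically on probe failure or unbind. The same pattern in a generic form; the reset name and surrounding helper are placeholders:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/of.h>
	#include <linux/reset.h>

	static void example_reset_put(void *data)
	{
		reset_control_put(data);
	}

	static int example_get_lane_reset(struct device *dev, struct device_node *np,
					  struct reset_control **out)
	{
		struct reset_control *rst;
		int ret;

		rst = of_reset_control_get_exclusive(np, "lane0");	/* example name */
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		/* Tie the manually acquired reset to the device's lifetime. */
		ret = devm_add_action_or_reset(dev, example_reset_put, rst);
		if (ret)
			return ret;

		*out = rst;
		return 0;
	}
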
 
        if (cfg->type == PHY_TYPE_UFS || cfg->type == PHY_TYPE_PCIE)
@@ -6008,6 +6090,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
                .compatible = "qcom,sm6115-qmp-ufs-phy",
                .data = &sm6115_ufsphy_cfg,
        }, {
+               .compatible = "qcom,sm6350-qmp-ufs-phy",
+               .data = &sdm845_ufsphy_cfg,
+       }, {
                .compatible = "qcom,sm8150-qmp-ufs-phy",
                .data = &sm8150_ufsphy_cfg,
        }, {
@@ -6047,6 +6132,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
                .compatible = "qcom,sdx55-qmp-usb3-uni-phy",
                .data = &sdx55_usb3_uniphy_cfg,
        }, {
+               .compatible = "qcom,sdx65-qmp-usb3-uni-phy",
+               .data = &sdx65_usb3_uniphy_cfg,
+       }, {
                .compatible = "qcom,sm8350-qmp-usb3-phy",
                .data = &sm8350_usb3phy_cfg,
        }, {
index 4df9476..639452f 100644 (file)
@@ -327,7 +327,6 @@ static int rk_dphy_probe(struct platform_device *pdev)
        struct device_node *np = dev->of_node;
        const struct rk_dphy_drv_data *drv_data;
        struct phy_provider *phy_provider;
-       const struct of_device_id *of_id;
        struct rk_dphy *priv;
        struct phy *phy;
        unsigned int i;
@@ -347,11 +346,7 @@ static int rk_dphy_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       of_id = of_match_device(rk_dphy_dt_ids, dev);
-       if (!of_id)
-               return -EINVAL;
-
-       drv_data = of_id->data;
+       drv_data = of_device_get_match_data(dev);
        priv->drv_data = drv_data;
        priv->clks = devm_kcalloc(&pdev->dev, drv_data->num_clks,
                                  sizeof(*priv->clks), GFP_KERNEL);
index eca77e4..6711659 100644 (file)
@@ -116,11 +116,15 @@ struct rockchip_chg_det_reg {
  * @bvalid_det_en: vbus valid rise detection enable register.
  * @bvalid_det_st: vbus valid rise detection status register.
  * @bvalid_det_clr: vbus valid rise detection clear register.
+ * @id_det_en: id detection enable register.
+ * @id_det_st: id detection state register.
+ * @id_det_clr: id detection clear register.
  * @ls_det_en: linestate detection enable register.
  * @ls_det_st: linestate detection state register.
  * @ls_det_clr: linestate detection clear register.
  * @utmi_avalid: utmi vbus avalid status register.
  * @utmi_bvalid: utmi vbus bvalid status register.
+ * @utmi_id: utmi id state register.
  * @utmi_ls: utmi linestate state register.
  * @utmi_hstdet: utmi host disconnect register.
  */
@@ -129,11 +133,15 @@ struct rockchip_usb2phy_port_cfg {
        struct usb2phy_reg      bvalid_det_en;
        struct usb2phy_reg      bvalid_det_st;
        struct usb2phy_reg      bvalid_det_clr;
+       struct usb2phy_reg      id_det_en;
+       struct usb2phy_reg      id_det_st;
+       struct usb2phy_reg      id_det_clr;
        struct usb2phy_reg      ls_det_en;
        struct usb2phy_reg      ls_det_st;
        struct usb2phy_reg      ls_det_clr;
        struct usb2phy_reg      utmi_avalid;
        struct usb2phy_reg      utmi_bvalid;
+       struct usb2phy_reg      utmi_id;
        struct usb2phy_reg      utmi_ls;
        struct usb2phy_reg      utmi_hstdet;
 };
@@ -161,6 +169,7 @@ struct rockchip_usb2phy_cfg {
  * @suspended: phy suspended flag.
  * @vbus_attached: otg device vbus status.
  * @bvalid_irq: IRQ number assigned for vbus valid rise detection.
+ * @id_irq: IRQ number assigned for ID pin detection.
  * @ls_irq: IRQ number assigned for linestate detection.
  * @otg_mux_irq: IRQ number which multiplex otg-id/otg-bvalid/linestate
  *              irqs to one irq in otg-port.
@@ -179,6 +188,7 @@ struct rockchip_usb2phy_port {
        bool            suspended;
        bool            vbus_attached;
        int             bvalid_irq;
+       int             id_irq;
        int             ls_irq;
        int             otg_mux_irq;
        struct mutex    mutex;
@@ -253,7 +263,7 @@ static inline bool property_enabled(struct regmap *base,
                return false;
 
        tmp = (orig & mask) >> reg->bitstart;
-       return tmp == reg->enable;
+       return tmp != reg->disable;
 }
 
 static int rockchip_usb2phy_clk480m_prepare(struct clk_hw *hw)
@@ -419,6 +429,19 @@ static int rockchip_usb2phy_init(struct phy *phy)
                        if (ret)
                                goto out;
 
+                       /* clear id status and enable id detect irq */
+                       ret = property_enable(rphy->grf,
+                                             &rport->port_cfg->id_det_clr,
+                                             true);
+                       if (ret)
+                               goto out;
+
+                       ret = property_enable(rphy->grf,
+                                             &rport->port_cfg->id_det_en,
+                                             true);
+                       if (ret)
+                               goto out;
+
                        schedule_delayed_work(&rport->otg_sm_work,
                                              OTG_SCHEDULE_DELAY * 3);
                } else {
@@ -905,27 +928,40 @@ static irqreturn_t rockchip_usb2phy_bvalid_irq(int irq, void *data)
        if (!property_enabled(rphy->grf, &rport->port_cfg->bvalid_det_st))
                return IRQ_NONE;
 
-       mutex_lock(&rport->mutex);
-
        /* clear bvalid detect irq pending status */
        property_enable(rphy->grf, &rport->port_cfg->bvalid_det_clr, true);
 
-       mutex_unlock(&rport->mutex);
-
        rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
 
        return IRQ_HANDLED;
 }
 
-static irqreturn_t rockchip_usb2phy_otg_mux_irq(int irq, void *data)
+static irqreturn_t rockchip_usb2phy_id_irq(int irq, void *data)
 {
        struct rockchip_usb2phy_port *rport = data;
        struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+       bool id;
 
-       if (property_enabled(rphy->grf, &rport->port_cfg->bvalid_det_st))
-               return rockchip_usb2phy_bvalid_irq(irq, data);
-       else
+       if (!property_enabled(rphy->grf, &rport->port_cfg->id_det_st))
                return IRQ_NONE;
+
+       /* clear id detect irq pending status */
+       property_enable(rphy->grf, &rport->port_cfg->id_det_clr, true);
+
+       id = property_enabled(rphy->grf, &rport->port_cfg->utmi_id);
+       extcon_set_state_sync(rphy->edev, EXTCON_USB_HOST, !id);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_usb2phy_otg_mux_irq(int irq, void *data)
+{
+       irqreturn_t ret = IRQ_NONE;
+
+       ret |= rockchip_usb2phy_bvalid_irq(irq, data);
+       ret |= rockchip_usb2phy_id_irq(irq, data);
+
+       return ret;
 }
 
 static irqreturn_t rockchip_usb2phy_irq(int irq, void *data)
@@ -940,8 +976,14 @@ static irqreturn_t rockchip_usb2phy_irq(int irq, void *data)
                if (!rport->phy)
                        continue;
 
-               /* Handle linestate irq for both otg port and host port */
-               ret = rockchip_usb2phy_linestate_irq(irq, rport);
+               switch (rport->port_id) {
+               case USB2PHY_PORT_OTG:
+                       ret |= rockchip_usb2phy_otg_mux_irq(irq, rport);
+                       break;
+               case USB2PHY_PORT_HOST:
+                       ret |= rockchip_usb2phy_linestate_irq(irq, rport);
+                       break;
+               }
        }
 
        return ret;
@@ -1015,6 +1057,25 @@ static int rockchip_usb2phy_port_irq_init(struct rockchip_usb2phy *rphy,
                                        "failed to request otg-bvalid irq handle\n");
                                return ret;
                        }
+
+                       rport->id_irq = of_irq_get_byname(child_np, "otg-id");
+                       if (rport->id_irq < 0) {
+                               dev_err(rphy->dev, "no otg-id irq provided\n");
+                               ret = rport->id_irq;
+                               return ret;
+                       }
+
+                       ret = devm_request_threaded_irq(rphy->dev, rport->id_irq,
+                                                       NULL,
+                                                       rockchip_usb2phy_id_irq,
+                                                       IRQF_ONESHOT,
+                                                       "rockchip_usb2phy_id",
+                                                       rport);
+                       if (ret) {
+                               dev_err(rphy->dev,
+                                       "failed to request otg-id irq handle\n");
+                               return ret;
+                       }
                }
                break;
        default:
@@ -1139,8 +1200,8 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
 
        else {
                rphy->grf = syscon_node_to_regmap(dev->parent->of_node);
-                       if (IS_ERR(rphy->grf))
-                               return PTR_ERR(rphy->grf);
+               if (IS_ERR(rphy->grf))
+                       return PTR_ERR(rphy->grf);
        }
 
        if (of_device_is_compatible(np, "rockchip,rv1108-usb2phy")) {
@@ -1289,10 +1350,14 @@ static const struct rockchip_usb2phy_cfg rk3228_phy_cfgs[] = {
                                .bvalid_det_en  = { 0x0680, 3, 3, 0, 1 },
                                .bvalid_det_st  = { 0x0690, 3, 3, 0, 1 },
                                .bvalid_det_clr = { 0x06a0, 3, 3, 0, 1 },
+                               .id_det_en      = { 0x0680, 6, 5, 0, 3 },
+                               .id_det_st      = { 0x0690, 6, 5, 0, 3 },
+                               .id_det_clr     = { 0x06a0, 6, 5, 0, 3 },
                                .ls_det_en      = { 0x0680, 2, 2, 0, 1 },
                                .ls_det_st      = { 0x0690, 2, 2, 0, 1 },
                                .ls_det_clr     = { 0x06a0, 2, 2, 0, 1 },
                                .utmi_bvalid    = { 0x0480, 4, 4, 0, 1 },
+                               .utmi_id        = { 0x0480, 1, 1, 0, 1 },
                                .utmi_ls        = { 0x0480, 3, 2, 0, 1 },
                        },
                        [USB2PHY_PORT_HOST] = {
@@ -1345,14 +1410,18 @@ static const struct rockchip_usb2phy_cfg rk3308_phy_cfgs[] = {
                .port_cfgs      = {
                        [USB2PHY_PORT_OTG] = {
                                .phy_sus        = { 0x0100, 8, 0, 0, 0x1d1 },
-                               .bvalid_det_en  = { 0x3020, 2, 2, 0, 1 },
-                               .bvalid_det_st  = { 0x3024, 2, 2, 0, 1 },
-                               .bvalid_det_clr = { 0x3028, 2, 2, 0, 1 },
+                               .bvalid_det_en  = { 0x3020, 3, 2, 0, 3 },
+                               .bvalid_det_st  = { 0x3024, 3, 2, 0, 3 },
+                               .bvalid_det_clr = { 0x3028, 3, 2, 0, 3 },
+                               .id_det_en      = { 0x3020, 5, 4, 0, 3 },
+                               .id_det_st      = { 0x3024, 5, 4, 0, 3 },
+                               .id_det_clr     = { 0x3028, 5, 4, 0, 3 },
                                .ls_det_en      = { 0x3020, 0, 0, 0, 1 },
                                .ls_det_st      = { 0x3024, 0, 0, 0, 1 },
                                .ls_det_clr     = { 0x3028, 0, 0, 0, 1 },
                                .utmi_avalid    = { 0x0120, 10, 10, 0, 1 },
                                .utmi_bvalid    = { 0x0120, 9, 9, 0, 1 },
+                               .utmi_id        = { 0x0120, 6, 6, 0, 1 },
                                .utmi_ls        = { 0x0120, 5, 4, 0, 1 },
                        },
                        [USB2PHY_PORT_HOST] = {
@@ -1388,14 +1457,18 @@ static const struct rockchip_usb2phy_cfg rk3328_phy_cfgs[] = {
                .port_cfgs      = {
                        [USB2PHY_PORT_OTG] = {
                                .phy_sus        = { 0x0100, 15, 0, 0, 0x1d1 },
-                               .bvalid_det_en  = { 0x0110, 2, 2, 0, 1 },
-                               .bvalid_det_st  = { 0x0114, 2, 2, 0, 1 },
-                               .bvalid_det_clr = { 0x0118, 2, 2, 0, 1 },
+                               .bvalid_det_en  = { 0x0110, 3, 2, 0, 3 },
+                               .bvalid_det_st  = { 0x0114, 3, 2, 0, 3 },
+                               .bvalid_det_clr = { 0x0118, 3, 2, 0, 3 },
+                               .id_det_en      = { 0x0110, 5, 4, 0, 3 },
+                               .id_det_st      = { 0x0114, 5, 4, 0, 3 },
+                               .id_det_clr     = { 0x0118, 5, 4, 0, 3 },
                                .ls_det_en      = { 0x0110, 0, 0, 0, 1 },
                                .ls_det_st      = { 0x0114, 0, 0, 0, 1 },
                                .ls_det_clr     = { 0x0118, 0, 0, 0, 1 },
                                .utmi_avalid    = { 0x0120, 10, 10, 0, 1 },
                                .utmi_bvalid    = { 0x0120, 9, 9, 0, 1 },
+                               .utmi_id        = { 0x0120, 6, 6, 0, 1 },
                                .utmi_ls        = { 0x0120, 5, 4, 0, 1 },
                        },
                        [USB2PHY_PORT_HOST] = {
@@ -1453,8 +1526,12 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
                                .bvalid_det_en  = { 0xe3c0, 3, 3, 0, 1 },
                                .bvalid_det_st  = { 0xe3e0, 3, 3, 0, 1 },
                                .bvalid_det_clr = { 0xe3d0, 3, 3, 0, 1 },
+                               .id_det_en      = { 0xe3c0, 5, 4, 0, 3 },
+                               .id_det_st      = { 0xe3e0, 5, 4, 0, 3 },
+                               .id_det_clr     = { 0xe3d0, 5, 4, 0, 3 },
                                .utmi_avalid    = { 0xe2ac, 7, 7, 0, 1 },
                                .utmi_bvalid    = { 0xe2ac, 12, 12, 0, 1 },
+                               .utmi_id        = { 0xe2ac, 8, 8, 0, 1 },
                        },
                        [USB2PHY_PORT_HOST] = {
                                .phy_sus        = { 0xe458, 1, 0, 0x2, 0x1 },
@@ -1488,8 +1565,12 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
                                .bvalid_det_en  = { 0xe3c0, 8, 8, 0, 1 },
                                .bvalid_det_st  = { 0xe3e0, 8, 8, 0, 1 },
                                .bvalid_det_clr = { 0xe3d0, 8, 8, 0, 1 },
+                               .id_det_en      = { 0xe3c0, 10, 9, 0, 3 },
+                               .id_det_st      = { 0xe3e0, 10, 9, 0, 3 },
+                               .id_det_clr     = { 0xe3d0, 10, 9, 0, 3 },
                                .utmi_avalid    = { 0xe2ac, 10, 10, 0, 1 },
                                .utmi_bvalid    = { 0xe2ac, 16, 16, 0, 1 },
+                               .utmi_id        = { 0xe2ac, 11, 11, 0, 1 },
                        },
                        [USB2PHY_PORT_HOST] = {
                                .phy_sus        = { 0xe468, 1, 0, 0x2, 0x1 },
@@ -1512,11 +1593,15 @@ static const struct rockchip_usb2phy_cfg rk3568_phy_cfgs[] = {
                .port_cfgs      = {
                        [USB2PHY_PORT_OTG] = {
                                .phy_sus        = { 0x0000, 8, 0, 0, 0x1d1 },
-                               .bvalid_det_en  = { 0x0080, 2, 2, 0, 1 },
-                               .bvalid_det_st  = { 0x0084, 2, 2, 0, 1 },
-                               .bvalid_det_clr = { 0x0088, 2, 2, 0, 1 },
+                               .bvalid_det_en  = { 0x0080, 3, 2, 0, 3 },
+                               .bvalid_det_st  = { 0x0084, 3, 2, 0, 3 },
+                               .bvalid_det_clr = { 0x0088, 3, 2, 0, 3 },
+                               .id_det_en      = { 0x0080, 5, 4, 0, 3 },
+                               .id_det_st      = { 0x0084, 5, 4, 0, 3 },
+                               .id_det_clr     = { 0x0088, 5, 4, 0, 3 },
                                .utmi_avalid    = { 0x00c0, 10, 10, 0, 1 },
                                .utmi_bvalid    = { 0x00c0, 9, 9, 0, 1 },
+                               .utmi_id        = { 0x00c0, 6, 6, 0, 1 },
                        },
                        [USB2PHY_PORT_HOST] = {
                                /* Select suspend control from controller */
index d2bbdc9..d76440a 100644 (file)
@@ -1105,15 +1105,14 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
        struct phy_provider *phy_provider;
        struct resource *res;
        const struct rockchip_usb3phy_port_cfg *phy_cfgs;
-       const struct of_device_id *match;
        int index, ret;
 
        tcphy = devm_kzalloc(dev, sizeof(*tcphy), GFP_KERNEL);
        if (!tcphy)
                return -ENOMEM;
 
-       match = of_match_device(dev->driver->of_match_table, dev);
-       if (!match || !match->data) {
+       phy_cfgs = of_device_get_match_data(dev);
+       if (!phy_cfgs) {
                dev_err(dev, "phy configs are not assigned!\n");
                return -EINVAL;
        }
@@ -1123,7 +1122,6 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
        if (IS_ERR(tcphy->base))
                return PTR_ERR(tcphy->base);
 
-       phy_cfgs = match->data;
        /* find out a proper config which can be matched with dt. */
        index = 0;
        while (phy_cfgs[index].reg) {
index 386389f..d8c5f91 100644 (file)
@@ -55,55 +55,6 @@ out:
 static int nr_packages;
 static struct device *cpu_hwmon_dev;
 
-static SENSOR_DEVICE_ATTR(name, 0444, NULL, NULL, 0);
-
-static struct attribute *cpu_hwmon_attributes[] = {
-       &sensor_dev_attr_name.dev_attr.attr,
-       NULL
-};
-
-/* Hwmon device attribute group */
-static struct attribute_group cpu_hwmon_attribute_group = {
-       .attrs = cpu_hwmon_attributes,
-};
-
-static ssize_t get_cpu_temp(struct device *dev,
-                       struct device_attribute *attr, char *buf);
-static ssize_t cpu_temp_label(struct device *dev,
-                       struct device_attribute *attr, char *buf);
-
-static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp4_label, 0444, cpu_temp_label, NULL, 4);
-
-static const struct attribute *hwmon_cputemp[4][3] = {
-       {
-               &sensor_dev_attr_temp1_input.dev_attr.attr,
-               &sensor_dev_attr_temp1_label.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_temp2_input.dev_attr.attr,
-               &sensor_dev_attr_temp2_label.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_temp3_input.dev_attr.attr,
-               &sensor_dev_attr_temp3_label.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_temp4_input.dev_attr.attr,
-               &sensor_dev_attr_temp4_label.dev_attr.attr,
-               NULL
-       }
-};
-
 static ssize_t cpu_temp_label(struct device *dev,
                        struct device_attribute *attr, char *buf)
 {
@@ -121,24 +72,47 @@ static ssize_t get_cpu_temp(struct device *dev,
        return sprintf(buf, "%d\n", value);
 }
 
-static int create_sysfs_cputemp_files(struct kobject *kobj)
-{
-       int i, ret = 0;
-
-       for (i = 0; i < nr_packages; i++)
-               ret = sysfs_create_files(kobj, hwmon_cputemp[i]);
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_label, 0444, cpu_temp_label, NULL, 4);
 
-       return ret;
-}
+static struct attribute *cpu_hwmon_attributes[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       &sensor_dev_attr_temp1_label.dev_attr.attr,
+       &sensor_dev_attr_temp2_input.dev_attr.attr,
+       &sensor_dev_attr_temp2_label.dev_attr.attr,
+       &sensor_dev_attr_temp3_input.dev_attr.attr,
+       &sensor_dev_attr_temp3_label.dev_attr.attr,
+       &sensor_dev_attr_temp4_input.dev_attr.attr,
+       &sensor_dev_attr_temp4_label.dev_attr.attr,
+       NULL
+};
 
-static void remove_sysfs_cputemp_files(struct kobject *kobj)
+static umode_t cpu_hwmon_is_visible(struct kobject *kobj,
+                                   struct attribute *attr, int i)
 {
-       int i;
+       int id = i / 2;
 
-       for (i = 0; i < nr_packages; i++)
-               sysfs_remove_files(kobj, hwmon_cputemp[i]);
+       if (id < nr_packages)
+               return attr->mode;
+       return 0;
 }
 
+static struct attribute_group cpu_hwmon_group = {
+       .attrs = cpu_hwmon_attributes,
+       .is_visible = cpu_hwmon_is_visible,
+};
+
+static const struct attribute_group *cpu_hwmon_groups[] = {
+       &cpu_hwmon_group,
+       NULL
+};
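
The conversion above replaces the open-coded sysfs_create_files() bookkeeping with a single attribute group whose .is_visible callback hides the temp<N> entries of packages that are not present; each package contributes an input and a label attribute, hence the i / 2 mapping. A reduced sketch of the registration pattern with one attribute per package (so the index maps directly, without the /2); names and the placeholder reading are illustrative:

	#include <linux/hwmon.h>
	#include <linux/hwmon-sysfs.h>

	static int example_nr_packages;		/* set from topology at init time */

	static ssize_t example_temp_show(struct device *dev,
					 struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 42000);	/* placeholder reading */
	}

	static SENSOR_DEVICE_ATTR(temp1_input, 0444, example_temp_show, NULL, 1);
	static SENSOR_DEVICE_ATTR(temp2_input, 0444, example_temp_show, NULL, 2);

	static struct attribute *example_attrs[] = {
		&sensor_dev_attr_temp1_input.dev_attr.attr,
		&sensor_dev_attr_temp2_input.dev_attr.attr,
		NULL
	};

	static umode_t example_is_visible(struct kobject *kobj,
					  struct attribute *attr, int i)
	{
		/* One temp<N>_input per package; hide the ones that do not exist. */
		return i < example_nr_packages ? attr->mode : 0;
	}

	static const struct attribute_group example_group = {
		.attrs = example_attrs,
		.is_visible = example_is_visible,
	};

	static const struct attribute_group *example_groups[] = {
		&example_group,
		NULL
	};

	/* hwdev = hwmon_device_register_with_groups(NULL, "example_hwmon",
	 *					      NULL, example_groups);
	 */
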
+
 #define CPU_THERMAL_THRESHOLD 90000
 static struct delayed_work thermal_work;
 
@@ -159,50 +133,31 @@ static void do_thermal_timer(struct work_struct *work)
 
 static int __init loongson_hwmon_init(void)
 {
-       int ret;
-
        pr_info("Loongson Hwmon Enter...\n");
 
        if (cpu_has_csr())
                csr_temp_enable = csr_readl(LOONGSON_CSR_FEATURES) &
                                  LOONGSON_CSRF_TEMP;
 
-       cpu_hwmon_dev = hwmon_device_register_with_info(NULL, "cpu_hwmon", NULL, NULL, NULL);
-       if (IS_ERR(cpu_hwmon_dev)) {
-               ret = PTR_ERR(cpu_hwmon_dev);
-               pr_err("hwmon_device_register fail!\n");
-               goto fail_hwmon_device_register;
-       }
-
        nr_packages = loongson_sysconf.nr_cpus /
                loongson_sysconf.cores_per_package;
 
-       ret = create_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
-       if (ret) {
-               pr_err("fail to create cpu temperature interface!\n");
-               goto fail_create_sysfs_cputemp_files;
+       cpu_hwmon_dev = hwmon_device_register_with_groups(NULL, "cpu_hwmon",
+                                                         NULL, cpu_hwmon_groups);
+       if (IS_ERR(cpu_hwmon_dev)) {
+               pr_err("hwmon_device_register fail!\n");
+               return PTR_ERR(cpu_hwmon_dev);
        }
 
        INIT_DEFERRABLE_WORK(&thermal_work, do_thermal_timer);
        schedule_delayed_work(&thermal_work, msecs_to_jiffies(20000));
 
-       return ret;
-
-fail_create_sysfs_cputemp_files:
-       sysfs_remove_group(&cpu_hwmon_dev->kobj,
-                               &cpu_hwmon_attribute_group);
-       hwmon_device_unregister(cpu_hwmon_dev);
-
-fail_hwmon_device_register:
-       return ret;
+       return 0;
 }
 
 static void __exit loongson_hwmon_exit(void)
 {
        cancel_delayed_work_sync(&thermal_work);
-       remove_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
-       sysfs_remove_group(&cpu_hwmon_dev->kobj,
-                               &cpu_hwmon_attribute_group);
        hwmon_device_unregister(cpu_hwmon_dev);
 }
 
index 97ac588..ec8a404 100644 (file)
@@ -3037,13 +3037,6 @@ static int ab8500_fg_bind(struct device *dev, struct device *master,
 {
        struct ab8500_fg *di = dev_get_drvdata(dev);
 
-       /* Create a work queue for running the FG algorithm */
-       di->fg_wq = alloc_ordered_workqueue("ab8500_fg_wq", WQ_MEM_RECLAIM);
-       if (di->fg_wq == NULL) {
-               dev_err(dev, "failed to create work queue\n");
-               return -ENOMEM;
-       }
-
        di->bat_cap.max_mah_design = di->bm->bi->charge_full_design_uah;
        di->bat_cap.max_mah = di->bat_cap.max_mah_design;
        di->vbat_nom_uv = di->bm->bi->voltage_max_design_uv;
@@ -3067,8 +3060,7 @@ static void ab8500_fg_unbind(struct device *dev, struct device *master,
        if (ret)
                dev_err(dev, "failed to disable coulomb counter\n");
 
-       destroy_workqueue(di->fg_wq);
-       flush_scheduled_work();
+       flush_workqueue(di->fg_wq);
 }
 
 static const struct component_ops ab8500_fg_component_ops = {
@@ -3117,6 +3109,13 @@ static int ab8500_fg_probe(struct platform_device *pdev)
        ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
        ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INIT);
 
+       /* Create a work queue for running the FG algorithm */
+       di->fg_wq = alloc_ordered_workqueue("ab8500_fg_wq", WQ_MEM_RECLAIM);
+       if (di->fg_wq == NULL) {
+               dev_err(dev, "failed to create work queue\n");
+               return -ENOMEM;
+       }
+
        /* Init work for running the fg algorithm instantly */
        INIT_WORK(&di->fg_work, ab8500_fg_instant_work);
 
@@ -3227,6 +3226,8 @@ static int ab8500_fg_remove(struct platform_device *pdev)
 {
        struct ab8500_fg *di = platform_get_drvdata(pdev);
 
+       destroy_workqueue(di->fg_wq);
+       flush_scheduled_work();
        component_del(&pdev->dev, &ab8500_fg_component_ops);
        list_del(&di->node);
        ab8500_fg_sysfs_exit(di);
index 19746e6..15219ed 100644 (file)
@@ -865,17 +865,20 @@ static int axp288_charger_probe(struct platform_device *pdev)
        info->regmap_irqc = axp20x->regmap_irqc;
 
        info->cable.edev = extcon_get_extcon_dev(AXP288_EXTCON_DEV_NAME);
-       if (info->cable.edev == NULL) {
-               dev_dbg(dev, "%s is not ready, probe deferred\n",
-                       AXP288_EXTCON_DEV_NAME);
-               return -EPROBE_DEFER;
+       if (IS_ERR(info->cable.edev)) {
+               dev_err_probe(dev, PTR_ERR(info->cable.edev),
+                             "extcon_get_extcon_dev(%s) failed\n",
+                             AXP288_EXTCON_DEV_NAME);
+               return PTR_ERR(info->cable.edev);
        }
 
        if (acpi_dev_present(USB_HOST_EXTCON_HID, NULL, -1)) {
                info->otg.cable = extcon_get_extcon_dev(USB_HOST_EXTCON_NAME);
-               if (info->otg.cable == NULL) {
-                       dev_dbg(dev, "EXTCON_USB_HOST is not ready, probe deferred\n");
-                       return -EPROBE_DEFER;
+               if (IS_ERR(info->otg.cable)) {
+                       dev_err_probe(dev, PTR_ERR(info->otg.cable),
+                                     "extcon_get_extcon_dev(%s) failed\n",
+                                     USB_HOST_EXTCON_NAME);
+                       return PTR_ERR(info->otg.cable);
                }
                dev_info(dev, "Using " USB_HOST_EXTCON_HID " extcon for usb-id\n");
        }
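
The charger hunk above reflects the reworked extcon_get_extcon_dev() return contract used here: lookup failures, including probe deferral while the extcon device is not yet registered, now come back as ERR_PTR() values rather than NULL, so callers test IS_ERR() and can hand the code straight to dev_err_probe(). A sketch of the updated call pattern; the extcon name is only an example:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/extcon.h>

	static int example_get_cable(struct device *dev, struct extcon_dev **out)
	{
		struct extcon_dev *edev;

		edev = extcon_get_extcon_dev("axp288_extcon");	/* example name */
		if (IS_ERR(edev))
			return dev_err_probe(dev, PTR_ERR(edev),
					     "failed to get extcon device\n");

		*out = edev;
		return 0;
	}
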
index e9f285d..8e6f8a6 100644 (file)
@@ -90,6 +90,8 @@
 #define AXP288_REG_UPDATE_INTERVAL             (60 * HZ)
 #define AXP288_FG_INTR_NUM                     6
 
+#define AXP288_QUIRK_NO_BATTERY                        BIT(0)
+
 static bool no_current_sense_res;
 module_param(no_current_sense_res, bool, 0444);
 MODULE_PARM_DESC(no_current_sense_res, "No (or broken) current sense resistor");
@@ -524,7 +526,7 @@ static struct power_supply_desc fuel_gauge_desc = {
  * detection reports one despite it not being there.
  * Please keep this listed sorted alphabetically.
  */
-static const struct dmi_system_id axp288_no_battery_list[] = {
+static const struct dmi_system_id axp288_quirks[] = {
        {
                /* ACEPC T8 Cherry Trail Z8350 mini PC */
                .matches = {
@@ -534,6 +536,7 @@ static const struct dmi_system_id axp288_no_battery_list[] = {
                        /* also match on somewhat unique bios-version */
                        DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
                },
+               .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
        },
        {
                /* ACEPC T11 Cherry Trail Z8350 mini PC */
@@ -544,6 +547,7 @@ static const struct dmi_system_id axp288_no_battery_list[] = {
                        /* also match on somewhat unique bios-version */
                        DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
                },
+               .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
        },
        {
                /* Intel Cherry Trail Compute Stick, Windows version */
@@ -551,6 +555,7 @@ static const struct dmi_system_id axp288_no_battery_list[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"),
                },
+               .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
        },
        {
                /* Intel Cherry Trail Compute Stick, version without an OS */
@@ -558,34 +563,54 @@ static const struct dmi_system_id axp288_no_battery_list[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"),
                },
+               .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
        },
        {
                /* Meegopad T02 */
                .matches = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "MEEGOPAD T02"),
                },
+               .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
        },
        {       /* Mele PCG03 Mini PC */
                .matches = {
                        DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Mini PC"),
                        DMI_EXACT_MATCH(DMI_BOARD_NAME, "Mini PC"),
                },
+               .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
        },
        {
                /* Minix Neo Z83-4 mini PC */
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
-               }
+               },
+               .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
        },
        {
-               /* Various Ace PC/Meegopad/MinisForum/Wintel Mini-PCs/HDMI-sticks */
+               /*
+                * One Mix 1, this uses the "T3 MRD" boardname used by
+                * generic mini PCs, but it is a mini laptop so it does
+                * actually have a battery!
+                */
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+                       DMI_MATCH(DMI_BIOS_DATE, "06/14/2018"),
+               },
+               .driver_data = NULL,
+       },
+       {
+               /*
+                * Various Ace PC/Meegopad/MinisForum/Wintel Mini-PCs/HDMI-sticks
+                * This entry must be last because it is generic; this allows
+                * adding more specific quirks overriding this generic entry.
+                */
                .matches = {
                        DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
                        DMI_MATCH(DMI_CHASSIS_TYPE, "3"),
                        DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
-                       DMI_MATCH(DMI_BIOS_VERSION, "5.11"),
                },
+               .driver_data = (void *)AXP288_QUIRK_NO_BATTERY,
        },
        {}
 };
@@ -665,7 +690,9 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
                [BAT_D_CURR] = "axp288-chrg-d-curr",
                [BAT_VOLT] = "axp288-batt-volt",
        };
+       const struct dmi_system_id *dmi_id;
        struct device *dev = &pdev->dev;
+       unsigned long quirks = 0;
        int i, pirq, ret;
 
        /*
@@ -675,7 +702,11 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
        if (!acpi_quirk_skip_acpi_ac_and_battery())
                return -ENODEV;
 
-       if (dmi_check_system(axp288_no_battery_list))
+       dmi_id = dmi_first_match(axp288_quirks);
+       if (dmi_id)
+               quirks = (unsigned long)dmi_id->driver_data;
+
+       if (quirks & AXP288_QUIRK_NO_BATTERY)
                return -ENODEV;
 
        info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
index aa1a589..27f5c76 100644 (file)
@@ -455,11 +455,9 @@ static ssize_t bq24190_sysfs_show(struct device *dev,
        if (!info)
                return -EINVAL;
 
-       ret = pm_runtime_get_sync(bdi->dev);
-       if (ret < 0) {
-               pm_runtime_put_noidle(bdi->dev);
+       ret = pm_runtime_resume_and_get(bdi->dev);
+       if (ret < 0)
                return ret;
-       }
 
        ret = bq24190_read_mask(bdi, info->reg, info->mask, info->shift, &v);
        if (ret)
@@ -490,11 +488,9 @@ static ssize_t bq24190_sysfs_store(struct device *dev,
        if (ret < 0)
                return ret;
 
-       ret = pm_runtime_get_sync(bdi->dev);
-       if (ret < 0) {
-               pm_runtime_put_noidle(bdi->dev);
+       ret = pm_runtime_resume_and_get(bdi->dev);
+       if (ret < 0)
                return ret;
-       }
 
        ret = bq24190_write_mask(bdi, info->reg, info->mask, info->shift, v);
        if (ret)
@@ -512,10 +508,9 @@ static int bq24190_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
        union power_supply_propval val = { .intval = bdi->charge_type };
        int ret;
 
-       ret = pm_runtime_get_sync(bdi->dev);
+       ret = pm_runtime_resume_and_get(bdi->dev);
        if (ret < 0) {
                dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret);
-               pm_runtime_put_noidle(bdi->dev);
                return ret;
        }
 
@@ -551,10 +546,9 @@ static int bq24190_vbus_is_enabled(struct regulator_dev *dev)
        int ret;
        u8 val;
 
-       ret = pm_runtime_get_sync(bdi->dev);
+       ret = pm_runtime_resume_and_get(bdi->dev);
        if (ret < 0) {
                dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret);
-               pm_runtime_put_noidle(bdi->dev);
                return ret;
        }
 
@@ -1128,11 +1122,9 @@ static int bq24190_charger_get_property(struct power_supply *psy,
 
        dev_dbg(bdi->dev, "prop: %d\n", psp);
 
-       ret = pm_runtime_get_sync(bdi->dev);
-       if (ret < 0) {
-               pm_runtime_put_noidle(bdi->dev);
+       ret = pm_runtime_resume_and_get(bdi->dev);
+       if (ret < 0)
                return ret;
-       }
 
        switch (psp) {
        case POWER_SUPPLY_PROP_CHARGE_TYPE:
@@ -1204,11 +1196,9 @@ static int bq24190_charger_set_property(struct power_supply *psy,
 
        dev_dbg(bdi->dev, "prop: %d\n", psp);
 
-       ret = pm_runtime_get_sync(bdi->dev);
-       if (ret < 0) {
-               pm_runtime_put_noidle(bdi->dev);
+       ret = pm_runtime_resume_and_get(bdi->dev);
+       if (ret < 0)
                return ret;
-       }
 
        switch (psp) {
        case POWER_SUPPLY_PROP_ONLINE:
@@ -1477,11 +1467,9 @@ static int bq24190_battery_get_property(struct power_supply *psy,
        dev_warn(bdi->dev, "warning: /sys/class/power_supply/bq24190-battery is deprecated\n");
        dev_dbg(bdi->dev, "prop: %d\n", psp);
 
-       ret = pm_runtime_get_sync(bdi->dev);
-       if (ret < 0) {
-               pm_runtime_put_noidle(bdi->dev);
+       ret = pm_runtime_resume_and_get(bdi->dev);
+       if (ret < 0)
                return ret;
-       }
 
        switch (psp) {
        case POWER_SUPPLY_PROP_STATUS:
@@ -1525,11 +1513,9 @@ static int bq24190_battery_set_property(struct power_supply *psy,
        dev_warn(bdi->dev, "warning: /sys/class/power_supply/bq24190-battery is deprecated\n");
        dev_dbg(bdi->dev, "prop: %d\n", psp);
 
-       ret = pm_runtime_get_sync(bdi->dev);
-       if (ret < 0) {
-               pm_runtime_put_noidle(bdi->dev);
+       ret = pm_runtime_resume_and_get(bdi->dev);
+       if (ret < 0)
                return ret;
-       }
 
        switch (psp) {
        case POWER_SUPPLY_PROP_ONLINE:
@@ -1683,10 +1669,9 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
        int error;
 
        bdi->irq_event = true;
-       error = pm_runtime_get_sync(bdi->dev);
+       error = pm_runtime_resume_and_get(bdi->dev);
        if (error < 0) {
                dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
-               pm_runtime_put_noidle(bdi->dev);
                return IRQ_NONE;
        }
        bq24190_check_status(bdi);
@@ -1921,11 +1906,9 @@ static int bq24190_remove(struct i2c_client *client)
        struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
        int error;
 
-       error = pm_runtime_get_sync(bdi->dev);
-       if (error < 0) {
+       error = pm_runtime_resume_and_get(bdi->dev);
+       if (error < 0)
                dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
-               pm_runtime_put_noidle(bdi->dev);
-       }
 
        bq24190_register_reset(bdi);
        if (bdi->battery)
@@ -1982,11 +1965,9 @@ static __maybe_unused int bq24190_pm_suspend(struct device *dev)
        struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
        int error;
 
-       error = pm_runtime_get_sync(bdi->dev);
-       if (error < 0) {
+       error = pm_runtime_resume_and_get(bdi->dev);
+       if (error < 0)
                dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
-               pm_runtime_put_noidle(bdi->dev);
-       }
 
        bq24190_register_reset(bdi);
 
@@ -2007,11 +1988,9 @@ static __maybe_unused int bq24190_pm_resume(struct device *dev)
        bdi->f_reg = 0;
        bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */
 
-       error = pm_runtime_get_sync(bdi->dev);
-       if (error < 0) {
+       error = pm_runtime_resume_and_get(bdi->dev);
+       if (error < 0)
                dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
-               pm_runtime_put_noidle(bdi->dev);
-       }
 
        bq24190_register_reset(bdi);
        bq24190_set_config(bdi);
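
The repeated conversions above rely on pm_runtime_resume_and_get() dropping the usage count by itself when the resume fails, which is why the explicit pm_runtime_put_noidle() calls can go away. A minimal sketch of the resulting caller pattern (the helper name is made up, not part of the driver):

    static int example_bq24190_io(struct bq24190_dev_info *bdi)
    {
            int ret;

            /* On failure the usage count is already dropped, no put_noidle needed. */
            ret = pm_runtime_resume_and_get(bdi->dev);
            if (ret < 0)
                    return ret;

            /* ... talk to the charger here ... */

            pm_runtime_mark_last_busy(bdi->dev);
            pm_runtime_put_autosuspend(bdi->dev);
            return 0;
    }
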
index 72e727c..35e6a39 100644 (file)
@@ -1572,14 +1572,6 @@ static int bq27xxx_battery_read_charge(struct bq27xxx_device_info *di, u8 reg)
  */
 static inline int bq27xxx_battery_read_nac(struct bq27xxx_device_info *di)
 {
-       int flags;
-
-       if (di->opts & BQ27XXX_O_ZERO) {
-               flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, true);
-               if (flags >= 0 && (flags & BQ27000_FLAG_CI))
-                       return -ENODATA;
-       }
-
        return bq27xxx_battery_read_charge(di, BQ27XXX_REG_NAC);
 }
 
@@ -1742,6 +1734,18 @@ static bool bq27xxx_battery_dead(struct bq27xxx_device_info *di, u16 flags)
                return flags & (BQ27XXX_FLAG_SOC1 | BQ27XXX_FLAG_SOCF);
 }
 
+/*
+ * Returns true if reported battery capacity is inaccurate
+ */
+static bool bq27xxx_battery_capacity_inaccurate(struct bq27xxx_device_info *di,
+                                                u16 flags)
+{
+       if (di->opts & BQ27XXX_O_HAS_CI)
+               return (flags & BQ27000_FLAG_CI);
+       else
+               return false;
+}
+
 static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
 {
        /* Unlikely but important to return first */
@@ -1751,6 +1755,8 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
                return POWER_SUPPLY_HEALTH_COLD;
        if (unlikely(bq27xxx_battery_dead(di, di->cache.flags)))
                return POWER_SUPPLY_HEALTH_DEAD;
+       if (unlikely(bq27xxx_battery_capacity_inaccurate(di, di->cache.flags)))
+               return POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED;
 
        return POWER_SUPPLY_HEALTH_GOOD;
 }
@@ -1758,7 +1764,6 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
 void bq27xxx_battery_update(struct bq27xxx_device_info *di)
 {
        struct bq27xxx_reg_cache cache = {0, };
-       bool has_ci_flag = di->opts & BQ27XXX_O_HAS_CI;
        bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
 
        cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
@@ -1766,30 +1771,19 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
                cache.flags = -1; /* read error */
        if (cache.flags >= 0) {
                cache.temperature = bq27xxx_battery_read_temperature(di);
-               if (has_ci_flag && (cache.flags & BQ27000_FLAG_CI)) {
-                       dev_info_once(di->dev, "battery is not calibrated! ignoring capacity values\n");
-                       cache.capacity = -ENODATA;
-                       cache.energy = -ENODATA;
-                       cache.time_to_empty = -ENODATA;
-                       cache.time_to_empty_avg = -ENODATA;
-                       cache.time_to_full = -ENODATA;
-                       cache.charge_full = -ENODATA;
-                       cache.health = -ENODATA;
-               } else {
-                       if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
-                               cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
-                       if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
-                               cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
-                       if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
-                               cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
-
-                       cache.charge_full = bq27xxx_battery_read_fcc(di);
-                       cache.capacity = bq27xxx_battery_read_soc(di);
-                       if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
-                               cache.energy = bq27xxx_battery_read_energy(di);
-                       di->cache.flags = cache.flags;
-                       cache.health = bq27xxx_battery_read_health(di);
-               }
+               if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
+                       cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
+               if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
+                       cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
+               if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
+                       cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
+
+               cache.charge_full = bq27xxx_battery_read_fcc(di);
+               cache.capacity = bq27xxx_battery_read_soc(di);
+               if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
+                       cache.energy = bq27xxx_battery_read_energy(di);
+               di->cache.flags = cache.flags;
+               cache.health = bq27xxx_battery_read_health(di);
                if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
                        cache.cycle_count = bq27xxx_battery_read_cyct(di);
 
index d67edb7..92db794 100644 (file)
@@ -985,13 +985,10 @@ static int charger_extcon_init(struct charger_manager *cm,
        cable->nb.notifier_call = charger_extcon_notifier;
 
        cable->extcon_dev = extcon_get_extcon_dev(cable->extcon_name);
-       if (IS_ERR_OR_NULL(cable->extcon_dev)) {
+       if (IS_ERR(cable->extcon_dev)) {
                pr_err("Cannot find extcon_dev for %s (cable: %s)\n",
                        cable->extcon_name, cable->name);
-               if (cable->extcon_dev == NULL)
-                       return -EPROBE_DEFER;
-               else
-                       return PTR_ERR(cable->extcon_dev);
+               return PTR_ERR(cable->extcon_dev);
        }
 
        for (i = 0; i < ARRAY_SIZE(extcon_mapping); i++) {
index 127c73b..1ec3535 100644 (file)
@@ -242,10 +242,10 @@ static int max8997_battery_probe(struct platform_device *pdev)
                dev_info(&pdev->dev, "couldn't get charger regulator\n");
        }
        charger->edev = extcon_get_extcon_dev("max8997-muic");
-       if (IS_ERR_OR_NULL(charger->edev)) {
-               if (!charger->edev)
-                       return -EPROBE_DEFER;
-               dev_info(charger->dev, "couldn't get extcon device\n");
+       if (IS_ERR(charger->edev)) {
+               dev_err_probe(charger->dev, PTR_ERR(charger->edev),
+                             "couldn't get extcon device: max8997-muic\n");
+               return PTR_ERR(charger->edev);
        }
 
        if (!IS_ERR(charger->reg) && !IS_ERR_OR_NULL(charger->edev)) {
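
Both extcon hunks above drop the NULL special case, which implies extcon_get_extcon_dev() now reports a not-yet-registered device as an ERR_PTR (typically -EPROBE_DEFER) rather than NULL. A minimal caller sketch under that assumption (the wrapper function is hypothetical):

    static int example_get_muic(struct device *dev, struct extcon_dev **out)
    {
            struct extcon_dev *edev = extcon_get_extcon_dev("max8997-muic");

            if (IS_ERR(edev))       /* covers -EPROBE_DEFER as well */
                    return dev_err_probe(dev, PTR_ERR(edev),
                                         "couldn't get extcon device\n");

            *out = edev;
            return 0;
    }
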
index d925cb1..fad5890 100644 (file)
@@ -616,7 +616,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
                goto out_put_node;
        }
 
-       info = devm_kmalloc(&psy->dev, sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&psy->dev, sizeof(*info), GFP_KERNEL);
        if (!info) {
                err = -ENOMEM;
                goto out_put_node;
index 32cc31c..73d4aca 100644 (file)
 #include <linux/delay.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 
 #include <asm/mach-types.h>
-#include <mach/tosa.h>
 
 static DEFINE_MUTEX(bat_lock); /* protects gpio pins */
 static struct work_struct bat_work;
@@ -28,22 +27,23 @@ struct tosa_bat {
        struct mutex work_lock; /* protects data */
 
        bool (*is_present)(struct tosa_bat *bat);
-       int gpio_full;
-       int gpio_charge_off;
+       struct gpio_desc *gpiod_full;
+       struct gpio_desc *gpiod_charge_off;
 
        int technology;
 
-       int gpio_bat;
+       struct gpio_desc *gpiod_bat;
        int adc_bat;
        int adc_bat_divider;
        int bat_max;
        int bat_min;
 
-       int gpio_temp;
+       struct gpio_desc *gpiod_temp;
        int adc_temp;
        int adc_temp_divider;
 };
 
+static struct gpio_desc *jacket_detect;
 static struct tosa_bat tosa_bat_main;
 static struct tosa_bat tosa_bat_jacket;
 
@@ -51,15 +51,15 @@ static unsigned long tosa_read_bat(struct tosa_bat *bat)
 {
        unsigned long value = 0;
 
-       if (bat->gpio_bat < 0 || bat->adc_bat < 0)
+       if (!bat->gpiod_bat || bat->adc_bat < 0)
                return 0;
 
        mutex_lock(&bat_lock);
-       gpio_set_value(bat->gpio_bat, 1);
+       gpiod_set_value(bat->gpiod_bat, 1);
        msleep(5);
        value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy->dev.parent),
                        bat->adc_bat);
-       gpio_set_value(bat->gpio_bat, 0);
+       gpiod_set_value(bat->gpiod_bat, 0);
        mutex_unlock(&bat_lock);
 
        value = value * 1000000 / bat->adc_bat_divider;
@@ -71,15 +71,15 @@ static unsigned long tosa_read_temp(struct tosa_bat *bat)
 {
        unsigned long value = 0;
 
-       if (bat->gpio_temp < 0 || bat->adc_temp < 0)
+       if (!bat->gpiod_temp || bat->adc_temp < 0)
                return 0;
 
        mutex_lock(&bat_lock);
-       gpio_set_value(bat->gpio_temp, 1);
+       gpiod_set_value(bat->gpiod_temp, 1);
        msleep(5);
        value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy->dev.parent),
                        bat->adc_temp);
-       gpio_set_value(bat->gpio_temp, 0);
+       gpiod_set_value(bat->gpiod_temp, 0);
        mutex_unlock(&bat_lock);
 
        value = value * 10000 / bat->adc_temp_divider;
@@ -136,7 +136,7 @@ static int tosa_bat_get_property(struct power_supply *psy,
 
 static bool tosa_jacket_bat_is_present(struct tosa_bat *bat)
 {
-       return gpio_get_value(TOSA_GPIO_JACKET_DETECT) == 0;
+       return gpiod_get_value(jacket_detect) == 0;
 }
 
 static void tosa_bat_external_power_changed(struct power_supply *psy)
@@ -166,23 +166,23 @@ static void tosa_bat_update(struct tosa_bat *bat)
                bat->full_chrg = -1;
        } else if (power_supply_am_i_supplied(psy)) {
                if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING) {
-                       gpio_set_value(bat->gpio_charge_off, 0);
+                       gpiod_set_value(bat->gpiod_charge_off, 0);
                        mdelay(15);
                }
 
-               if (gpio_get_value(bat->gpio_full)) {
+               if (gpiod_get_value(bat->gpiod_full)) {
                        if (old == POWER_SUPPLY_STATUS_CHARGING ||
                                        bat->full_chrg == -1)
                                bat->full_chrg = tosa_read_bat(bat);
 
-                       gpio_set_value(bat->gpio_charge_off, 1);
+                       gpiod_set_value(bat->gpiod_charge_off, 1);
                        bat->status = POWER_SUPPLY_STATUS_FULL;
                } else {
-                       gpio_set_value(bat->gpio_charge_off, 0);
+                       gpiod_set_value(bat->gpiod_charge_off, 0);
                        bat->status = POWER_SUPPLY_STATUS_CHARGING;
                }
        } else {
-               gpio_set_value(bat->gpio_charge_off, 1);
+               gpiod_set_value(bat->gpiod_charge_off, 1);
                bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
        }
 
@@ -251,18 +251,18 @@ static struct tosa_bat tosa_bat_main = {
        .full_chrg = -1,
        .psy = NULL,
 
-       .gpio_full = TOSA_GPIO_BAT0_CRG,
-       .gpio_charge_off = TOSA_GPIO_CHARGE_OFF,
+       .gpiod_full = NULL,
+       .gpiod_charge_off = NULL,
 
        .technology = POWER_SUPPLY_TECHNOLOGY_LIPO,
 
-       .gpio_bat = TOSA_GPIO_BAT0_V_ON,
+       .gpiod_bat = NULL,
        .adc_bat = WM97XX_AUX_ID3,
        .adc_bat_divider = 414,
        .bat_max = 4310000,
        .bat_min = 1551 * 1000000 / 414,
 
-       .gpio_temp = TOSA_GPIO_BAT1_TH_ON,
+       .gpiod_temp = NULL,
        .adc_temp = WM97XX_AUX_ID2,
        .adc_temp_divider = 10000,
 };
@@ -273,18 +273,18 @@ static struct tosa_bat tosa_bat_jacket = {
        .psy = NULL,
 
        .is_present = tosa_jacket_bat_is_present,
-       .gpio_full = TOSA_GPIO_BAT1_CRG,
-       .gpio_charge_off = TOSA_GPIO_CHARGE_OFF_JC,
+       .gpiod_full = NULL,
+       .gpiod_charge_off = NULL,
 
        .technology = POWER_SUPPLY_TECHNOLOGY_LIPO,
 
-       .gpio_bat = TOSA_GPIO_BAT1_V_ON,
+       .gpiod_bat = NULL,
        .adc_bat = WM97XX_AUX_ID3,
        .adc_bat_divider = 414,
        .bat_max = 4310000,
        .bat_min = 1551 * 1000000 / 414,
 
-       .gpio_temp = TOSA_GPIO_BAT0_TH_ON,
+       .gpiod_temp = NULL,
        .adc_temp = WM97XX_AUX_ID2,
        .adc_temp_divider = 10000,
 };
@@ -294,36 +294,20 @@ static struct tosa_bat tosa_bat_bu = {
        .full_chrg = -1,
        .psy = NULL,
 
-       .gpio_full = -1,
-       .gpio_charge_off = -1,
+       .gpiod_full = NULL,
+       .gpiod_charge_off = NULL,
 
        .technology = POWER_SUPPLY_TECHNOLOGY_LiMn,
 
-       .gpio_bat = TOSA_GPIO_BU_CHRG_ON,
+       .gpiod_bat = NULL,
        .adc_bat = WM97XX_AUX_ID4,
        .adc_bat_divider = 1266,
 
-       .gpio_temp = -1,
+       .gpiod_temp = NULL,
        .adc_temp = -1,
        .adc_temp_divider = -1,
 };
 
-static struct gpio tosa_bat_gpios[] = {
-       { TOSA_GPIO_CHARGE_OFF,    GPIOF_OUT_INIT_HIGH, "main charge off" },
-       { TOSA_GPIO_CHARGE_OFF_JC, GPIOF_OUT_INIT_HIGH, "jacket charge off" },
-       { TOSA_GPIO_BAT_SW_ON,     GPIOF_OUT_INIT_LOW,  "battery switch" },
-       { TOSA_GPIO_BAT0_V_ON,     GPIOF_OUT_INIT_LOW,  "main battery" },
-       { TOSA_GPIO_BAT1_V_ON,     GPIOF_OUT_INIT_LOW,  "jacket battery" },
-       { TOSA_GPIO_BAT1_TH_ON,    GPIOF_OUT_INIT_LOW,  "main battery temp" },
-       { TOSA_GPIO_BAT0_TH_ON,    GPIOF_OUT_INIT_LOW,  "jacket battery temp" },
-       { TOSA_GPIO_BU_CHRG_ON,    GPIOF_OUT_INIT_LOW,  "backup battery" },
-       { TOSA_GPIO_BAT0_CRG,      GPIOF_IN,            "main battery full" },
-       { TOSA_GPIO_BAT1_CRG,      GPIOF_IN,            "jacket battery full" },
-       { TOSA_GPIO_BAT0_LOW,      GPIOF_IN,            "main battery low" },
-       { TOSA_GPIO_BAT1_LOW,      GPIOF_IN,            "jacket battery low" },
-       { TOSA_GPIO_JACKET_DETECT, GPIOF_IN,            "jacket detect" },
-};
-
 #ifdef CONFIG_PM
 static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state)
 {
@@ -343,19 +327,83 @@ static int tosa_bat_resume(struct platform_device *dev)
 #define tosa_bat_resume NULL
 #endif
 
-static int tosa_bat_probe(struct platform_device *dev)
+static int tosa_bat_probe(struct platform_device *pdev)
 {
        int ret;
        struct power_supply_config main_psy_cfg = {},
                                   jacket_psy_cfg = {},
                                   bu_psy_cfg = {};
+       struct device *dev = &pdev->dev;
+       struct gpio_desc *dummy;
 
        if (!machine_is_tosa())
                return -ENODEV;
 
-       ret = gpio_request_array(tosa_bat_gpios, ARRAY_SIZE(tosa_bat_gpios));
-       if (ret)
-               return ret;
+       /* Main charging control GPIOs */
+       tosa_bat_main.gpiod_charge_off = devm_gpiod_get(dev, "main charge off", GPIOD_OUT_HIGH);
+       if (IS_ERR(tosa_bat_main.gpiod_charge_off))
+               return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_charge_off),
+                                    "no main charger GPIO\n");
+       tosa_bat_jacket.gpiod_charge_off = devm_gpiod_get(dev, "jacket charge off", GPIOD_OUT_HIGH);
+       if (IS_ERR(tosa_bat_jacket.gpiod_charge_off))
+               return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_charge_off),
+                                    "no jacket charger GPIO\n");
+
+       /* Per-battery output check (routes battery voltage to ADC) */
+       tosa_bat_main.gpiod_bat = devm_gpiod_get(dev, "main battery", GPIOD_OUT_LOW);
+       if (IS_ERR(tosa_bat_main.gpiod_bat))
+               return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_bat),
+                                    "no main battery GPIO\n");
+       tosa_bat_jacket.gpiod_bat = devm_gpiod_get(dev, "jacket battery", GPIOD_OUT_LOW);
+       if (IS_ERR(tosa_bat_jacket.gpiod_bat))
+               return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_bat),
+                                    "no jacket battery GPIO\n");
+       tosa_bat_bu.gpiod_bat = devm_gpiod_get(dev, "backup battery", GPIOD_OUT_LOW);
+       if (IS_ERR(tosa_bat_bu.gpiod_bat))
+               return dev_err_probe(dev, PTR_ERR(tosa_bat_bu.gpiod_bat),
+                                    "no backup battery GPIO\n");
+
+       /* Battery full detect GPIOs (using PXA SoC GPIOs) */
+       tosa_bat_main.gpiod_full = devm_gpiod_get(dev, "main battery full", GPIOD_IN);
+       if (IS_ERR(tosa_bat_main.gpiod_full))
+               return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_full),
+                                    "no main battery full GPIO\n");
+       tosa_bat_jacket.gpiod_full = devm_gpiod_get(dev, "jacket battery full", GPIOD_IN);
+       if (IS_ERR(tosa_bat_jacket.gpiod_full))
+               return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_full),
+                                    "no jacket battery full GPIO\n");
+
+       /* Battery temperature GPIOs (routes thermistor voltage to ADC) */
+       tosa_bat_main.gpiod_temp = devm_gpiod_get(dev, "main battery temp", GPIOD_OUT_LOW);
+       if (IS_ERR(tosa_bat_main.gpiod_temp))
+               return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_temp),
+                                    "no main battery temp GPIO\n");
+       tosa_bat_jacket.gpiod_temp = devm_gpiod_get(dev, "jacket battery temp", GPIOD_OUT_LOW);
+       if (IS_ERR(tosa_bat_jacket.gpiod_temp))
+               return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_temp),
+                                    "no jacket battery temp GPIO\n");
+
+       /* Jacket detect GPIO */
+                * This is not expected to happen any time soon, so this is only
+                * a safeguard to prevent the following calculation from
+                * overflowing. With this restriction, clk_rate * period_ns /
+                * NSEC_PER_SEC is not greater than period_ns and so fits into a u64.
+
+       /* Battery low indication GPIOs (not used, we just request them) */
+       dummy = devm_gpiod_get(dev, "main battery low", GPIOD_IN);
+       if (IS_ERR(dummy))
+               return dev_err_probe(dev, PTR_ERR(dummy),
+                                    "no main battery low GPIO\n");
+       dummy = devm_gpiod_get(dev, "jacket battery low", GPIOD_IN);
+       if (IS_ERR(dummy))
+               return dev_err_probe(dev, PTR_ERR(dummy),
+                                    "no jacket battery low GPIO\n");
+
+       /* Battery switch GPIO (not used, just requested) */
+       dummy = devm_gpiod_get(dev, "battery switch", GPIOD_OUT_LOW);
+       if (IS_ERR(dummy))
+               return dev_err_probe(dev, PTR_ERR(dummy),
+                                    "no battery switch GPIO\n");
 
        mutex_init(&tosa_bat_main.work_lock);
        mutex_init(&tosa_bat_jacket.work_lock);
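
Because the probe path above now requests descriptors by consumer name, those names have to be wired up outside the driver, typically through a gpiod lookup table in the PXA board file (not part of this diff). A rough sketch of the shape such a table takes, with a made-up chip label, made-up line offsets and an assumed device name:

    /* Sketch only: "gpio-pxa", the offsets and the dev_id are assumptions. */
    #include <linux/gpio/machine.h>

    static struct gpiod_lookup_table tosa_bat_gpio_table = {
            .dev_id = "wm97xx-battery",
            .table = {
                    GPIO_LOOKUP("gpio-pxa", 1, "main charge off", GPIO_ACTIVE_HIGH),
                    GPIO_LOOKUP("gpio-pxa", 2, "jacket charge off", GPIO_ACTIVE_HIGH),
                    GPIO_LOOKUP("gpio-pxa", 3, "jacket detect", GPIO_ACTIVE_HIGH),
                    /* ... one entry per consumer name requested in probe ... */
                    { },
            },
    };

    /* Registered from the board file before this driver probes: */
    gpiod_add_lookup_table(&tosa_bat_gpio_table);
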
@@ -363,7 +411,7 @@ static int tosa_bat_probe(struct platform_device *dev)
        INIT_WORK(&bat_work, tosa_bat_work);
 
        main_psy_cfg.drv_data = &tosa_bat_main;
-       tosa_bat_main.psy = power_supply_register(&dev->dev,
+       tosa_bat_main.psy = power_supply_register(dev,
                                                  &tosa_bat_main_desc,
                                                  &main_psy_cfg);
        if (IS_ERR(tosa_bat_main.psy)) {
@@ -372,7 +420,7 @@ static int tosa_bat_probe(struct platform_device *dev)
        }
 
        jacket_psy_cfg.drv_data = &tosa_bat_jacket;
-       tosa_bat_jacket.psy = power_supply_register(&dev->dev,
+       tosa_bat_jacket.psy = power_supply_register(dev,
                                                    &tosa_bat_jacket_desc,
                                                    &jacket_psy_cfg);
        if (IS_ERR(tosa_bat_jacket.psy)) {
@@ -381,28 +429,28 @@ static int tosa_bat_probe(struct platform_device *dev)
        }
 
        bu_psy_cfg.drv_data = &tosa_bat_bu;
-       tosa_bat_bu.psy = power_supply_register(&dev->dev, &tosa_bat_bu_desc,
+       tosa_bat_bu.psy = power_supply_register(dev, &tosa_bat_bu_desc,
                                                &bu_psy_cfg);
        if (IS_ERR(tosa_bat_bu.psy)) {
                ret = PTR_ERR(tosa_bat_bu.psy);
                goto err_psy_reg_bu;
        }
 
-       ret = request_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG),
+       ret = request_irq(gpiod_to_irq(tosa_bat_main.gpiod_full),
                                tosa_bat_gpio_isr,
                                IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                                "main full", &tosa_bat_main);
        if (ret)
                goto err_req_main;
 
-       ret = request_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG),
+       ret = request_irq(gpiod_to_irq(tosa_bat_jacket.gpiod_full),
                                tosa_bat_gpio_isr,
                                IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                                "jacket full", &tosa_bat_jacket);
        if (ret)
                goto err_req_jacket;
 
-       ret = request_irq(gpio_to_irq(TOSA_GPIO_JACKET_DETECT),
+       ret = request_irq(gpiod_to_irq(jacket_detect),
                                tosa_bat_gpio_isr,
                                IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                                "jacket detect", &tosa_bat_jacket);
@@ -411,9 +459,9 @@ static int tosa_bat_probe(struct platform_device *dev)
                return 0;
        }
 
-       free_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG), &tosa_bat_jacket);
+       free_irq(gpiod_to_irq(tosa_bat_jacket.gpiod_full), &tosa_bat_jacket);
 err_req_jacket:
-       free_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG), &tosa_bat_main);
+       free_irq(gpiod_to_irq(tosa_bat_main.gpiod_full), &tosa_bat_main);
 err_req_main:
        power_supply_unregister(tosa_bat_bu.psy);
 err_psy_reg_bu:
@@ -425,15 +473,14 @@ err_psy_reg_main:
        /* see comment in tosa_bat_remove */
        cancel_work_sync(&bat_work);
 
-       gpio_free_array(tosa_bat_gpios, ARRAY_SIZE(tosa_bat_gpios));
        return ret;
 }
 
 static int tosa_bat_remove(struct platform_device *dev)
 {
-       free_irq(gpio_to_irq(TOSA_GPIO_JACKET_DETECT), &tosa_bat_jacket);
-       free_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG), &tosa_bat_jacket);
-       free_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG), &tosa_bat_main);
+       free_irq(gpiod_to_irq(jacket_detect), &tosa_bat_jacket);
+       free_irq(gpiod_to_irq(tosa_bat_jacket.gpiod_full), &tosa_bat_jacket);
+       free_irq(gpiod_to_irq(tosa_bat_main.gpiod_full), &tosa_bat_main);
 
        power_supply_unregister(tosa_bat_bu.psy);
        power_supply_unregister(tosa_bat_jacket.psy);
@@ -445,7 +492,6 @@ static int tosa_bat_remove(struct platform_device *dev)
         * unregistered now.
         */
        cancel_work_sync(&bat_work);
-       gpio_free_array(tosa_bat_gpios, ARRAY_SIZE(tosa_bat_gpios));
        return 0;
 }
 
index cb258e1..c9d451b 100644 (file)
@@ -267,7 +267,7 @@ static int arm_tod_read_trig_sel_refclk(struct idtcm_channel *channel, u8 ref)
 static bool is_single_shot(u8 mask)
 {
        /* Treat single bit ToD masks as continuous trigger */
-       return mask <= 8 && is_power_of_2(mask);
+       return !(mask <= 8 && is_power_of_2(mask));
 }
 
 static int idtcm_extts_enable(struct idtcm_channel *channel,
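
Given the comment above, a ToD mask with a single bit set in the low byte now selects a continuous trigger, so is_single_shot() has to return false for exactly those values. A standalone restatement of the fixed predicate that makes the truth table explicit (illustration only; the driver uses the kernel's is_power_of_2()):

    #include <stdbool.h>
    #include <stdio.h>

    static bool is_single_shot(unsigned int mask)
    {
            bool single_bit = mask && !(mask & (mask - 1));     /* is_power_of_2() */

            return !(mask <= 8 && single_bit);
    }

    int main(void)
    {
            /* 0x01, 0x08: single low bit -> continuous -> 0; 0x03, 0x10 -> 1 */
            printf("%d %d %d %d\n", is_single_shot(0x01), is_single_shot(0x08),
                   is_single_shot(0x03), is_single_shot(0x10));
            return 0;
    }
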
index 21e3b05..904de8d 100644 (file)
@@ -572,6 +572,17 @@ config PWM_SUN4I
          To compile this driver as a module, choose M here: the module
          will be called pwm-sun4i.
 
+config PWM_SUNPLUS
+       tristate "Sunplus PWM support"
+       depends on ARCH_SUNPLUS || COMPILE_TEST
+       depends on HAS_IOMEM && OF
+       help
+         Generic PWM framework driver for the PWM controller on
+         Sunplus SoCs.
+
+         To compile this driver as a module, choose M here: the module
+         will be called pwm-sunplus.
+
 config PWM_TEGRA
        tristate "NVIDIA Tegra PWM support"
        depends on ARCH_TEGRA || COMPILE_TEST
@@ -640,4 +651,18 @@ config PWM_VT8500
          To compile this driver as a module, choose M here: the module
          will be called pwm-vt8500.
 
+config PWM_XILINX
+       tristate "Xilinx AXI Timer PWM support"
+       depends on OF_ADDRESS
+       depends on COMMON_CLK
+       select REGMAP_MMIO
+       help
+         PWM driver for Xilinx LogiCORE IP AXI timers. This timer is
+         typically a soft core which may be present in Xilinx FPGAs.
+         This device may also be present in Microblaze soft processors.
+         If you don't have this IP in your design, choose N.
+
+         To compile this driver as a module, choose M here: the module
+         will be called pwm-xilinx.
+
 endif
index 708840b..5c08bdb 100644 (file)
@@ -53,6 +53,7 @@ obj-$(CONFIG_PWM_STM32)               += pwm-stm32.o
 obj-$(CONFIG_PWM_STM32_LP)     += pwm-stm32-lp.o
 obj-$(CONFIG_PWM_STMPE)                += pwm-stmpe.o
 obj-$(CONFIG_PWM_SUN4I)                += pwm-sun4i.o
+obj-$(CONFIG_PWM_SUNPLUS)      += pwm-sunplus.o
 obj-$(CONFIG_PWM_TEGRA)                += pwm-tegra.o
 obj-$(CONFIG_PWM_TIECAP)       += pwm-tiecap.o
 obj-$(CONFIG_PWM_TIEHRPWM)     += pwm-tiehrpwm.o
@@ -60,3 +61,4 @@ obj-$(CONFIG_PWM_TWL)         += pwm-twl.o
 obj-$(CONFIG_PWM_TWL_LED)      += pwm-twl-led.o
 obj-$(CONFIG_PWM_VISCONTI)     += pwm-visconti.o
 obj-$(CONFIG_PWM_VT8500)       += pwm-vt8500.o
+obj-$(CONFIG_PWM_XILINX)       += pwm-xilinx.o
index 36f7ea3..3977a0f 100644 (file)
@@ -61,7 +61,7 @@ struct atmel_tcb_pwm_chip {
        struct atmel_tcb_channel bkup;
 };
 
-const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128, 0, };
+static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128, 0, };
 
 static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
 {
@@ -72,7 +72,8 @@ static int atmel_tcb_pwm_set_polarity(struct pwm_chip *chip,
                                      struct pwm_device *pwm,
                                      enum pwm_polarity polarity)
 {
-       struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+       struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+       struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
 
        tcbpwm->polarity = polarity;
 
@@ -97,7 +98,6 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
                return ret;
        }
 
-       pwm_set_chip_data(pwm, tcbpwm);
        tcbpwm->polarity = PWM_POLARITY_NORMAL;
        tcbpwm->duty = 0;
        tcbpwm->period = 0;
@@ -139,7 +139,7 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
 static void atmel_tcb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
-       struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+       struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
 
        clk_disable_unprepare(tcbpwmc->clk);
        tcbpwmc->pwms[pwm->hwpwm] = NULL;
@@ -149,7 +149,7 @@ static void atmel_tcb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
 static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
-       struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+       struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
        unsigned cmr;
        enum pwm_polarity polarity = tcbpwm->polarity;
 
@@ -206,7 +206,7 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
-       struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+       struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
        u32 cmr;
        enum pwm_polarity polarity = tcbpwm->polarity;
 
@@ -291,7 +291,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
                                int duty_ns, int period_ns)
 {
        struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
-       struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
+       struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
        struct atmel_tcb_pwm_device *atcbpwm = NULL;
        int i = 0;
        int slowclk = 0;
index d7ad886..b0d9114 100644 (file)
@@ -23,29 +23,6 @@ static inline struct clps711x_chip *to_clps711x_chip(struct pwm_chip *chip)
        return container_of(chip, struct clps711x_chip, chip);
 }
 
-static void clps711x_pwm_update_val(struct clps711x_chip *priv, u32 n, u32 v)
-{
-       /* PWM0 - bits 4..7, PWM1 - bits 8..11 */
-       u32 shift = (n + 1) * 4;
-       unsigned long flags;
-       u32 tmp;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       tmp = readl(priv->pmpcon);
-       tmp &= ~(0xf << shift);
-       tmp |= v << shift;
-       writel(tmp, priv->pmpcon);
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static unsigned int clps711x_get_duty(struct pwm_device *pwm, unsigned int v)
-{
-       /* Duty cycle 0..15 max */
-       return DIV64_U64_ROUND_CLOSEST(v * 0xf, pwm->args.period);
-}
-
 static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct clps711x_chip *priv = to_clps711x_chip(chip);
@@ -60,44 +37,41 @@ static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
        return 0;
 }
 
-static int clps711x_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
-                              int duty_ns, int period_ns)
+static int clps711x_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                             const struct pwm_state *state)
 {
        struct clps711x_chip *priv = to_clps711x_chip(chip);
-       unsigned int duty;
+       /* PWM0 - bits 4..7, PWM1 - bits 8..11 */
+       u32 shift = (pwm->hwpwm + 1) * 4;
+       unsigned long flags;
+       u32 pmpcon, val;
 
-       if (period_ns != pwm->args.period)
+       if (state->polarity != PWM_POLARITY_NORMAL)
                return -EINVAL;
 
-       duty = clps711x_get_duty(pwm, duty_ns);
-       clps711x_pwm_update_val(priv, pwm->hwpwm, duty);
-
-       return 0;
-}
+       if (state->period != pwm->args.period)
+               return -EINVAL;
 
-static int clps711x_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
-       struct clps711x_chip *priv = to_clps711x_chip(chip);
-       unsigned int duty;
+       if (state->enabled)
+               val = mul_u64_u64_div_u64(state->duty_cycle, 0xf, state->period);
+       else
+               val = 0;
 
-       duty = clps711x_get_duty(pwm, pwm_get_duty_cycle(pwm));
-       clps711x_pwm_update_val(priv, pwm->hwpwm, duty);
+       spin_lock_irqsave(&priv->lock, flags);
 
-       return 0;
-}
+       pmpcon = readl(priv->pmpcon);
+       pmpcon &= ~(0xf << shift);
+       pmpcon |= val << shift;
+       writel(pmpcon, priv->pmpcon);
 
-static void clps711x_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
-       struct clps711x_chip *priv = to_clps711x_chip(chip);
+       spin_unlock_irqrestore(&priv->lock, flags);
 
-       clps711x_pwm_update_val(priv, pwm->hwpwm, 0);
+       return 0;
 }
 
 static const struct pwm_ops clps711x_pwm_ops = {
        .request = clps711x_pwm_request,
-       .config = clps711x_pwm_config,
-       .enable = clps711x_pwm_enable,
-       .disable = clps711x_pwm_disable,
+       .apply = clps711x_pwm_apply,
        .owner = THIS_MODULE,
 };
 
index 5e29d9c..7f10f56 100644 (file)
 #include <linux/pwm.h>
 #include <linux/slab.h>
 
+#include <dt-bindings/mfd/cros_ec.h>
+
 /**
  * struct cros_ec_pwm_device - Driver data for EC PWM
  *
  * @dev: Device node
  * @ec: Pointer to EC device
  * @chip: PWM controller chip
+ * @use_pwm_type: Use PWM types instead of generic channels
  */
 struct cros_ec_pwm_device {
        struct device *dev;
        struct cros_ec_device *ec;
        struct pwm_chip chip;
+       bool use_pwm_type;
 };
 
 /**
@@ -58,14 +62,31 @@ static void cros_ec_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
        kfree(channel);
 }
 
-static int cros_ec_pwm_set_duty(struct cros_ec_device *ec, u8 index, u16 duty)
+static int cros_ec_dt_type_to_pwm_type(u8 dt_index, u8 *pwm_type)
 {
+       switch (dt_index) {
+       case CROS_EC_PWM_DT_KB_LIGHT:
+               *pwm_type = EC_PWM_TYPE_KB_LIGHT;
+               return 0;
+       case CROS_EC_PWM_DT_DISPLAY_LIGHT:
+               *pwm_type = EC_PWM_TYPE_DISPLAY_LIGHT;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int cros_ec_pwm_set_duty(struct cros_ec_pwm_device *ec_pwm, u8 index,
+                               u16 duty)
+{
+       struct cros_ec_device *ec = ec_pwm->ec;
        struct {
                struct cros_ec_command msg;
                struct ec_params_pwm_set_duty params;
        } __packed buf;
        struct ec_params_pwm_set_duty *params = &buf.params;
        struct cros_ec_command *msg = &buf.msg;
+       int ret;
 
        memset(&buf, 0, sizeof(buf));
 
@@ -75,14 +96,25 @@ static int cros_ec_pwm_set_duty(struct cros_ec_device *ec, u8 index, u16 duty)
        msg->outsize = sizeof(*params);
 
        params->duty = duty;
-       params->pwm_type = EC_PWM_TYPE_GENERIC;
-       params->index = index;
+
+       if (ec_pwm->use_pwm_type) {
+               ret = cros_ec_dt_type_to_pwm_type(index, &params->pwm_type);
+               if (ret) {
+                       dev_err(ec->dev, "Invalid PWM type index: %d\n", index);
+                       return ret;
+               }
+               params->index = 0;
+       } else {
+               params->pwm_type = EC_PWM_TYPE_GENERIC;
+               params->index = index;
+       }
 
        return cros_ec_cmd_xfer_status(ec, msg);
 }
 
-static int cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index)
+static int cros_ec_pwm_get_duty(struct cros_ec_pwm_device *ec_pwm, u8 index)
 {
+       struct cros_ec_device *ec = ec_pwm->ec;
        struct {
                struct cros_ec_command msg;
                union {
@@ -102,8 +134,17 @@ static int cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index)
        msg->insize = sizeof(*resp);
        msg->outsize = sizeof(*params);
 
-       params->pwm_type = EC_PWM_TYPE_GENERIC;
-       params->index = index;
+       if (ec_pwm->use_pwm_type) {
+               ret = cros_ec_dt_type_to_pwm_type(index, &params->pwm_type);
+               if (ret) {
+                       dev_err(ec->dev, "Invalid PWM type index: %d\n", index);
+                       return ret;
+               }
+               params->index = 0;
+       } else {
+               params->pwm_type = EC_PWM_TYPE_GENERIC;
+               params->index = index;
+       }
 
        ret = cros_ec_cmd_xfer_status(ec, msg);
        if (ret < 0)
@@ -133,7 +174,7 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
         */
        duty_cycle = state->enabled ? state->duty_cycle : 0;
 
-       ret = cros_ec_pwm_set_duty(ec_pwm->ec, pwm->hwpwm, duty_cycle);
+       ret = cros_ec_pwm_set_duty(ec_pwm, pwm->hwpwm, duty_cycle);
        if (ret < 0)
                return ret;
 
@@ -149,7 +190,7 @@ static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
        struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
        int ret;
 
-       ret = cros_ec_pwm_get_duty(ec_pwm->ec, pwm->hwpwm);
+       ret = cros_ec_pwm_get_duty(ec_pwm, pwm->hwpwm);
        if (ret < 0) {
                dev_err(chip->dev, "error getting initial duty: %d\n", ret);
                return;
@@ -204,13 +245,13 @@ static const struct pwm_ops cros_ec_pwm_ops = {
  * of PWMs it supports directly, so we have to read the pwm duty cycle for
  * subsequent channels until we get an error.
  */
-static int cros_ec_num_pwms(struct cros_ec_device *ec)
+static int cros_ec_num_pwms(struct cros_ec_pwm_device *ec_pwm)
 {
        int i, ret;
 
        /* The index field is only 8 bits */
        for (i = 0; i <= U8_MAX; i++) {
-               ret = cros_ec_pwm_get_duty(ec, i);
+               ret = cros_ec_pwm_get_duty(ec_pwm, i);
                /*
                 * We look for SUCCESS, INVALID_COMMAND, or INVALID_PARAM
                 * responses; everything else is treated as an error.
@@ -236,6 +277,7 @@ static int cros_ec_pwm_probe(struct platform_device *pdev)
 {
        struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
        struct device *dev = &pdev->dev;
+       struct device_node *np = pdev->dev.of_node;
        struct cros_ec_pwm_device *ec_pwm;
        struct pwm_chip *chip;
        int ret;
@@ -251,17 +293,26 @@ static int cros_ec_pwm_probe(struct platform_device *pdev)
        chip = &ec_pwm->chip;
        ec_pwm->ec = ec;
 
+       if (of_device_is_compatible(np, "google,cros-ec-pwm-type"))
+               ec_pwm->use_pwm_type = true;
+
        /* PWM chip */
        chip->dev = dev;
        chip->ops = &cros_ec_pwm_ops;
        chip->of_xlate = cros_ec_pwm_xlate;
        chip->of_pwm_n_cells = 1;
-       ret = cros_ec_num_pwms(ec);
-       if (ret < 0) {
-               dev_err(dev, "Couldn't find PWMs: %d\n", ret);
-               return ret;
+
+       if (ec_pwm->use_pwm_type) {
+               chip->npwm = CROS_EC_PWM_DT_COUNT;
+       } else {
+               ret = cros_ec_num_pwms(ec_pwm);
+               if (ret < 0) {
+                       dev_err(dev, "Couldn't find PWMs: %d\n", ret);
+                       return ret;
+               }
+               chip->npwm = ret;
        }
-       chip->npwm = ret;
+
        dev_dbg(dev, "Probed %u PWMs\n", chip->npwm);
 
        ret = pwmchip_add(chip);
@@ -288,6 +339,7 @@ static int cros_ec_pwm_remove(struct platform_device *dev)
 #ifdef CONFIG_OF
 static const struct of_device_id cros_ec_pwm_of_match[] = {
        { .compatible = "google,cros-ec-pwm" },
+       { .compatible = "google,cros-ec-pwm-type" },
        {},
 };
 MODULE_DEVICE_TABLE(of, cros_ec_pwm_of_match);
index ea17d44..215ef90 100644 (file)
@@ -93,7 +93,7 @@ static void lp3943_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
 }
 
 static int lp3943_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
-                            int duty_ns, int period_ns)
+                            u64 duty_ns, u64 period_ns)
 {
        struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
        struct lp3943 *lp3943 = lp3943_pwm->lp3943;
@@ -118,14 +118,20 @@ static int lp3943_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
                reg_duty     = LP3943_REG_PWM1;
        }
 
-       period_ns = clamp(period_ns, LP3943_MIN_PERIOD, LP3943_MAX_PERIOD);
-       val       = (u8)(period_ns / LP3943_MIN_PERIOD - 1);
+       /*
+        * Note that after this clamping, period_ns fits into an int. This is
+        * helpful because we can resort to integer division below instead of
+        * the (more expensive) 64 bit division.
+        */
+       period_ns = clamp(period_ns, (u64)LP3943_MIN_PERIOD, (u64)LP3943_MAX_PERIOD);
+       val       = (u8)((int)period_ns / LP3943_MIN_PERIOD - 1);
 
        err = lp3943_write_byte(lp3943, reg_prescale, val);
        if (err)
                return err;
 
-       val = (u8)(duty_ns * LP3943_MAX_DUTY / period_ns);
+       duty_ns = min(duty_ns, period_ns);
+       val = (u8)((int)duty_ns * LP3943_MAX_DUTY / (int)period_ns);
 
        return lp3943_write_byte(lp3943, reg_duty, val);
 }
@@ -182,12 +188,34 @@ static void lp3943_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
        lp3943_pwm_set_mode(lp3943_pwm, pwm_map, LP3943_GPIO_OUT_HIGH);
 }
 
+static int lp3943_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                           const struct pwm_state *state)
+{
+       int err;
+
+       if (state->polarity != PWM_POLARITY_NORMAL)
+               return -EINVAL;
+
+       if (!state->enabled) {
+               if (pwm->state.enabled)
+                       lp3943_pwm_disable(chip, pwm);
+               return 0;
+       }
+
+       err = lp3943_pwm_config(chip, pwm, state->duty_cycle, state->period);
+       if (err)
+               return err;
+
+       if (!pwm->state.enabled)
+               err = lp3943_pwm_enable(chip, pwm);
+
+       return err;
+}
+
 static const struct pwm_ops lp3943_pwm_ops = {
        .request        = lp3943_pwm_request,
        .free           = lp3943_pwm_free,
-       .config         = lp3943_pwm_config,
-       .enable         = lp3943_pwm_enable,
-       .disable        = lp3943_pwm_disable,
+       .apply          = lp3943_pwm_apply,
        .owner          = THIS_MODULE,
 };
 
index b909096..272e0b5 100644 (file)
@@ -226,14 +226,7 @@ static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        return 0;
 }
 
-static int lpc18xx_pwm_set_polarity(struct pwm_chip *chip,
-                                   struct pwm_device *pwm,
-                                   enum pwm_polarity polarity)
-{
-       return 0;
-}
-
-static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm, enum pwm_polarity polarity)
 {
        struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
        struct lpc18xx_pwm_data *lpc18xx_data = &lpc18xx_pwm->channeldata[pwm->hwpwm];
@@ -249,7 +242,7 @@ static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
                           LPC18XX_PWM_EVSTATEMSK(lpc18xx_data->duty_event),
                           LPC18XX_PWM_EVSTATEMSK_ALL);
 
-       if (pwm_get_polarity(pwm) == PWM_POLARITY_NORMAL) {
+       if (polarity == PWM_POLARITY_NORMAL) {
                set_event = lpc18xx_pwm->period_event;
                clear_event = lpc18xx_data->duty_event;
                res_action = LPC18XX_PWM_RES_SET;
@@ -308,11 +301,35 @@ static void lpc18xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
        clear_bit(lpc18xx_data->duty_event, &lpc18xx_pwm->event_map);
 }
 
+static int lpc18xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                            const struct pwm_state *state)
+{
+       int err;
+       bool enabled = pwm->state.enabled;
+
+       if (state->polarity != pwm->state.polarity && pwm->state.enabled) {
+               lpc18xx_pwm_disable(chip, pwm);
+               enabled = false;
+       }
+
+       if (!state->enabled) {
+               if (enabled)
+                       lpc18xx_pwm_disable(chip, pwm);
+
+               return 0;
+       }
+
+       err = lpc18xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       if (err)
+               return err;
+
+       if (!enabled)
+               err = lpc18xx_pwm_enable(chip, pwm, state->polarity);
+
+       return err;
+}
 static const struct pwm_ops lpc18xx_pwm_ops = {
-       .config = lpc18xx_pwm_config,
-       .set_polarity = lpc18xx_pwm_set_polarity,
-       .enable = lpc18xx_pwm_enable,
-       .disable = lpc18xx_pwm_disable,
+       .apply = lpc18xx_pwm_apply,
        .request = lpc18xx_pwm_request,
        .free = lpc18xx_pwm_free,
        .owner = THIS_MODULE,
index ddeab56..86a0ea0 100644 (file)
@@ -88,10 +88,33 @@ static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
        clk_disable_unprepare(lpc32xx->clk);
 }
 
+static int lpc32xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                            const struct pwm_state *state)
+{
+       int err;
+
+       if (state->polarity != PWM_POLARITY_NORMAL)
+               return -EINVAL;
+
+       if (!state->enabled) {
+               if (pwm->state.enabled)
+                       lpc32xx_pwm_disable(chip, pwm);
+
+               return 0;
+       }
+
+       err = lpc32xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       if (err)
+               return err;
+
+       if (!pwm->state.enabled)
+               err = lpc32xx_pwm_enable(chip, pwm);
+
+       return err;
+}
+
 static const struct pwm_ops lpc32xx_pwm_ops = {
-       .config = lpc32xx_pwm_config,
-       .enable = lpc32xx_pwm_enable,
-       .disable = lpc32xx_pwm_disable,
+       .apply = lpc32xx_pwm_apply,
        .owner = THIS_MODULE,
 };
 
index 568b13a..d28c087 100644 (file)
@@ -198,10 +198,33 @@ static void pwm_mediatek_disable(struct pwm_chip *chip, struct pwm_device *pwm)
        pwm_mediatek_clk_disable(chip, pwm);
 }
 
+static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                             const struct pwm_state *state)
+{
+       int err;
+
+       if (state->polarity != PWM_POLARITY_NORMAL)
+               return -EINVAL;
+
+       if (!state->enabled) {
+               if (pwm->state.enabled)
+                       pwm_mediatek_disable(chip, pwm);
+
+               return 0;
+       }
+
+       err = pwm_mediatek_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       if (err)
+               return err;
+
+       if (!pwm->state.enabled)
+               err = pwm_mediatek_enable(chip, pwm);
+
+       return err;
+}
+
 static const struct pwm_ops pwm_mediatek_ops = {
-       .config = pwm_mediatek_config,
-       .enable = pwm_mediatek_enable,
-       .disable = pwm_mediatek_disable,
+       .apply = pwm_mediatek_apply,
        .owner = THIS_MODULE,
 };
 
@@ -264,6 +287,12 @@ static const struct pwm_mediatek_of_data mt2712_pwm_data = {
        .has_ck_26m_sel = false,
 };
 
+static const struct pwm_mediatek_of_data mt6795_pwm_data = {
+       .num_pwms = 7,
+       .pwm45_fixup = false,
+       .has_ck_26m_sel = false,
+};
+
 static const struct pwm_mediatek_of_data mt7622_pwm_data = {
        .num_pwms = 6,
        .pwm45_fixup = false,
@@ -302,6 +331,7 @@ static const struct pwm_mediatek_of_data mt8516_pwm_data = {
 
 static const struct of_device_id pwm_mediatek_of_match[] = {
        { .compatible = "mediatek,mt2712-pwm", .data = &mt2712_pwm_data },
+       { .compatible = "mediatek,mt6795-pwm", .data = &mt6795_pwm_data },
        { .compatible = "mediatek,mt7622-pwm", .data = &mt7622_pwm_data },
        { .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data },
        { .compatible = "mediatek,mt7628-pwm", .data = &mt7628_pwm_data },
index e52e29f..6ff7302 100644 (file)
@@ -66,7 +66,7 @@ static int raspberrypi_pwm_get_property(struct rpi_firmware *firmware,
                                        u32 reg, u32 *val)
 {
        struct raspberrypi_pwm_prop msg = {
-               .reg = reg
+               .reg = cpu_to_le32(reg),
        };
        int ret;
 
index 4381df9..d731161 100644 (file)
@@ -89,71 +89,71 @@ struct tpu_device {
 
 #define to_tpu_device(c)       container_of(c, struct tpu_device, chip)
 
-static void tpu_pwm_write(struct tpu_pwm_device *pwm, int reg_nr, u16 value)
+static void tpu_pwm_write(struct tpu_pwm_device *tpd, int reg_nr, u16 value)
 {
-       void __iomem *base = pwm->tpu->base + TPU_CHANNEL_OFFSET
-                          + pwm->channel * TPU_CHANNEL_SIZE;
+       void __iomem *base = tpd->tpu->base + TPU_CHANNEL_OFFSET
+                          + tpd->channel * TPU_CHANNEL_SIZE;
 
        iowrite16(value, base + reg_nr);
 }
 
-static void tpu_pwm_set_pin(struct tpu_pwm_device *pwm,
+static void tpu_pwm_set_pin(struct tpu_pwm_device *tpd,
                            enum tpu_pin_state state)
 {
        static const char * const states[] = { "inactive", "PWM", "active" };
 
-       dev_dbg(&pwm->tpu->pdev->dev, "%u: configuring pin as %s\n",
-               pwm->channel, states[state]);
+       dev_dbg(&tpd->tpu->pdev->dev, "%u: configuring pin as %s\n",
+               tpd->channel, states[state]);
 
        switch (state) {
        case TPU_PIN_INACTIVE:
-               tpu_pwm_write(pwm, TPU_TIORn,
-                             pwm->polarity == PWM_POLARITY_INVERSED ?
+               tpu_pwm_write(tpd, TPU_TIORn,
+                             tpd->polarity == PWM_POLARITY_INVERSED ?
                              TPU_TIOR_IOA_1 : TPU_TIOR_IOA_0);
                break;
        case TPU_PIN_PWM:
-               tpu_pwm_write(pwm, TPU_TIORn,
-                             pwm->polarity == PWM_POLARITY_INVERSED ?
+               tpu_pwm_write(tpd, TPU_TIORn,
+                             tpd->polarity == PWM_POLARITY_INVERSED ?
                              TPU_TIOR_IOA_0_SET : TPU_TIOR_IOA_1_CLR);
                break;
        case TPU_PIN_ACTIVE:
-               tpu_pwm_write(pwm, TPU_TIORn,
-                             pwm->polarity == PWM_POLARITY_INVERSED ?
+               tpu_pwm_write(tpd, TPU_TIORn,
+                             tpd->polarity == PWM_POLARITY_INVERSED ?
                              TPU_TIOR_IOA_0 : TPU_TIOR_IOA_1);
                break;
        }
 }
 
-static void tpu_pwm_start_stop(struct tpu_pwm_device *pwm, int start)
+static void tpu_pwm_start_stop(struct tpu_pwm_device *tpd, int start)
 {
        unsigned long flags;
        u16 value;
 
-       spin_lock_irqsave(&pwm->tpu->lock, flags);
-       value = ioread16(pwm->tpu->base + TPU_TSTR);
+       spin_lock_irqsave(&tpd->tpu->lock, flags);
+       value = ioread16(tpd->tpu->base + TPU_TSTR);
 
        if (start)
-               value |= 1 << pwm->channel;
+               value |= 1 << tpd->channel;
        else
-               value &= ~(1 << pwm->channel);
+               value &= ~(1 << tpd->channel);
 
-       iowrite16(value, pwm->tpu->base + TPU_TSTR);
-       spin_unlock_irqrestore(&pwm->tpu->lock, flags);
+       iowrite16(value, tpd->tpu->base + TPU_TSTR);
+       spin_unlock_irqrestore(&tpd->tpu->lock, flags);
 }
 
-static int tpu_pwm_timer_start(struct tpu_pwm_device *pwm)
+static int tpu_pwm_timer_start(struct tpu_pwm_device *tpd)
 {
        int ret;
 
-       if (!pwm->timer_on) {
+       if (!tpd->timer_on) {
                /* Wake up device and enable clock. */
-               pm_runtime_get_sync(&pwm->tpu->pdev->dev);
-               ret = clk_prepare_enable(pwm->tpu->clk);
+               pm_runtime_get_sync(&tpd->tpu->pdev->dev);
+               ret = clk_prepare_enable(tpd->tpu->clk);
                if (ret) {
-                       dev_err(&pwm->tpu->pdev->dev, "cannot enable clock\n");
+                       dev_err(&tpd->tpu->pdev->dev, "cannot enable clock\n");
                        return ret;
                }
-               pwm->timer_on = true;
+               tpd->timer_on = true;
        }
 
        /*
@@ -161,8 +161,8 @@ static int tpu_pwm_timer_start(struct tpu_pwm_device *pwm)
         * completely. First drive the pin to the inactive state to avoid
         * glitches.
         */
-       tpu_pwm_set_pin(pwm, TPU_PIN_INACTIVE);
-       tpu_pwm_start_stop(pwm, false);
+       tpu_pwm_set_pin(tpd, TPU_PIN_INACTIVE);
+       tpu_pwm_start_stop(tpd, false);
 
        /*
         * - Clear TCNT on TGRB match
@@ -172,142 +172,168 @@ static int tpu_pwm_timer_start(struct tpu_pwm_device *pwm)
         * - Output 1 until TGRA, output 0 until TGRB (active high polarity
         * - PWM mode
         */
-       tpu_pwm_write(pwm, TPU_TCRn, TPU_TCR_CCLR_TGRB | TPU_TCR_CKEG_RISING |
-                     pwm->prescaler);
-       tpu_pwm_write(pwm, TPU_TMDRn, TPU_TMDR_MD_PWM);
-       tpu_pwm_set_pin(pwm, TPU_PIN_PWM);
-       tpu_pwm_write(pwm, TPU_TGRAn, pwm->duty);
-       tpu_pwm_write(pwm, TPU_TGRBn, pwm->period);
+       tpu_pwm_write(tpd, TPU_TCRn, TPU_TCR_CCLR_TGRB | TPU_TCR_CKEG_RISING |
+                     tpd->prescaler);
+       tpu_pwm_write(tpd, TPU_TMDRn, TPU_TMDR_MD_PWM);
+       tpu_pwm_set_pin(tpd, TPU_PIN_PWM);
+       tpu_pwm_write(tpd, TPU_TGRAn, tpd->duty);
+       tpu_pwm_write(tpd, TPU_TGRBn, tpd->period);
 
-       dev_dbg(&pwm->tpu->pdev->dev, "%u: TGRA 0x%04x TGRB 0x%04x\n",
-               pwm->channel, pwm->duty, pwm->period);
+       dev_dbg(&tpd->tpu->pdev->dev, "%u: TGRA 0x%04x TGRB 0x%04x\n",
+               tpd->channel, tpd->duty, tpd->period);
 
        /* Start the channel. */
-       tpu_pwm_start_stop(pwm, true);
+       tpu_pwm_start_stop(tpd, true);
 
        return 0;
 }
 
-static void tpu_pwm_timer_stop(struct tpu_pwm_device *pwm)
+static void tpu_pwm_timer_stop(struct tpu_pwm_device *tpd)
 {
-       if (!pwm->timer_on)
+       if (!tpd->timer_on)
                return;
 
        /* Disable channel. */
-       tpu_pwm_start_stop(pwm, false);
+       tpu_pwm_start_stop(tpd, false);
 
        /* Stop clock and mark device as idle. */
-       clk_disable_unprepare(pwm->tpu->clk);
-       pm_runtime_put(&pwm->tpu->pdev->dev);
+       clk_disable_unprepare(tpd->tpu->clk);
+       pm_runtime_put(&tpd->tpu->pdev->dev);
 
-       pwm->timer_on = false;
+       tpd->timer_on = false;
 }
 
 /* -----------------------------------------------------------------------------
  * PWM API
  */
 
-static int tpu_pwm_request(struct pwm_chip *chip, struct pwm_device *_pwm)
+static int tpu_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct tpu_device *tpu = to_tpu_device(chip);
-       struct tpu_pwm_device *pwm;
+       struct tpu_pwm_device *tpd;
 
-       if (_pwm->hwpwm >= TPU_CHANNEL_MAX)
+       if (pwm->hwpwm >= TPU_CHANNEL_MAX)
                return -EINVAL;
 
-       pwm = kzalloc(sizeof(*pwm), GFP_KERNEL);
-       if (pwm == NULL)
+       tpd = kzalloc(sizeof(*tpd), GFP_KERNEL);
+       if (tpd == NULL)
                return -ENOMEM;
 
-       pwm->tpu = tpu;
-       pwm->channel = _pwm->hwpwm;
-       pwm->polarity = PWM_POLARITY_NORMAL;
-       pwm->prescaler = 0;
-       pwm->period = 0;
-       pwm->duty = 0;
+       tpd->tpu = tpu;
+       tpd->channel = pwm->hwpwm;
+       tpd->polarity = PWM_POLARITY_NORMAL;
+       tpd->prescaler = 0;
+       tpd->period = 0;
+       tpd->duty = 0;
 
-       pwm->timer_on = false;
+       tpd->timer_on = false;
 
-       pwm_set_chip_data(_pwm, pwm);
+       pwm_set_chip_data(pwm, tpd);
 
        return 0;
 }
 
-static void tpu_pwm_free(struct pwm_chip *chip, struct pwm_device *_pwm)
+static void tpu_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
 {
-       struct tpu_pwm_device *pwm = pwm_get_chip_data(_pwm);
+       struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
 
-       tpu_pwm_timer_stop(pwm);
-       kfree(pwm);
+       tpu_pwm_timer_stop(tpd);
+       kfree(tpd);
 }
 
-static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *_pwm,
-                         int duty_ns, int period_ns)
+static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+                         u64 duty_ns, u64 period_ns, bool enabled)
 {
-       static const unsigned int prescalers[] = { 1, 4, 16, 64 };
-       struct tpu_pwm_device *pwm = pwm_get_chip_data(_pwm);
+       struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
        struct tpu_device *tpu = to_tpu_device(chip);
        unsigned int prescaler;
        bool duty_only = false;
        u32 clk_rate;
-       u32 period;
+       u64 period;
        u32 duty;
        int ret;
 
+       clk_rate = clk_get_rate(tpu->clk);
+       if (unlikely(clk_rate > NSEC_PER_SEC)) {
+               /*
+                * This won't happen in the near future, so this is only a
+                * safeguard to prevent the following calculation from
+                * overflowing. With this limit, clk_rate * period_ns / NSEC_PER_SEC
+                * is not greater than period_ns and so fits into a u64.
+                */
+               return -EINVAL;
+       }
+
+       period = mul_u64_u64_div_u64(clk_rate, period_ns, NSEC_PER_SEC);
+
        /*
-        * Pick a prescaler to avoid overflowing the counter.
-        * TODO: Pick the highest acceptable prescaler.
+        * Find the minimal prescaler in [0..3] such that
+        *
+        *     period >> (2 * prescaler) < 0x10000
+        *
+        * This could be calculated using something like:
+        *
+        *     prescaler = max(ilog2(period) / 2, 7) - 7;
+        *
+        * but given that there are only four allowed results and that ilog2 isn't
+        * cheap on all platforms, using a switch statement is more effective.
         */
-       clk_rate = clk_get_rate(tpu->clk);
+       switch (period) {
+       case 1 ... 0xffff:
+               prescaler = 0;
+               break;
 
-       for (prescaler = 0; prescaler < ARRAY_SIZE(prescalers); ++prescaler) {
-               period = clk_rate / prescalers[prescaler]
-                      / (NSEC_PER_SEC / period_ns);
-               if (period <= 0xffff)
-                       break;
-       }
+       case 0x10000 ... 0x3ffff:
+               prescaler = 1;
+               break;
 
-       if (prescaler == ARRAY_SIZE(prescalers) || period == 0) {
-               dev_err(&tpu->pdev->dev, "clock rate mismatch\n");
-               return -ENOTSUPP;
+       case 0x40000 ... 0xfffff:
+               prescaler = 2;
+               break;
+
+       case 0x100000 ... 0x3fffff:
+               prescaler = 3;
+               break;
+
+       default:
+               return -EINVAL;
        }
 
-       if (duty_ns) {
-               duty = clk_rate / prescalers[prescaler]
-                    / (NSEC_PER_SEC / duty_ns);
-               if (duty > period)
-                       return -EINVAL;
-       } else {
+       period >>= 2 * prescaler;
+
+       if (duty_ns)
+               duty = mul_u64_u64_div_u64(clk_rate, duty_ns,
+                                          (u64)NSEC_PER_SEC << (2 * prescaler));
+       else
                duty = 0;
-       }
 
        dev_dbg(&tpu->pdev->dev,
                "rate %u, prescaler %u, period %u, duty %u\n",
-               clk_rate, prescalers[prescaler], period, duty);
+               clk_rate, 1 << (2 * prescaler), (u32)period, duty);
 
-       if (pwm->prescaler == prescaler && pwm->period == period)
+       if (tpd->prescaler == prescaler && tpd->period == period)
                duty_only = true;
 
-       pwm->prescaler = prescaler;
-       pwm->period = period;
-       pwm->duty = duty;
+       tpd->prescaler = prescaler;
+       tpd->period = period;
+       tpd->duty = duty;
 
        /* If the channel is disabled we're done. */
-       if (!pwm_is_enabled(_pwm))
+       if (!enabled)
                return 0;
 
-       if (duty_only && pwm->timer_on) {
+       if (duty_only && tpd->timer_on) {
                /*
                 * If only the duty cycle changed and the timer is already
                 * running, there's no need to reconfigure it completely. Just
                 * modify the duty cycle.
                 */
-               tpu_pwm_write(pwm, TPU_TGRAn, pwm->duty);
-               dev_dbg(&tpu->pdev->dev, "%u: TGRA 0x%04x\n", pwm->channel,
-                       pwm->duty);
+               tpu_pwm_write(tpd, TPU_TGRAn, tpd->duty);
+               dev_dbg(&tpu->pdev->dev, "%u: TGRA 0x%04x\n", tpd->channel,
+                       tpd->duty);
        } else {
                /* Otherwise perform a full reconfiguration. */
-               ret = tpu_pwm_timer_start(pwm);
+               ret = tpu_pwm_timer_start(tpd);
                if (ret < 0)
                        return ret;
        }
@@ -317,29 +343,29 @@ static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *_pwm,
                 * To avoid running the timer when not strictly required, handle
                 * 0% and 100% duty cycles as fixed levels and stop the timer.
                 */
-               tpu_pwm_set_pin(pwm, duty ? TPU_PIN_ACTIVE : TPU_PIN_INACTIVE);
-               tpu_pwm_timer_stop(pwm);
+               tpu_pwm_set_pin(tpd, duty ? TPU_PIN_ACTIVE : TPU_PIN_INACTIVE);
+               tpu_pwm_timer_stop(tpd);
        }
 
        return 0;
 }
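
The calculation above first converts the requested period into clock cycles, then picks the smallest prescaler (divide by 1, 4, 16 or 64) that makes the value fit the 16-bit TGRB register, and finally scales the duty cycle the same way. A minimal standalone sketch of that arithmetic, not part of the driver; the 33 MHz clock is illustrative and mul_div() is a userspace stand-in for the kernel's mul_u64_u64_div_u64() (assumes a compiler providing unsigned __int128):

/* Standalone sketch of the TPU prescaler/period/duty arithmetic shown above. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Userspace stand-in for the kernel's mul_u64_u64_div_u64(). */
static uint64_t mul_div(uint64_t a, uint64_t b, uint64_t c)
{
        return (unsigned __int128)a * b / c;    /* GCC/Clang extension */
}

int main(void)
{
        uint64_t clk_rate = 33000000;   /* illustrative 33 MHz peripheral clock */
        uint64_t period_ns = 1000000;   /* 1 ms period request */
        uint64_t duty_ns = 250000;      /* 25% duty cycle */
        unsigned int prescaler;

        uint64_t period = mul_div(clk_rate, period_ns, NSEC_PER_SEC);

        /* Smallest prescaler in [0..3] with period >> (2 * prescaler) < 0x10000. */
        for (prescaler = 0; prescaler < 4; prescaler++)
                if ((period >> (2 * prescaler)) < 0x10000)
                        break;
        if (prescaler == 4 || period == 0)
                return 1;               /* out of range; the driver returns -EINVAL */

        period >>= 2 * prescaler;
        uint64_t duty = mul_div(clk_rate, duty_ns,
                                NSEC_PER_SEC << (2 * prescaler));

        printf("prescaler /%u, TGRB=0x%04llx, TGRA=0x%04llx\n",
               1u << (2 * prescaler), (unsigned long long)period,
               (unsigned long long)duty);
        return 0;
}
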
 
-static int tpu_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *_pwm,
+static int tpu_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
                                enum pwm_polarity polarity)
 {
-       struct tpu_pwm_device *pwm = pwm_get_chip_data(_pwm);
+       struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
 
-       pwm->polarity = polarity;
+       tpd->polarity = polarity;
 
        return 0;
 }
 
-static int tpu_pwm_enable(struct pwm_chip *chip, struct pwm_device *_pwm)
+static int tpu_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
-       struct tpu_pwm_device *pwm = pwm_get_chip_data(_pwm);
+       struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
        int ret;
 
-       ret = tpu_pwm_timer_start(pwm);
+       ret = tpu_pwm_timer_start(tpd);
        if (ret < 0)
                return ret;
 
@@ -347,32 +373,64 @@ static int tpu_pwm_enable(struct pwm_chip *chip, struct pwm_device *_pwm)
         * To avoid running the timer when not strictly required, handle 0% and
         * 100% duty cycles as fixed levels and stop the timer.
         */
-       if (pwm->duty == 0 || pwm->duty == pwm->period) {
-               tpu_pwm_set_pin(pwm, pwm->duty ?
+       if (tpd->duty == 0 || tpd->duty == tpd->period) {
+               tpu_pwm_set_pin(tpd, tpd->duty ?
                                TPU_PIN_ACTIVE : TPU_PIN_INACTIVE);
-               tpu_pwm_timer_stop(pwm);
+               tpu_pwm_timer_stop(tpd);
        }
 
        return 0;
 }
 
-static void tpu_pwm_disable(struct pwm_chip *chip, struct pwm_device *_pwm)
+static void tpu_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
-       struct tpu_pwm_device *pwm = pwm_get_chip_data(_pwm);
+       struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
 
        /* The timer must be running to modify the pin output configuration. */
-       tpu_pwm_timer_start(pwm);
-       tpu_pwm_set_pin(pwm, TPU_PIN_INACTIVE);
-       tpu_pwm_timer_stop(pwm);
+       tpu_pwm_timer_start(tpd);
+       tpu_pwm_set_pin(tpd, TPU_PIN_INACTIVE);
+       tpu_pwm_timer_stop(tpd);
+}
+
+static int tpu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                        const struct pwm_state *state)
+{
+       int err;
+       bool enabled = pwm->state.enabled;
+
+       if (state->polarity != pwm->state.polarity) {
+               if (enabled) {
+                       tpu_pwm_disable(chip, pwm);
+                       enabled = false;
+               }
+
+               err = tpu_pwm_set_polarity(chip, pwm, state->polarity);
+               if (err)
+                       return err;
+       }
+
+       if (!state->enabled) {
+               if (enabled)
+                       tpu_pwm_disable(chip, pwm);
+
+               return 0;
+       }
+
+       err = tpu_pwm_config(pwm->chip, pwm,
+                            state->duty_cycle, state->period, enabled);
+       if (err)
+               return err;
+
+       if (!enabled)
+               err = tpu_pwm_enable(chip, pwm);
+
+       return err;
 }
 
 static const struct pwm_ops tpu_pwm_ops = {
        .request = tpu_pwm_request,
        .free = tpu_pwm_free,
-       .config = tpu_pwm_config,
-       .set_polarity = tpu_pwm_set_polarity,
-       .enable = tpu_pwm_enable,
-       .disable = tpu_pwm_disable,
+       .apply = tpu_pwm_apply,
        .owner = THIS_MODULE,
 };
 
@@ -398,10 +456,8 @@ static int tpu_probe(struct platform_device *pdev)
                return PTR_ERR(tpu->base);
 
        tpu->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(tpu->clk)) {
-               dev_err(&pdev->dev, "cannot get clock\n");
-               return PTR_ERR(tpu->clk);
-       }
+       if (IS_ERR(tpu->clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(tpu->clk), "Failed to get clock\n");
 
        /* Initialize and register the device. */
        platform_set_drvdata(pdev, tpu);
@@ -410,25 +466,13 @@ static int tpu_probe(struct platform_device *pdev)
        tpu->chip.ops = &tpu_pwm_ops;
        tpu->chip.npwm = TPU_CHANNEL_MAX;
 
-       pm_runtime_enable(&pdev->dev);
-
-       ret = pwmchip_add(&tpu->chip);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "failed to register PWM chip\n");
-               pm_runtime_disable(&pdev->dev);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int tpu_remove(struct platform_device *pdev)
-{
-       struct tpu_device *tpu = platform_get_drvdata(pdev);
-
-       pwmchip_remove(&tpu->chip);
+       ret = devm_pm_runtime_enable(&pdev->dev);
+       if (ret < 0)
+               return dev_err_probe(&pdev->dev, ret, "Failed to enable runtime PM\n");
 
-       pm_runtime_disable(&pdev->dev);
+       ret = devm_pwmchip_add(&pdev->dev, &tpu->chip);
+       if (ret < 0)
+               return dev_err_probe(&pdev->dev, ret, "Failed to register PWM chip\n");
 
        return 0;
 }
@@ -447,7 +491,6 @@ MODULE_DEVICE_TABLE(of, tpu_of_table);
 
 static struct platform_driver tpu_driver = {
        .probe          = tpu_probe,
-       .remove         = tpu_remove,
        .driver         = {
                .name   = "renesas-tpu-pwm",
                .of_match_table = of_match_ptr(tpu_of_table),
index 0a4ff55..9c5b4f5 100644 (file)
@@ -321,14 +321,6 @@ static int __pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
        struct samsung_pwm_channel *chan = pwm_get_chip_data(pwm);
        u32 tin_ns = chan->tin_ns, tcnt, tcmp, oldtcmp;
 
-       /*
-        * We currently avoid using 64bit arithmetic by using the
-        * fact that anything faster than 1Hz is easily representable
-        * by 32bits.
-        */
-       if (period_ns > NSEC_PER_SEC)
-               return -ERANGE;
-
        tcnt = readl(our_chip->base + REG_TCNTB(pwm->hwpwm));
        oldtcmp = readl(our_chip->base + REG_TCMPB(pwm->hwpwm));
 
@@ -438,13 +430,51 @@ static int pwm_samsung_set_polarity(struct pwm_chip *chip,
        return 0;
 }
 
+static int pwm_samsung_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                            const struct pwm_state *state)
+{
+       int err, enabled = pwm->state.enabled;
+
+       if (state->polarity != pwm->state.polarity) {
+               if (enabled) {
+                       pwm_samsung_disable(chip, pwm);
+                       enabled = false;
+               }
+
+               err = pwm_samsung_set_polarity(chip, pwm, state->polarity);
+               if (err)
+                       return err;
+       }
+
+       if (!state->enabled) {
+               if (enabled)
+                       pwm_samsung_disable(chip, pwm);
+
+               return 0;
+       }
+
+       /*
+        * We currently avoid using 64bit arithmetic by using the
+        * fact that anything faster than 1Hz is easily representable
+        * by 32bits.
+        */
+       if (state->period > NSEC_PER_SEC)
+               return -ERANGE;
+
+       err = pwm_samsung_config(chip, pwm, state->duty_cycle, state->period);
+       if (err)
+               return err;
+
+       if (!pwm->state.enabled)
+               err = pwm_samsung_enable(chip, pwm);
+
+       return err;
+}
+
 static const struct pwm_ops pwm_samsung_ops = {
        .request        = pwm_samsung_request,
        .free           = pwm_samsung_free,
-       .enable         = pwm_samsung_enable,
-       .disable        = pwm_samsung_disable,
-       .config         = pwm_samsung_config,
-       .set_polarity   = pwm_samsung_set_polarity,
+       .apply          = pwm_samsung_apply,
        .owner          = THIS_MODULE,
 };
 
index 253c4a1..e6d05a3 100644 (file)
@@ -138,10 +138,9 @@ static int pwm_sifive_enable(struct pwm_chip *chip, bool enable)
                        dev_err(ddata->chip.dev, "Enable clk failed\n");
                        return ret;
                }
-       }
-
-       if (!enable)
+       } else {
                clk_disable(ddata->clk);
+       }
 
        return 0;
 }
index f491d56..44b1f93 100644 (file)
@@ -391,11 +391,34 @@ out:
        return ret;
 }
 
+static int sti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                        const struct pwm_state *state)
+{
+       int err;
+
+       if (state->polarity != PWM_POLARITY_NORMAL)
+               return -EINVAL;
+
+       if (!state->enabled) {
+               if (pwm->state.enabled)
+                       sti_pwm_disable(chip, pwm);
+
+               return 0;
+       }
+
+       err = sti_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       if (err)
+               return err;
+
+       if (!pwm->state.enabled)
+               err = sti_pwm_enable(chip, pwm);
+
+       return err;
+}
+
 static const struct pwm_ops sti_pwm_ops = {
        .capture = sti_pwm_capture,
-       .config = sti_pwm_config,
-       .enable = sti_pwm_enable,
-       .disable = sti_pwm_disable,
+       .apply = sti_pwm_apply,
        .free = sti_pwm_free,
        .owner = THIS_MODULE,
 };
index c4336d3..5d4a476 100644 (file)
@@ -259,10 +259,33 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        return 0;
 }
 
+static int stmpe_24xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                               const struct pwm_state *state)
+{
+       int err;
+
+       if (state->polarity != PWM_POLARITY_NORMAL)
+               return -EINVAL;
+
+       if (!state->enabled) {
+               if (pwm->state.enabled)
+                       stmpe_24xx_pwm_disable(chip, pwm);
+
+               return 0;
+       }
+
+       err = stmpe_24xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       if (err)
+               return err;
+
+       if (!pwm->state.enabled)
+               err = stmpe_24xx_pwm_enable(chip, pwm);
+
+       return err;
+}
+
 static const struct pwm_ops stmpe_24xx_pwm_ops = {
-       .config = stmpe_24xx_pwm_config,
-       .enable = stmpe_24xx_pwm_enable,
-       .disable = stmpe_24xx_pwm_disable,
+       .apply = stmpe_24xx_pwm_apply,
        .owner = THIS_MODULE,
 };
 
index 16d75f9..c8445b0 100644 (file)
@@ -89,7 +89,6 @@ struct sun4i_pwm_chip {
        void __iomem *base;
        spinlock_t ctrl_lock;
        const struct sun4i_pwm_data *data;
-       unsigned long next_period[2];
 };
 
 static inline struct sun4i_pwm_chip *to_sun4i_pwm_chip(struct pwm_chip *chip)
@@ -236,7 +235,6 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
        u32 ctrl, duty = 0, period = 0, val;
        int ret;
        unsigned int delay_us, prescaler = 0;
-       unsigned long now;
        bool bypass;
 
        pwm_get_state(pwm, &cstate);
@@ -284,8 +282,6 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
        val = (duty & PWM_DTY_MASK) | PWM_PRD(period);
        sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
-       sun4i_pwm->next_period[pwm->hwpwm] = jiffies +
-               nsecs_to_jiffies(cstate.period + 1000);
 
        if (state->polarity != PWM_POLARITY_NORMAL)
                ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
@@ -305,15 +301,11 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
 
        /* We need a full period to elapse before disabling the channel. */
-       now = jiffies;
-       if (time_before(now, sun4i_pwm->next_period[pwm->hwpwm])) {
-               delay_us = jiffies_to_usecs(sun4i_pwm->next_period[pwm->hwpwm] -
-                                          now);
-               if ((delay_us / 500) > MAX_UDELAY_MS)
-                       msleep(delay_us / 1000 + 1);
-               else
-                       usleep_range(delay_us, delay_us * 2);
-       }
+       delay_us = DIV_ROUND_UP_ULL(cstate.period, NSEC_PER_USEC);
+       if ((delay_us / 500) > MAX_UDELAY_MS)
+               msleep(delay_us / 1000 + 1);
+       else
+               usleep_range(delay_us, delay_us * 2);
 
        spin_lock(&sun4i_pwm->ctrl_lock);
        ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
diff --git a/drivers/pwm/pwm-sunplus.c b/drivers/pwm/pwm-sunplus.c
new file mode 100644 (file)
index 0000000..e776fd1
--- /dev/null
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PWM device driver for SUNPLUS SP7021 SoC
+ *
+ * Links:
+ *   Reference Manual:
+ *   https://sunplus-tibbo.atlassian.net/wiki/spaces/doc/overview
+ *
+ *   Reference Manual(PWM module):
+ *   https://sunplus.atlassian.net/wiki/spaces/doc/pages/461144198/12.+Pulse+Width+Modulation+PWM
+ *
+ * Limitations:
+ * - Only supports normal polarity.
+ * - The output is low while the PWM channel is disabled.
+ * - When the parameters change, the currently running period is not completed;
+ *     the new settings take effect immediately.
+ * - In .apply() both the FREQ and DUTY registers must be written. Between the
+ *     FREQ write and the DUTY write there is a short window in which the new
+ *     FREQ is paired with the old DUTY.
+ *
+ * Author: Hammer Hsieh <hammerh0314@gmail.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+#define SP7021_PWM_MODE0               0x000
+#define SP7021_PWM_MODE0_PWMEN(ch)     BIT(ch)
+#define SP7021_PWM_MODE0_BYPASS(ch)    BIT(8 + (ch))
+#define SP7021_PWM_MODE1               0x004
+#define SP7021_PWM_MODE1_CNT_EN(ch)    BIT(ch)
+#define SP7021_PWM_FREQ(ch)            (0x008 + 4 * (ch))
+#define SP7021_PWM_FREQ_MAX            GENMASK(15, 0)
+#define SP7021_PWM_DUTY(ch)            (0x018 + 4 * (ch))
+#define SP7021_PWM_DUTY_DD_SEL(ch)     FIELD_PREP(GENMASK(9, 8), ch)
+#define SP7021_PWM_DUTY_MAX            GENMASK(7, 0)
+#define SP7021_PWM_DUTY_MASK           SP7021_PWM_DUTY_MAX
+#define SP7021_PWM_FREQ_SCALER         256
+#define SP7021_PWM_NUM                 4
+
+struct sunplus_pwm {
+       struct pwm_chip chip;
+       void __iomem *base;
+       struct clk *clk;
+};
+
+static inline struct sunplus_pwm *to_sunplus_pwm(struct pwm_chip *chip)
+{
+       return container_of(chip, struct sunplus_pwm, chip);
+}
+
+static int sunplus_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                            const struct pwm_state *state)
+{
+       struct sunplus_pwm *priv = to_sunplus_pwm(chip);
+       u32 dd_freq, duty, mode0, mode1;
+       u64 clk_rate;
+
+       if (state->polarity != pwm->state.polarity)
+               return -EINVAL;
+
+       if (!state->enabled) {
+               /* disable pwm channel output */
+               mode0 = readl(priv->base + SP7021_PWM_MODE0);
+               mode0 &= ~SP7021_PWM_MODE0_PWMEN(pwm->hwpwm);
+               writel(mode0, priv->base + SP7021_PWM_MODE0);
+               /* disable pwm channel clk source */
+               mode1 = readl(priv->base + SP7021_PWM_MODE1);
+               mode1 &= ~SP7021_PWM_MODE1_CNT_EN(pwm->hwpwm);
+               writel(mode1, priv->base + SP7021_PWM_MODE1);
+               return 0;
+       }
+
+       clk_rate = clk_get_rate(priv->clk);
+
+       /*
+        * The following calculations might overflow if clk is bigger
+        * than 256 GHz. In practice it's 202.5 MHz, so this limitation
+        * is only theoretical.
+        */
+       if (clk_rate > (u64)SP7021_PWM_FREQ_SCALER * NSEC_PER_SEC)
+               return -EINVAL;
+
+       /*
+        * With clk_rate limited above we have dd_freq <= state->period,
+        * so this cannot overflow.
+        */
+       dd_freq = mul_u64_u64_div_u64(clk_rate, state->period, (u64)SP7021_PWM_FREQ_SCALER
+                               * NSEC_PER_SEC);
+
+       if (dd_freq == 0)
+               return -EINVAL;
+
+       if (dd_freq > SP7021_PWM_FREQ_MAX)
+               dd_freq = SP7021_PWM_FREQ_MAX;
+
+       writel(dd_freq, priv->base + SP7021_PWM_FREQ(pwm->hwpwm));
+
+       /* calculate and set the PWM duty cycle */
+       mode0 = readl(priv->base + SP7021_PWM_MODE0);
+       mode0 |= SP7021_PWM_MODE0_PWMEN(pwm->hwpwm);
+       mode1 = readl(priv->base + SP7021_PWM_MODE1);
+       mode1 |= SP7021_PWM_MODE1_CNT_EN(pwm->hwpwm);
+       if (state->duty_cycle == state->period) {
+               /* PWM channel output = high */
+               mode0 |= SP7021_PWM_MODE0_BYPASS(pwm->hwpwm);
+               duty = SP7021_PWM_DUTY_DD_SEL(pwm->hwpwm) | SP7021_PWM_DUTY_MAX;
+       } else {
+               mode0 &= ~SP7021_PWM_MODE0_BYPASS(pwm->hwpwm);
+               /*
+                * duty_ns <= period_ns (27 bits), clk_rate (28 bits): won't overflow.
+                */
+               duty = mul_u64_u64_div_u64(state->duty_cycle, clk_rate,
+                                          (u64)dd_freq * NSEC_PER_SEC);
+               duty = SP7021_PWM_DUTY_DD_SEL(pwm->hwpwm) | duty;
+       }
+       writel(duty, priv->base + SP7021_PWM_DUTY(pwm->hwpwm));
+       writel(mode1, priv->base + SP7021_PWM_MODE1);
+       writel(mode0, priv->base + SP7021_PWM_MODE0);
+
+       return 0;
+}
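
In the conversion above, FREQ holds the requested period measured in units of 256 source-clock cycles and DUTY holds the high time in 1/256 steps of that period. A standalone sketch of the register arithmetic, not part of the driver, using the 202.5 MHz clock mentioned in the comment and a userspace stand-in for mul_u64_u64_div_u64():

/* Sketch of the SP7021 FREQ/DUTY register arithmetic from sunplus_pwm_apply(). */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC   1000000000ULL
#define FREQ_SCALER    256ULL
#define FREQ_MAX       0xffffULL

static uint64_t mul_div(uint64_t a, uint64_t b, uint64_t c)
{
        return (unsigned __int128)a * b / c;    /* stand-in for mul_u64_u64_div_u64() */
}

int main(void)
{
        uint64_t clk_rate = 202500000;  /* 202.5 MHz, per the driver comment */
        uint64_t period_ns = 1000000;   /* 1 ms */
        uint64_t duty_ns = 500000;      /* 50% */

        uint64_t dd_freq = mul_div(clk_rate, period_ns, FREQ_SCALER * NSEC_PER_SEC);
        if (dd_freq == 0)
                return 1;               /* period too short: the driver returns -EINVAL */
        if (dd_freq > FREQ_MAX)
                dd_freq = FREQ_MAX;

        /* DUTY counts the high time in 1/256 steps of the FREQ period. */
        uint64_t duty = mul_div(duty_ns, clk_rate, dd_freq * NSEC_PER_SEC);

        printf("FREQ=%llu DUTY=%llu (of %llu)\n",
               (unsigned long long)dd_freq, (unsigned long long)duty, FREQ_SCALER);
        return 0;
}
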
+
+static void sunplus_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+                                 struct pwm_state *state)
+{
+       struct sunplus_pwm *priv = to_sunplus_pwm(chip);
+       u32 mode0, dd_freq, duty;
+       u64 clk_rate;
+
+       mode0 = readl(priv->base + SP7021_PWM_MODE0);
+
+       if (mode0 & BIT(pwm->hwpwm)) {
+               clk_rate = clk_get_rate(priv->clk);
+               dd_freq = readl(priv->base + SP7021_PWM_FREQ(pwm->hwpwm));
+               duty = readl(priv->base + SP7021_PWM_DUTY(pwm->hwpwm));
+               duty = FIELD_GET(SP7021_PWM_DUTY_MASK, duty);
+               /*
+                * dd_freq 16 bits, SP7021_PWM_FREQ_SCALER 8 bits
+                * NSEC_PER_SEC 30 bits, won't overflow.
+                */
+               state->period = DIV64_U64_ROUND_UP((u64)dd_freq * (u64)SP7021_PWM_FREQ_SCALER
+                                               * NSEC_PER_SEC, clk_rate);
+               /*
+                * dd_freq 16 bits, duty 8 bits, NSEC_PER_SEC 30 bits, won't overflow.
+                */
+               state->duty_cycle = DIV64_U64_ROUND_UP((u64)dd_freq * (u64)duty * NSEC_PER_SEC,
+                                                      clk_rate);
+               state->enabled = true;
+       } else {
+               state->enabled = false;
+       }
+
+       state->polarity = PWM_POLARITY_NORMAL;
+}
+
+static const struct pwm_ops sunplus_pwm_ops = {
+       .apply = sunplus_pwm_apply,
+       .get_state = sunplus_pwm_get_state,
+       .owner = THIS_MODULE,
+};
+
+static void sunplus_pwm_clk_release(void *data)
+{
+       struct clk *clk = data;
+
+       clk_disable_unprepare(clk);
+}
+
+static int sunplus_pwm_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct sunplus_pwm *priv;
+       int ret;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(priv->base))
+               return PTR_ERR(priv->base);
+
+       priv->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(priv->clk))
+               return dev_err_probe(dev, PTR_ERR(priv->clk),
+                                    "get pwm clock failed\n");
+
+       ret = clk_prepare_enable(priv->clk);
+       if (ret < 0) {
+               dev_err(dev, "failed to enable clock: %d\n", ret);
+               return ret;
+       }
+
+       ret = devm_add_action_or_reset(dev, sunplus_pwm_clk_release, priv->clk);
+       if (ret < 0) {
+               dev_err(dev, "failed to release clock: %d\n", ret);
+               return ret;
+       }
+
+       priv->chip.dev = dev;
+       priv->chip.ops = &sunplus_pwm_ops;
+       priv->chip.npwm = SP7021_PWM_NUM;
+
+       ret = devm_pwmchip_add(dev, &priv->chip);
+       if (ret < 0)
+               return dev_err_probe(dev, ret, "Cannot register sunplus PWM\n");
+
+       return 0;
+}
+
+static const struct of_device_id sunplus_pwm_of_match[] = {
+       { .compatible = "sunplus,sp7021-pwm", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, sunplus_pwm_of_match);
+
+static struct platform_driver sunplus_pwm_driver = {
+       .probe          = sunplus_pwm_probe,
+       .driver         = {
+               .name   = "sunplus-pwm",
+               .of_match_table = sunplus_pwm_of_match,
+       },
+};
+module_platform_driver(sunplus_pwm_driver);
+
+MODULE_DESCRIPTION("Sunplus SoC PWM Driver");
+MODULE_AUTHOR("Hammer Hsieh <hammerh0314@gmail.com>");
+MODULE_LICENSE("GPL");
index e5a9ffe..dad9978 100644 (file)
@@ -99,7 +99,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
                            int duty_ns, int period_ns)
 {
        struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
-       unsigned long long c = duty_ns, hz;
+       unsigned long long c = duty_ns;
        unsigned long rate, required_clk_rate;
        u32 val = 0;
        int err;
@@ -156,11 +156,9 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
                pc->clk_rate = clk_get_rate(pc->clk);
        }
 
-       rate = pc->clk_rate >> PWM_DUTY_WIDTH;
-
        /* Consider precision in PWM_SCALE_WIDTH rate calculation */
-       hz = DIV_ROUND_CLOSEST_ULL(100ULL * NSEC_PER_SEC, period_ns);
-       rate = DIV_ROUND_CLOSEST_ULL(100ULL * rate, hz);
+       rate = mul_u64_u64_div_u64(pc->clk_rate, period_ns,
+                                  (u64)NSEC_PER_SEC << PWM_DUTY_WIDTH);
 
        /*
         * Since the actual PWM divider is the register's frequency divider
@@ -169,6 +167,8 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
         */
        if (rate > 0)
                rate--;
+       else
+               return -EINVAL;
 
        /*
         * Make sure that the rate will fit in the register's frequency
@@ -230,10 +230,34 @@ static void tegra_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
        pm_runtime_put_sync(pc->dev);
 }
 
+static int tegra_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                          const struct pwm_state *state)
+{
+       int err;
+       bool enabled = pwm->state.enabled;
+
+       if (state->polarity != PWM_POLARITY_NORMAL)
+               return -EINVAL;
+
+       if (!state->enabled) {
+               if (enabled)
+                       tegra_pwm_disable(chip, pwm);
+
+               return 0;
+       }
+
+       err = tegra_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       if (err)
+               return err;
+
+       if (!enabled)
+               err = tegra_pwm_enable(chip, pwm);
+
+       return err;
+}
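
The reworked calculation above derives the frequency-divider field in a single multiply-divide instead of rounding twice through an intermediate Hz value. A standalone sketch of that computation, not part of the driver; the 48 MHz clock is illustrative and PWM_DUTY_WIDTH is assumed here to be 8 (a 256-step duty resolution):

/* Sketch of the single-step Tegra PWM frequency-divider calculation. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC    1000000000ULL
#define PWM_DUTY_WIDTH  8       /* assumed value; the driver defines the real one */

static uint64_t mul_div(uint64_t a, uint64_t b, uint64_t c)
{
        return (unsigned __int128)a * b / c;    /* stand-in for mul_u64_u64_div_u64() */
}

int main(void)
{
        uint64_t clk_rate = 48000000;   /* illustrative 48 MHz PWM source clock */
        uint64_t period_ns = 1000000;   /* 1 ms */

        /* divider = clk_rate * period_ns / (NSEC_PER_SEC << PWM_DUTY_WIDTH) */
        uint64_t rate = mul_div(clk_rate, period_ns,
                                NSEC_PER_SEC << PWM_DUTY_WIDTH);
        if (rate == 0)
                return 1;       /* period shorter than one duty step: -EINVAL */
        rate--;                 /* the register field holds the divider minus one */

        printf("frequency divider field = %llu\n", (unsigned long long)rate);
        return 0;
}
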
+
 static const struct pwm_ops tegra_pwm_ops = {
-       .config = tegra_pwm_config,
-       .enable = tegra_pwm_enable,
-       .disable = tegra_pwm_disable,
+       .apply = tegra_pwm_apply,
        .owner = THIS_MODULE,
 };
 
index 49d9f7a..ed0b63d 100644 (file)
@@ -137,6 +137,45 @@ out:
        mutex_unlock(&twl->mutex);
 }
 
+static int twl4030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                               const struct pwm_state *state)
+{
+       int ret;
+
+       if (state->polarity != PWM_POLARITY_NORMAL)
+               return -EINVAL;
+
+       if (!state->enabled) {
+               if (pwm->state.enabled)
+                       twl4030_pwmled_disable(chip, pwm);
+
+               return 0;
+       }
+
+       /*
+        * We cannot skip calling ->config even if state->period ==
+        * pwm->state.period && state->duty_cycle == pwm->state.duty_cycle
+        * because we might have exited early in the last call to
+        * pwm_apply_state because of !state->enabled and so the two values in
+        * pwm->state might not be configured in hardware.
+        */
+       ret = twl4030_pwmled_config(pwm->chip, pwm,
+                                   state->duty_cycle, state->period);
+       if (ret)
+               return ret;
+
+       if (!pwm->state.enabled)
+               ret = twl4030_pwmled_enable(chip, pwm);
+
+       return ret;
+}
+
+
+static const struct pwm_ops twl4030_pwmled_ops = {
+       .apply = twl4030_pwmled_apply,
+       .owner = THIS_MODULE,
+};
+
 static int twl6030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
                              int duty_ns, int period_ns)
 {
@@ -206,6 +245,32 @@ out:
        mutex_unlock(&twl->mutex);
 }
 
+static int twl6030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                               const struct pwm_state *state)
+{
+       int err;
+
+       if (state->polarity != pwm->state.polarity)
+               return -EINVAL;
+
+       if (!state->enabled) {
+               if (pwm->state.enabled)
+                       twl6030_pwmled_disable(chip, pwm);
+
+               return 0;
+       }
+
+       err = twl6030_pwmled_config(pwm->chip, pwm,
+                                   state->duty_cycle, state->period);
+       if (err)
+               return err;
+
+       if (!pwm->state.enabled)
+               err = twl6030_pwmled_enable(chip, pwm);
+
+       return err;
+}
+
 static int twl6030_pwmled_request(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct twl_pwmled_chip *twl = to_twl(chip);
@@ -257,17 +322,8 @@ out:
        mutex_unlock(&twl->mutex);
 }
 
-static const struct pwm_ops twl4030_pwmled_ops = {
-       .enable = twl4030_pwmled_enable,
-       .disable = twl4030_pwmled_disable,
-       .config = twl4030_pwmled_config,
-       .owner = THIS_MODULE,
-};
-
 static const struct pwm_ops twl6030_pwmled_ops = {
-       .enable = twl6030_pwmled_enable,
-       .disable = twl6030_pwmled_disable,
-       .config = twl6030_pwmled_config,
+       .apply = twl6030_pwmled_apply,
        .request = twl6030_pwmled_request,
        .free = twl6030_pwmled_free,
        .owner = THIS_MODULE,
diff --git a/drivers/pwm/pwm-xilinx.c b/drivers/pwm/pwm-xilinx.c
new file mode 100644 (file)
index 0000000..4dab2b8
--- /dev/null
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2021 Sean Anderson <sean.anderson@seco.com>
+ *
+ * Limitations:
+ * - When changing both duty cycle and period, we may end up with one cycle
+ *   with the old duty cycle and the new period. This is because the counters
+ *   may only be reloaded by first stopping them, or by letting them be
+ *   automatically reloaded at the end of a cycle. If this automatic reload
+ *   happens after we set TLR0 but before we set TLR1 then we will have a
+ *   bad cycle. This could probably be fixed by reading TCR0 just before
+ *   reprogramming, but I think it would add complexity for little gain.
+ * - Cannot produce 100% duty cycle by configuring the TLRs. This might be
+ *   possible by stopping the counters at an appropriate point in the cycle,
+ *   but this is not (yet) implemented.
+ * - Only produces "normal" output.
+ * - Always produces low output if disabled.
+ */
+
+#include <clocksource/timer-xilinx.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+
+/*
+ * The following functions are "common" to drivers for this device, and may be
+ * exported at a future date.
+ */
+u32 xilinx_timer_tlr_cycles(struct xilinx_timer_priv *priv, u32 tcsr,
+                           u64 cycles)
+{
+       WARN_ON(cycles < 2 || cycles - 2 > priv->max);
+
+       if (tcsr & TCSR_UDT)
+               return cycles - 2;
+       return priv->max - cycles + 2;
+}
+
+unsigned int xilinx_timer_get_period(struct xilinx_timer_priv *priv,
+                                    u32 tlr, u32 tcsr)
+{
+       u64 cycles;
+
+       if (tcsr & TCSR_UDT)
+               cycles = tlr + 2;
+       else
+               cycles = (u64)priv->max - tlr + 2;
+
+       /* cycles has a max of 2^32 + 2, so we can't overflow */
+       return DIV64_U64_ROUND_UP(cycles * NSEC_PER_SEC,
+                                 clk_get_rate(priv->clk));
+}
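
The two helpers above convert between TLR register values and periods, with the count direction selected by TCSR_UDT and the TLR value offset by two cycles from the real period. A tiny sketch of the round trip for a down-counter, not part of the driver, assuming an illustrative 100 MHz s_axi_aclk:

/* Sketch of the TLR <-> cycles <-> nanoseconds round trip for a down-counter. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        uint64_t rate = 100000000;      /* assumed 100 MHz s_axi_aclk */
        uint64_t cycles = 100000;       /* 1 ms worth of cycles */

        /* Down-counting (TCSR_UDT set): TLR = cycles - 2. */
        uint32_t tlr = cycles - 2;

        /* And back: period_ns = ceil((tlr + 2) * NSEC_PER_SEC / rate). */
        uint64_t period_ns = ((uint64_t)tlr + 2) * NSEC_PER_SEC;
        period_ns = (period_ns + rate - 1) / rate;

        printf("TLR=%u -> period=%llu ns\n", tlr, (unsigned long long)period_ns);
        return 0;
}
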
+
+/*
+ * The idea here is to capture whether the PWM is actually running (e.g.
+ * because we or the bootloader set it up) and we need to be careful to ensure
+ * we don't cause a glitch. According to the data sheet, to enable the PWM we
+ * need to
+ *
+ * - Set both timers to generate mode (MDT=1)
+ * - Set both timers to PWM mode (PWMA=1)
+ * - Enable the generate out signals (GENT=1)
+ *
+ * In addition,
+ *
+ * - The timer must be running (ENT=1)
+ * - The timer must auto-reload TLR into TCR (ARHT=1)
+ * - We must not be in the process of loading TLR into TCR (LOAD=0)
+ * - Cascade mode must be disabled (CASC=0)
+ *
+ * If any of these differ from usual, then the PWM is either disabled, or is
+ * running in a mode that this driver does not support.
+ */
+#define TCSR_PWM_SET (TCSR_GENT | TCSR_ARHT | TCSR_ENT | TCSR_PWMA)
+#define TCSR_PWM_CLEAR (TCSR_MDT | TCSR_LOAD)
+#define TCSR_PWM_MASK (TCSR_PWM_SET | TCSR_PWM_CLEAR)
+
+struct xilinx_pwm_device {
+       struct pwm_chip chip;
+       struct xilinx_timer_priv priv;
+};
+
+static inline struct xilinx_timer_priv
+*xilinx_pwm_chip_to_priv(struct pwm_chip *chip)
+{
+       return &container_of(chip, struct xilinx_pwm_device, chip)->priv;
+}
+
+static bool xilinx_timer_pwm_enabled(u32 tcsr0, u32 tcsr1)
+{
+       return ((TCSR_PWM_MASK | TCSR_CASC) & tcsr0) == TCSR_PWM_SET &&
+               (TCSR_PWM_MASK & tcsr1) == TCSR_PWM_SET;
+}
+
+static int xilinx_pwm_apply(struct pwm_chip *chip, struct pwm_device *unused,
+                           const struct pwm_state *state)
+{
+       struct xilinx_timer_priv *priv = xilinx_pwm_chip_to_priv(chip);
+       u32 tlr0, tlr1, tcsr0, tcsr1;
+       u64 period_cycles, duty_cycles;
+       unsigned long rate;
+
+       if (state->polarity != PWM_POLARITY_NORMAL)
+               return -EINVAL;
+
+       /*
+        * To be representable by TLR, cycles must be between 2 and
+        * priv->max + 2. To enforce this we can reduce the cycles, but we may
+        * not increase them. Caveat emptor: while this does result in more
+        * predictable rounding, it may also result in a completely different
+        * duty cycle (% high time) than what was requested.
+        */
+       rate = clk_get_rate(priv->clk);
+       /* Avoid overflow */
+       period_cycles = min_t(u64, state->period, U32_MAX * NSEC_PER_SEC);
+       period_cycles = mul_u64_u32_div(period_cycles, rate, NSEC_PER_SEC);
+       period_cycles = min_t(u64, period_cycles, priv->max + 2);
+       if (period_cycles < 2)
+               return -ERANGE;
+
+       /* Same thing for duty cycles */
+       duty_cycles = min_t(u64, state->duty_cycle, U32_MAX * NSEC_PER_SEC);
+       duty_cycles = mul_u64_u32_div(duty_cycles, rate, NSEC_PER_SEC);
+       duty_cycles = min_t(u64, duty_cycles, priv->max + 2);
+
+       /*
+        * If we specify 100% duty cycle, we will get 0% instead, so decrease
+        * the duty cycle count by one.
+        */
+       if (duty_cycles >= period_cycles)
+               duty_cycles = period_cycles - 1;
+
+       /* Round down to 0% duty cycle for unrepresentable duty cycles */
+       if (duty_cycles < 2)
+               duty_cycles = period_cycles;
+
+       regmap_read(priv->map, TCSR0, &tcsr0);
+       regmap_read(priv->map, TCSR1, &tcsr1);
+       tlr0 = xilinx_timer_tlr_cycles(priv, tcsr0, period_cycles);
+       tlr1 = xilinx_timer_tlr_cycles(priv, tcsr1, duty_cycles);
+       regmap_write(priv->map, TLR0, tlr0);
+       regmap_write(priv->map, TLR1, tlr1);
+
+       if (state->enabled) {
+               /*
+                * If the PWM is already running, then the counters will be
+                * reloaded at the end of the current cycle.
+                */
+               if (!xilinx_timer_pwm_enabled(tcsr0, tcsr1)) {
+                       /* Load TLR into TCR */
+                       regmap_write(priv->map, TCSR0, tcsr0 | TCSR_LOAD);
+                       regmap_write(priv->map, TCSR1, tcsr1 | TCSR_LOAD);
+                       /* Enable timers all at once with ENALL */
+                       tcsr0 = (TCSR_PWM_SET & ~TCSR_ENT) | (tcsr0 & TCSR_UDT);
+                       tcsr1 = TCSR_PWM_SET | TCSR_ENALL | (tcsr1 & TCSR_UDT);
+                       regmap_write(priv->map, TCSR0, tcsr0);
+                       regmap_write(priv->map, TCSR1, tcsr1);
+               }
+       } else {
+               regmap_write(priv->map, TCSR0, 0);
+               regmap_write(priv->map, TCSR1, 0);
+       }
+
+       return 0;
+}
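
The clamping above keeps both cycle counts inside the representable range [2, priv->max + 2] and then folds the special cases (100% duty, or a duty too small to represent) onto values the hardware can produce. A compact sketch of the nanoseconds-to-cycles conversion, not part of the driver, assuming a 32-bit counter and a 100 MHz clock:

/* Sketch of the AXI Timer PWM cycle clamping from xilinx_pwm_apply(). */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t ns_to_cycles(uint64_t ns, uint64_t rate, uint64_t max)
{
        if (ns > UINT32_MAX * NSEC_PER_SEC)             /* avoid overflow */
                ns = UINT32_MAX * NSEC_PER_SEC;
        uint64_t cycles = (unsigned __int128)ns * rate / NSEC_PER_SEC;
        return cycles > max + 2 ? max + 2 : cycles;     /* reduce, never increase */
}

int main(void)
{
        uint64_t rate = 100000000;      /* illustrative 100 MHz clock */
        uint64_t max = 0xffffffffULL;   /* 32-bit counter */

        uint64_t period = ns_to_cycles(1000000, rate, max);    /* 1 ms */
        uint64_t duty = ns_to_cycles(1000000, rate, max);      /* request 100% */

        if (period < 2)
                return 1;               /* -ERANGE in the driver */
        if (duty >= period)
                duty = period - 1;      /* 100% would otherwise read back as 0% */
        if (duty < 2)
                duty = period;          /* unrepresentable: force 0% output */

        printf("period=%llu cycles, duty=%llu cycles\n",
               (unsigned long long)period, (unsigned long long)duty);
        return 0;
}
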
+
+static void xilinx_pwm_get_state(struct pwm_chip *chip,
+                                struct pwm_device *unused,
+                                struct pwm_state *state)
+{
+       struct xilinx_timer_priv *priv = xilinx_pwm_chip_to_priv(chip);
+       u32 tlr0, tlr1, tcsr0, tcsr1;
+
+       regmap_read(priv->map, TLR0, &tlr0);
+       regmap_read(priv->map, TLR1, &tlr1);
+       regmap_read(priv->map, TCSR0, &tcsr0);
+       regmap_read(priv->map, TCSR1, &tcsr1);
+       state->period = xilinx_timer_get_period(priv, tlr0, tcsr0);
+       state->duty_cycle = xilinx_timer_get_period(priv, tlr1, tcsr1);
+       state->enabled = xilinx_timer_pwm_enabled(tcsr0, tcsr1);
+       state->polarity = PWM_POLARITY_NORMAL;
+
+       /*
+        * 100% duty cycle results in constant low output. This may be (very)
+        * wrong if rate > 1 GHz, so fix this if you have such hardware :)
+        */
+       if (state->period == state->duty_cycle)
+               state->duty_cycle = 0;
+}
+
+static const struct pwm_ops xilinx_pwm_ops = {
+       .apply = xilinx_pwm_apply,
+       .get_state = xilinx_pwm_get_state,
+       .owner = THIS_MODULE,
+};
+
+static const struct regmap_config xilinx_pwm_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+       .val_format_endian = REGMAP_ENDIAN_LITTLE,
+       .max_register = TCR1,
+};
+
+static int xilinx_pwm_probe(struct platform_device *pdev)
+{
+       int ret;
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct xilinx_timer_priv *priv;
+       struct xilinx_pwm_device *xilinx_pwm;
+       u32 pwm_cells, one_timer, width;
+       void __iomem *regs;
+
+       /* If there are no PWM cells, this binding is for a timer */
+       ret = of_property_read_u32(np, "#pwm-cells", &pwm_cells);
+       if (ret == -EINVAL)
+               return -ENODEV;
+       if (ret)
+               return dev_err_probe(dev, ret, "could not read #pwm-cells\n");
+
+       xilinx_pwm = devm_kzalloc(dev, sizeof(*xilinx_pwm), GFP_KERNEL);
+       if (!xilinx_pwm)
+               return -ENOMEM;
+       platform_set_drvdata(pdev, xilinx_pwm);
+       priv = &xilinx_pwm->priv;
+
+       regs = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(regs))
+               return PTR_ERR(regs);
+
+       priv->map = devm_regmap_init_mmio(dev, regs,
+                                         &xilinx_pwm_regmap_config);
+       if (IS_ERR(priv->map))
+               return dev_err_probe(dev, PTR_ERR(priv->map),
+                                    "Could not create regmap\n");
+
+       ret = of_property_read_u32(np, "xlnx,one-timer-only", &one_timer);
+       if (ret)
+               return dev_err_probe(dev, ret,
+                                    "Could not read xlnx,one-timer-only\n");
+
+       if (one_timer)
+               return dev_err_probe(dev, -EINVAL,
+                                    "Two timers required for PWM mode\n");
+
+       ret = of_property_read_u32(np, "xlnx,count-width", &width);
+       if (ret == -EINVAL)
+               width = 32;
+       else if (ret)
+               return dev_err_probe(dev, ret,
+                                    "Could not read xlnx,count-width\n");
+
+       if (width != 8 && width != 16 && width != 32)
+               return dev_err_probe(dev, -EINVAL,
+                                    "Invalid counter width %d\n", width);
+       priv->max = BIT_ULL(width) - 1;
+
+       /*
+        * The polarity of the Generate Out signals must be active high for PWM
+        * mode to work. We could determine this from the device tree, but
+        * alas, such properties are not allowed to be used.
+        */
+
+       priv->clk = devm_clk_get(dev, "s_axi_aclk");
+       if (IS_ERR(priv->clk))
+               return dev_err_probe(dev, PTR_ERR(priv->clk),
+                                    "Could not get clock\n");
+
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return dev_err_probe(dev, ret, "Clock enable failed\n");
+       clk_rate_exclusive_get(priv->clk);
+
+       xilinx_pwm->chip.dev = dev;
+       xilinx_pwm->chip.ops = &xilinx_pwm_ops;
+       xilinx_pwm->chip.npwm = 1;
+       ret = pwmchip_add(&xilinx_pwm->chip);
+       if (ret) {
+               clk_rate_exclusive_put(priv->clk);
+               clk_disable_unprepare(priv->clk);
+               return dev_err_probe(dev, ret, "Could not register PWM chip\n");
+       }
+
+       return 0;
+}
+
+static int xilinx_pwm_remove(struct platform_device *pdev)
+{
+       struct xilinx_pwm_device *xilinx_pwm = platform_get_drvdata(pdev);
+
+       pwmchip_remove(&xilinx_pwm->chip);
+       clk_rate_exclusive_put(xilinx_pwm->priv.clk);
+       clk_disable_unprepare(xilinx_pwm->priv.clk);
+       return 0;
+}
+
+static const struct of_device_id xilinx_pwm_of_match[] = {
+       { .compatible = "xlnx,xps-timer-1.00.a", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, xilinx_pwm_of_match);
+
+static struct platform_driver xilinx_pwm_driver = {
+       .probe = xilinx_pwm_probe,
+       .remove = xilinx_pwm_remove,
+       .driver = {
+               .name = "xilinx-pwm",
+               .of_match_table = of_match_ptr(xilinx_pwm_of_match),
+       },
+};
+module_platform_driver(xilinx_pwm_driver);
+
+MODULE_ALIAS("platform:xilinx-pwm");
+MODULE_DESCRIPTION("PWM driver for Xilinx LogiCORE IP AXI Timer");
+MODULE_LICENSE("GPL");
index aa55cfc..6b61702 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/of_device.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/platform_device.h>
+#include <linux/reboot.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/pfuze100.h>
@@ -571,10 +572,10 @@ static inline struct device_node *match_of_node(int index)
        return pfuze_matches[index].of_node;
 }
 
-static struct pfuze_chip *syspm_pfuze_chip;
-
-static void pfuze_power_off_prepare(void)
+static int pfuze_power_off_prepare(struct sys_off_data *data)
 {
+       struct pfuze_chip *syspm_pfuze_chip = data->cb_data;
+
        dev_info(syspm_pfuze_chip->dev, "Configure standby mode for power off");
 
        /* Switch from default mode: APS/APS to APS/Off */
@@ -609,28 +610,30 @@ static void pfuze_power_off_prepare(void)
        regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_VGEN6VOL,
                           PFUZE100_VGENxLPWR | PFUZE100_VGENxSTBY,
                           PFUZE100_VGENxSTBY);
+
+       return NOTIFY_DONE;
 }
 
 static int pfuze_power_off_prepare_init(struct pfuze_chip *pfuze_chip)
 {
+       int err;
+
        if (pfuze_chip->chip_id != PFUZE100) {
                dev_warn(pfuze_chip->dev, "Requested pm_power_off_prepare handler for not supported chip\n");
                return -ENODEV;
        }
 
-       if (pm_power_off_prepare) {
-               dev_warn(pfuze_chip->dev, "pm_power_off_prepare is already registered.\n");
-               return -EBUSY;
+       err = devm_register_sys_off_handler(pfuze_chip->dev,
+                                           SYS_OFF_MODE_POWER_OFF_PREPARE,
+                                           SYS_OFF_PRIO_DEFAULT,
+                                           pfuze_power_off_prepare,
+                                           pfuze_chip);
+       if (err) {
+               dev_err(pfuze_chip->dev, "failed to register sys-off handler: %d\n",
+                       err);
+               return err;
        }
 
-       if (syspm_pfuze_chip) {
-               dev_warn(pfuze_chip->dev, "syspm_pfuze_chip is already set.\n");
-               return -EBUSY;
-       }
-
-       syspm_pfuze_chip = pfuze_chip;
-       pm_power_off_prepare = pfuze_power_off_prepare;
-
        return 0;
 }
 
@@ -839,23 +842,12 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
        return 0;
 }
 
-static int pfuze100_regulator_remove(struct i2c_client *client)
-{
-       if (syspm_pfuze_chip) {
-               syspm_pfuze_chip = NULL;
-               pm_power_off_prepare = NULL;
-       }
-
-       return 0;
-}
-
 static struct i2c_driver pfuze_driver = {
        .driver = {
                .name = "pfuze100-regulator",
                .of_match_table = pfuze_dt_ids,
        },
        .probe = pfuze100_regulator_probe,
-       .remove = pfuze100_regulator_remove,
 };
 module_i2c_driver(pfuze_driver);
 
index 2abee78..ca0817f 100644 (file)
@@ -649,99 +649,6 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
        return 0;
 }
 
-/**
- * imx_dsp_rproc_elf_load_segments() - load firmware segments to memory
- * @rproc: remote processor which will be booted using these fw segments
- * @fw: the ELF firmware image
- *
- * This function specially checks if memsz is zero or not, otherwise it
- * is mostly same as rproc_elf_load_segments().
- */
-static int imx_dsp_rproc_elf_load_segments(struct rproc *rproc,
-                                          const struct firmware *fw)
-{
-       struct device *dev = &rproc->dev;
-       u8 class = fw_elf_get_class(fw);
-       u32 elf_phdr_get_size = elf_size_of_phdr(class);
-       const u8 *elf_data = fw->data;
-       const void *ehdr, *phdr;
-       int i, ret = 0;
-       u16 phnum;
-
-       ehdr = elf_data;
-       phnum = elf_hdr_get_e_phnum(class, ehdr);
-       phdr = elf_data + elf_hdr_get_e_phoff(class, ehdr);
-
-       /* go through the available ELF segments */
-       for (i = 0; i < phnum; i++, phdr += elf_phdr_get_size) {
-               u64 da = elf_phdr_get_p_paddr(class, phdr);
-               u64 memsz = elf_phdr_get_p_memsz(class, phdr);
-               u64 filesz = elf_phdr_get_p_filesz(class, phdr);
-               u64 offset = elf_phdr_get_p_offset(class, phdr);
-               u32 type = elf_phdr_get_p_type(class, phdr);
-               void *ptr;
-
-               /*
-                *  There is a case that with PT_LOAD type, the
-                *  filesz = memsz = 0. If memsz = 0, rproc_da_to_va
-                *  should return NULL ptr, then error is returned.
-                *  So this case should be skipped from the loop.
-                *  Add !memsz checking here.
-                */
-               if (type != PT_LOAD || !memsz)
-                       continue;
-
-               dev_dbg(dev, "phdr: type %d da 0x%llx memsz 0x%llx filesz 0x%llx\n",
-                       type, da, memsz, filesz);
-
-               if (filesz > memsz) {
-                       dev_err(dev, "bad phdr filesz 0x%llx memsz 0x%llx\n",
-                               filesz, memsz);
-                       ret = -EINVAL;
-                       break;
-               }
-
-               if (offset + filesz > fw->size) {
-                       dev_err(dev, "truncated fw: need 0x%llx avail 0x%zx\n",
-                               offset + filesz, fw->size);
-                       ret = -EINVAL;
-                       break;
-               }
-
-               if (!rproc_u64_fit_in_size_t(memsz)) {
-                       dev_err(dev, "size (%llx) does not fit in size_t type\n",
-                               memsz);
-                       ret = -EOVERFLOW;
-                       break;
-               }
-
-               /* grab the kernel address for this device address */
-               ptr = rproc_da_to_va(rproc, da, memsz, NULL);
-               if (!ptr) {
-                       dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da,
-                               memsz);
-                       ret = -EINVAL;
-                       break;
-               }
-
-               /* put the segment where the remote processor expects it */
-               if (filesz)
-                       memcpy(ptr, elf_data + offset, filesz);
-
-               /*
-                * Zero out remaining memory for this segment.
-                *
-                * This isn't strictly required since dma_alloc_coherent already
-                * did this for us. albeit harmless, we may consider removing
-                * this.
-                */
-               if (memsz > filesz)
-                       memset(ptr + filesz, 0, memsz - filesz);
-       }
-
-       return ret;
-}
-
 /* Prepare function for rproc_ops */
 static int imx_dsp_rproc_prepare(struct rproc *rproc)
 {
@@ -802,14 +709,22 @@ static void imx_dsp_rproc_kick(struct rproc *rproc, int vqid)
                dev_err(dev, "%s: failed (%d, err:%d)\n", __func__, vqid, err);
 }
 
+static int imx_dsp_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+       if (rproc_elf_load_rsc_table(rproc, fw))
+               dev_warn(&rproc->dev, "no resource table found for this firmware\n");
+
+       return 0;
+}
+
 static const struct rproc_ops imx_dsp_rproc_ops = {
        .prepare        = imx_dsp_rproc_prepare,
        .unprepare      = imx_dsp_rproc_unprepare,
        .start          = imx_dsp_rproc_start,
        .stop           = imx_dsp_rproc_stop,
        .kick           = imx_dsp_rproc_kick,
-       .load           = imx_dsp_rproc_elf_load_segments,
-       .parse_fw       = rproc_elf_load_rsc_table,
+       .load           = rproc_elf_load_segments,
+       .parse_fw       = imx_dsp_rproc_parse_fw,
        .sanity_check   = rproc_elf_sanity_check,
        .get_boot_addr  = rproc_elf_get_boot_addr,
 };
index 7a096f1..4a33528 100644 (file)
@@ -91,6 +91,32 @@ struct imx_rproc {
        void __iomem                    *rsc_table;
 };
 
+static const struct imx_rproc_att imx_rproc_att_imx93[] = {
+       /* dev addr , sys addr  , size      , flags */
+       /* TCM CODE NON-SECURE */
+       { 0x0FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+       { 0x0FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+       /* TCM CODE SECURE */
+       { 0x1FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+       { 0x1FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+       /* TCM SYS NON-SECURE*/
+       { 0x20000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
+       { 0x20020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+       /* TCM SYS SECURE*/
+       { 0x30000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
+       { 0x30020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+       /* DDR */
+       { 0x80000000, 0x80000000, 0x10000000, 0 },
+       { 0x90000000, 0x80000000, 0x10000000, 0 },
+
+       { 0xC0000000, 0xa0000000, 0x10000000, 0 },
+       { 0xD0000000, 0xa0000000, 0x10000000, 0 },
+};
+
 static const struct imx_rproc_att imx_rproc_att_imx8mn[] = {
        /* dev addr , sys addr  , size      , flags */
        /* ITCM   */
@@ -261,6 +287,12 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
        .method         = IMX_RPROC_MMIO,
 };
 
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx93 = {
+       .att            = imx_rproc_att_imx93,
+       .att_size       = ARRAY_SIZE(imx_rproc_att_imx93),
+       .method         = IMX_RPROC_SMC,
+};
+
 static int imx_rproc_start(struct rproc *rproc)
 {
        struct imx_rproc *priv = rproc->priv;
@@ -423,6 +455,9 @@ static int imx_rproc_prepare(struct rproc *rproc)
                if (!strcmp(it.node->name, "vdev0buffer"))
                        continue;
 
+               if (!strcmp(it.node->name, "rsc-table"))
+                       continue;
+
                rmem = of_reserved_mem_lookup(it.node);
                if (!rmem) {
                        dev_err(priv->dev, "unable to acquire memory-region\n");
@@ -821,6 +856,7 @@ static const struct of_device_id imx_rproc_of_match[] = {
        { .compatible = "fsl,imx8mn-cm7", .data = &imx_rproc_cfg_imx8mn },
        { .compatible = "fsl,imx8mp-cm7", .data = &imx_rproc_cfg_imx8mn },
        { .compatible = "fsl,imx8ulp-cm33", .data = &imx_rproc_cfg_imx8ulp },
+       { .compatible = "fsl,imx93-cm33", .data = &imx_rproc_cfg_imx93 },
        {},
 };
 MODULE_DEVICE_TABLE(of, imx_rproc_of_match);
index 71ce497..ea6fa11 100644 (file)
@@ -54,6 +54,8 @@
 #define MT8192_CORE0_WDT_IRQ           0x10030
 #define MT8192_CORE0_WDT_CFG           0x10034
 
+#define MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS                GENMASK(7, 4)
+
 #define SCP_FW_VER_LEN                 32
 #define SCP_SHARE_BUFFER_SIZE          288
 
index 3860915..47b2a40 100644 (file)
@@ -365,22 +365,22 @@ static int mt8183_scp_before_load(struct mtk_scp *scp)
        return 0;
 }
 
-static void mt8192_power_on_sram(void __iomem *addr)
+static void scp_sram_power_on(void __iomem *addr, u32 reserved_mask)
 {
        int i;
 
        for (i = 31; i >= 0; i--)
-               writel(GENMASK(i, 0), addr);
+               writel(GENMASK(i, 0) & ~reserved_mask, addr);
        writel(0, addr);
 }
 
-static void mt8192_power_off_sram(void __iomem *addr)
+static void scp_sram_power_off(void __iomem *addr, u32 reserved_mask)
 {
        int i;
 
        writel(0, addr);
        for (i = 0; i < 32; i++)
-               writel(GENMASK(i, 0), addr);
+               writel(GENMASK(i, 0) & ~reserved_mask, addr);
 }
 
 static int mt8186_scp_before_load(struct mtk_scp *scp)
@@ -393,7 +393,7 @@ static int mt8186_scp_before_load(struct mtk_scp *scp)
        writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
 
        /* Turn on the power of SCP's SRAM before using it. Enable 1 block per time*/
-       mt8192_power_on_sram(scp->reg_base + MT8183_SCP_SRAM_PDN);
+       scp_sram_power_on(scp->reg_base + MT8183_SCP_SRAM_PDN, 0);
 
        /* Initialize TCM before loading FW. */
        writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
@@ -412,11 +412,32 @@ static int mt8192_scp_before_load(struct mtk_scp *scp)
        writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
 
        /* enable SRAM clock */
-       mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_0);
-       mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_1);
-       mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_2);
-       mt8192_power_on_sram(scp->reg_base + MT8192_L1TCM_SRAM_PDN);
-       mt8192_power_on_sram(scp->reg_base + MT8192_CPU0_SRAM_PD);
+       scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+       scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+       scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+       scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+       scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+       /* enable MPU for all memory regions */
+       writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+
+       return 0;
+}
+
+static int mt8195_scp_before_load(struct mtk_scp *scp)
+{
+       /* clear SPM interrupt, SCP2SPM_IPC_CLR */
+       writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);
+
+       writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+
+       /* enable SRAM clock */
+       scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+       scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+       scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+       scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
+                         MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
+       scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
 
        /* enable MPU for all memory regions */
        writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
@@ -572,11 +593,25 @@ static void mt8183_scp_stop(struct mtk_scp *scp)
 static void mt8192_scp_stop(struct mtk_scp *scp)
 {
        /* Disable SRAM clock */
-       mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_0);
-       mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_1);
-       mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_2);
-       mt8192_power_off_sram(scp->reg_base + MT8192_L1TCM_SRAM_PDN);
-       mt8192_power_off_sram(scp->reg_base + MT8192_CPU0_SRAM_PD);
+       scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+       scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+       scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+       scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+       scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+       /* Disable SCP watchdog */
+       writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
+}
+
+static void mt8195_scp_stop(struct mtk_scp *scp)
+{
+       /* Disable SRAM clock */
+       scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+       scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+       scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+       scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
+                          MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
+       scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
 
        /* Disable SCP watchdog */
        writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
@@ -774,9 +809,13 @@ static int scp_probe(struct platform_device *pdev)
        struct mtk_scp *scp;
        struct rproc *rproc;
        struct resource *res;
-       char *fw_name = "scp.img";
+       const char *fw_name = "scp.img";
        int ret, i;
 
+       ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+       if (ret < 0 && ret != -EINVAL)
+               return ret;
+
        rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
        if (!rproc)
                return dev_err_probe(dev, -ENOMEM, "unable to allocate remoteproc\n");
@@ -877,7 +916,6 @@ static int scp_remove(struct platform_device *pdev)
        for (i = 0; i < SCP_IPI_MAX; i++)
                mutex_destroy(&scp->ipi_desc[i].lock);
        mutex_destroy(&scp->send_lock);
-       rproc_free(scp->rproc);
 
        return 0;
 }
@@ -922,11 +960,11 @@ static const struct mtk_scp_of_data mt8192_of_data = {
 
 static const struct mtk_scp_of_data mt8195_of_data = {
        .scp_clk_get = mt8195_scp_clk_get,
-       .scp_before_load = mt8192_scp_before_load,
+       .scp_before_load = mt8195_scp_before_load,
        .scp_irq_handler = mt8192_scp_irq_handler,
        .scp_reset_assert = mt8192_scp_reset_assert,
        .scp_reset_deassert = mt8192_scp_reset_deassert,
-       .scp_stop = mt8192_scp_stop,
+       .scp_stop = mt8195_scp_stop,
        .scp_da_to_va = mt8192_scp_da_to_va,
        .host_to_scp_reg = MT8192_GIPC_IN_SET,
        .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
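
The reserved_mask parameter introduced above masks the MT8195 RSI bits out of every value written by the SRAM power-on/off loops. A standalone sketch of that masking arithmetic (GENMASK32 and the loop bounds are userspace stand-ins for the kernel's GENMASK() and the driver's full 32-bit walk):

#include <stdint.h>
#include <stdio.h>

/* userspace stand-in for the kernel's GENMASK() */
#define GENMASK32(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))

int main(void)
{
	uint32_t reserved = GENMASK32(7, 4);	/* the MT8195 RSI bits */

	/* the value each power-on loop iteration would write, for i = 31..28 */
	for (int i = 31; i >= 28; i--)
		printf("i=%2d -> 0x%08x\n", i, GENMASK32(i, 0) & ~reserved);
	return 0;
}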
index 1ae47cc..6ae39c5 100644 (file)
@@ -704,6 +704,36 @@ static const struct adsp_data sm8250_cdsp_resource = {
        .ssctl_id = 0x17,
 };
 
+static const struct adsp_data sc8280xp_nsp0_resource = {
+       .crash_reason_smem = 601,
+       .firmware_name = "cdsp.mdt",
+       .pas_id = 18,
+       .has_aggre2_clk = false,
+       .auto_boot = true,
+       .proxy_pd_names = (char*[]){
+               "nsp",
+               NULL
+       },
+       .ssr_name = "cdsp0",
+       .sysmon_name = "cdsp",
+       .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sc8280xp_nsp1_resource = {
+       .crash_reason_smem = 633,
+       .firmware_name = "cdsp.mdt",
+       .pas_id = 30,
+       .has_aggre2_clk = false,
+       .auto_boot = true,
+       .proxy_pd_names = (char*[]){
+               "nsp",
+               NULL
+       },
+       .ssr_name = "cdsp1",
+       .sysmon_name = "cdsp1",
+       .ssctl_id = 0x20,
+};
+
 static const struct adsp_data sm8350_cdsp_resource = {
        .crash_reason_smem = 601,
        .firmware_name = "cdsp.mdt",
@@ -848,6 +878,7 @@ static const struct adsp_data sdx55_mpss_resource = {
 };
 
 static const struct of_device_id adsp_of_match[] = {
+       { .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
        { .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
        { .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource},
        { .compatible = "qcom,msm8996-slpi-pil", .data = &slpi_resource_init},
@@ -861,6 +892,9 @@ static const struct of_device_id adsp_of_match[] = {
        { .compatible = "qcom,sc8180x-adsp-pas", .data = &sm8150_adsp_resource},
        { .compatible = "qcom,sc8180x-cdsp-pas", .data = &sm8150_cdsp_resource},
        { .compatible = "qcom,sc8180x-mpss-pas", .data = &sc8180x_mpss_resource},
+       { .compatible = "qcom,sc8280xp-adsp-pas", .data = &sm8250_adsp_resource},
+       { .compatible = "qcom,sc8280xp-nsp0-pas", .data = &sc8280xp_nsp0_resource},
+       { .compatible = "qcom,sc8280xp-nsp1-pas", .data = &sc8280xp_nsp1_resource},
        { .compatible = "qcom,sdm660-adsp-pas", .data = &adsp_resource_init},
        { .compatible = "qcom,sdm845-adsp-pas", .data = &sdm845_adsp_resource_init},
        { .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init},
index 906ff3c..687f205 100644 (file)
@@ -32,21 +32,10 @@ static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_
                return -EFAULT;
 
        if (!strncmp(cmd, "start", len)) {
-               if (rproc->state == RPROC_RUNNING ||
-                   rproc->state == RPROC_ATTACHED)
-                       return -EBUSY;
-
                ret = rproc_boot(rproc);
        } else if (!strncmp(cmd, "stop", len)) {
-               if (rproc->state != RPROC_RUNNING &&
-                   rproc->state != RPROC_ATTACHED)
-                       return -EINVAL;
-
                ret = rproc_shutdown(rproc);
        } else if (!strncmp(cmd, "detach", len)) {
-               if (rproc->state != RPROC_ATTACHED)
-                       return -EINVAL;
-
                ret = rproc_detach(rproc);
        } else {
                dev_err(&rproc->dev, "Unrecognized option\n");
index c510125..02a04ab 100644 (file)
@@ -684,10 +684,6 @@ static int rproc_handle_trace(struct rproc *rproc, void *ptr,
 
        /* create the debugfs entry */
        trace->tfile = rproc_create_trace_file(name, rproc, trace);
-       if (!trace->tfile) {
-               kfree(trace);
-               return -EINVAL;
-       }
 
        list_add_tail(&trace->node, &rproc->traces);
 
@@ -2075,6 +2071,12 @@ int rproc_shutdown(struct rproc *rproc)
                return ret;
        }
 
+       if (rproc->state != RPROC_RUNNING &&
+           rproc->state != RPROC_ATTACHED) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        /* if the remote proc is still needed, bail out */
        if (!atomic_dec_and_test(&rproc->power))
                goto out;
@@ -2134,6 +2136,11 @@ int rproc_detach(struct rproc *rproc)
                return ret;
        }
 
+       if (rproc->state != RPROC_ATTACHED) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        /* if the remote proc is still needed, bail out */
        if (!atomic_dec_and_test(&rproc->power)) {
                ret = 0;
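
Moving the state validation from the sysfs/cdev front-ends into rproc_shutdown() and rproc_detach() means the check now runs with the rproc mutex already held, so it cannot race a concurrent state change. A minimal sketch of that check-under-lock pattern (plain pthreads, illustrative names):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

enum state { STOPPED, RUNNING };

struct proc {
	pthread_mutex_t lock;
	enum state state;
};

/* Validate state only after the lock is held, so no caller can race the check. */
static int proc_shutdown(struct proc *p)
{
	int ret = 0;

	pthread_mutex_lock(&p->lock);
	if (p->state != RUNNING) {
		ret = -EINVAL;
		goto out;
	}
	p->state = STOPPED;
out:
	pthread_mutex_unlock(&p->lock);
	return ret;
}

int main(void)
{
	struct proc p = { PTHREAD_MUTEX_INITIALIZER, STOPPED };

	printf("shutdown while stopped: %d\n", proc_shutdown(&p));	/* -EINVAL */
	return 0;
}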
index 5819304..b86c1d0 100644 (file)
@@ -386,16 +386,8 @@ void rproc_remove_trace_file(struct dentry *tfile)
 struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
                                       struct rproc_debug_trace *trace)
 {
-       struct dentry *tfile;
-
-       tfile = debugfs_create_file(name, 0400, rproc->dbg_dir, trace,
+       return debugfs_create_file(name, 0400, rproc->dbg_dir, trace,
                                    &trace_rproc_ops);
-       if (!tfile) {
-               dev_err(&rproc->dev, "failed to create debugfs trace entry\n");
-               return NULL;
-       }
-
-       return tfile;
 }
 
 void rproc_delete_debug_dir(struct rproc *rproc)
@@ -411,8 +403,6 @@ void rproc_create_debug_dir(struct rproc *rproc)
                return;
 
        rproc->dbg_dir = debugfs_create_dir(dev_name(dev), rproc_dbg);
-       if (!rproc->dbg_dir)
-               return;
 
        debugfs_create_file("name", 0400, rproc->dbg_dir,
                            rproc, &rproc_name_ops);
@@ -430,11 +420,8 @@ void rproc_create_debug_dir(struct rproc *rproc)
 
 void __init rproc_init_debugfs(void)
 {
-       if (debugfs_initialized()) {
+       if (debugfs_initialized())
                rproc_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
-               if (!rproc_dbg)
-                       pr_err("can't create debugfs dir\n");
-       }
 }
 
 void __exit rproc_exit_debugfs(void)
index d635d19..5a412d7 100644 (file)
@@ -181,7 +181,7 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
                bool is_iomem = false;
                void *ptr;
 
-               if (type != PT_LOAD)
+               if (type != PT_LOAD || !memsz)
                        continue;
 
                dev_dbg(dev, "phdr: type %d da 0x%llx memsz 0x%llx filesz 0x%llx\n",
index 51a04bc..8c7ea89 100644 (file)
@@ -194,23 +194,12 @@ static ssize_t state_store(struct device *dev,
        int ret = 0;
 
        if (sysfs_streq(buf, "start")) {
-               if (rproc->state == RPROC_RUNNING ||
-                   rproc->state == RPROC_ATTACHED)
-                       return -EBUSY;
-
                ret = rproc_boot(rproc);
                if (ret)
                        dev_err(&rproc->dev, "Boot failed: %d\n", ret);
        } else if (sysfs_streq(buf, "stop")) {
-               if (rproc->state != RPROC_RUNNING &&
-                   rproc->state != RPROC_ATTACHED)
-                       return -EINVAL;
-
                ret = rproc_shutdown(rproc);
        } else if (sysfs_streq(buf, "detach")) {
-               if (rproc->state != RPROC_ATTACHED)
-                       return -EINVAL;
-
                ret = rproc_detach(rproc);
        } else {
                dev_err(&rproc->dev, "Unrecognised option: %s\n", buf);
index 764c980..1957b27 100644 (file)
@@ -1407,9 +1407,9 @@ static int qcom_smd_parse_edge(struct device *dev,
                edge->name = node->name;
 
        irq = irq_of_parse_and_map(node, 0);
-       if (irq < 0) {
+       if (!irq) {
                dev_err(dev, "required smd interrupt missing\n");
-               ret = irq;
+               ret = -EINVAL;
                goto put_node;
        }
 
index 79368a9..290c1f0 100644 (file)
@@ -400,7 +400,8 @@ field##_store(struct device *dev, struct device_attribute *attr,    \
              const char *buf, size_t sz)                               \
 {                                                                      \
        struct rpmsg_device *rpdev = to_rpmsg_device(dev);              \
-       char *new, *old;                                                \
+       const char *old;                                                \
+       char *new;                                                      \
                                                                        \
        new = kstrndup(buf, sz, GFP_KERNEL);                            \
        if (!new)                                                       \
@@ -592,24 +593,51 @@ static struct bus_type rpmsg_bus = {
        .remove         = rpmsg_dev_remove,
 };
 
-int rpmsg_register_device(struct rpmsg_device *rpdev)
+/*
+ * A helper for registering rpmsg device with driver override and name.
+ * Drivers should not be using it, but instead rpmsg_register_device().
+ */
+int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+                                  const char *driver_override)
 {
        struct device *dev = &rpdev->dev;
        int ret;
 
-       dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent),
+       if (driver_override)
+               strcpy(rpdev->id.name, driver_override);
+
+       dev_set_name(dev, "%s.%s.%d.%d", dev_name(dev->parent),
                     rpdev->id.name, rpdev->src, rpdev->dst);
 
-       rpdev->dev.bus = &rpmsg_bus;
+       dev->bus = &rpmsg_bus;
 
-       ret = device_register(&rpdev->dev);
+       device_initialize(dev);
+       if (driver_override) {
+               ret = driver_set_override(dev, &rpdev->driver_override,
+                                         driver_override,
+                                         strlen(driver_override));
+               if (ret) {
+                       dev_err(dev, "device_set_override failed: %d\n", ret);
+                       return ret;
+               }
+       }
+
+       ret = device_add(dev);
        if (ret) {
-               dev_err(dev, "device_register failed: %d\n", ret);
-               put_device(&rpdev->dev);
+               dev_err(dev, "device_add failed: %d\n", ret);
+               kfree(rpdev->driver_override);
+               rpdev->driver_override = NULL;
+               put_device(dev);
        }
 
        return ret;
 }
+EXPORT_SYMBOL(rpmsg_register_device_override);
+
+int rpmsg_register_device(struct rpmsg_device *rpdev)
+{
+       return rpmsg_register_device_override(rpdev, NULL);
+}
 EXPORT_SYMBOL(rpmsg_register_device);
 
 /*
index d4b23fd..a22cd4a 100644 (file)
@@ -94,10 +94,7 @@ int rpmsg_release_channel(struct rpmsg_device *rpdev,
  */
 static inline int rpmsg_ctrldev_register_device(struct rpmsg_device *rpdev)
 {
-       strcpy(rpdev->id.name, "rpmsg_ctrl");
-       rpdev->driver_override = "rpmsg_ctrl";
-
-       return rpmsg_register_device(rpdev);
+       return rpmsg_register_device_override(rpdev, "rpmsg_ctrl");
 }
 
 #endif
index 762ff1a..c70ad03 100644 (file)
  */
 int rpmsg_ns_register_device(struct rpmsg_device *rpdev)
 {
-       strcpy(rpdev->id.name, "rpmsg_ns");
-       rpdev->driver_override = "rpmsg_ns";
        rpdev->src = RPMSG_NS_ADDR;
        rpdev->dst = RPMSG_NS_ADDR;
 
-       return rpmsg_register_device(rpdev);
+       return rpmsg_register_device_override(rpdev, "rpmsg_ns");
 }
 EXPORT_SYMBOL(rpmsg_ns_register_device);
 
index 3ede25b..905ac79 100644 (file)
@@ -851,7 +851,7 @@ static struct rpmsg_device *rpmsg_virtio_add_ctrl_dev(struct virtio_device *vdev
 
        err = rpmsg_ctrldev_register_device(rpdev_ctrl);
        if (err) {
-               kfree(vch);
+               /* vch will be freed in virtio_rpmsg_release_device() */
                return ERR_PTR(err);
        }
 
@@ -862,7 +862,7 @@ static void rpmsg_virtio_del_ctrl_dev(struct rpmsg_device *rpdev_ctrl)
 {
        if (!rpdev_ctrl)
                return;
-       kfree(to_virtio_rpmsg_channel(rpdev_ctrl));
+       device_unregister(&rpdev_ctrl->dev);
 }
 
 static int rpmsg_probe(struct virtio_device *vdev)
@@ -973,7 +973,8 @@ static int rpmsg_probe(struct virtio_device *vdev)
 
                err = rpmsg_ns_register_device(rpdev_ns);
                if (err)
-                       goto free_vch;
+                       /* vch will be freed in virtio_rpmsg_release_device() */
+                       goto free_ctrldev;
        }
 
        /*
@@ -997,8 +998,6 @@ static int rpmsg_probe(struct virtio_device *vdev)
 
        return 0;
 
-free_vch:
-       kfree(vch);
 free_ctrldev:
        rpmsg_virtio_del_ctrl_dev(rpdev_ctrl);
 free_coherent:
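
With rpmsg_virtio_del_ctrl_dev() switched to device_unregister(), the wrapper that embeds the rpmsg device is freed only by the release callback on the final reference drop, never directly on an error path. A standalone sketch of that ownership rule (the tiny refcount and names are illustrative, not the kernel's device model):

#include <stdio.h>
#include <stdlib.h>

struct channel {
	int refcount;
	void (*release)(struct channel *);
};

static void channel_release(struct channel *ch)
{
	printf("releasing channel\n");
	free(ch);	/* the only place the memory is freed */
}

static void channel_put(struct channel *ch)
{
	if (--ch->refcount == 0)
		ch->release(ch);
}

int main(void)
{
	struct channel *ch = malloc(sizeof(*ch));

	ch->refcount = 1;
	ch->release = channel_release;

	/* error path: drop the reference instead of calling free(ch) directly */
	channel_put(ch);
	return 0;
}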
index 41c65b4..a00f901 100644 (file)
@@ -1548,6 +1548,13 @@ config RTC_DRV_RS5C313
        help
          If you say yes here you get support for the Ricoh RS5C313 RTC chips.
 
+config RTC_DRV_RZN1
+       tristate "Renesas RZ/N1 RTC"
+       depends on ARCH_RZN1 || COMPILE_TEST
+       depends on OF && HAS_IOMEM
+       help
+         If you say yes here you get support for the Renesas RZ/N1 RTC.
+
 config RTC_DRV_GENERIC
        tristate "Generic RTC support"
        # Please consider writing a new RTC driver instead of using the generic
index 2d827d8..fb04467 100644 (file)
@@ -151,6 +151,7 @@ obj-$(CONFIG_RTC_DRV_RX6110)        += rtc-rx6110.o
 obj-$(CONFIG_RTC_DRV_RX8010)   += rtc-rx8010.o
 obj-$(CONFIG_RTC_DRV_RX8025)   += rtc-rx8025.o
 obj-$(CONFIG_RTC_DRV_RX8581)   += rtc-rx8581.o
+obj-$(CONFIG_RTC_DRV_RZN1)     += rtc-rzn1.o
 obj-$(CONFIG_RTC_DRV_S35390A)  += rtc-s35390a.o
 obj-$(CONFIG_RTC_DRV_S3C)      += rtc-s3c.o
 obj-$(CONFIG_RTC_DRV_S5M)      += rtc-s5m.o
index 53bb08f..25c6e7d 100644 (file)
@@ -137,26 +137,34 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev)
                ret = clk_prepare_enable(rtc->extclk);
                if (ret) {
                        dev_err(dev, "failed to enable EXTCLK\n");
-                       return ret;
+                       goto err_disable_pclk;
                }
        }
 
        rtc->rtc_irq = platform_get_irq(pdev, 0);
-       if (rtc->rtc_irq < 0)
-               return rtc->rtc_irq;
+       if (rtc->rtc_irq < 0) {
+               ret = rtc->rtc_irq;
+               goto err_disable_extclk;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENODEV;
+       if (!res) {
+               ret = -ENODEV;
+               goto err_disable_extclk;
+       }
 
        rtc->rtc_base = devm_ioremap(dev, res->start,
                                     resource_size(res));
-       if (!rtc->rtc_base)
-               return -ENOMEM;
+       if (!rtc->rtc_base) {
+               ret = -ENOMEM;
+               goto err_disable_extclk;
+       }
 
        rtc->rtc_dev = devm_rtc_allocate_device(dev);
-       if (IS_ERR(rtc->rtc_dev))
-               return PTR_ERR(rtc->rtc_dev);
+       if (IS_ERR(rtc->rtc_dev)) {
+               ret = PTR_ERR(rtc->rtc_dev);
+               goto err_disable_extclk;
+       }
 
        rtc->rtc_dev->ops = &ftrtc010_rtc_ops;
 
@@ -172,9 +180,15 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev)
        ret = devm_request_irq(dev, rtc->rtc_irq, ftrtc010_rtc_interrupt,
                               IRQF_SHARED, pdev->name, dev);
        if (unlikely(ret))
-               return ret;
+               goto err_disable_extclk;
 
        return devm_rtc_register_device(rtc->rtc_dev);
+
+err_disable_extclk:
+       clk_disable_unprepare(rtc->extclk);
+err_disable_pclk:
+       clk_disable_unprepare(rtc->pclk);
+       return ret;
 }
 
 static int ftrtc010_rtc_remove(struct platform_device *pdev)
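
The probe fix above completes the usual acquire-in-order, release-in-reverse unwinding: each later failure jumps to a label that undoes only what has already succeeded. A minimal sketch of the pattern, with plain malloc() stand-ins for the two clocks:

#include <stdlib.h>

struct ctx { void *pclk; void *extclk; };

static int probe_example(struct ctx *c)
{
	c->pclk = malloc(16);
	if (!c->pclk)
		return -1;

	c->extclk = malloc(16);
	if (!c->extclk)
		goto err_free_pclk;	/* undo only the earlier acquisition */

	return 0;

err_free_pclk:
	free(c->pclk);
	return -1;
}

int main(void)
{
	struct ctx c = { 0 };
	int ret = probe_example(&c);

	if (!ret) {
		free(c.extclk);
		free(c.pclk);
	}
	return ret ? 1 : 0;
}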
index 18ca3b3..c2717bb 100644 (file)
@@ -267,6 +267,7 @@ static int gamecube_rtc_read_offset_from_sram(struct priv *d)
        ret = regmap_read(d->regmap, RTC_SRAM_BIAS, &d->rtc_bias);
        if (ret) {
                pr_err("failed to get the RTC bias\n");
+               iounmap(hw_srnprot);
                return -1;
        }
 
index 44bdc8b..db1d626 100644 (file)
@@ -399,7 +399,7 @@ static struct platform_driver meson_rtc_driver = {
 module_platform_driver(meson_rtc_driver);
 
 MODULE_DESCRIPTION("Amlogic Meson RTC Driver");
-MODULE_AUTHOR("Ben Dooks <ben.doosk@codethink.co.uk>");
+MODULE_AUTHOR("Ben Dooks <ben.dooks@codethink.co.uk>");
 MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:meson-rtc");
index 80dc479..1d297af 100644 (file)
@@ -269,6 +269,8 @@ static int mtk_rtc_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -EINVAL;
        rtc->addr_base = res->start;
 
        rtc->data = of_device_get_match_data(&pdev->dev);
index 0f08f22..53d4e25 100644 (file)
@@ -311,7 +311,7 @@ static int mxc_rtc_probe(struct platform_device *pdev)
        if (!pdata)
                return -ENOMEM;
 
-       pdata->devtype = (enum imx_rtc_type)of_device_get_match_data(&pdev->dev);
+       pdata->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
 
        pdata->ioaddr = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(pdata->ioaddr))
index 9760824..0958919 100644 (file)
@@ -650,6 +650,7 @@ static int pcf85063_probe(struct i2c_client *client)
 }
 
 static const struct i2c_device_id pcf85063_ids[] = {
+       { "pca85073a", PCF85063A },
        { "pcf85063", PCF85063 },
        { "pcf85063tp", PCF85063TP },
        { "pcf85063a", PCF85063A },
@@ -660,6 +661,7 @@ MODULE_DEVICE_TABLE(i2c, pcf85063_ids);
 
 #ifdef CONFIG_OF
 static const struct of_device_id pcf85063_of_match[] = {
+       { .compatible = "nxp,pca85073a", .data = &pcf85063_cfg[PCF85063A] },
        { .compatible = "nxp,pcf85063", .data = &pcf85063_cfg[PCF85063] },
        { .compatible = "nxp,pcf85063tp", .data = &pcf85063_cfg[PCF85063TP] },
        { .compatible = "nxp,pcf85063a", .data = &pcf85063_cfg[PCF85063A] },
index cf8119b..eeacf48 100644 (file)
@@ -16,8 +16,6 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 
-#include <mach/hardware.h>
-
 #include "rtc-sa1100.h"
 
 #define RTC_DEF_DIVIDER                (32768 - 1)
index 5bfdd34..b32117c 100644 (file)
@@ -436,7 +436,6 @@ static int rx8025_set_offset(struct device *dev, long offset)
 {
        struct i2c_client *client = to_i2c_client(dev);
        u8 digoff;
-       int err;
 
        offset /= RX8025_ADJ_RESOLUTION;
        if (offset > RX8025_ADJ_DATA_MAX)
@@ -449,11 +448,7 @@ static int rx8025_set_offset(struct device *dev, long offset)
                offset += 128;
        digoff = offset;
 
-       err = rx8025_write_reg(client, RX8025_REG_DIGOFF, digoff);
-       if (err)
-               return err;
-
-       return 0;
+       return rx8025_write_reg(client, RX8025_REG_DIGOFF, digoff);
 }
 
 static const struct rtc_class_ops rx8025_rtc_ops = {
diff --git a/drivers/rtc/rtc-rzn1.c b/drivers/rtc/rtc-rzn1.c
new file mode 100644 (file)
index 0000000..ac78879
--- /dev/null
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Renesas RZ/N1 Real Time Clock interface for Linux
+ *
+ * Copyright:
+ * - 2014 Renesas Electronics Europe Limited
+ * - 2022 Schneider Electric
+ *
+ * Authors:
+ * - Michel Pollet <michel.pollet@bp.renesas.com>, <buserror@gmail.com>
+ * - Miquel Raynal <miquel.raynal@bootlin.com>
+ */
+
+#include <linux/bcd.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/rtc.h>
+
+#define RZN1_RTC_CTL0 0x00
+#define   RZN1_RTC_CTL0_SLSB_SUBU 0
+#define   RZN1_RTC_CTL0_SLSB_SCMP BIT(4)
+#define   RZN1_RTC_CTL0_AMPM BIT(5)
+#define   RZN1_RTC_CTL0_CE BIT(7)
+
+#define RZN1_RTC_CTL1 0x04
+#define   RZN1_RTC_CTL1_ALME BIT(4)
+
+#define RZN1_RTC_CTL2 0x08
+#define   RZN1_RTC_CTL2_WAIT BIT(0)
+#define   RZN1_RTC_CTL2_WST BIT(1)
+#define   RZN1_RTC_CTL2_WUST BIT(5)
+#define   RZN1_RTC_CTL2_STOPPED (RZN1_RTC_CTL2_WAIT | RZN1_RTC_CTL2_WST)
+
+#define RZN1_RTC_SEC 0x14
+#define RZN1_RTC_MIN 0x18
+#define RZN1_RTC_HOUR 0x1c
+#define RZN1_RTC_WEEK 0x20
+#define RZN1_RTC_DAY 0x24
+#define RZN1_RTC_MONTH 0x28
+#define RZN1_RTC_YEAR 0x2c
+
+#define RZN1_RTC_SUBU 0x38
+#define   RZN1_RTC_SUBU_DEV BIT(7)
+#define   RZN1_RTC_SUBU_DECR BIT(6)
+
+#define RZN1_RTC_ALM 0x40
+#define RZN1_RTC_ALH 0x44
+#define RZN1_RTC_ALW 0x48
+
+#define RZN1_RTC_SECC 0x4c
+#define RZN1_RTC_MINC 0x50
+#define RZN1_RTC_HOURC 0x54
+#define RZN1_RTC_WEEKC 0x58
+#define RZN1_RTC_DAYC 0x5c
+#define RZN1_RTC_MONTHC 0x60
+#define RZN1_RTC_YEARC 0x64
+
+struct rzn1_rtc {
+       struct rtc_device *rtcdev;
+       void __iomem *base;
+};
+
+static void rzn1_rtc_get_time_snapshot(struct rzn1_rtc *rtc, struct rtc_time *tm)
+{
+       tm->tm_sec = readl(rtc->base + RZN1_RTC_SECC);
+       tm->tm_min = readl(rtc->base + RZN1_RTC_MINC);
+       tm->tm_hour = readl(rtc->base + RZN1_RTC_HOURC);
+       tm->tm_wday = readl(rtc->base + RZN1_RTC_WEEKC);
+       tm->tm_mday = readl(rtc->base + RZN1_RTC_DAYC);
+       tm->tm_mon = readl(rtc->base + RZN1_RTC_MONTHC);
+       tm->tm_year = readl(rtc->base + RZN1_RTC_YEARC);
+}
+
+static unsigned int rzn1_rtc_tm_to_wday(struct rtc_time *tm)
+{
+       time64_t time;
+       unsigned int days;
+       u32 secs;
+
+       time = rtc_tm_to_time64(tm);
+       days = div_s64_rem(time, 86400, &secs);
+
+       /* day of the week, 1970-01-01 was a Thursday */
+       return (days + 4) % 7;
+}
+
+static int rzn1_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+       struct rzn1_rtc *rtc = dev_get_drvdata(dev);
+       u32 val, secs;
+
+       /*
+        * The RTC was not started or is stopped and thus does not carry the
+        * proper time/date.
+        */
+       val = readl(rtc->base + RZN1_RTC_CTL2);
+       if (val & RZN1_RTC_CTL2_STOPPED)
+               return -EINVAL;
+
+       rzn1_rtc_get_time_snapshot(rtc, tm);
+       secs = readl(rtc->base + RZN1_RTC_SECC);
+       if (tm->tm_sec != secs)
+               rzn1_rtc_get_time_snapshot(rtc, tm);
+
+       tm->tm_sec = bcd2bin(tm->tm_sec);
+       tm->tm_min = bcd2bin(tm->tm_min);
+       tm->tm_hour = bcd2bin(tm->tm_hour);
+       tm->tm_wday = bcd2bin(tm->tm_wday);
+       tm->tm_mday = bcd2bin(tm->tm_mday);
+       tm->tm_mon = bcd2bin(tm->tm_mon);
+       tm->tm_year = bcd2bin(tm->tm_year);
+
+       return 0;
+}
+
+static int rzn1_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+       struct rzn1_rtc *rtc = dev_get_drvdata(dev);
+       u32 val;
+       int ret;
+
+       tm->tm_sec = bin2bcd(tm->tm_sec);
+       tm->tm_min = bin2bcd(tm->tm_min);
+       tm->tm_hour = bin2bcd(tm->tm_hour);
+       tm->tm_wday = bin2bcd(rzn1_rtc_tm_to_wday(tm));
+       tm->tm_mday = bin2bcd(tm->tm_mday);
+       tm->tm_mon = bin2bcd(tm->tm_mon);
+       tm->tm_year = bin2bcd(tm->tm_year);
+
+       val = readl(rtc->base + RZN1_RTC_CTL2);
+       if (!(val & RZN1_RTC_CTL2_STOPPED)) {
+               /* Hold the counter if it was counting up */
+               writel(RZN1_RTC_CTL2_WAIT, rtc->base + RZN1_RTC_CTL2);
+
+               /* Wait for the counter to stop: two 32k clock cycles */
+               usleep_range(61, 100);
+               ret = readl_poll_timeout(rtc->base + RZN1_RTC_CTL2, val,
+                                        val & RZN1_RTC_CTL2_WST, 0, 100);
+               if (ret)
+                       return ret;
+       }
+
+       writel(tm->tm_sec, rtc->base + RZN1_RTC_SEC);
+       writel(tm->tm_min, rtc->base + RZN1_RTC_MIN);
+       writel(tm->tm_hour, rtc->base + RZN1_RTC_HOUR);
+       writel(tm->tm_wday, rtc->base + RZN1_RTC_WEEK);
+       writel(tm->tm_mday, rtc->base + RZN1_RTC_DAY);
+       writel(tm->tm_mon, rtc->base + RZN1_RTC_MONTH);
+       writel(tm->tm_year, rtc->base + RZN1_RTC_YEAR);
+       writel(0, rtc->base + RZN1_RTC_CTL2);
+
+       return 0;
+}
+
+static irqreturn_t rzn1_rtc_alarm_irq(int irq, void *dev_id)
+{
+       struct rzn1_rtc *rtc = dev_id;
+
+       rtc_update_irq(rtc->rtcdev, 1, RTC_AF | RTC_IRQF);
+
+       return IRQ_HANDLED;
+}
+
+static int rzn1_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
+{
+       struct rzn1_rtc *rtc = dev_get_drvdata(dev);
+       u32 ctl1 = readl(rtc->base + RZN1_RTC_CTL1);
+
+       if (enable)
+               ctl1 |= RZN1_RTC_CTL1_ALME;
+       else
+               ctl1 &= ~RZN1_RTC_CTL1_ALME;
+
+       writel(ctl1, rtc->base + RZN1_RTC_CTL1);
+
+       return 0;
+}
+
+static int rzn1_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct rzn1_rtc *rtc = dev_get_drvdata(dev);
+       struct rtc_time *tm = &alrm->time;
+       unsigned int min, hour, wday, delta_days;
+       time64_t alarm;
+       u32 ctl1;
+       int ret;
+
+       ret = rzn1_rtc_read_time(dev, tm);
+       if (ret)
+               return ret;
+
+       min = readl(rtc->base + RZN1_RTC_ALM);
+       hour = readl(rtc->base + RZN1_RTC_ALH);
+       wday = readl(rtc->base + RZN1_RTC_ALW);
+
+       tm->tm_sec = 0;
+       tm->tm_min = bcd2bin(min);
+       tm->tm_hour = bcd2bin(hour);
+       delta_days = ((fls(wday) - 1) - tm->tm_wday + 7) % 7;
+       tm->tm_wday = fls(wday) - 1;
+
+       if (delta_days) {
+               alarm = rtc_tm_to_time64(tm) + (delta_days * 86400);
+               rtc_time64_to_tm(alarm, tm);
+       }
+
+       ctl1 = readl(rtc->base + RZN1_RTC_CTL1);
+       alrm->enabled = !!(ctl1 & RZN1_RTC_CTL1_ALME);
+
+       return 0;
+}
+
+static int rzn1_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct rzn1_rtc *rtc = dev_get_drvdata(dev);
+       struct rtc_time *tm = &alrm->time, tm_now;
+       unsigned long alarm, farest;
+       unsigned int days_ahead, wday;
+       int ret;
+
+       ret = rzn1_rtc_read_time(dev, &tm_now);
+       if (ret)
+               return ret;
+
+       /* We cannot set alarms more than one week ahead */
+       farest = rtc_tm_to_time64(&tm_now) + (7 * 86400);
+       alarm = rtc_tm_to_time64(tm);
+       if (time_after(alarm, farest))
+               return -ERANGE;
+
+       /* Convert alarm day into week day */
+       days_ahead = tm->tm_mday - tm_now.tm_mday;
+       wday = (tm_now.tm_wday + days_ahead) % 7;
+
+       writel(bin2bcd(tm->tm_min), rtc->base + RZN1_RTC_ALM);
+       writel(bin2bcd(tm->tm_hour), rtc->base + RZN1_RTC_ALH);
+       writel(BIT(wday), rtc->base + RZN1_RTC_ALW);
+
+       rzn1_rtc_alarm_irq_enable(dev, alrm->enabled);
+
+       return 0;
+}
+
+static int rzn1_rtc_read_offset(struct device *dev, long *offset)
+{
+       struct rzn1_rtc *rtc = dev_get_drvdata(dev);
+       unsigned int ppb_per_step;
+       bool subtract;
+       u32 val;
+
+       val = readl(rtc->base + RZN1_RTC_SUBU);
+       ppb_per_step = val & RZN1_RTC_SUBU_DEV ? 1017 : 3051;
+       subtract = val & RZN1_RTC_SUBU_DECR;
+       val &= 0x3F;
+
+       if (!val)
+               *offset = 0;
+       else if (subtract)
+               *offset = -(((~val) & 0x3F) + 1) * ppb_per_step;
+       else
+               *offset = (val - 1) * ppb_per_step;
+
+       return 0;
+}
+
+static int rzn1_rtc_set_offset(struct device *dev, long offset)
+{
+       struct rzn1_rtc *rtc = dev_get_drvdata(dev);
+       int stepsh, stepsl, steps;
+       u32 subu = 0, ctl2;
+       int ret;
+
+       /*
+        * Check which resolution mode (every 20 or 60s) can be used.
+        * Between 2 and 124 clock pulses can be added or subtracted.
+        *
+        * In 20s mode, the minimum resolution is 2 / (32768 * 20) which is
+        * close to 3051 ppb. In 60s mode, the resolution is closer to 1017.
+        */
+       stepsh = DIV_ROUND_CLOSEST(offset, 1017);
+       stepsl = DIV_ROUND_CLOSEST(offset, 3051);
+
+       if (stepsh >= -0x3E && stepsh <= 0x3E) {
+               /* 1017 ppb per step */
+               steps = stepsh;
+               subu |= RZN1_RTC_SUBU_DEV;
+       } else if (stepsl >= -0x3E && stepsl <= 0x3E) {
+               /* 3051 ppb per step */
+               steps = stepsl;
+       } else {
+               return -ERANGE;
+       }
+
+       if (!steps)
+               return 0;
+
+       if (steps > 0) {
+               subu |= steps + 1;
+       } else {
+               subu |= RZN1_RTC_SUBU_DECR;
+               subu |= (~(-steps - 1)) & 0x3F;
+       }
+
+       ret = readl_poll_timeout(rtc->base + RZN1_RTC_CTL2, ctl2,
+                                !(ctl2 & RZN1_RTC_CTL2_WUST), 100, 2000000);
+       if (ret)
+               return ret;
+
+       writel(subu, rtc->base + RZN1_RTC_SUBU);
+
+       return 0;
+}
+
+static const struct rtc_class_ops rzn1_rtc_ops = {
+       .read_time = rzn1_rtc_read_time,
+       .set_time = rzn1_rtc_set_time,
+       .read_alarm = rzn1_rtc_read_alarm,
+       .set_alarm = rzn1_rtc_set_alarm,
+       .alarm_irq_enable = rzn1_rtc_alarm_irq_enable,
+       .read_offset = rzn1_rtc_read_offset,
+       .set_offset = rzn1_rtc_set_offset,
+};
+
+static int rzn1_rtc_probe(struct platform_device *pdev)
+{
+       struct rzn1_rtc *rtc;
+       int alarm_irq;
+       int ret;
+
+       rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+       if (!rtc)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, rtc);
+
+       rtc->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(rtc->base))
+               return dev_err_probe(&pdev->dev, PTR_ERR(rtc->base), "Missing reg\n");
+
+       alarm_irq = platform_get_irq(pdev, 0);
+       if (alarm_irq < 0)
+               return alarm_irq;
+
+       rtc->rtcdev = devm_rtc_allocate_device(&pdev->dev);
+       if (IS_ERR(rtc->rtcdev))
+               return PTR_ERR(rtc->rtcdev);
+
+       rtc->rtcdev->range_min = RTC_TIMESTAMP_BEGIN_2000;
+       rtc->rtcdev->range_max = RTC_TIMESTAMP_END_2099;
+       rtc->rtcdev->ops = &rzn1_rtc_ops;
+       set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtcdev->features);
+       clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtcdev->features);
+
+       devm_pm_runtime_enable(&pdev->dev);
+       ret = pm_runtime_resume_and_get(&pdev->dev);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Ensure the clock counter is enabled.
+        * Set 24-hour mode and possible oscillator offset compensation in SUBU mode.
+        */
+       writel(RZN1_RTC_CTL0_CE | RZN1_RTC_CTL0_AMPM | RZN1_RTC_CTL0_SLSB_SUBU,
+              rtc->base + RZN1_RTC_CTL0);
+
+       /* Disable all interrupts */
+       writel(0, rtc->base + RZN1_RTC_CTL1);
+
+       ret = devm_request_irq(&pdev->dev, alarm_irq, rzn1_rtc_alarm_irq, 0,
+                              dev_name(&pdev->dev), rtc);
+       if (ret) {
+               dev_err(&pdev->dev, "RTC timer interrupt not available\n");
+               goto dis_runtime_pm;
+       }
+
+       ret = devm_rtc_register_device(rtc->rtcdev);
+       if (ret)
+               goto dis_runtime_pm;
+
+       return 0;
+
+dis_runtime_pm:
+       pm_runtime_put(&pdev->dev);
+
+       return ret;
+}
+
+static int rzn1_rtc_remove(struct platform_device *pdev)
+{
+       pm_runtime_put(&pdev->dev);
+
+       return 0;
+}
+
+static const struct of_device_id rzn1_rtc_of_match[] = {
+       { .compatible   = "renesas,rzn1-rtc" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, rzn1_rtc_of_match);
+
+static struct platform_driver rzn1_rtc_driver = {
+       .probe = rzn1_rtc_probe,
+       .remove = rzn1_rtc_remove,
+       .driver = {
+               .name   = "rzn1-rtc",
+               .of_match_table = rzn1_rtc_of_match,
+       },
+};
+module_platform_driver(rzn1_rtc_driver);
+
+MODULE_AUTHOR("Michel Pollet <Michel.Pollet@bp.renesas.com");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com");
+MODULE_DESCRIPTION("RZ/N1 RTC driver");
+MODULE_LICENSE("GPL");
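
rzn1_rtc_set_offset() above picks between the two SUBU resolutions (roughly 1017 ppb per step in 60 s mode, 3051 ppb per step in 20 s mode) and falls back to -ERANGE when neither fits the ±0x3E step range. A standalone sketch of that selection for one requested offset (DIV_ROUND_CLOSEST is open-coded for a userspace build; values are illustrative):

#include <stdio.h>

#define STEP_FINE   1017	/* ~ppb per step in 60 s mode */
#define STEP_COARSE 3051	/* ~ppb per step in 20 s mode */

static long div_round_closest(long x, long d)
{
	return (x >= 0) ? (x + d / 2) / d : (x - d / 2) / d;
}

int main(void)
{
	long offset = 12500;	/* requested correction, ppb */
	long fine = div_round_closest(offset, STEP_FINE);
	long coarse = div_round_closest(offset, STEP_COARSE);

	if (fine >= -0x3E && fine <= 0x3E)
		printf("60s mode, %ld steps of ~%d ppb\n", fine, STEP_FINE);
	else if (coarse >= -0x3E && coarse <= 0x3E)
		printf("20s mode, %ld steps of ~%d ppb\n", coarse, STEP_COARSE);
	else
		printf("-ERANGE\n");
	return 0;
}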
index 5252ce4..5754072 100644 (file)
 #define SUN6I_LOSC_OUT_GATING                  0x0060
 #define SUN6I_LOSC_OUT_GATING_EN_OFFSET                0
 
+/* General-purpose data */
+#define SUN6I_GP_DATA                          0x0100
+#define SUN6I_GP_DATA_SIZE                     0x20
+
 /*
  * Get date values
  */
@@ -679,6 +683,39 @@ static const struct rtc_class_ops sun6i_rtc_ops = {
        .alarm_irq_enable       = sun6i_rtc_alarm_irq_enable
 };
 
+static int sun6i_rtc_nvmem_read(void *priv, unsigned int offset, void *_val, size_t bytes)
+{
+       struct sun6i_rtc_dev *chip = priv;
+       u32 *val = _val;
+       int i;
+
+       for (i = 0; i < bytes / 4; ++i)
+               val[i] = readl(chip->base + SUN6I_GP_DATA + offset + 4 * i);
+
+       return 0;
+}
+
+static int sun6i_rtc_nvmem_write(void *priv, unsigned int offset, void *_val, size_t bytes)
+{
+       struct sun6i_rtc_dev *chip = priv;
+       u32 *val = _val;
+       int i;
+
+       for (i = 0; i < bytes / 4; ++i)
+               writel(val[i], chip->base + SUN6I_GP_DATA + offset + 4 * i);
+
+       return 0;
+}
+
+static struct nvmem_config sun6i_rtc_nvmem_cfg = {
+       .type           = NVMEM_TYPE_BATTERY_BACKED,
+       .reg_read       = sun6i_rtc_nvmem_read,
+       .reg_write      = sun6i_rtc_nvmem_write,
+       .size           = SUN6I_GP_DATA_SIZE,
+       .word_size      = 4,
+       .stride         = 4,
+};
+
 #ifdef CONFIG_PM_SLEEP
 /* Enable IRQ wake on suspend, to wake up from RTC. */
 static int sun6i_rtc_suspend(struct device *dev)
@@ -812,6 +849,11 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       sun6i_rtc_nvmem_cfg.priv = chip;
+       ret = devm_rtc_nvmem_register(chip->rtc, &sun6i_rtc_nvmem_cfg);
+       if (ret)
+               return ret;
+
        dev_info(&pdev->dev, "RTC enabled\n");
 
        return 0;
index 1cb9daf..fa8df50 100644 (file)
@@ -103,7 +103,11 @@ struct subchannel {
        struct work_struct todo_work;
        struct schib_config config;
        u64 dma_mask;
-       char *driver_override; /* Driver name to force a match */
+       /*
+        * Driver name to force a match.  Do not set directly, because core
+        * frees it.  Use driver_set_override() to set or clear it.
+        */
+       const char *driver_override;
 } __attribute__ ((aligned(8)));
 
 DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
index fa82933..913b6dd 100644 (file)
@@ -338,31 +338,11 @@ static ssize_t driver_override_store(struct device *dev,
                                     const char *buf, size_t count)
 {
        struct subchannel *sch = to_subchannel(dev);
-       char *driver_override, *old, *cp;
-
-       /* We need to keep extra room for a newline */
-       if (count >= (PAGE_SIZE - 1))
-               return -EINVAL;
-
-       driver_override = kstrndup(buf, count, GFP_KERNEL);
-       if (!driver_override)
-               return -ENOMEM;
-
-       cp = strchr(driver_override, '\n');
-       if (cp)
-               *cp = '\0';
-
-       device_lock(dev);
-       old = sch->driver_override;
-       if (strlen(driver_override)) {
-               sch->driver_override = driver_override;
-       } else {
-               kfree(driver_override);
-               sch->driver_override = NULL;
-       }
-       device_unlock(dev);
+       int ret;
 
-       kfree(old);
+       ret = driver_set_override(dev, &sch->driver_override, buf, count);
+       if (ret)
+               return ret;
 
        return count;
 }
index 8d1b277..0c2be94 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/idals.h>
 
 #include "vfio_ccw_cp.h"
+#include "vfio_ccw_private.h"
 
 struct pfn_array {
        /* Starting guest physical I/O address. */
@@ -98,17 +99,17 @@ static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
  * If the pin request partially succeeds, or fails completely,
  * all pages are left unpinned and a negative error value is returned.
  */
-static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
+static int pfn_array_pin(struct pfn_array *pa, struct vfio_device *vdev)
 {
        int ret = 0;
 
-       ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
+       ret = vfio_pin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr,
                             IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
 
        if (ret < 0) {
                goto err_out;
        } else if (ret > 0 && ret != pa->pa_nr) {
-               vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
+               vfio_unpin_pages(vdev, pa->pa_iova_pfn, ret);
                ret = -EINVAL;
                goto err_out;
        }
@@ -122,11 +123,11 @@ err_out:
 }
 
 /* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
+static void pfn_array_unpin_free(struct pfn_array *pa, struct vfio_device *vdev)
 {
        /* Only unpin if any pages were pinned to begin with */
        if (pa->pa_nr)
-               vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+               vfio_unpin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr);
        pa->pa_nr = 0;
        kfree(pa->pa_iova_pfn);
 }
@@ -190,8 +191,7 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
  * Within the domain (@mdev), copy @n bytes from a guest physical
  * address (@iova) to a host physical address (@to).
  */
-static long copy_from_iova(struct device *mdev,
-                          void *to, u64 iova,
+static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
                           unsigned long n)
 {
        struct pfn_array pa = {0};
@@ -203,9 +203,9 @@ static long copy_from_iova(struct device *mdev,
        if (ret < 0)
                return ret;
 
-       ret = pfn_array_pin(&pa, mdev);
+       ret = pfn_array_pin(&pa, vdev);
        if (ret < 0) {
-               pfn_array_unpin_free(&pa, mdev);
+               pfn_array_unpin_free(&pa, vdev);
                return ret;
        }
 
@@ -226,7 +226,7 @@ static long copy_from_iova(struct device *mdev,
                        break;
        }
 
-       pfn_array_unpin_free(&pa, mdev);
+       pfn_array_unpin_free(&pa, vdev);
 
        return l;
 }
@@ -423,11 +423,13 @@ static int ccwchain_loop_tic(struct ccwchain *chain,
 
 static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
 {
+       struct vfio_device *vdev =
+               &container_of(cp, struct vfio_ccw_private, cp)->vdev;
        struct ccwchain *chain;
        int len, ret;
 
        /* Copy 2K (the most we support today) of possible CCWs */
-       len = copy_from_iova(cp->mdev, cp->guest_cp, cda,
+       len = copy_from_iova(vdev, cp->guest_cp, cda,
                             CCWCHAIN_LEN_MAX * sizeof(struct ccw1));
        if (len)
                return len;
@@ -508,6 +510,8 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
                                 int idx,
                                 struct channel_program *cp)
 {
+       struct vfio_device *vdev =
+               &container_of(cp, struct vfio_ccw_private, cp)->vdev;
        struct ccw1 *ccw;
        struct pfn_array *pa;
        u64 iova;
@@ -526,7 +530,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
        if (ccw_is_idal(ccw)) {
                /* Read first IDAW to see if it's 4K-aligned or not. */
                /* All subsequent IDAws will be 4K-aligned. */
-               ret = copy_from_iova(cp->mdev, &iova, ccw->cda, sizeof(iova));
+               ret = copy_from_iova(vdev, &iova, ccw->cda, sizeof(iova));
                if (ret)
                        return ret;
        } else {
@@ -555,7 +559,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
 
        if (ccw_is_idal(ccw)) {
                /* Copy guest IDAL into host IDAL */
-               ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idal_len);
+               ret = copy_from_iova(vdev, idaws, ccw->cda, idal_len);
                if (ret)
                        goto out_unpin;
 
@@ -574,7 +578,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
        }
 
        if (ccw_does_data_transfer(ccw)) {
-               ret = pfn_array_pin(pa, cp->mdev);
+               ret = pfn_array_pin(pa, vdev);
                if (ret < 0)
                        goto out_unpin;
        } else {
@@ -590,7 +594,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
        return 0;
 
 out_unpin:
-       pfn_array_unpin_free(pa, cp->mdev);
+       pfn_array_unpin_free(pa, vdev);
 out_free_idaws:
        kfree(idaws);
 out_init:
@@ -632,8 +636,10 @@ static int ccwchain_fetch_one(struct ccwchain *chain,
  * Returns:
  *   %0 on success and a negative error value on failure.
  */
-int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
+int cp_init(struct channel_program *cp, union orb *orb)
 {
+       struct vfio_device *vdev =
+               &container_of(cp, struct vfio_ccw_private, cp)->vdev;
        /* custom ratelimit used to avoid flood during guest IPL */
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
        int ret;
@@ -650,11 +656,12 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
         * the problem if something does break.
         */
        if (!orb->cmd.pfch && __ratelimit(&ratelimit_state))
-               dev_warn(mdev, "Prefetching channel program even though prefetch not specified in ORB");
+               dev_warn(
+                       vdev->dev,
+                       "Prefetching channel program even though prefetch not specified in ORB");
 
        INIT_LIST_HEAD(&cp->ccwchain_list);
        memcpy(&cp->orb, orb, sizeof(*orb));
-       cp->mdev = mdev;
 
        /* Build a ccwchain for the first CCW segment */
        ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);
@@ -682,6 +689,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
  */
 void cp_free(struct channel_program *cp)
 {
+       struct vfio_device *vdev =
+               &container_of(cp, struct vfio_ccw_private, cp)->vdev;
        struct ccwchain *chain, *temp;
        int i;
 
@@ -691,7 +700,7 @@ void cp_free(struct channel_program *cp)
        cp->initialized = false;
        list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
                for (i = 0; i < chain->ch_len; i++) {
-                       pfn_array_unpin_free(chain->ch_pa + i, cp->mdev);
+                       pfn_array_unpin_free(chain->ch_pa + i, vdev);
                        ccwchain_cda_free(chain, i);
                }
                ccwchain_free(chain);
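
With the mdev pointer dropped from struct channel_program, the cp code above recovers the owning vfio_ccw_private (and hence its embedded vfio_device) via container_of() on the embedded cp member. A standalone sketch of that recovery (simplified structs and a userspace container_of, for illustration only):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct channel_program_ex { int initialized; };
struct vfio_device_ex { const char *name; };

struct private_ex {
	struct vfio_device_ex vdev;
	struct channel_program_ex cp;
};

int main(void)
{
	struct private_ex priv = { .vdev = { "vfio-ccw" } };
	struct channel_program_ex *cp = &priv.cp;

	/* step back from the embedded member to the enclosing structure */
	struct vfio_device_ex *vdev =
		&container_of(cp, struct private_ex, cp)->vdev;

	printf("%s\n", vdev->name);
	return 0;
}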
index ba31240..e4c4361 100644 (file)
 struct channel_program {
        struct list_head ccwchain_list;
        union orb orb;
-       struct device *mdev;
        bool initialized;
        struct ccw1 *guest_cp;
 };
 
-extern int cp_init(struct channel_program *cp, struct device *mdev,
-                  union orb *orb);
+extern int cp_init(struct channel_program *cp, union orb *orb);
 extern void cp_free(struct channel_program *cp);
 extern int cp_prefetch(struct channel_program *cp);
 extern union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
index e435a9c..8483a26 100644 (file)
@@ -262,8 +262,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
                        errstr = "transport mode";
                        goto err_out;
                }
-               io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
-                                             orb);
+               io_region->ret_code = cp_init(&private->cp, orb);
                if (io_region->ret_code) {
                        VFIO_CCW_MSG_EVENT(2,
                                           "%pUl (%x.%x.%04x): cp_init=%d\n",
index c4d60cd..b49e2e9 100644 (file)
@@ -183,7 +183,7 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
 
        private->nb.notifier_call = vfio_ccw_mdev_notifier;
 
-       ret = vfio_register_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
+       ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY,
                                     &events, &private->nb);
        if (ret)
                return ret;
@@ -204,8 +204,7 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
 
 out_unregister:
        vfio_ccw_unregister_dev_regions(private);
-       vfio_unregister_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
-                                &private->nb);
+       vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb);
        return ret;
 }
 
@@ -223,7 +222,7 @@ static void vfio_ccw_mdev_close_device(struct vfio_device *vdev)
 
        cp_free(&private->cp);
        vfio_ccw_unregister_dev_regions(private);
-       vfio_unregister_notifier(vdev->dev, VFIO_IOMMU_NOTIFY, &private->nb);
+       vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb);
 }
 
 static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
index ee0a3bf..a7d2a95 100644 (file)
@@ -124,8 +124,7 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
                q->saved_isc = VFIO_AP_ISC_INVALID;
        }
        if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) {
-               vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
-                                &q->saved_pfn, 1);
+               vfio_unpin_pages(&q->matrix_mdev->vdev, &q->saved_pfn, 1);
                q->saved_pfn = 0;
        }
 }
@@ -258,7 +257,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
                return status;
        }
 
-       ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1,
+       ret = vfio_pin_pages(&q->matrix_mdev->vdev, &g_pfn, 1,
                             IOMMU_READ | IOMMU_WRITE, &h_pfn);
        switch (ret) {
        case 1:
@@ -301,7 +300,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
                break;
        case AP_RESPONSE_OTHERWISE_CHANGED:
                /* We could not modify IRQ setings: clear new configuration */
-               vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1);
+               vfio_unpin_pages(&q->matrix_mdev->vdev, &g_pfn, 1);
                kvm_s390_gisc_unregister(kvm, isc);
                break;
        default:
@@ -1250,7 +1249,7 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
                struct vfio_iommu_type1_dma_unmap *unmap = data;
                unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;
 
-               vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1);
+               vfio_unpin_pages(&matrix_mdev->vdev, &g_pfn, 1);
                return NOTIFY_OK;
        }
 
@@ -1285,25 +1284,6 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
        }
 }
 
-static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
-                                      unsigned long action, void *data)
-{
-       int notify_rc = NOTIFY_OK;
-       struct ap_matrix_mdev *matrix_mdev;
-
-       if (action != VFIO_GROUP_NOTIFY_SET_KVM)
-               return NOTIFY_OK;
-
-       matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
-
-       if (!data)
-               vfio_ap_mdev_unset_kvm(matrix_mdev);
-       else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
-               notify_rc = NOTIFY_DONE;
-
-       return notify_rc;
-}
-
 static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
 {
        struct device *dev;
@@ -1403,25 +1383,23 @@ static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
        unsigned long events;
        int ret;
 
-       matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
-       events = VFIO_GROUP_NOTIFY_SET_KVM;
+       if (!vdev->kvm)
+               return -EINVAL;
 
-       ret = vfio_register_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
-                                    &events, &matrix_mdev->group_notifier);
+       ret = vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
        if (ret)
                return ret;
 
        matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
        events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
-       ret = vfio_register_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
-                                    &events, &matrix_mdev->iommu_notifier);
+       ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY, &events,
+                                    &matrix_mdev->iommu_notifier);
        if (ret)
-               goto out_unregister_group;
+               goto err_kvm;
        return 0;
 
-out_unregister_group:
-       vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
-                                &matrix_mdev->group_notifier);
+err_kvm:
+       vfio_ap_mdev_unset_kvm(matrix_mdev);
        return ret;
 }
 
@@ -1430,10 +1408,8 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
        struct ap_matrix_mdev *matrix_mdev =
                container_of(vdev, struct ap_matrix_mdev, vdev);
 
-       vfio_unregister_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
+       vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY,
                                 &matrix_mdev->iommu_notifier);
-       vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
-                                &matrix_mdev->group_notifier);
        vfio_ap_mdev_unset_kvm(matrix_mdev);
 }
 
index 648fcaf..a26efd8 100644 (file)
@@ -81,8 +81,6 @@ struct ap_matrix {
  * @node:      allows the ap_matrix_mdev struct to be added to a list
  * @matrix:    the adapters, usage domains and control domains assigned to the
  *             mediated matrix device.
- * @group_notifier: notifier block used for specifying callback function for
- *                 handling the VFIO_GROUP_NOTIFY_SET_KVM event
  * @iommu_notifier: notifier block used for specifying callback function for
  *                 handling the VFIO_IOMMU_NOTIFY_DMA_UNMAP even
  * @kvm:       the struct holding guest's state
@@ -94,7 +92,6 @@ struct ap_matrix_mdev {
        struct vfio_device vdev;
        struct list_head node;
        struct ap_matrix matrix;
-       struct notifier_block group_notifier;
        struct notifier_block iommu_notifier;
        struct kvm *kvm;
        crypto_hook pqap_hook;
index d35e7a3..97e51c3 100644 (file)
@@ -62,6 +62,7 @@ struct virtio_ccw_device {
        unsigned int revision; /* Transport revision */
        wait_queue_head_t wait_q;
        spinlock_t lock;
+       rwlock_t irq_lock;
        struct mutex io_lock; /* Serializes I/O requests */
        struct list_head virtqueues;
        bool is_thinint;
@@ -970,6 +971,10 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
        ccw->flags = 0;
        ccw->count = sizeof(status);
        ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
+       /* We use ssch for setting the status which is a serializing
+        * instruction that guarantees the memory writes have
+        * completed before ssch.
+        */
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
        /* Write failed? We assume status is unchanged. */
        if (ret)
@@ -984,6 +989,30 @@ static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
        return dev_name(&vcdev->cdev->dev);
 }
 
+static void virtio_ccw_synchronize_cbs(struct virtio_device *vdev)
+{
+       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+       struct airq_info *info = vcdev->airq_info;
+
+       if (info) {
+               /*
+                * This device uses adapter interrupts: synchronize with
+                * vring_interrupt() called by virtio_airq_handler()
+                * via the indicator area lock.
+                */
+               write_lock_irq(&info->lock);
+               write_unlock_irq(&info->lock);
+       } else {
+               /* This device uses classic interrupts: synchronize
+                * with vring_interrupt() called by
+                * virtio_ccw_int_handler() via the per-device
+                * irq_lock
+                */
+               write_lock_irq(&vcdev->irq_lock);
+               write_unlock_irq(&vcdev->irq_lock);
+       }
+}
+
 static const struct virtio_config_ops virtio_ccw_config_ops = {
        .get_features = virtio_ccw_get_features,
        .finalize_features = virtio_ccw_finalize_features,
@@ -995,6 +1024,7 @@ static const struct virtio_config_ops virtio_ccw_config_ops = {
        .find_vqs = virtio_ccw_find_vqs,
        .del_vqs = virtio_ccw_del_vqs,
        .bus_name = virtio_ccw_bus_name,
+       .synchronize_cbs = virtio_ccw_synchronize_cbs,
 };
 
 
@@ -1106,6 +1136,8 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
                        vcdev->err = -EIO;
        }
        virtio_ccw_check_activity(vcdev, activity);
+       /* Interrupts are disabled here */
+       read_lock(&vcdev->irq_lock);
        for_each_set_bit(i, indicators(vcdev),
                         sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
                /* The bit clear must happen before the vring kick. */
@@ -1114,6 +1146,7 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
                vq = virtio_ccw_vq_by_ind(vcdev, i);
                vring_interrupt(0, vq);
        }
+       read_unlock(&vcdev->irq_lock);
        if (test_bit(0, indicators2(vcdev))) {
                virtio_config_changed(&vcdev->vdev);
                clear_bit(0, indicators2(vcdev));
@@ -1284,6 +1317,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
        init_waitqueue_head(&vcdev->wait_q);
        INIT_LIST_HEAD(&vcdev->virtqueues);
        spin_lock_init(&vcdev->lock);
+       rwlock_init(&vcdev->irq_lock);
        mutex_init(&vcdev->io_lock);
 
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
index 6e3a041..a9fe515 100644 (file)
@@ -500,7 +500,6 @@ source "drivers/scsi/megaraid/Kconfig.megaraid"
 source "drivers/scsi/mpt3sas/Kconfig"
 source "drivers/scsi/mpi3mr/Kconfig"
 source "drivers/scsi/smartpqi/Kconfig"
-source "drivers/scsi/ufs/Kconfig"
 
 config SCSI_HPTIOP
        tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
index 19814c2..2ad3bc0 100644 (file)
@@ -101,7 +101,6 @@ obj-$(CONFIG_MEGARAID_NEWGEN)       += megaraid/
 obj-$(CONFIG_MEGARAID_SAS)     += megaraid/
 obj-$(CONFIG_SCSI_MPT3SAS)     += mpt3sas/
 obj-$(CONFIG_SCSI_MPI3MR)      += mpi3mr/
-obj-$(CONFIG_SCSI_UFSHCD)      += ufs/
 obj-$(CONFIG_SCSI_ACARD)       += atp870u.o
 obj-$(CONFIG_SCSI_SUNESP)      += esp_scsi.o   sun_esp.o
 obj-$(CONFIG_SCSI_INITIO)      += initio.o
index 429d642..f910e25 100644 (file)
@@ -232,7 +232,7 @@ static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
         */
        rq->req_stat = RS_PENDING;
        if (test_bit(AF_DEGRADED_MODE, &a->flags))
-               /* not suppported for now */;
+               /* not supported for now */;
        else
                build_flash_msg(a, rq);
 
index ac17e3a..6370cdb 100644 (file)
@@ -2182,7 +2182,7 @@ static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ire
        case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
                u16 len = sci_req_tx_bytes(ireq);
 
-               /* likely non-error data underrrun, workaround missing
+               /* likely non-error data underrun, workaround missing
                 * d2h frame from the controller
                 */
                if (d2h->fis_type != FIS_REGD2H) {
index 092a971..bbd1faf 100644 (file)
@@ -33,4 +33,4 @@ obj-$(CONFIG_SCSI_LPFC) := lpfc.o
 lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \
        lpfc_hbadisc.o  lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o   \
        lpfc_scsi.o lpfc_attr.o lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o \
-       lpfc_nvme.o lpfc_nvmet.o
+       lpfc_nvme.o lpfc_nvmet.o lpfc_vmid.o
index b0775be..b1be0dd 100644 (file)
@@ -671,6 +671,9 @@ int lpfc_vmid_cmd(struct lpfc_vport *vport,
 int lpfc_vmid_hash_fn(const char *vmid, int len);
 struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
                                              uint32_t hash, uint8_t *buf);
+int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
+                       enum dma_data_direction iodir,
+                       union lpfc_vmid_io_tag *tag);
 void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
 int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
 
index 748c532..7b8cf67 100644 (file)
@@ -1736,6 +1736,28 @@ struct lpfc_fdmi_reg_portattr {
 #define PCI_DEVICE_ID_TOMCAT        0x0714
 #define PCI_DEVICE_ID_SKYHAWK       0x0724
 #define PCI_DEVICE_ID_SKYHAWK_VF    0x072c
+#define PCI_VENDOR_ID_ATTO          0x117c
+#define PCI_DEVICE_ID_CLRY_16XE     0x0064
+#define PCI_DEVICE_ID_CLRY_161E     0x0063
+#define PCI_DEVICE_ID_CLRY_162E     0x0064
+#define PCI_DEVICE_ID_CLRY_164E     0x0065
+#define PCI_DEVICE_ID_CLRY_16XP     0x0094
+#define PCI_DEVICE_ID_CLRY_161P     0x00a0
+#define PCI_DEVICE_ID_CLRY_162P     0x0094
+#define PCI_DEVICE_ID_CLRY_164P     0x00a1
+#define PCI_DEVICE_ID_CLRY_32XE     0x0094
+#define PCI_DEVICE_ID_CLRY_321E     0x00a2
+#define PCI_DEVICE_ID_CLRY_322E     0x00a3
+#define PCI_DEVICE_ID_CLRY_324E     0x00ac
+#define PCI_DEVICE_ID_CLRY_32XP     0x00bb
+#define PCI_DEVICE_ID_CLRY_321P     0x00bc
+#define PCI_DEVICE_ID_CLRY_322P     0x00bd
+#define PCI_DEVICE_ID_CLRY_324P     0x00be
+#define PCI_DEVICE_ID_TLFC_2        0x0064
+#define PCI_DEVICE_ID_TLFC_2XX2     0x4064
+#define PCI_DEVICE_ID_TLFC_3        0x0094
+#define PCI_DEVICE_ID_TLFC_3162     0x40a6
+#define PCI_DEVICE_ID_TLFC_3322     0x40a7
 
 #define JEDEC_ID_ADDRESS            0x0080001c
 #define FIREFLY_JEDEC_ID            0x1ACC
index 6a90e6e..a1b9be2 100644 (file)
@@ -124,5 +124,35 @@ const struct pci_device_id lpfc_id_table[] = {
                PCI_ANY_ID, PCI_ANY_ID, },
        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
                PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+               PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_161E, },
+       {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+               PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_162E, },
+       {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+               PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_164E, },
+       {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+               PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_161P, },
+       {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+               PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_162P, },
+       {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+               PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_164P, },
+       {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+               PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_321E, },
+       {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+               PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_322E, },
+       {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+               PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_324E, },
+       {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+               PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_321P, },
+       {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+               PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_322P, },
+       {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+               PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_324P, },
+       {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_2,
+               PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_2XX2, },
+       {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3,
+               PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3162, },
+       {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3,
+               PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3322, },
        { 0 }
 };
index 2bffaa6..93b94c6 100644 (file)
@@ -2415,6 +2415,90 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
 }
 
 /**
+ * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
+ * @phba: pointer to lpfc hba data structure.
+ * @mdp: pointer to the data structure to hold the derived model name.
+ * @descp: pointer to the data structure to hold the derived description.
+ *
+ * This routine retrieves HBA's description based on its registered PCI device
+ * ID. The @descp passed into this function points to an array of 256 chars. It
+ * shall be returned with the model name, maximum speed, and the host bus type.
+ * The @mdp passed into this function points to an array of 80 chars. When the
+ * function returns, the @mdp will be filled with the model name.
+ **/
+static void
+lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
+{
+       uint16_t sub_dev_id = phba->pcidev->subsystem_device;
+       char *model = "<Unknown>";
+       int tbolt = 0;
+
+       switch (sub_dev_id) {
+       case PCI_DEVICE_ID_CLRY_161E:
+               model = "161E";
+               break;
+       case PCI_DEVICE_ID_CLRY_162E:
+               model = "162E";
+               break;
+       case PCI_DEVICE_ID_CLRY_164E:
+               model = "164E";
+               break;
+       case PCI_DEVICE_ID_CLRY_161P:
+               model = "161P";
+               break;
+       case PCI_DEVICE_ID_CLRY_162P:
+               model = "162P";
+               break;
+       case PCI_DEVICE_ID_CLRY_164P:
+               model = "164P";
+               break;
+       case PCI_DEVICE_ID_CLRY_321E:
+               model = "321E";
+               break;
+       case PCI_DEVICE_ID_CLRY_322E:
+               model = "322E";
+               break;
+       case PCI_DEVICE_ID_CLRY_324E:
+               model = "324E";
+               break;
+       case PCI_DEVICE_ID_CLRY_321P:
+               model = "321P";
+               break;
+       case PCI_DEVICE_ID_CLRY_322P:
+               model = "322P";
+               break;
+       case PCI_DEVICE_ID_CLRY_324P:
+               model = "324P";
+               break;
+       case PCI_DEVICE_ID_TLFC_2XX2:
+               model = "2XX2";
+               tbolt = 1;
+               break;
+       case PCI_DEVICE_ID_TLFC_3162:
+               model = "3162";
+               tbolt = 1;
+               break;
+       case PCI_DEVICE_ID_TLFC_3322:
+               model = "3322";
+               tbolt = 1;
+               break;
+       default:
+               model = "Unknown";
+               break;
+       }
+
+       if (mdp && mdp[0] == '\0')
+               snprintf(mdp, 79, "%s", model);
+
+       if (descp && descp[0] == '\0')
+               snprintf(descp, 255,
+                        "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
+                        (tbolt) ? "ThunderLink FC " : "Celerity FC-",
+                        model,
+                        phba->Port);
+}
+
+/**
  * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
  * @phba: pointer to lpfc hba data structure.
  * @mdp: pointer to the data structure to hold the derived model name.
@@ -2444,6 +2528,11 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
                && descp && descp[0] != '\0')
                return;
 
+       if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
+               lpfc_get_atto_model_desc(phba, mdp, descp);
+               return;
+       }
+
        if (phba->lmt & LMT_64Gb)
                max_speed = 64;
        else if (phba->lmt & LMT_32Gb)
index 5385f4d..335e906 100644 (file)
@@ -1279,6 +1279,19 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 
        /* Words 13 14 15 are for PBDE support */
 
+       /* add the VMID tags as per switch response */
+       if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) {
+               if (phba->pport->vmid_priority_tagging) {
+                       bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+                       bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+                              lpfc_ncmd->cur_iocbq.vmid_tag.cs_ctl_vmid);
+               } else {
+                       bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
+                       bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
+                       wqe->words[31] = lpfc_ncmd->cur_iocbq.vmid_tag.app_id;
+               }
+       }
+
        pwqeq->vport = vport;
        return 0;
 }
@@ -1504,6 +1517,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
        struct lpfc_nvme_fcpreq_priv *freqpriv;
        struct nvme_common_command *sqe;
        uint64_t start = 0;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+       u8 *uuid = NULL;
+       int err;
+       enum dma_data_direction iodir;
+#endif
 
        /* Validate pointers. LLDD fault handling with transport does
         * have timing races.
@@ -1662,6 +1680,33 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
        lpfc_ncmd->ndlp = ndlp;
        lpfc_ncmd->qidx = lpfc_queue_info->qidx;
 
+#if (IS_ENABLED(CONFIG_NVME_FC))
+       /* check the necessary and sufficient condition to support VMID */
+       if (lpfc_is_vmid_enabled(phba) &&
+           (ndlp->vmid_support ||
+            phba->pport->vmid_priority_tagging ==
+            LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
+               /* is the I/O generated by a VM, get the associated virtual */
+               /* entity id */
+               uuid = nvme_fc_io_getuuid(pnvme_fcreq);
+
+               if (uuid) {
+                       if (pnvme_fcreq->io_dir == NVMEFC_FCP_WRITE)
+                               iodir = DMA_TO_DEVICE;
+                       else if (pnvme_fcreq->io_dir == NVMEFC_FCP_READ)
+                               iodir = DMA_FROM_DEVICE;
+                       else
+                               iodir = DMA_NONE;
+
+                       err = lpfc_vmid_get_appid(vport, uuid, iodir,
+                                       (union lpfc_vmid_io_tag *)
+                                               &lpfc_ncmd->cur_iocbq.vmid_tag);
+                       if (!err)
+                               lpfc_ncmd->cur_iocbq.cmd_flag |= LPFC_IO_VMID;
+               }
+       }
+#endif
+
        /*
         * Issue the IO on the WQ indicated by index in the hw_queue_handle.
         * This identifier was created in our hardware queue create callback
index 3b8afa9..d439682 100644 (file)
@@ -87,14 +87,6 @@ static void
 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
 static int
 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
-static void
-lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
-                          struct lpfc_vmid *vmp);
-static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
-                                  *cmd, struct lpfc_vmid *vmp,
-                                  union lpfc_vmid_io_tag *tag);
-static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
-                                   struct lpfc_vmid *vmid);
 
 /**
  * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
@@ -5271,254 +5263,6 @@ void lpfc_poll_timeout(struct timer_list *t)
 }
 
 /*
- * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
- * @vport: The virtual port for which this call is being executed.
- * @hash: calculated hash value
- * @buf: uuid associated with the VE
- * Return the VMID entry associated with the UUID
- * Make sure to acquire the appropriate lock before invoking this routine.
- */
-struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
-                                             u32 hash, u8 *buf)
-{
-       struct lpfc_vmid *vmp;
-
-       hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
-               if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
-                       return vmp;
-       }
-       return NULL;
-}
-
-/*
- * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
- * @vport: The virtual port for which this call is being executed.
- * @hash - calculated hash value
- * @vmp: Pointer to a VMID entry representing a VM sending I/O
- *
- * This routine will insert the newly acquired VMID entity in the hash table.
- * Make sure to acquire the appropriate lock before invoking this routine.
- */
-static void
-lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
-                          struct lpfc_vmid *vmp)
-{
-       hash_add(vport->hash_table, &vmp->hnode, hash);
-}
-
-/*
- * lpfc_vmid_hash_fn - create a hash value of the UUID
- * @vmid: uuid associated with the VE
- * @len: length of the VMID string
- * Returns the calculated hash value
- */
-int lpfc_vmid_hash_fn(const char *vmid, int len)
-{
-       int c;
-       int hash = 0;
-
-       if (len == 0)
-               return 0;
-       while (len--) {
-               c = *vmid++;
-               if (c >= 'A' && c <= 'Z')
-                       c += 'a' - 'A';
-
-               hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
-                       (c >> LPFC_VMID_HASH_SHIFT)) * 19;
-       }
-
-       return hash & LPFC_VMID_HASH_MASK;
-}
-
-/*
- * lpfc_vmid_update_entry - update the vmid entry in the hash table
- * @vport: The virtual port for which this call is being executed.
- * @cmd: address of scsi cmd descriptor
- * @vmp: Pointer to a VMID entry representing a VM sending I/O
- * @tag: VMID tag
- */
-static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
-                                  *cmd, struct lpfc_vmid *vmp,
-                                  union lpfc_vmid_io_tag *tag)
-{
-       u64 *lta;
-
-       if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
-               tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
-       else if (vport->phba->cfg_vmid_app_header)
-               tag->app_id = vmp->un.app_id;
-
-       if (cmd->sc_data_direction == DMA_TO_DEVICE)
-               vmp->io_wr_cnt++;
-       else
-               vmp->io_rd_cnt++;
-
-       /* update the last access timestamp in the table */
-       lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
-       *lta = jiffies;
-}
-
-static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
-                                   struct lpfc_vmid *vmid)
-{
-       u32 hash;
-       struct lpfc_vmid *pvmid;
-
-       if (vport->port_type == LPFC_PHYSICAL_PORT) {
-               vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
-       } else {
-               hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
-               pvmid =
-                   lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
-                                               vmid->host_vmid);
-               if (pvmid)
-                       vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
-               else
-                       vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
-       }
-}
-
-/*
- * lpfc_vmid_get_appid - get the VMID associated with the UUID
- * @vport: The virtual port for which this call is being executed.
- * @uuid: UUID associated with the VE
- * @cmd: address of scsi_cmd descriptor
- * @tag: VMID tag
- * Returns status of the function
- */
-static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
-                              scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag)
-{
-       struct lpfc_vmid *vmp = NULL;
-       int hash, len, rc = -EPERM, i;
-
-       /* check if QFPA is complete */
-       if (lpfc_vmid_is_type_priority_tag(vport) &&
-           !(vport->vmid_flag & LPFC_VMID_QFPA_CMPL) &&
-           (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA)) {
-               vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
-               return -EAGAIN;
-       }
-
-       /* search if the UUID has already been mapped to the VMID */
-       len = strlen(uuid);
-       hash = lpfc_vmid_hash_fn(uuid, len);
-
-       /* search for the VMID in the table */
-       read_lock(&vport->vmid_lock);
-       vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
-
-       /* if found, check if its already registered  */
-       if (vmp  && vmp->flag & LPFC_VMID_REGISTERED) {
-               read_unlock(&vport->vmid_lock);
-               lpfc_vmid_update_entry(vport, cmd, vmp, tag);
-               rc = 0;
-       } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
-                          vmp->flag & LPFC_VMID_DE_REGISTER)) {
-               /* else if register or dereg request has already been sent */
-               /* Hence VMID tag will not be added for this I/O */
-               read_unlock(&vport->vmid_lock);
-               rc = -EBUSY;
-       } else {
-               /* The VMID was not found in the hashtable. At this point, */
-               /* drop the read lock first before proceeding further */
-               read_unlock(&vport->vmid_lock);
-               /* start the process to obtain one as per the */
-               /* type of the VMID indicated */
-               write_lock(&vport->vmid_lock);
-               vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
-
-               /* while the read lock was released, in case the entry was */
-               /* added by other context or is in process of being added */
-               if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
-                       lpfc_vmid_update_entry(vport, cmd, vmp, tag);
-                       write_unlock(&vport->vmid_lock);
-                       return 0;
-               } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
-                       write_unlock(&vport->vmid_lock);
-                       return -EBUSY;
-               }
-
-               /* else search and allocate a free slot in the hash table */
-               if (vport->cur_vmid_cnt < vport->max_vmid) {
-                       for (i = 0; i < vport->max_vmid; i++) {
-                               vmp = vport->vmid + i;
-                               if (vmp->flag == LPFC_VMID_SLOT_FREE)
-                                       break;
-                       }
-                       if (i == vport->max_vmid)
-                               vmp = NULL;
-               } else {
-                       vmp = NULL;
-               }
-
-               if (!vmp) {
-                       write_unlock(&vport->vmid_lock);
-                       return -ENOMEM;
-               }
-
-               /* Add the vmid and register */
-               lpfc_put_vmid_in_hashtable(vport, hash, vmp);
-               vmp->vmid_len = len;
-               memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
-               vmp->io_rd_cnt = 0;
-               vmp->io_wr_cnt = 0;
-               vmp->flag = LPFC_VMID_SLOT_USED;
-
-               vmp->delete_inactive =
-                       vport->vmid_inactivity_timeout ? 1 : 0;
-
-               /* if type priority tag, get next available VMID */
-               if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
-                       lpfc_vmid_assign_cs_ctl(vport, vmp);
-
-               /* allocate the per cpu variable for holding */
-               /* the last access time stamp only if VMID is enabled */
-               if (!vmp->last_io_time)
-                       vmp->last_io_time = __alloc_percpu(sizeof(u64),
-                                                          __alignof__(struct
-                                                          lpfc_vmid));
-               if (!vmp->last_io_time) {
-                       hash_del(&vmp->hnode);
-                       vmp->flag = LPFC_VMID_SLOT_FREE;
-                       write_unlock(&vport->vmid_lock);
-                       return -EIO;
-               }
-
-               write_unlock(&vport->vmid_lock);
-
-               /* complete transaction with switch */
-               if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
-                       rc = lpfc_vmid_uvem(vport, vmp, true);
-               else if (vport->phba->cfg_vmid_app_header)
-                       rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
-               if (!rc) {
-                       write_lock(&vport->vmid_lock);
-                       vport->cur_vmid_cnt++;
-                       vmp->flag |= LPFC_VMID_REQ_REGISTER;
-                       write_unlock(&vport->vmid_lock);
-               } else {
-                       write_lock(&vport->vmid_lock);
-                       hash_del(&vmp->hnode);
-                       vmp->flag = LPFC_VMID_SLOT_FREE;
-                       free_percpu(vmp->last_io_time);
-                       write_unlock(&vport->vmid_lock);
-                       return -EIO;
-               }
-
-               /* finally, enable the idle timer once */
-               if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
-                       mod_timer(&vport->phba->inactive_vmid_poll,
-                                 jiffies +
-                                 msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
-                       vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
-               }
-       }
-       return rc;
-}
-
-/*
  * lpfc_is_command_vm_io - get the UUID from blk cgroup
  * @cmd: Pointer to scsi_cmnd data structure
  * Returns UUID if present, otherwise NULL
@@ -5704,9 +5448,10 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
                uuid = lpfc_is_command_vm_io(cmnd);
 
                if (uuid) {
-                       err = lpfc_vmid_get_appid(vport, uuid, cmnd,
-                               (union lpfc_vmid_io_tag *)
-                                       &cur_iocbq->vmid_tag);
+                       err = lpfc_vmid_get_appid(vport, uuid,
+                                       cmnd->sc_data_direction,
+                                       (union lpfc_vmid_io_tag *)
+                                               &cur_iocbq->vmid_tag);
                        if (!err)
                                cur_iocbq->cmd_flag |= LPFC_IO_VMID;
                }
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
new file mode 100644 (file)
index 0000000..f64ced0
--- /dev/null
@@ -0,0 +1,288 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/interrupt.h>
+#include <linux/dma-direction.h>
+
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+
+
+/*
+ * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash: calculated hash value
+ * @buf: uuid associated with the VE
+ * Return the VMID entry associated with the UUID
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
+                                              u32 hash, u8 *buf)
+{
+       struct lpfc_vmid *vmp;
+
+       hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
+               if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
+                       return vmp;
+       }
+       return NULL;
+}
+
+/*
+ * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash - calculated hash value
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ *
+ * This routine will insert the newly acquired VMID entity in the hash table.
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+static void
+lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
+                          struct lpfc_vmid *vmp)
+{
+       hash_add(vport->hash_table, &vmp->hnode, hash);
+}
+
+/*
+ * lpfc_vmid_hash_fn - create a hash value of the UUID
+ * @vmid: uuid associated with the VE
+ * @len: length of the VMID string
+ * Returns the calculated hash value
+ */
+int lpfc_vmid_hash_fn(const char *vmid, int len)
+{
+       int c;
+       int hash = 0;
+
+       if (len == 0)
+               return 0;
+       while (len--) {
+               c = *vmid++;
+               if (c >= 'A' && c <= 'Z')
+                       c += 'a' - 'A';
+
+               hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
+                       (c >> LPFC_VMID_HASH_SHIFT)) * 19;
+       }
+
+       return hash & LPFC_VMID_HASH_MASK;
+}
+
+/*
+ * lpfc_vmid_update_entry - update the vmid entry in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @iodir: io direction
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ * @tag: VMID tag
+ */
+static void lpfc_vmid_update_entry(struct lpfc_vport *vport,
+                                  enum dma_data_direction iodir,
+                                  struct lpfc_vmid *vmp,
+                                  union lpfc_vmid_io_tag *tag)
+{
+       u64 *lta;
+
+       if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+               tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
+       else if (vport->phba->cfg_vmid_app_header)
+               tag->app_id = vmp->un.app_id;
+
+       if (iodir == DMA_TO_DEVICE)
+               vmp->io_wr_cnt++;
+       else if (iodir == DMA_FROM_DEVICE)
+               vmp->io_rd_cnt++;
+
+       /* update the last access timestamp in the table */
+       lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
+       *lta = jiffies;
+}
+
+static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
+                                   struct lpfc_vmid *vmid)
+{
+       u32 hash;
+       struct lpfc_vmid *pvmid;
+
+       if (vport->port_type == LPFC_PHYSICAL_PORT) {
+               vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+       } else {
+               hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
+               pvmid =
+                   lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
+                                                vmid->host_vmid);
+               if (pvmid)
+                       vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
+               else
+                       vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+       }
+}
+
+/*
+ * lpfc_vmid_get_appid - get the VMID associated with the UUID
+ * @vport: The virtual port for which this call is being executed.
+ * @uuid: UUID associated with the VE
+ * @cmd: address of scsi_cmd descriptor
+ * @iodir: io direction
+ * @tag: VMID tag
+ * Returns status of the function
+ */
+int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
+                       enum dma_data_direction iodir,
+                       union lpfc_vmid_io_tag *tag)
+{
+       struct lpfc_vmid *vmp = NULL;
+       int hash, len, rc = -EPERM, i;
+
+       /* check if QFPA is complete */
+       if (lpfc_vmid_is_type_priority_tag(vport) &&
+           !(vport->vmid_flag & LPFC_VMID_QFPA_CMPL) &&
+           (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA)) {
+               vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
+               return -EAGAIN;
+       }
+
+       /* search if the UUID has already been mapped to the VMID */
+       len = strlen(uuid);
+       hash = lpfc_vmid_hash_fn(uuid, len);
+
+       /* search for the VMID in the table */
+       read_lock(&vport->vmid_lock);
+       vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+       /* if found, check if its already registered  */
+       if (vmp  && vmp->flag & LPFC_VMID_REGISTERED) {
+               read_unlock(&vport->vmid_lock);
+               lpfc_vmid_update_entry(vport, iodir, vmp, tag);
+               rc = 0;
+       } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
+                          vmp->flag & LPFC_VMID_DE_REGISTER)) {
+               /* else if register or dereg request has already been sent */
+               /* Hence VMID tag will not be added for this I/O */
+               read_unlock(&vport->vmid_lock);
+               rc = -EBUSY;
+       } else {
+               /* The VMID was not found in the hashtable. At this point, */
+               /* drop the read lock first before proceeding further */
+               read_unlock(&vport->vmid_lock);
+               /* start the process to obtain one as per the */
+               /* type of the VMID indicated */
+               write_lock(&vport->vmid_lock);
+               vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+               /* while the read lock was released, in case the entry was */
+               /* added by other context or is in process of being added */
+               if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+                       lpfc_vmid_update_entry(vport, iodir, vmp, tag);
+                       write_unlock(&vport->vmid_lock);
+                       return 0;
+               } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
+                       write_unlock(&vport->vmid_lock);
+                       return -EBUSY;
+               }
+
+               /* else search and allocate a free slot in the hash table */
+               if (vport->cur_vmid_cnt < vport->max_vmid) {
+                       for (i = 0; i < vport->max_vmid; i++) {
+                               vmp = vport->vmid + i;
+                               if (vmp->flag == LPFC_VMID_SLOT_FREE)
+                                       break;
+                       }
+                       if (i == vport->max_vmid)
+                               vmp = NULL;
+               } else {
+                       vmp = NULL;
+               }
+
+               if (!vmp) {
+                       write_unlock(&vport->vmid_lock);
+                       return -ENOMEM;
+               }
+
+               /* Add the vmid and register */
+               lpfc_put_vmid_in_hashtable(vport, hash, vmp);
+               vmp->vmid_len = len;
+               memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
+               vmp->io_rd_cnt = 0;
+               vmp->io_wr_cnt = 0;
+               vmp->flag = LPFC_VMID_SLOT_USED;
+
+               vmp->delete_inactive =
+                       vport->vmid_inactivity_timeout ? 1 : 0;
+
+               /* if type priority tag, get next available VMID */
+               if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+                       lpfc_vmid_assign_cs_ctl(vport, vmp);
+
+               /* allocate the per cpu variable for holding */
+               /* the last access time stamp only if VMID is enabled */
+               if (!vmp->last_io_time)
+                       vmp->last_io_time = __alloc_percpu(sizeof(u64),
+                                                          __alignof__(struct
+                                                          lpfc_vmid));
+               if (!vmp->last_io_time) {
+                       hash_del(&vmp->hnode);
+                       vmp->flag = LPFC_VMID_SLOT_FREE;
+                       write_unlock(&vport->vmid_lock);
+                       return -EIO;
+               }
+
+               write_unlock(&vport->vmid_lock);
+
+               /* complete transaction with switch */
+               if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+                       rc = lpfc_vmid_uvem(vport, vmp, true);
+               else if (vport->phba->cfg_vmid_app_header)
+                       rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
+               if (!rc) {
+                       write_lock(&vport->vmid_lock);
+                       vport->cur_vmid_cnt++;
+                       vmp->flag |= LPFC_VMID_REQ_REGISTER;
+                       write_unlock(&vport->vmid_lock);
+               } else {
+                       write_lock(&vport->vmid_lock);
+                       hash_del(&vmp->hnode);
+                       vmp->flag = LPFC_VMID_SLOT_FREE;
+                       free_percpu(vmp->last_io_time);
+                       write_unlock(&vport->vmid_lock);
+                       return -EIO;
+               }
+
+               /* finally, enable the idle timer once */
+               if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
+                       mod_timer(&vport->phba->inactive_vmid_poll,
+                                 jiffies +
+                                 msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
+                       vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
+               }
+       }
+       return rc;
+}
index 01cd017..0e1cb4a 100644 (file)
@@ -954,7 +954,7 @@ struct mpi3mr_ioc {
        u16 active_poll_qcount;
        u16 requested_poll_qcount;
 
-       struct device *bsg_dev;
+       struct device bsg_dev;
        struct request_queue *bsg_queue;
        u8 stop_bsgs;
        u8 *logdata_buf;
index 9ab1762..9baac22 100644 (file)
@@ -1487,28 +1487,28 @@ static int mpi3mr_bsg_request(struct bsg_job *job)
  */
 void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc)
 {
+       struct device *bsg_dev = &mrioc->bsg_dev;
        if (!mrioc->bsg_queue)
                return;
 
        bsg_remove_queue(mrioc->bsg_queue);
        mrioc->bsg_queue = NULL;
 
-       device_del(mrioc->bsg_dev);
-       put_device(mrioc->bsg_dev);
-       kfree(mrioc->bsg_dev);
+       device_del(bsg_dev);
+       put_device(bsg_dev);
 }
 
 /**
  * mpi3mr_bsg_node_release - release bsg device node
  * @dev: bsg device node
  *
- * decrements bsg dev reference count
+ * decrements bsg dev parent reference count
  *
  * Return:Nothing
  */
 static void mpi3mr_bsg_node_release(struct device *dev)
 {
-       put_device(dev);
+       put_device(dev->parent);
 }
 
 /**
@@ -1521,41 +1521,37 @@ static void mpi3mr_bsg_node_release(struct device *dev)
  */
 void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
 {
-       mrioc->bsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
-       if (!mrioc->bsg_dev) {
-               ioc_err(mrioc, "bsg device mem allocation failed\n");
-               return;
-       }
+       struct device *bsg_dev = &mrioc->bsg_dev;
+       struct device *parent = &mrioc->shost->shost_gendev;
+
+       device_initialize(bsg_dev);
+
+       bsg_dev->parent = get_device(parent);
+       bsg_dev->release = mpi3mr_bsg_node_release;
 
-       device_initialize(mrioc->bsg_dev);
-       dev_set_name(mrioc->bsg_dev, "mpi3mrctl%u", mrioc->id);
+       dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id);
 
-       if (device_add(mrioc->bsg_dev)) {
+       if (device_add(bsg_dev)) {
                ioc_err(mrioc, "%s: bsg device add failed\n",
-                   dev_name(mrioc->bsg_dev));
-               goto err_device_add;
+                   dev_name(bsg_dev));
+               put_device(bsg_dev);
+               return;
        }
 
-       mrioc->bsg_dev->release = mpi3mr_bsg_node_release;
-
-       mrioc->bsg_queue = bsg_setup_queue(mrioc->bsg_dev, dev_name(mrioc->bsg_dev),
+       mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev),
                        mpi3mr_bsg_request, NULL, 0);
        if (IS_ERR(mrioc->bsg_queue)) {
                ioc_err(mrioc, "%s: bsg registration failed\n",
-                   dev_name(mrioc->bsg_dev));
-               goto err_setup_queue;
+                   dev_name(bsg_dev));
+               device_del(bsg_dev);
+               put_device(bsg_dev);
+               return;
        }
 
        blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS);
        blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS);
 
        return;
-
-err_setup_queue:
-       device_del(mrioc->bsg_dev);
-       put_device(mrioc->bsg_dev);
-err_device_add:
-       kfree(mrioc->bsg_dev);
 }
 
 /**
@@ -1693,7 +1689,7 @@ logging_level_store(struct device *dev,
 static DEVICE_ATTR_RW(logging_level);
 
 /**
- * adapter_state_show - SysFS callback for adapter state show
+ * adp_state_show() - SysFS callback for adapter state show
  * @dev: class device
  * @attr: Device attributes
  * @buf: Buffer to copy
index 7158552..e885c1d 100644 (file)
@@ -1239,7 +1239,8 @@ static void myrb_cleanup(struct myrb_hba *cb)
        myrb_unmap(cb);
 
        if (cb->mmio_base) {
-               cb->disable_intr(cb->io_base);
+               if (cb->disable_intr)
+                       cb->disable_intr(cb->io_base);
                iounmap(cb->mmio_base);
        }
        if (cb->irq)
@@ -3413,9 +3414,13 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
        mutex_init(&cb->dcmd_mutex);
        mutex_init(&cb->dma_mutex);
        cb->pdev = pdev;
+       cb->host = shost;
 
-       if (pci_enable_device(pdev))
-               goto failure;
+       if (pci_enable_device(pdev)) {
+               dev_err(&pdev->dev, "Failed to enable PCI device\n");
+               scsi_host_put(shost);
+               return NULL;
+       }
 
        if (privdata->hw_init == DAC960_PD_hw_init ||
            privdata->hw_init == DAC960_P_hw_init) {
index 3d5cd33..bfce601 100644 (file)
@@ -1434,7 +1434,7 @@ static int pmcraid_notify_aen(
                return -EINVAL;
        }
 
-       /* send genetlink multicast message to notify appplications */
+       /* send genetlink multicast message to notify applications */
        genlmsg_end(skb, msg_header);
 
        result = genlmsg_multicast(&pmcraid_event_family, skb,
index e57cc22..4750ec5 100644 (file)
@@ -893,7 +893,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
                return -EINVAL;
        }
 
-       /* Record LUN number for later use if we neeed them */
+       /* Record LUN number for later use if we need them */
        io_req->lun = (int)sc_cmd->device->lun;
 
        /* Obtain free SQE */
index 0ab595c..1e7f4d1 100644 (file)
@@ -4037,7 +4037,6 @@ qla1280_setup(char *s)
 {
        char *cp, *ptr;
        unsigned long val;
-       int toke;
 
        cp = s;
 
@@ -4052,7 +4051,7 @@ qla1280_setup(char *s)
                } else
                        val = simple_strtoul(ptr, &ptr, 0);
 
-               switch ((toke = qla1280_get_token(cp))) {
+               switch (qla1280_get_token(cp)) {
                case TOKEN_NVRAM:
                        if (!val)
                                driver_setup.no_nvram = 1;
index e6b5c4c..346d47b 100644 (file)
@@ -591,7 +591,6 @@ qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
        }
        kfree(req->outstanding_cmds);
        kfree(req);
-       req = NULL;
 }
 
 static void
@@ -617,7 +616,6 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
                mutex_unlock(&ha->vport_lock);
        }
        kfree(rsp);
-       rsp = NULL;
 }
 
 int
index a02235a..cb97f62 100644 (file)
@@ -48,13 +48,6 @@ MODULE_PARM_DESC(qlini_mode,
        "when ready "
        "\"enabled\" (default) - initiator mode will always stay enabled.");
 
-static int ql_dm_tgt_ex_pct = 0;
-module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
-       "For Dual Mode (qlini_mode=dual), this parameter determines "
-       "the percentage of exchanges/cmds FW will allocate resources "
-       "for Target mode.");
-
 int ql2xuctrlirq = 1;
 module_param(ql2xuctrlirq, int, 0644);
 MODULE_PARM_DESC(ql2xuctrlirq,
index cdaca13..49ef864 100644 (file)
@@ -2039,12 +2039,13 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
        scmd->cmnd[4] = SCSI_REMOVAL_PREVENT;
        scmd->cmnd[5] = 0;
        scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+       scmd->allowed = 5;
 
        req->rq_flags |= RQF_QUIET;
        req->timeout = 10 * HZ;
-       scmd->allowed = 5;
+       req->end_io = eh_lock_door_done;
 
-       blk_execute_rq_nowait(req, true, eh_lock_door_done);
+       blk_execute_rq_nowait(req, true);
 }
 
 /**
index e9db7da..6ffc9e4 100644 (file)
@@ -779,7 +779,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
                                        action = ACTION_DELAYED_RETRY;
                                        break;
                                case 0x0a: /* ALUA state transition */
-                                       blk_stat = BLK_STS_AGAIN;
+                                       blk_stat = BLK_STS_TRANSPORT;
                                        fallthrough;
                                default:
                                        action = ACTION_FAIL;
index 546a9e3..4394979 100644 (file)
@@ -573,7 +573,6 @@ struct bus_type scsi_bus_type = {
        .pm             = &scsi_bus_pm_ops,
 #endif
 };
-EXPORT_SYMBOL_GPL(scsi_bus_type);
 
 int scsi_sysfs_register(void)
 {
index 7493164..895b56c 100644 (file)
@@ -3521,7 +3521,7 @@ static int sd_probe(struct device *dev)
        error = device_add_disk(dev, gd, NULL);
        if (error) {
                put_device(&sdkp->disk_dev);
-               blk_cleanup_disk(gd);
+               put_disk(gd);
                goto out;
        }
 
@@ -3542,7 +3542,6 @@ static int sd_probe(struct device *dev)
  out_put:
        put_disk(gd);
  out_free:
-       sd_zbc_release_disk(sdkp);
        kfree(sdkp);
  out:
        scsi_autopm_put_device(sdp);
@@ -3579,7 +3578,7 @@ static void scsi_disk_release(struct device *dev)
        struct scsi_disk *sdkp = to_scsi_disk(dev);
 
        ida_free(&sd_index_ida, sdkp->index);
-       sd_zbc_release_disk(sdkp);
+       sd_zbc_free_zone_info(sdkp);
        put_device(&sdkp->device->sdev_gendev);
        free_opal_dev(sdkp->opal_dev);
 
index 2abad54..5eea762 100644 (file)
@@ -241,7 +241,7 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
 
 #ifdef CONFIG_BLK_DEV_ZONED
 
-void sd_zbc_release_disk(struct scsi_disk *sdkp);
+void sd_zbc_free_zone_info(struct scsi_disk *sdkp);
 int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]);
 int sd_zbc_revalidate_zones(struct scsi_disk *sdkp);
 blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
@@ -256,7 +256,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
 
 #else /* CONFIG_BLK_DEV_ZONED */
 
-static inline void sd_zbc_release_disk(struct scsi_disk *sdkp) {}
+static inline void sd_zbc_free_zone_info(struct scsi_disk *sdkp) {}
 
 static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
 {
index 5b9fad7..6acc4f4 100644 (file)
@@ -786,8 +786,11 @@ static int sd_zbc_init_disk(struct scsi_disk *sdkp)
        return 0;
 }
 
-static void sd_zbc_clear_zone_info(struct scsi_disk *sdkp)
+void sd_zbc_free_zone_info(struct scsi_disk *sdkp)
 {
+       if (!sdkp->zone_wp_update_buf)
+               return;
+
        /* Serialize against revalidate zones */
        mutex_lock(&sdkp->rev_mutex);
 
@@ -802,12 +805,6 @@ static void sd_zbc_clear_zone_info(struct scsi_disk *sdkp)
        mutex_unlock(&sdkp->rev_mutex);
 }
 
-void sd_zbc_release_disk(struct scsi_disk *sdkp)
-{
-       if (sd_is_zoned(sdkp))
-               sd_zbc_clear_zone_info(sdkp);
-}
-
 static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
 {
        struct scsi_disk *sdkp = scsi_disk(disk);
@@ -914,12 +911,15 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
        u32 zone_blocks = 0;
        int ret;
 
-       if (!sd_is_zoned(sdkp))
+       if (!sd_is_zoned(sdkp)) {
                /*
-                * Device managed or normal SCSI disk,
-                * no special handling required
+                * Device managed or normal SCSI disk, no special handling
+                * required. Nevertheless, free the disk zone information in
+                * case the device type changed.
                 */
+               sd_zbc_free_zone_info(sdkp);
                return 0;
+       }
 
        /* READ16/WRITE16 is mandatory for ZBC disks */
        sdkp->device->use_16_for_rw = 1;
@@ -928,11 +928,11 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
        if (!blk_queue_is_zoned(q)) {
                /*
                 * This can happen for a host aware disk with partitions.
-                * The block device zone information was already cleared
-                * by blk_queue_set_zoned(). Only clear the scsi disk zone
+                * The block device zone model was already cleared by
+                * blk_queue_set_zoned(). Only free the scsi disk zone
                 * information and exit early.
                 */
-               sd_zbc_clear_zone_info(sdkp);
+               sd_zbc_free_zone_info(sdkp);
                return 0;
        }
 
index cbffa71..118c7b4 100644 (file)
@@ -831,7 +831,8 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
 
        srp->rq->timeout = timeout;
        kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
-       blk_execute_rq_nowait(srp->rq, at_head, sg_rq_end_io);
+       srp->rq->end_io = sg_rq_end_io;
+       blk_execute_rq_nowait(srp->rq, at_head);
        return 0;
 }
 
index c4c4827..2e40320 100644 (file)
@@ -1082,7 +1082,7 @@ struct pqi_stream_data {
 };
 
 struct pqi_scsi_dev {
-       int     devtype;                /* as reported by INQUIRY commmand */
+       int     devtype;                /* as reported by INQUIRY command */
        u8      device_type;            /* as reported by */
                                        /* BMIC_IDENTIFY_PHYSICAL_DEVICE */
                                        /* only valid for devtype = TYPE_DISK */
index 56a093a..850172a 100644 (file)
@@ -579,9 +579,10 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
        memcpy(scmd->cmnd, cmd, scmd->cmd_len);
        req->timeout = timeout;
        scmd->allowed = retries;
+       req->end_io = st_scsi_execute_end;
        req->end_io_data = SRpnt;
 
-       blk_execute_rq_nowait(req, true, st_scsi_execute_end);
+       blk_execute_rq_nowait(req, true);
        return 0;
 }
 
index 08ed059..ca35309 100644 (file)
@@ -479,7 +479,7 @@ static void storvsc_host_scan(struct work_struct *work)
        host = host_device->host;
        /*
         * Before scanning the host, first check to see if any of the
-        * currrently known devices have been hot removed. We issue a
+        * currently known devices have been hot removed. We issue a
         * "unit ready" command against all currently known devices.
         * This I/O will result in an error for devices that have been
         * removed. As part of handling the I/O error, we remove the device.
index ec58091..c0c4f89 100644 (file)
@@ -510,10 +510,8 @@ static int qcom_slim_probe(struct platform_device *pdev)
        }
 
        ctrl->irq = platform_get_irq(pdev, 0);
-       if (ctrl->irq < 0) {
-               dev_err(&pdev->dev, "no slimbus IRQ\n");
+       if (ctrl->irq < 0)
                return ctrl->irq;
-       }
 
        sctrl = &ctrl->ctrl;
        sctrl->dev = &pdev->dev;
index 7040293..0aa8408 100644 (file)
@@ -1434,6 +1434,7 @@ static int of_qcom_slim_ngd_register(struct device *parent,
        const struct of_device_id *match;
        struct device_node *node;
        u32 id;
+       int ret;
 
        match = of_match_node(qcom_slim_ngd_dt_match, parent->of_node);
        data = match->data;
@@ -1455,7 +1456,17 @@ static int of_qcom_slim_ngd_register(struct device *parent,
                }
                ngd->id = id;
                ngd->pdev->dev.parent = parent;
-               ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME;
+
+               ret = driver_set_override(&ngd->pdev->dev,
+                                         &ngd->pdev->driver_override,
+                                         QCOM_SLIM_NGD_DRV_NAME,
+                                         strlen(QCOM_SLIM_NGD_DRV_NAME));
+               if (ret) {
+                       platform_device_put(ngd->pdev);
+                       kfree(ngd);
+                       of_node_put(node);
+                       return ret;
+               }
                ngd->pdev->dev.of_node = node;
                ctrl->ngd = ngd;
 
@@ -1526,13 +1537,11 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
        if (IS_ERR(ctrl->base))
                return PTR_ERR(ctrl->base);
 
-       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "no slimbus IRQ resource\n");
-               return -ENODEV;
-       }
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0)
+               return ret;
 
-       ret = devm_request_irq(dev, res->start, qcom_slim_ngd_interrupt,
+       ret = devm_request_irq(dev, ret, qcom_slim_ngd_interrupt,
                               IRQF_TRIGGER_HIGH, "slim-ngd", ctrl);
        if (ret) {
                dev_err(&pdev->dev, "request IRQ failed\n");
index c5aae42..86ccf59 100644 (file)
@@ -14,6 +14,7 @@ source "drivers/soc/ixp4xx/Kconfig"
 source "drivers/soc/litex/Kconfig"
 source "drivers/soc/mediatek/Kconfig"
 source "drivers/soc/microchip/Kconfig"
+source "drivers/soc/pxa/Kconfig"
 source "drivers/soc/qcom/Kconfig"
 source "drivers/soc/renesas/Kconfig"
 source "drivers/soc/rockchip/Kconfig"
index e8228c4..919716e 100644 (file)
@@ -19,6 +19,7 @@ obj-$(CONFIG_SOC_XWAY)                += lantiq/
 obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex/
 obj-y                          += mediatek/
 obj-y                          += microchip/
+obj-y                          += pxa/
 obj-y                          += amlogic/
 obj-y                          += qcom/
 obj-y                          += renesas/
index 9154c70..291086b 100644 (file)
@@ -459,7 +459,7 @@ static const struct of_device_id ixp4xx_qmgr_of_match[] = {
 static struct platform_driver ixp4xx_qmgr_driver = {
        .driver = {
                .name           = "ixp4xx-qmgr",
-               .of_match_table = of_match_ptr(ixp4xx_qmgr_of_match),
+               .of_match_table = ixp4xx_qmgr_of_match,
        },
        .probe = ixp4xx_qmgr_probe,
        .remove = ixp4xx_qmgr_remove,
similarity index 83%
rename from arch/arm/plat-pxa/Kconfig
rename to drivers/soc/pxa/Kconfig
index 6f7a0a3..c5c265a 100644 (file)
@@ -1,9 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
-if PLAT_PXA
+config PLAT_PXA
+       bool
 
 config PXA_SSP
        tristate
        help
          Enable support for PXA2xx SSP ports
-
-endif
similarity index 51%
rename from arch/arm/plat-pxa/Makefile
rename to drivers/soc/pxa/Makefile
index 349ea0a..413dece 100644 (file)
@@ -1,8 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for code common across different PXA processor families
-#
-ccflags-$(CONFIG_ARCH_MMP) := -I$(srctree)/$(src)/include
 
 obj-$(CONFIG_PXA3xx)           += mfp.o
 obj-$(CONFIG_ARCH_MMP)         += mfp.o
similarity index 99%
rename from arch/arm/plat-pxa/mfp.c
rename to drivers/soc/pxa/mfp.c
index 17fc4f3..6220ba3 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 
-#include <plat/mfp.h>
+#include <linux/soc/pxa/mfp.h>
 
 #define MFPR_SIZE      (PAGE_SIZE)
 
index 384461b..15a3970 100644 (file)
@@ -165,12 +165,14 @@ static int __init rockchip_grf_init(void)
                return -ENODEV;
        if (!match || !match->data) {
                pr_err("%s: missing grf data\n", __func__);
+               of_node_put(np);
                return -EINVAL;
        }
 
        grf_info = match->data;
 
        grf = syscon_node_to_regmap(np);
+       of_node_put(np);
        if (IS_ERR(grf)) {
                pr_err("%s: could not get grf syscon\n", __func__);
                return PTR_ERR(grf);
index c77ecf6..5611d14 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_opp.h>
+#include <linux/power_supply.h>
 #include <linux/reboot.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
 #define PMC_USB_DEBOUNCE_DEL           0xec
 #define PMC_USB_AO                     0xf0
 
+#define PMC_SCRATCH37                  0x130
 #define PMC_SCRATCH41                  0x140
 
 #define PMC_WAKE2_MASK                 0x160
@@ -1101,8 +1103,7 @@ static struct notifier_block tegra_pmc_reboot_notifier = {
        .notifier_call = tegra_pmc_reboot_notify,
 };
 
-static int tegra_pmc_restart_notify(struct notifier_block *this,
-                                   unsigned long action, void *data)
+static void tegra_pmc_restart(void)
 {
        u32 value;
 
@@ -1110,14 +1111,31 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
        value = tegra_pmc_readl(pmc, PMC_CNTRL);
        value |= PMC_CNTRL_MAIN_RST;
        tegra_pmc_writel(pmc, value, PMC_CNTRL);
+}
+
+static int tegra_pmc_restart_handler(struct sys_off_data *data)
+{
+       tegra_pmc_restart();
 
        return NOTIFY_DONE;
 }
 
-static struct notifier_block tegra_pmc_restart_handler = {
-       .notifier_call = tegra_pmc_restart_notify,
-       .priority = 128,
-};
+static int tegra_pmc_power_off_handler(struct sys_off_data *data)
+{
+       /*
+        * Reboot Nexus 7 into special bootloader mode if USB cable is
+        * connected in order to display battery status and power off.
+        */
+       if (of_machine_is_compatible("asus,grouper") &&
+           power_supply_is_system_supplied()) {
+               const u32 go_to_charger_mode = 0xa5a55a5a;
+
+               tegra_pmc_writel(pmc, go_to_charger_mode, PMC_SCRATCH37);
+               tegra_pmc_restart();
+       }
+
+       return NOTIFY_DONE;
+}
 
 static int powergate_show(struct seq_file *s, void *data)
 {
@@ -2880,6 +2898,42 @@ static int tegra_pmc_probe(struct platform_device *pdev)
        }
 
        /*
+        * PMC should be last resort for restarting since it soft-resets
+        * CPU without resetting everything else.
+        */
+       err = devm_register_reboot_notifier(&pdev->dev,
+                                           &tegra_pmc_reboot_notifier);
+       if (err) {
+               dev_err(&pdev->dev, "unable to register reboot notifier, %d\n",
+                       err);
+               return err;
+       }
+
+       err = devm_register_sys_off_handler(&pdev->dev,
+                                           SYS_OFF_MODE_RESTART,
+                                           SYS_OFF_PRIO_LOW,
+                                           tegra_pmc_restart_handler, NULL);
+       if (err) {
+               dev_err(&pdev->dev, "failed to register sys-off handler: %d\n",
+                       err);
+               return err;
+       }
+
+       /*
+        * PMC should be primary power-off method if it soft-resets CPU,
+        * asking bootloader to shutdown hardware.
+        */
+       err = devm_register_sys_off_handler(&pdev->dev,
+                                           SYS_OFF_MODE_POWER_OFF,
+                                           SYS_OFF_PRIO_FIRMWARE,
+                                           tegra_pmc_power_off_handler, NULL);
+       if (err) {
+               dev_err(&pdev->dev, "failed to register sys-off handler: %d\n",
+                       err);
+               return err;
+       }
+
+       /*
         * PCLK clock rate can't be retrieved using CLK API because it
         * causes lockup if CPU enters LP2 idle state from some other
         * CLK notifier, hence we're caching the rate's value locally.
@@ -2910,28 +2964,13 @@ static int tegra_pmc_probe(struct platform_device *pdev)
                        goto cleanup_sysfs;
        }
 
-       err = devm_register_reboot_notifier(&pdev->dev,
-                                           &tegra_pmc_reboot_notifier);
-       if (err) {
-               dev_err(&pdev->dev, "unable to register reboot notifier, %d\n",
-                       err);
-               goto cleanup_debugfs;
-       }
-
-       err = register_restart_handler(&tegra_pmc_restart_handler);
-       if (err) {
-               dev_err(&pdev->dev, "unable to register restart handler, %d\n",
-                       err);
-               goto cleanup_debugfs;
-       }
-
        err = tegra_pmc_pinctrl_init(pmc);
        if (err)
-               goto cleanup_restart_handler;
+               goto cleanup_debugfs;
 
        err = tegra_pmc_regmap_init(pmc);
        if (err < 0)
-               goto cleanup_restart_handler;
+               goto cleanup_debugfs;
 
        err = tegra_powergate_init(pmc, pdev->dev.of_node);
        if (err < 0)
@@ -2954,8 +2993,6 @@ static int tegra_pmc_probe(struct platform_device *pdev)
 
 cleanup_powergates:
        tegra_powergate_remove_all(pdev->dev.of_node);
-cleanup_restart_handler:
-       unregister_restart_handler(&tegra_pmc_restart_handler);
 cleanup_debugfs:
        debugfs_remove(pmc->debugfs);
 cleanup_sysfs:
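[Note: the tegra-pmc hunks above replace register_restart_handler() with the devm-managed sys-off API. A hedged sketch of how a driver typically wires up devm_register_sys_off_handler(), using the same mode/priority constants as the hunk; the example_* names are hypothetical:]

        #include <linux/platform_device.h>
        #include <linux/reboot.h>

        static int example_restart_handler(struct sys_off_data *data)
        {
                /* poke the (hypothetical) controller's reset bit here */
                return NOTIFY_DONE;
        }

        static int example_probe(struct platform_device *pdev)
        {
                /*
                 * SYS_OFF_PRIO_LOW keeps this a last-resort restart method,
                 * matching the "last resort" comment in the hunk above.
                 */
                return devm_register_sys_off_handler(&pdev->dev,
                                                     SYS_OFF_MODE_RESTART,
                                                     SYS_OFF_PRIO_LOW,
                                                     example_restart_handler,
                                                     NULL);
        }

[Because the registration is devm-managed, the error-unwind labels above can drop the explicit unregister_restart_handler() call, which is exactly what the second half of the hunk does.]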
index b27f885..5dcb766 100644 (file)
@@ -41,25 +41,37 @@ static int event_manager_availability = -EACCES;
 static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
 static int sgi_num = XLNX_EVENT_SGI_NUM;
 
+static bool is_need_to_unregister;
+
+/**
+ * struct agent_cb - Registered callback function and private data.
+ * @agent_data:                Data passed back to handler function.
+ * @eve_cb:            Function pointer to store the callback function.
+ * @list:              member to create list.
+ */
+struct agent_cb {
+       void *agent_data;
+       event_cb_func_t eve_cb;
+       struct list_head list;
+};
+
 /**
  * struct registered_event_data - Registered Event Data.
  * @key:               key is the combine id(Node-Id | Event-Id) of type u64
  *                     where upper u32 for Node-Id and lower u32 for Event-Id,
  *                     And this used as key to index into hashmap.
- * @agent_data:                Data passed back to handler function.
  * @cb_type:           Type of Api callback, like PM_NOTIFY_CB, etc.
- * @eve_cb:            Function pointer to store the callback function.
- * @wake:              If this flag set, firmware will wakeup processor if is
+ * @wake:              If this flag set, firmware will wake up processor if is
  *                     in sleep or power down state.
+ * @cb_list_head:      Head of call back data list which contain the information
+ *                     about registered handler and private data.
  * @hentry:            hlist_node that hooks this entry into hashtable.
  */
 struct registered_event_data {
        u64 key;
        enum pm_api_cb_id cb_type;
-       void *agent_data;
-
-       event_cb_func_t eve_cb;
        bool wake;
+       struct list_head cb_list_head;
        struct hlist_node hentry;
 };
 
@@ -78,29 +90,60 @@ static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, cons
                                        event_cb_func_t cb_fun, void *data)
 {
        u64 key = 0;
+       bool present_in_hash = false;
        struct registered_event_data *eve_data;
+       struct agent_cb *cb_data;
+       struct agent_cb *cb_pos;
+       struct agent_cb *cb_next;
 
        key = ((u64)node_id << 32U) | (u64)event;
        /* Check for existing entry in hash table for given key id */
        hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
                if (eve_data->key == key) {
-                       pr_err("Found as already registered\n");
-                       return -EINVAL;
+                       present_in_hash = true;
+                       break;
                }
        }
 
-       /* Add new entry if not present */
-       eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
-       if (!eve_data)
-               return -ENOMEM;
+       if (!present_in_hash) {
+               /* Add new entry if not present in HASH table */
+               eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
+               if (!eve_data)
+                       return -ENOMEM;
+               eve_data->key = key;
+               eve_data->cb_type = PM_NOTIFY_CB;
+               eve_data->wake = wake;
+               INIT_LIST_HEAD(&eve_data->cb_list_head);
+
+               cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+               if (!cb_data)
+                       return -ENOMEM;
+               cb_data->eve_cb = cb_fun;
+               cb_data->agent_data = data;
+
+               /* Add into callback list */
+               list_add(&cb_data->list, &eve_data->cb_list_head);
+
+               /* Add into HASH table */
+               hash_add(reg_driver_map, &eve_data->hentry, key);
+       } else {
+               /* Search for callback function and private data in list */
+               list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+                       if (cb_pos->eve_cb == cb_fun &&
+                           cb_pos->agent_data == data) {
+                               return 0;
+                       }
+               }
 
-       eve_data->key = key;
-       eve_data->cb_type = PM_NOTIFY_CB;
-       eve_data->eve_cb = cb_fun;
-       eve_data->wake = wake;
-       eve_data->agent_data = data;
+               /* Add multiple handler and private data in list */
+               cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+               if (!cb_data)
+                       return -ENOMEM;
+               cb_data->eve_cb = cb_fun;
+               cb_data->agent_data = data;
 
-       hash_add(reg_driver_map, &eve_data->hentry, key);
+               list_add(&cb_data->list, &eve_data->cb_list_head);
+       }
 
        return 0;
 }
@@ -108,6 +151,7 @@ static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, cons
 static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
 {
        struct registered_event_data *eve_data;
+       struct agent_cb *cb_data;
 
        /* Check for existing entry in hash table for given cb_type */
        hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
@@ -124,8 +168,16 @@ static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
 
        eve_data->key = 0;
        eve_data->cb_type = PM_INIT_SUSPEND_CB;
-       eve_data->eve_cb = cb_fun;
-       eve_data->agent_data = data;
+       INIT_LIST_HEAD(&eve_data->cb_list_head);
+
+       cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+       if (!cb_data)
+               return -ENOMEM;
+       cb_data->eve_cb = cb_fun;
+       cb_data->agent_data = data;
+
+       /* Add into callback list */
+       list_add(&cb_data->list, &eve_data->cb_list_head);
 
        hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB);
 
@@ -136,15 +188,26 @@ static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
 {
        bool is_callback_found = false;
        struct registered_event_data *eve_data;
+       struct agent_cb *cb_pos;
+       struct agent_cb *cb_next;
+
+       is_need_to_unregister = false;
 
        /* Check for existing entry in hash table for given cb_type */
        hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
-               if (eve_data->cb_type == PM_INIT_SUSPEND_CB &&
-                   eve_data->eve_cb == cb_fun) {
-                       is_callback_found = true;
+               if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
+                       /* Delete the list of callback */
+                       list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+                               if (cb_pos->eve_cb == cb_fun) {
+                                       is_callback_found = true;
+                                       list_del_init(&cb_pos->list);
+                                       kfree(cb_pos);
+                               }
+                       }
                        /* remove an object from a hashtable */
                        hash_del(&eve_data->hentry);
                        kfree(eve_data);
+                       is_need_to_unregister = true;
                }
        }
        if (!is_callback_found) {
@@ -156,20 +219,36 @@ static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
 }
 
 static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event,
-                                          event_cb_func_t cb_fun)
+                                          event_cb_func_t cb_fun, void *data)
 {
        bool is_callback_found = false;
        struct registered_event_data *eve_data;
        u64 key = ((u64)node_id << 32U) | (u64)event;
+       struct agent_cb *cb_pos;
+       struct agent_cb *cb_next;
+
+       is_need_to_unregister = false;
 
        /* Check for existing entry in hash table for given key id */
        hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
-               if (eve_data->key == key &&
-                   eve_data->eve_cb == cb_fun) {
-                       is_callback_found = true;
-                       /* remove an object from a hashtable */
-                       hash_del(&eve_data->hentry);
-                       kfree(eve_data);
+               if (eve_data->key == key) {
+                       /* Delete the list of callback */
+                       list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+                               if (cb_pos->eve_cb == cb_fun &&
+                                   cb_pos->agent_data == data) {
+                                       is_callback_found = true;
+                                       list_del_init(&cb_pos->list);
+                                       kfree(cb_pos);
+                               }
+                       }
+
+                       /* Remove HASH table if callback list is empty */
+                       if (list_empty(&eve_data->cb_list_head)) {
+                               /* remove an object from a HASH table */
+                               hash_del(&eve_data->hentry);
+                               kfree(eve_data);
+                               is_need_to_unregister = true;
+                       }
                }
        }
        if (!is_callback_found) {
@@ -241,7 +320,7 @@ int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, cons
                                        eve = event & (1 << pos);
                                        if (!eve)
                                                continue;
-                                       xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun);
+                                       xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
                                }
                        }
                }
@@ -263,10 +342,10 @@ int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, cons
                                        eve = event & (1 << pos);
                                        if (!eve)
                                                continue;
-                                       xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun);
+                                       xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
                                }
                        } else {
-                               xlnx_remove_cb_for_notify_event(node_id, event, cb_fun);
+                               xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
                        }
                        return ret;
                }
@@ -284,15 +363,18 @@ EXPORT_SYMBOL_GPL(xlnx_register_event);
  * @node_id:   Node-Id related to event.
  * @event:     Event Mask for the Error Event.
  * @cb_fun:    Function pointer of callback function.
+ * @data:      Pointer of agent's private data.
  *
  * Return:     Returns 0 on successful unregistration else error code.
  */
 int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
-                         event_cb_func_t cb_fun)
+                         event_cb_func_t cb_fun, void *data)
 {
-       int ret;
+       int ret = 0;
        u32 eve, pos;
 
+       is_need_to_unregister = false;
+
        if (event_manager_availability)
                return event_manager_availability;
 
@@ -309,23 +391,26 @@ int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, co
        } else {
                /* Remove Node-Id/Event from hash table */
                if (!xlnx_is_error_event(node_id)) {
-                       xlnx_remove_cb_for_notify_event(node_id, event, cb_fun);
+                       xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
                } else {
                        for (pos = 0; pos < MAX_BITS; pos++) {
                                eve = event & (1 << pos);
                                if (!eve)
                                        continue;
 
-                               xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun);
+                               xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
                        }
                }
 
-               /* Un-register for Node-Id/Event combination */
-               ret = zynqmp_pm_register_notifier(node_id, event, false, false);
-               if (ret) {
-                       pr_err("%s() failed for 0x%x and 0x%x: %d\n",
-                              __func__, node_id, event, ret);
-                       return ret;
+               /* Un-register if list is empty */
+               if (is_need_to_unregister) {
+                       /* Un-register for Node-Id/Event combination */
+                       ret = zynqmp_pm_register_notifier(node_id, event, false, false);
+                       if (ret) {
+                               pr_err("%s() failed for 0x%x and 0x%x: %d\n",
+                                      __func__, node_id, event, ret);
+                               return ret;
+                       }
                }
        }
 
@@ -338,12 +423,16 @@ static void xlnx_call_suspend_cb_handler(const u32 *payload)
        bool is_callback_found = false;
        struct registered_event_data *eve_data;
        u32 cb_type = payload[0];
+       struct agent_cb *cb_pos;
+       struct agent_cb *cb_next;
 
        /* Check for existing entry in hash table for given cb_type */
        hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) {
                if (eve_data->cb_type == cb_type) {
-                       eve_data->eve_cb(&payload[0], eve_data->agent_data);
-                       is_callback_found = true;
+                       list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+                               cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
+                               is_callback_found = true;
+                       }
                }
        }
        if (!is_callback_found)
@@ -356,12 +445,16 @@ static void xlnx_call_notify_cb_handler(const u32 *payload)
        struct registered_event_data *eve_data;
        u64 key = ((u64)payload[1] << 32U) | (u64)payload[2];
        int ret;
+       struct agent_cb *cb_pos;
+       struct agent_cb *cb_next;
 
        /* Check for existing entry in hash table for given key id */
        hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
                if (eve_data->key == key) {
-                       eve_data->eve_cb(&payload[0], eve_data->agent_data);
-                       is_callback_found = true;
+                       list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+                               cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
+                               is_callback_found = true;
+                       }
 
                        /* re register with firmware to get future events */
                        ret = zynqmp_pm_register_notifier(payload[1], payload[2],
@@ -369,9 +462,13 @@ static void xlnx_call_notify_cb_handler(const u32 *payload)
                        if (ret) {
                                pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__,
                                       payload[1], payload[2], ret);
-                               /* Remove already registered event from hash table */
-                               xlnx_remove_cb_for_notify_event(payload[1], payload[2],
-                                                               eve_data->eve_cb);
+                               list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head,
+                                                        list) {
+                                       /* Remove already registered event from hash table */
+                                       xlnx_remove_cb_for_notify_event(payload[1], payload[2],
+                                                                       cb_pos->eve_cb,
+                                                                       cb_pos->agent_data);
+                               }
                        }
                }
        }
@@ -572,8 +669,14 @@ static int xlnx_event_manager_remove(struct platform_device *pdev)
        struct registered_event_data *eve_data;
        struct hlist_node *tmp;
        int ret;
+       struct agent_cb *cb_pos;
+       struct agent_cb *cb_next;
 
        hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) {
+               list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+                       list_del_init(&cb_pos->list);
+                       kfree(cb_pos);
+               }
                hash_del(&eve_data->hentry);
                kfree(eve_data);
        }
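[Note: the xlnx event-manager rework above keeps one hash entry per (node, event) key and hangs a list of struct agent_cb callbacks off it, so several agents can share a single firmware registration and the notifier is only torn down once the list drains. A minimal, self-contained sketch of that hashtable-of-lists shape, assuming illustrative example_* names rather than the driver's own:]

        #include <linux/hashtable.h>
        #include <linux/list.h>
        #include <linux/slab.h>

        struct example_cb {
                void (*fn)(void *data);
                void *data;
                struct list_head list;
        };

        struct example_event {
                u64 key;
                struct list_head cb_list;
                struct hlist_node hentry;
        };

        static DEFINE_HASHTABLE(example_map, 4);

        static int example_add_cb(u64 key, void (*fn)(void *), void *data)
        {
                struct example_event *eve;
                struct example_cb *cb;

                /* reuse the existing entry for this key, if one is hashed */
                hash_for_each_possible(example_map, eve, hentry, key)
                        if (eve->key == key)
                                goto add_cb;

                eve = kzalloc(sizeof(*eve), GFP_KERNEL);
                if (!eve)
                        return -ENOMEM;
                eve->key = key;
                INIT_LIST_HEAD(&eve->cb_list);
                hash_add(example_map, &eve->hentry, key);

        add_cb:
                cb = kzalloc(sizeof(*cb), GFP_KERNEL);
                if (!cb)
                        return -ENOMEM;
                cb->fn = fn;
                cb->data = data;
                list_add(&cb->list, &eve->cb_list);
                return 0;
        }

[Removal then mirrors the hunk: delete the matching (fn, data) pair from the list, and only when list_empty() is true remove the hash entry and unregister from firmware.]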
index 859dd31..78a8a75 100644 (file)
@@ -208,7 +208,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
                                                           GFP_KERNEL);
                if (!zynqmp_pm_init_suspend_work) {
                        xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0,
-                                             suspend_event_callback);
+                                             suspend_event_callback, NULL);
                        return -ENOMEM;
                }
                event_registered = true;
@@ -263,7 +263,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
        ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
        if (ret) {
                if (event_registered) {
-                       xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback);
+                       xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback,
+                                             NULL);
                        event_registered = false;
                }
                dev_err(&pdev->dev, "unable to create sysfs interface\n");
@@ -277,7 +278,7 @@ static int zynqmp_pm_remove(struct platform_device *pdev)
 {
        sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
        if (event_registered)
-               xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback);
+               xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback, NULL);
 
        if (!rx_chan)
                mbox_free_channel(rx_chan);
index 354d3f8..a2bfb04 100644 (file)
@@ -536,11 +536,9 @@ int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
 {
        int ret;
 
-       ret = pm_runtime_get_sync(&slave->dev);
-       if (ret < 0 && ret != -EACCES) {
-               pm_runtime_put_noidle(&slave->dev);
+       ret = pm_runtime_resume_and_get(&slave->dev);
+       if (ret < 0 && ret != -EACCES)
                return ret;
-       }
 
        ret = sdw_nread_no_pm(slave, addr, count, val);
 
@@ -562,11 +560,9 @@ int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
 {
        int ret;
 
-       ret = pm_runtime_get_sync(&slave->dev);
-       if (ret < 0 && ret != -EACCES) {
-               pm_runtime_put_noidle(&slave->dev);
+       ret = pm_runtime_resume_and_get(&slave->dev);
+       if (ret < 0 && ret != -EACCES)
                return ret;
-       }
 
        ret = sdw_nwrite_no_pm(slave, addr, count, val);
 
@@ -1506,10 +1502,9 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
 
        sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
 
-       ret = pm_runtime_get_sync(&slave->dev);
+       ret = pm_runtime_resume_and_get(&slave->dev);
        if (ret < 0 && ret != -EACCES) {
                dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
-               pm_runtime_put_noidle(&slave->dev);
                return ret;
        }
 
@@ -1838,6 +1833,18 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
                                __func__, slave->dev_num);
 
                        complete(&slave->initialization_complete);
+
+                       /*
+                        * If the manager became pm_runtime active, the peripherals will be
+                        * restarted and attach, but their pm_runtime status may remain
+                        * suspended. If the 'update_slave_status' callback initiates
+                        * any sort of deferred processing, this processing would not be
+                        * cancelled on pm_runtime suspend.
+                        * To avoid such zombie states, we queue a request to resume.
+                        * This would be a no-op in case the peripheral was being resumed
+                        * by e.g. the ALSA/ASoC framework.
+                        */
+                       pm_request_resume(&slave->dev);
                }
        }
 
index 558390a..4fbb195 100644 (file)
@@ -386,12 +386,11 @@ static int cdns_parity_error_injection(void *data, u64 value)
         * Resume Master device. If this results in a bus reset, the
         * Slave devices will re-attach and be re-enumerated.
         */
-       ret = pm_runtime_get_sync(bus->dev);
+       ret = pm_runtime_resume_and_get(bus->dev);
        if (ret < 0 && ret != -EACCES) {
                dev_err_ratelimited(cdns->dev,
-                                   "pm_runtime_get_sync failed in %s, ret %d\n",
+                                   "pm_runtime_resume_and_get failed in %s, ret %d\n",
                                    __func__, ret);
-               pm_runtime_put_noidle(bus->dev);
                return ret;
        }
 
@@ -959,6 +958,8 @@ static void cdns_update_slave_status_work(struct work_struct *work)
                container_of(work, struct sdw_cdns, work);
        u32 slave0, slave1;
        u64 slave_intstat;
+       u32 device0_status;
+       int retry_count = 0;
 
        slave0 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
        slave1 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
@@ -968,10 +969,45 @@ static void cdns_update_slave_status_work(struct work_struct *work)
 
        dev_dbg_ratelimited(cdns->dev, "Slave status change: 0x%llx\n", slave_intstat);
 
+update_status:
        cdns_update_slave_status(cdns, slave_intstat);
        cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave0);
        cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave1);
 
+       /*
+        * When there is more than one peripheral per link, it's
+        * possible that a deviceB becomes attached after we deal with
+        * the attachment of deviceA. Since the hardware does a
+        * logical AND, the attachment of the second device does not
+        * change the status seen by the driver.
+        *
+        * In that case, clearing the registers above would result in
+        * the deviceB never being detected - until a change of status
+        * is observed on the bus.
+        *
+        * To avoid this race condition, re-check if any device0 needs
+        * attention with PING commands. There is no need to check for
+        * ALERTS since they are not allowed until a non-zero
+        * device_number is assigned.
+        */
+
+       device0_status = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
+       device0_status &= 3;
+
+       if (device0_status == SDW_SLAVE_ATTACHED) {
+               if (retry_count++ < SDW_MAX_DEVICES) {
+                       dev_dbg_ratelimited(cdns->dev,
+                                           "Device0 detected after clearing status, iteration %d\n",
+                                           retry_count);
+                       slave_intstat = CDNS_MCP_SLAVE_INTSTAT_ATTACHED;
+                       goto update_status;
+               } else {
+                       dev_err_ratelimited(cdns->dev,
+                                           "Device0 detected after %d iterations\n",
+                                           retry_count);
+               }
+       }
+
        /* clear and unmask Slave interrupt now */
        cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK);
        cdns_updatel(cdns, CDNS_MCP_INTMASK,
index 63101f1..505c5ef 100644 (file)
@@ -799,12 +799,11 @@ static int intel_startup(struct snd_pcm_substream *substream,
        struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
        int ret;
 
-       ret = pm_runtime_get_sync(cdns->dev);
+       ret = pm_runtime_resume_and_get(cdns->dev);
        if (ret < 0 && ret != -EACCES) {
                dev_err_ratelimited(cdns->dev,
-                                   "pm_runtime_get_sync failed in %s, ret %d\n",
+                                   "pm_runtime_resume_and_get failed in %s, ret %d\n",
                                    __func__, ret);
-               pm_runtime_put_noidle(cdns->dev);
                return ret;
        }
        return 0;
@@ -1293,6 +1292,9 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
        /* use generic bandwidth allocation algorithm */
        sdw->cdns.bus.compute_params = sdw_compute_params;
 
+       /* avoid resuming from pm_runtime suspend if it's not required */
+       dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
+
        ret = sdw_bus_master_add(bus, dev, dev->fwnode);
        if (ret) {
                dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
@@ -1828,6 +1830,9 @@ static int __maybe_unused intel_resume_runtime(struct device *dev)
                return 0;
        }
 
+       /* unconditionally disable WAKEEN interrupt */
+       intel_shim_wake(sdw, false);
+
        link_flags = md_flags >> (bus->link_id * 8);
        multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
 
index da1ad7e..22b7063 100644 (file)
 
 #define SWRM_SPECIAL_CMD_ID    0xF
 #define MAX_FREQ_NUM           1
-#define TIMEOUT_MS             (2 * HZ)
+#define TIMEOUT_MS             100
 #define QCOM_SWRM_MAX_RD_LEN   0x1
 #define QCOM_SDW_MAX_PORTS     14
 #define DEFAULT_CLK_FREQ       9600000
@@ -510,12 +510,12 @@ static irqreturn_t qcom_swrm_wake_irq_handler(int irq, void *dev_id)
        struct qcom_swrm_ctrl *swrm = dev_id;
        int ret;
 
-       ret = pm_runtime_get_sync(swrm->dev);
+       ret = pm_runtime_resume_and_get(swrm->dev);
        if (ret < 0 && ret != -EACCES) {
                dev_err_ratelimited(swrm->dev,
-                                   "pm_runtime_get_sync failed in %s, ret %d\n",
+                                   "pm_runtime_resume_and_get failed in %s, ret %d\n",
                                    __func__, ret);
-               pm_runtime_put_noidle(swrm->dev);
+               return ret;
        }
 
        if (swrm->wake_irq > 0) {
@@ -1058,12 +1058,11 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
        struct snd_soc_dai *codec_dai;
        int ret, i;
 
-       ret = pm_runtime_get_sync(ctrl->dev);
+       ret = pm_runtime_resume_and_get(ctrl->dev);
        if (ret < 0 && ret != -EACCES) {
                dev_err_ratelimited(ctrl->dev,
-                                   "pm_runtime_get_sync failed in %s, ret %d\n",
+                                   "pm_runtime_resume_and_get failed in %s, ret %d\n",
                                    __func__, ret);
-               pm_runtime_put_noidle(ctrl->dev);
                return ret;
        }
 
@@ -1252,12 +1251,12 @@ static int swrm_reg_show(struct seq_file *s_file, void *data)
        struct qcom_swrm_ctrl *swrm = s_file->private;
        int reg, reg_val, ret;
 
-       ret = pm_runtime_get_sync(swrm->dev);
+       ret = pm_runtime_resume_and_get(swrm->dev);
        if (ret < 0 && ret != -EACCES) {
                dev_err_ratelimited(swrm->dev,
-                                   "pm_runtime_get_sync failed in %s, ret %d\n",
+                                   "pm_runtime_resume_and_get failed in %s, ret %d\n",
                                    __func__, ret);
-               pm_runtime_put_noidle(swrm->dev);
+               return ret;
        }
 
        for (reg = 0; reg <= SWR_MSTR_MAX_REG_ADDR; reg += 4) {
@@ -1452,7 +1451,7 @@ static bool swrm_wait_for_frame_gen_enabled(struct qcom_swrm_ctrl *swrm)
        } while (retry--);
 
        dev_err(swrm->dev, "%s: link status not %s\n", __func__,
-               comp_sts && SWRM_FRM_GEN_ENABLED ? "connected" : "disconnected");
+               comp_sts & SWRM_FRM_GEN_ENABLED ? "connected" : "disconnected");
 
        return false;
 }
@@ -1549,6 +1548,7 @@ static const struct dev_pm_ops swrm_dev_pm_ops = {
 static const struct of_device_id qcom_swrm_of_match[] = {
        { .compatible = "qcom,soundwire-v1.3.0", .data = &swrm_v1_3_data },
        { .compatible = "qcom,soundwire-v1.5.1", .data = &swrm_v1_5_data },
+       { .compatible = "qcom,soundwire-v1.6.0", .data = &swrm_v1_5_data },
        {/* sentinel */},
 };
 
index f273459..d341505 100644 (file)
@@ -822,6 +822,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
                } else if (multi_link) {
                        dev_err(bus->dev,
                                "Post bank switch ops not implemented\n");
+                       ret = -EINVAL;
                        goto error;
                }
 
index d403a7a..72ab066 100644 (file)
@@ -319,12 +319,12 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
 
                        end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS);
                        do {
+                               if (time_after(jiffies, end))
+                                       return -ETIMEDOUT;
+
                                rc = fsi_spi_status(ctx, &status, "TX");
                                if (rc)
                                        return rc;
-
-                               if (time_after(jiffies, end))
-                                       return -ETIMEDOUT;
                        } while (status & SPI_FSI_STATUS_TDR_FULL);
 
                        sent += nb;
@@ -337,12 +337,12 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
                while (transfer->len > recv) {
                        end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS);
                        do {
+                               if (time_after(jiffies, end))
+                                       return -ETIMEDOUT;
+
                                rc = fsi_spi_status(ctx, &status, "RX");
                                if (rc)
                                        return rc;
-
-                               if (time_after(jiffies, end))
-                                       return -ETIMEDOUT;
                        } while (!(status & SPI_FSI_STATUS_RDR_FULL));
 
                        rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in);
index fe252a8..ea09d1b 100644 (file)
@@ -71,29 +71,11 @@ static ssize_t driver_override_store(struct device *dev,
                                     const char *buf, size_t count)
 {
        struct spi_device *spi = to_spi_device(dev);
-       const char *end = memchr(buf, '\n', count);
-       const size_t len = end ? end - buf : count;
-       const char *driver_override, *old;
-
-       /* We need to keep extra room for a newline when displaying value */
-       if (len >= (PAGE_SIZE - 1))
-               return -EINVAL;
-
-       driver_override = kstrndup(buf, len, GFP_KERNEL);
-       if (!driver_override)
-               return -ENOMEM;
+       int ret;
 
-       device_lock(dev);
-       old = spi->driver_override;
-       if (len) {
-               spi->driver_override = driver_override;
-       } else {
-               /* Empty string, disable driver override */
-               spi->driver_override = NULL;
-               kfree(driver_override);
-       }
-       device_unlock(dev);
-       kfree(old);
+       ret = driver_set_override(dev, &spi->driver_override, buf, count);
+       if (ret)
+               return ret;
 
        return count;
 }
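[Note: the spi.c hunk above collapses the open-coded driver_override handling onto driver_set_override(), the common helper that centralizes the newline trimming, length check and locked pointer swap that the deleted lines used to do by hand. A hedged sketch of a sysfs store built on it; struct example_client and its field names are placeholders, not the SPI core's types:]

        #include <linux/device.h>

        struct example_client {
                struct device dev;
                const char *driver_override;
        };

        static ssize_t driver_override_store(struct device *dev,
                                             struct device_attribute *attr,
                                             const char *buf, size_t count)
        {
                struct example_client *client =
                        container_of(dev, struct example_client, dev);
                int ret;

                /* helper validates length, strips '\n', swaps under device_lock() */
                ret = driver_set_override(dev, &client->driver_override, buf, count);
                if (ret)
                        return ret;

                return count;
        }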
@@ -1672,7 +1654,8 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
        ret = ctlr->transfer_one_message(ctlr, msg);
        if (ret) {
                dev_err(&ctlr->dev,
-                       "failed to transfer one message from queue\n");
+                       "failed to transfer one message from queue: %d\n",
+                       ret);
                goto out;
        }
 
index fc27473..0a993c4 100644 (file)
@@ -64,8 +64,6 @@ source "drivers/staging/gdm724x/Kconfig"
 
 source "drivers/staging/fwserial/Kconfig"
 
-source "drivers/staging/unisys/Kconfig"
-
 source "drivers/staging/clocking-wizard/Kconfig"
 
 source "drivers/staging/fbtft/Kconfig"
@@ -86,5 +84,6 @@ source "drivers/staging/fieldbus/Kconfig"
 
 source "drivers/staging/qlge/Kconfig"
 
+source "drivers/staging/vme_user/Kconfig"
 
 endif # STAGING
index 65e3179..2800ab9 100644 (file)
@@ -14,7 +14,7 @@ obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
 obj-$(CONFIG_OCTEON_USB)       += octeon-usb/
 obj-$(CONFIG_VT6655)           += vt6655/
 obj-$(CONFIG_VT6656)           += vt6656/
-obj-$(CONFIG_VME_BUS)          += vme/
+obj-$(CONFIG_VME_BUS)          += vme_user/
 obj-$(CONFIG_IIO)              += iio/
 obj-$(CONFIG_FB_SM750)         += sm750fb/
 obj-$(CONFIG_USB_EMXX)         += emxx_udc/
@@ -22,7 +22,6 @@ obj-$(CONFIG_MFD_NVEC)                += nvec/
 obj-$(CONFIG_STAGING_BOARD)    += board/
 obj-$(CONFIG_LTE_GDM724X)      += gdm724x/
 obj-$(CONFIG_FIREWIRE_SERIAL)  += fwserial/
-obj-$(CONFIG_UNISYSSPAR)       += unisys/
 obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD)  += clocking-wizard/
 obj-$(CONFIG_FB_TFT)           += fbtft/
 obj-$(CONFIG_MOST)             += most/
index a344410..cd86b9c 100644 (file)
@@ -1384,7 +1384,7 @@ anybuss_host_common_probe(struct device *dev,
                goto err_device;
        return cd;
 err_device:
-       device_unregister(&cd->client->dev);
+       put_device(&cd->client->dev);
 err_kthread:
        kthread_stop(cd->qthread);
 err_reset:
index bbf3ba7..45afa20 100644 (file)
@@ -445,7 +445,7 @@ static int __maybe_unused arche_apb_ctrl_suspend(struct device *dev)
 static int __maybe_unused arche_apb_ctrl_resume(struct device *dev)
 {
        /*
-        * Atleast for ES2 we have to meet the delay requirement between
+        * At least for ES2 we have to meet the delay requirement between
         * unipro switch and AP bridge init, depending on whether bridge is in
         * OFF state or standby state.
         *
index e374dfc..fcbd5f7 100644 (file)
@@ -591,7 +591,7 @@ static __maybe_unused int arche_platform_suspend(struct device *dev)
 static __maybe_unused int arche_platform_resume(struct device *dev)
 {
        /*
-        * Atleast for ES2 we have to meet the delay requirement between
+        * At least for ES2 we have to meet the delay requirement between
         * unipro switch and AP bridge init, depending on whether bridge is in
         * OFF state or standby state.
         *
index db0b600..0ad8aea 100644 (file)
@@ -497,7 +497,7 @@ static int gbcodec_prepare(struct snd_pcm_substream *substream,
                           struct snd_soc_dai *dai)
 {
        int ret;
-       struct gbaudio_module_info *module;
+       struct gbaudio_module_info *module = NULL, *iter;
        struct gbaudio_data_connection *data;
        struct gb_bundle *bundle;
        struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
@@ -511,11 +511,13 @@ static int gbcodec_prepare(struct snd_pcm_substream *substream,
                return -ENODEV;
        }
 
-       list_for_each_entry(module, &codec->module_list, list) {
+       list_for_each_entry(iter, &codec->module_list, list) {
                /* find the dai */
-               data = find_data(module, dai->id);
-               if (data)
+               data = find_data(iter, dai->id);
+               if (data) {
+                       module = iter;
                        break;
+               }
        }
        if (!data) {
                dev_err(dai->dev, "DATA connection missing\n");
@@ -563,7 +565,7 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
 {
        int ret;
        struct gbaudio_data_connection *data;
-       struct gbaudio_module_info *module;
+       struct gbaudio_module_info *module = NULL, *iter;
        struct gb_bundle *bundle;
        struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
        struct gbaudio_stream_params *params;
@@ -592,15 +594,17 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
                return ret;
        }
 
-       list_for_each_entry(module, &codec->module_list, list) {
+       list_for_each_entry(iter, &codec->module_list, list) {
                /* find the dai */
-               data = find_data(module, dai->id);
-               if (data)
+               data = find_data(iter, dai->id);
+               if (data) {
+                       module = iter;
                        break;
+               }
        }
        if (!data) {
-               dev_err(dai->dev, "%s:%s DATA connection missing\n",
-                       dai->name, module->name);
+               dev_err(dai->dev, "%s DATA connection missing\n",
+                       dai->name);
                mutex_unlock(&codec->lock);
                return -ENODEV;
        }
@@ -1027,12 +1031,6 @@ static int gbcodec_probe(struct snd_soc_component *comp)
        return 0;
 }
 
-static void gbcodec_remove(struct snd_soc_component *comp)
-{
-       /* Empty function for now */
-       return;
-}
-
 static int gbcodec_write(struct snd_soc_component *comp, unsigned int reg,
                         unsigned int value)
 {
@@ -1047,8 +1045,6 @@ static unsigned int gbcodec_read(struct snd_soc_component *comp,
 
 static const struct snd_soc_component_driver soc_codec_dev_gbaudio = {
        .probe  = gbcodec_probe,
-       .remove = gbcodec_remove,
-
        .read = gbcodec_read,
        .write = gbcodec_write,
 };
index ad20ec2..3fda172 100644 (file)
@@ -297,7 +297,6 @@ static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
 
        pwm->dev = &gbphy_dev->dev;
        pwm->ops = &gb_pwm_ops;
-       pwm->base = -1;                 /* Allocate base dynamically */
        pwm->npwm = pwmc->pwm_max + 1;
 
        ret = pwmchip_add(pwm);
index 867bf28..4c42e39 100644 (file)
@@ -533,7 +533,7 @@ static int log_results(struct loopback_test *t)
 
                fd = open(file_name, O_WRONLY | O_CREAT | O_APPEND, 0644);
                if (fd < 0) {
-                       fprintf(stderr, "unable to open %s for appendation\n", file_name);
+                       fprintf(stderr, "unable to open %s for appending\n", file_name);
                        abort();
                }
 
index 71c7097..52b8957 100644 (file)
@@ -290,7 +290,7 @@ static inline ssize_t ad7746_start_calib(struct device *dev,
        int ret, timeout = 10;
        bool doit;
 
-       ret = strtobool(buf, &doit);
+       ret = kstrtobool(buf, &doit);
        if (ret < 0)
                return ret;
 
index 793918e..f177b20 100644 (file)
@@ -749,7 +749,6 @@ static int ad5933_probe(struct i2c_client *client,
        indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
 
        ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
-                                         INDIO_BUFFER_SOFTWARE,
                                          &ad5933_ring_setup_ops);
        if (ret)
                return ret;
index 74adb82..c0b2716 100644 (file)
@@ -499,7 +499,6 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
                ret = IIO_VAL_INT;
                break;
        case IIO_ANGL_VEL:
-               negative = st->rx[0] & 0x80;
                vel = be16_to_cpup((__be16 *)st->rx);
                vel >>= 16 - st->resolution;
                if (vel & 0x8000) {
index 1c63d59..9429ee1 100644 (file)
@@ -84,10 +84,6 @@ static void ks_wlan_hw_wakeup_task(struct work_struct *work)
                        return;
                }
        }
-
-       /* power save */
-       if (atomic_read(&priv->sme_task.count) > 0)
-               tasklet_enable(&priv->sme_task);
 }
 
 static void ks_wlan_do_power_save(struct ks_wlan_private *priv)
@@ -2200,10 +2196,11 @@ static void hostif_sme_execute(struct ks_wlan_private *priv, int event)
        }
 }
 
-static
-void hostif_sme_task(struct tasklet_struct *t)
+static void hostif_sme_work(struct work_struct *work)
 {
-       struct ks_wlan_private *priv = from_tasklet(priv, t, sme_task);
+       struct ks_wlan_private *priv;
+
+       priv = container_of(work, struct ks_wlan_private, sme_work);
 
        if (priv->dev_state < DEVICE_STATE_BOOT)
                return;
@@ -2214,7 +2211,7 @@ void hostif_sme_task(struct tasklet_struct *t)
        hostif_sme_execute(priv, priv->sme_i.event_buff[priv->sme_i.qhead]);
        inc_smeqhead(priv);
        if (cnt_smeqbody(priv) > 0)
-               tasklet_schedule(&priv->sme_task);
+               schedule_work(&priv->sme_work);
 }
 
 /* send to Station Management Entity module */
@@ -2229,7 +2226,7 @@ void hostif_sme_enqueue(struct ks_wlan_private *priv, u16 event)
                netdev_err(priv->net_dev, "sme queue buffer overflow\n");
        }
 
-       tasklet_schedule(&priv->sme_task);
+       schedule_work(&priv->sme_work);
 }
 
 static inline void hostif_aplist_init(struct ks_wlan_private *priv)
@@ -2254,7 +2251,7 @@ static inline void hostif_sme_init(struct ks_wlan_private *priv)
        priv->sme_i.qtail = 0;
        spin_lock_init(&priv->sme_i.sme_spin);
        priv->sme_i.sme_flag = 0;
-       tasklet_setup(&priv->sme_task, hostif_sme_task);
+       INIT_WORK(&priv->sme_work, hostif_sme_work);
 }
 
 static inline void hostif_wpa_init(struct ks_wlan_private *priv)
@@ -2312,5 +2309,5 @@ int hostif_init(struct ks_wlan_private *priv)
 
 void hostif_exit(struct ks_wlan_private *priv)
 {
-       tasklet_kill(&priv->sme_task);
+       cancel_work_sync(&priv->sme_work);
 }
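[Note: the ks_wlan hunks above convert the SME tasklet into a work item so the handler runs in process context and can be cancelled synchronously on exit. A minimal sketch of that tasklet-to-workqueue conversion pattern; the example_* names are placeholders:]

        #include <linux/kernel.h>
        #include <linux/workqueue.h>

        struct example_priv {
                struct work_struct sme_work;
        };

        static void example_sme_work(struct work_struct *work)
        {
                struct example_priv *priv =
                        container_of(work, struct example_priv, sme_work);

                /* drain queued events here; re-queue if more arrived meanwhile */
                (void)priv;
        }

        static void example_init(struct example_priv *priv)
        {
                INIT_WORK(&priv->sme_work, example_sme_work);
        }

        static void example_enqueue(struct example_priv *priv)
        {
                schedule_work(&priv->sme_work);         /* was tasklet_schedule() */
        }

        static void example_exit(struct example_priv *priv)
        {
                cancel_work_sync(&priv->sme_work);      /* was tasklet_kill() */
        }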
index 7aaf8d7..3e9a91b 100644 (file)
@@ -449,7 +449,7 @@ struct ks_wlan_private {
        struct sme_info sme_i;
        u8 *rxp;
        unsigned int rx_size;
-       struct tasklet_struct sme_task;
+       struct work_struct sme_work;
        struct work_struct wakeup_work;
        int scan_ind_count;
 
index 29f8ce2..97dff82 100644 (file)
@@ -45,9 +45,6 @@ MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a powe
 
 static DEFINE_SPINLOCK(dim_lock);
 
-static void dim2_tasklet_fn(unsigned long data);
-static DECLARE_TASKLET_OLD(dim2_tasklet, dim2_tasklet_fn);
-
 /**
  * struct hdm_channel - private structure to keep channel specific data
  * @name: channel name
@@ -361,15 +358,9 @@ static irqreturn_t dim2_mlb_isr(int irq, void *_dev)
        return IRQ_HANDLED;
 }
 
-/**
- * dim2_tasklet_fn - tasklet function
- * @data: private data
- *
- * Service each initialized channel, if needed
- */
-static void dim2_tasklet_fn(unsigned long data)
+static irqreturn_t dim2_task_irq(int irq, void *_dev)
 {
-       struct dim2_hdm *dev = (struct dim2_hdm *)data;
+       struct dim2_hdm *dev = _dev;
        unsigned long flags;
        int ch_idx;
 
@@ -385,6 +376,8 @@ static void dim2_tasklet_fn(unsigned long data)
                while (!try_start_dim_transfer(dev->hch + ch_idx))
                        continue;
        }
+
+       return IRQ_HANDLED;
 }
 
 /**
@@ -392,8 +385,8 @@ static void dim2_tasklet_fn(unsigned long data)
  * @irq: irq number
  * @_dev: private data
  *
- * Acknowledge the interrupt and schedule a tasklet to service channels.
- * Return IRQ_HANDLED.
+ * Acknowledge the interrupt and service each initialized channel,
+ * if needed, in task context.
  */
 static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
 {
@@ -405,9 +398,7 @@ static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
        dim_service_ahb_int_irq(get_active_channels(dev, buffer));
        spin_unlock_irqrestore(&dim_lock, flags);
 
-       dim2_tasklet.data = (unsigned long)dev;
-       tasklet_schedule(&dim2_tasklet);
-       return IRQ_HANDLED;
+       return IRQ_WAKE_THREAD;
 }
 
 /**
@@ -654,14 +645,12 @@ static int poison_channel(struct most_interface *most_iface, int ch_idx)
        if (!hdm_ch->is_initialized)
                return -EPERM;
 
-       tasklet_disable(&dim2_tasklet);
        spin_lock_irqsave(&dim_lock, flags);
        hal_ret = dim_destroy_channel(&hdm_ch->ch);
        hdm_ch->is_initialized = false;
        if (ch_idx == dev->atx_idx)
                dev->atx_idx = -1;
        spin_unlock_irqrestore(&dim_lock, flags);
-       tasklet_enable(&dim2_tasklet);
        if (hal_ret != DIM_NO_ERROR) {
                pr_err("HAL Failed to close channel %s\n", hdm_ch->name);
                ret = -EFAULT;
@@ -821,8 +810,8 @@ static int dim2_probe(struct platform_device *pdev)
                goto err_shutdown_dim;
        }
 
-       ret = devm_request_irq(&pdev->dev, irq, dim2_ahb_isr, 0,
-                              "dim2_ahb0_int", dev);
+       ret = devm_request_threaded_irq(&pdev->dev, irq, dim2_ahb_isr,
+                                       dim2_task_irq, 0, "dim2_ahb0_int", dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to request ahb0_int irq %d\n", irq);
                goto err_shutdown_dim;
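[Note: the dim2 hunks above retire the DECLARE_TASKLET_OLD() bottom half in favour of a threaded IRQ: the hard handler acknowledges the hardware and returns IRQ_WAKE_THREAD, and the thread function does the channel servicing in task context. A hedged sketch of that split; the example_* names are placeholders:]

        #include <linux/interrupt.h>
        #include <linux/platform_device.h>

        static irqreturn_t example_hard_isr(int irq, void *data)
        {
                /* ack the interrupt quickly, defer the real work */
                return IRQ_WAKE_THREAD;
        }

        static irqreturn_t example_thread_fn(int irq, void *data)
        {
                /* runs in a kernel thread; may sleep, unlike a tasklet */
                return IRQ_HANDLED;
        }

        static int example_probe(struct platform_device *pdev)
        {
                int irq = platform_get_irq(pdev, 0);

                if (irq < 0)
                        return irq;

                return devm_request_threaded_irq(&pdev->dev, irq,
                                                 example_hard_isr,
                                                 example_thread_fn,
                                                 0, "example", NULL);
        }

[This also removes the tasklet_disable()/tasklet_enable() bracketing in poison_channel(), since the threaded handler is serialized against itself by the IRQ core.]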
index 55e0ad7..d0dd659 100644 (file)
@@ -2072,6 +2072,7 @@ struct qlge_adapter *netdev_to_qdev(struct net_device *ndev)
 
        return ndev_priv->qdev;
 }
+
 /*
  * The main Adapter structure definition.
  * This structure has all fields relevant to the hardware.
index 2ff78ed..ac6effb 100644 (file)
@@ -188,7 +188,6 @@ void        expire_timeout_chk(struct adapter *padapter)
                                spin_lock_bh(&pstapriv->auth_list_lock);
                        }
                }
-
        }
        spin_unlock_bh(&pstapriv->auth_list_lock);
 
@@ -381,7 +380,6 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
                /* set ra_id, init_rate */
                psta->raid = raid;
                psta->init_rate = init_rate;
-
        }
 }
 
@@ -455,7 +453,6 @@ void update_bmc_sta(struct adapter *padapter)
                spin_lock_bh(&psta->lock);
                psta->state = _FW_LINKED;
                spin_unlock_bh(&psta->lock);
-
        }
 }
 
index f056204..bca20fe 100644 (file)
@@ -53,7 +53,8 @@ static unsigned char *__nat25_find_pppoe_tag(struct pppoe_hdr *ph, unsigned shor
        unsigned char *cur_ptr, *start_ptr;
        unsigned short tagLen, tagType;
 
-       start_ptr = cur_ptr = (unsigned char *)ph->tag;
+       start_ptr = (unsigned char *)ph->tag;
+       cur_ptr = (unsigned char *)ph->tag;
        while ((cur_ptr - start_ptr) < ntohs(ph->length)) {
                /*  prevent un-alignment access */
                tagType = (unsigned short)((cur_ptr[0] << 8) + cur_ptr[1]);
@@ -87,19 +88,19 @@ static int skb_pull_and_merge(struct sk_buff *skb, unsigned char *src, int len)
        int tail_len;
        unsigned long end, tail;
 
-       if ((src+len) > skb_tail_pointer(skb) || skb->len < len)
+       if ((src + len) > skb_tail_pointer(skb) || skb->len < len)
                return -1;
 
        tail = (unsigned long)skb_tail_pointer(skb);
-       end = (unsigned long)src+len;
+       end = (unsigned long)src + len;
        if (tail < end)
                return -1;
 
-       tail_len = (int)(tail-end);
+       tail_len = (int)(tail - end);
        if (tail_len > 0)
-               memmove(src, src+len, tail_len);
+               memmove(src, src + len, tail_len);
 
-       skb_trim(skb, skb->len-len);
+       skb_trim(skb, skb->len - len);
        return 0;
 }
 
@@ -117,7 +118,7 @@ static void __nat25_generate_ipv4_network_addr(unsigned char *networkAddr,
        memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
 
        networkAddr[0] = NAT25_IPV4;
-       memcpy(networkAddr+7, (unsigned char *)ipAddr, 4);
+       memcpy(networkAddr + 7, (unsigned char *)ipAddr, 4);
 }
 
 static void __nat25_generate_pppoe_network_addr(unsigned char *networkAddr,
@@ -126,8 +127,8 @@ static void __nat25_generate_pppoe_network_addr(unsigned char *networkAddr,
        memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
 
        networkAddr[0] = NAT25_PPPOE;
-       memcpy(networkAddr+1, (unsigned char *)sid, 2);
-       memcpy(networkAddr+3, (unsigned char *)ac_mac, 6);
+       memcpy(networkAddr + 1, (unsigned char *)sid, 2);
+       memcpy(networkAddr + 3, (unsigned char *)ac_mac, 6);
 }
 
 static  void __nat25_generate_ipv6_network_addr(unsigned char *networkAddr,
@@ -136,17 +137,17 @@ static  void __nat25_generate_ipv6_network_addr(unsigned char *networkAddr,
        memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
 
        networkAddr[0] = NAT25_IPV6;
-       memcpy(networkAddr+1, (unsigned char *)ipAddr, 16);
+       memcpy(networkAddr + 1, (unsigned char *)ipAddr, 16);
 }
 
 static unsigned char *scan_tlv(unsigned char *data, int len, unsigned char tag, unsigned char len8b)
 {
        while (len > 0) {
-               if (*data == tag && *(data+1) == len8b && len >= len8b*8)
-                       return data+2;
+               if (*data == tag && *(data + 1) == len8b && len >= len8b * 8)
+                       return data + 2;
 
-               len -= (*(data+1))*8;
-               data += (*(data+1))*8;
+               len -= (*(data + 1)) * 8;
+               data += (*(data + 1)) * 8;
        }
        return NULL;
 }
@@ -158,7 +159,7 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char
 
        if (icmphdr->icmp6_type == NDISC_ROUTER_SOLICITATION) {
                if (len >= 8) {
-                       mac = scan_tlv(&data[8], len-8, 1, 1);
+                       mac = scan_tlv(&data[8], len - 8, 1, 1);
                        if (mac) {
                                memcpy(mac, replace_mac, 6);
                                return 1;
@@ -166,7 +167,7 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char
                }
        } else if (icmphdr->icmp6_type == NDISC_ROUTER_ADVERTISEMENT) {
                if (len >= 16) {
-                       mac = scan_tlv(&data[16], len-16, 1, 1);
+                       mac = scan_tlv(&data[16], len - 16, 1, 1);
                        if (mac) {
                                memcpy(mac, replace_mac, 6);
                                return 1;
@@ -174,7 +175,7 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char
                }
        } else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
                if (len >= 24) {
-                       mac = scan_tlv(&data[24], len-24, 1, 1);
+                       mac = scan_tlv(&data[24], len - 24, 1, 1);
                        if (mac) {
                                memcpy(mac, replace_mac, 6);
                                return 1;
@@ -182,7 +183,7 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char
                }
        } else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
                if (len >= 24) {
-                       mac = scan_tlv(&data[24], len-24, 2, 1);
+                       mac = scan_tlv(&data[24], len - 24, 2, 1);
                        if (mac) {
                                memcpy(mac, replace_mac, 6);
                                return 1;
@@ -190,7 +191,7 @@ static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char
                }
        } else if (icmphdr->icmp6_type == NDISC_REDIRECT) {
                if (len >= 40) {
-                       mac = scan_tlv(&data[40], len-40, 2, 1);
+                       mac = scan_tlv(&data[40], len - 40, 2, 1);
                        if (mac) {
                                memcpy(mac, replace_mac, 6);
                                return 1;
@@ -313,6 +314,7 @@ void nat25_db_cleanup(struct adapter *priv)
 
        for (i = 0; i < NAT25_HASH_SIZE; i++) {
                struct nat25_network_db_entry *f;
+
                f = priv->nethash[i];
                while (f) {
                        struct nat25_network_db_entry *g;
@@ -339,12 +341,12 @@ void nat25_db_expire(struct adapter *priv)
 
        for (i = 0; i < NAT25_HASH_SIZE; i++) {
                struct nat25_network_db_entry *f;
-               f = priv->nethash[i];
 
+               f = priv->nethash[i];
                while (f) {
                        struct nat25_network_db_entry *g;
-                       g = f->next_hash;
 
+                       g = f->next_hash;
                        if (__nat25_has_expired(f)) {
                                if (atomic_dec_and_test(&f->use_count)) {
                                        if (priv->scdb_entry == f) {
@@ -396,7 +398,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
                        tmp = be32_to_cpu(iph->saddr);
                        __nat25_generate_ipv4_network_addr(networkAddr, &tmp);
                        /* record source IP address and source mac address into db */
-                       __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+                       __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr);
                        return 0;
                default:
                        return -1;
@@ -421,7 +423,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
                        arp_ptr += arp->ar_hln;
                        sender = (unsigned int *)arp_ptr;
                        __nat25_generate_ipv4_network_addr(networkAddr, sender);
-                       __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+                       __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr);
                        return 0;
                default:
                        return -1;
@@ -432,7 +434,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
                /*                Handle PPPoE frame                 */
                /*---------------------------------------------------*/
                struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
-               unsigned short *pMagic;
+               __be16 *pMagic;
 
                switch (method) {
                case NAT25_CHECK:
@@ -458,22 +460,22 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
                                                            sizeof(tag_buf))
                                                                return -1;
 
-                                                       memcpy(tag->tag_data+MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN,
+                                                       memcpy(tag->tag_data + MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN,
                                                                pOldTag->tag_data, old_tag_len);
 
-                                                       if (skb_pull_and_merge(skb, (unsigned char *)pOldTag, TAG_HDR_LEN+old_tag_len) < 0)
+                                                       if (skb_pull_and_merge(skb, (unsigned char *)pOldTag, TAG_HDR_LEN + old_tag_len) < 0)
                                                                return -1;
 
-                                                       ph->length = htons(ntohs(ph->length)-TAG_HDR_LEN-old_tag_len);
+                                                       ph->length = htons(ntohs(ph->length) - TAG_HDR_LEN - old_tag_len);
                                                }
 
                                                tag->tag_type = PTT_RELAY_SID;
-                                               tag->tag_len = htons(MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN+old_tag_len);
+                                               tag->tag_len = htons(MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN + old_tag_len);
 
                                                /*  insert the magic_code+client mac in relay tag */
-                                               pMagic = (unsigned short *)tag->tag_data;
+                                               pMagic = (__be16 *)tag->tag_data;
                                                *pMagic = htons(MAGIC_CODE);
-                                               memcpy(tag->tag_data+MAGIC_CODE_LEN, skb->data+ETH_ALEN, ETH_ALEN);
+                                               memcpy(tag->tag_data + MAGIC_CODE_LEN, skb->data + ETH_ALEN, ETH_ALEN);
 
                                                /* Add relay tag */
                                                if (__nat25_add_pppoe_tag(skb, tag) < 0)
@@ -486,7 +488,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
                                                        return -2;
 
                                                if (priv->pppoe_connection_in_progress == 0)
-                                                       memcpy(priv->pppoe_addr, skb->data+ETH_ALEN, ETH_ALEN);
+                                                       memcpy(priv->pppoe_addr, skb->data + ETH_ALEN, ETH_ALEN);
 
                                                priv->pppoe_connection_in_progress = WAIT_TIME_PPPOE;
                                        }
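
Hedged aside on the pMagic change: the relay tag stores the magic code in network byte order via htons(), so typing the pointer as __be16 lets sparse verify the endianness. A standalone user-space sketch of the same store; MAGIC_CODE_DEMO is a made-up stand-in for the driver's MAGIC_CODE:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define MAGIC_CODE_DEMO 0x8186	/* placeholder value, not the driver's */

int main(void)
{
	unsigned char tag_data[8] = { 0 };
	uint16_t be_magic = htons(MAGIC_CODE_DEMO);

	/* magic code first; the client MAC would follow at MAGIC_CODE_LEN */
	memcpy(tag_data, &be_magic, sizeof(be_magic));
	printf("first two tag bytes: %02x %02x\n", tag_data[0], tag_data[1]);
	return 0;
}
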
@@ -496,11 +498,11 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
                        } else {        /*  session phase */
                                __nat25_generate_pppoe_network_addr(networkAddr, skb->data, &ph->sid);
 
-                               __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+                               __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr);
 
                                if (!priv->ethBrExtInfo.addPPPoETag &&
                                    priv->pppoe_connection_in_progress &&
-                                   !memcmp(skb->data+ETH_ALEN, priv->pppoe_addr, ETH_ALEN))
+                                   !memcmp(skb->data + ETH_ALEN, priv->pppoe_addr, ETH_ALEN))
                                        priv->pppoe_connection_in_progress = 0;
                        }
                        return 0;
@@ -548,7 +550,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
                case NAT25_INSERT:
                        if (memcmp(&iph->saddr, "\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0", 16)) {
                                __nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->saddr);
-                               __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+                               __nat25_db_network_insert(priv, skb->data + ETH_ALEN, networkAddr);
 
                                if (iph->nexthdr == IPPROTO_ICMPV6 &&
                                                skb->len > (ETH_HLEN +  sizeof(*iph) + 4)) {
@@ -557,9 +559,11 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
                                                struct icmp6hdr  *hdr = (struct icmp6hdr *)(skb->data + ETH_HLEN + sizeof(*iph));
                                                hdr->icmp6_cksum = 0;
                                                hdr->icmp6_cksum = csum_ipv6_magic(&iph->saddr, &iph->daddr,
-                                                                               iph->payload_len,
+                                                                               be16_to_cpu(iph->payload_len),
                                                                                IPPROTO_ICMPV6,
-                                                                               csum_partial((__u8 *)hdr, iph->payload_len, 0));
+                                                                               csum_partial((__u8 *)hdr,
+                                                                               be16_to_cpu(iph->payload_len),
+                                                                               0));
                                        }
                                }
                        }
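
The checksum fix converts payload_len, a big-endian __be16 field of the IPv6 header, to host order before handing it to csum_ipv6_magic() and csum_partial(), which take plain byte counts. A tiny user-space illustration with ntohs() standing in for be16_to_cpu(); the 40-byte length is just an example:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint16_t wire_len = htons(40);	/* payload_len as stored in the header */

	printf("raw field: 0x%04x, host-order length: %u bytes\n",
	       wire_len, ntohs(wire_len));
	return 0;
}
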
index 6eca301..06523d9 100644 (file)
 #include "../include/rtw_mlme_ext.h"
 #include "../include/rtl8188e_dm.h"
 
-/*
-Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
-No irqsave is necessary.
-*/
+/* Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
+ * No irqsave is necessary.
+ */
 
-static int _rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
+static void c2h_wk_callback(struct work_struct *work);
+
+void rtw_free_evt_priv(struct  evt_priv *pevtpriv)
 {
-       int res = _SUCCESS;
+       cancel_work_sync(&pevtpriv->c2h_wk);
+       while (pevtpriv->c2h_wk_alive)
+               msleep(10);
+
+       while (!rtw_cbuf_empty(pevtpriv->c2h_queue)) {
+               void *c2h = rtw_cbuf_pop(pevtpriv->c2h_queue);
+               if (c2h && c2h != (void *)pevtpriv)
+                       kfree(c2h);
+       }
+}
+
+/* Calling Context:
+ *
+ * rtw_enqueue_cmd can only be called from kernel thread context,
+ * since only spin_lock is used.
+ *
+ * ISR/Call-Back functions can't call this sub-function.
+ */
+
+static int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
+{
+       unsigned long flags;
+
+       if (!obj)
+               goto exit;
+
+       spin_lock_irqsave(&queue->lock, flags);
+
+       list_add_tail(&obj->list, &queue->queue);
+
+       spin_unlock_irqrestore(&queue->lock, flags);
+
+exit:
+
+       return _SUCCESS;
+}
+
+u32    rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
+{
+       u32 res = _SUCCESS;
 
        init_completion(&pcmdpriv->enqueue_cmd);
        /* sema_init(&(pcmdpriv->cmd_done_sema), 0); */
@@ -57,11 +97,9 @@ exit:
        return res;
 }
 
-static void c2h_wk_callback(struct work_struct *work);
-
-static int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
+u32 rtw_init_evt_priv(struct evt_priv *pevtpriv)
 {
-       int res = _SUCCESS;
+       u32 res = _SUCCESS;
 
        /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
        atomic_set(&pevtpriv->event_seq, 0);
@@ -69,24 +107,13 @@ static int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
        INIT_WORK(&pevtpriv->c2h_wk, c2h_wk_callback);
        pevtpriv->c2h_wk_alive = false;
        pevtpriv->c2h_queue = rtw_cbuf_alloc(C2H_QUEUE_MAX_LEN + 1);
+       if (!pevtpriv->c2h_queue)
+               res = _FAIL;
 
        return res;
 }
 
-void rtw_free_evt_priv(struct  evt_priv *pevtpriv)
-{
-       cancel_work_sync(&pevtpriv->c2h_wk);
-       while (pevtpriv->c2h_wk_alive)
-               msleep(10);
-
-       while (!rtw_cbuf_empty(pevtpriv->c2h_queue)) {
-               void *c2h = rtw_cbuf_pop(pevtpriv->c2h_queue);
-               if (c2h && c2h != (void *)pevtpriv)
-                       kfree(c2h);
-       }
-}
-
-static void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
+void rtw_free_cmd_priv(struct  cmd_priv *pcmdpriv)
 {
        if (pcmdpriv) {
                kfree(pcmdpriv->cmd_allocated_buf);
@@ -94,75 +121,6 @@ static void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
        }
 }
 
-/*
-Calling Context:
-
-rtw_enqueue_cmd can only be called between kernel thread,
-since only spin_lock is used.
-
-ISR/Call-Back functions can't call this sub-function.
-
-*/
-
-static int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
-{
-       unsigned long flags;
-
-       if (!obj)
-               goto exit;
-
-       spin_lock_irqsave(&queue->lock, flags);
-
-       list_add_tail(&obj->list, &queue->queue);
-
-       spin_unlock_irqrestore(&queue->lock, flags);
-
-exit:
-
-       return _SUCCESS;
-}
-
-static struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue)
-{
-       struct cmd_obj *obj;
-       unsigned long flags;
-
-       spin_lock_irqsave(&queue->lock, flags);
-       if (list_empty(&queue->queue)) {
-               obj = NULL;
-       } else {
-               obj = container_of((&queue->queue)->next, struct cmd_obj, list);
-               list_del_init(&obj->list);
-       }
-
-       spin_unlock_irqrestore(&queue->lock, flags);
-
-       return obj;
-}
-
-u32    rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
-{
-       u32     res;
-
-       res = _rtw_init_cmd_priv(pcmdpriv);
-
-       return res;
-}
-
-u32 rtw_init_evt_priv(struct evt_priv *pevtpriv)
-{
-       int     res;
-
-       res = _rtw_init_evt_priv(pevtpriv);
-
-       return res;
-}
-
-void rtw_free_cmd_priv(struct  cmd_priv *pcmdpriv)
-{
-       _rtw_free_cmd_priv(pcmdpriv);
-}
-
 static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
 {
        u8 bAllow = false; /* set to true to allow enqueuing cmd when hw_init_completed is false */
@@ -187,7 +145,7 @@ u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
        cmd_obj->padapter = padapter;
 
        res = rtw_cmd_filter(pcmdpriv, cmd_obj);
-       if (_FAIL == res) {
+       if (res == _FAIL) {
                rtw_free_cmd_obj(cmd_obj);
                goto exit;
        }
@@ -204,11 +162,21 @@ exit:
 
 struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
 {
-       struct cmd_obj *cmd_obj;
+       struct cmd_obj *obj;
+       struct __queue *queue = &pcmdpriv->cmd_queue;
+       unsigned long flags;
+
+       spin_lock_irqsave(&queue->lock, flags);
+       if (list_empty(&queue->queue)) {
+               obj = NULL;
+       } else {
+               obj = container_of((&queue->queue)->next, struct cmd_obj, list);
+               list_del_init(&obj->list);
+       }
 
-       cmd_obj = _rtw_dequeue_cmd(&pcmdpriv->cmd_queue);
+       spin_unlock_irqrestore(&queue->lock, flags);
 
-       return cmd_obj;
+       return obj;
 }
 
 void rtw_free_cmd_obj(struct cmd_obj *pcmd)
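
rtw_dequeue_cmd() now open-codes the dequeue: take the queue lock, look at the first list node, recover the enclosing cmd_obj with container_of(), then unlink it. A minimal user-space sketch of that intrusive-list recovery; the list type and cmd_obj_demo are stand-ins, not the kernel's structures:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node {
	struct list_node *next;
};

struct cmd_obj_demo {
	int cmdcode;
	struct list_node list;	/* node embedded in the object */
};

int main(void)
{
	struct cmd_obj_demo cmd = { .cmdcode = 42 };
	struct list_node *node = &cmd.list;	/* pretend this came off a queue */
	struct cmd_obj_demo *obj = container_of(node, struct cmd_obj_demo, list);

	printf("dequeued cmdcode %d\n", obj->cmdcode);
	return 0;
}
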
@@ -258,12 +226,12 @@ _next:
                if (!pcmd)
                        continue;
 
-               if (_FAIL == rtw_cmd_filter(pcmdpriv, pcmd)) {
+               if (rtw_cmd_filter(pcmdpriv, pcmd) == _FAIL) {
                        pcmd->res = H2C_DROPPED;
                        goto post_process;
                }
 
-               pcmd->cmdsz = _RND4((pcmd->cmdsz));/* _RND4 */
+               pcmd->cmdsz = round_up(pcmd->cmdsz, 4);
 
                memcpy(pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);
 
@@ -291,7 +259,7 @@ post_process:
                                rtw_free_cmd_obj(pcmd);
                        else
                                /* todo: !!! fill rsp_buf to pcmd->rsp if (pcmd->rsp!= NULL) */
-                               pcmd_callback(pcmd->padapter, pcmd);/* need conider that free cmd_obj in rtw_cmd_callback */
+                               pcmd_callback(pcmd->padapter, pcmd);/* need to consider freeing cmd_obj in rtw_cmd_callback */
                } else {
                        rtw_free_cmd_obj(pcmd);
                }
@@ -316,11 +284,10 @@ post_process:
        return 0;
 }
 
-/*
-rtw_sitesurvey_cmd(~)
-       ### NOTE:#### (!!!!)
-       MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
-*/
+/* rtw_sitesurvey_cmd(~)
+ *     ### NOTE:#### (!!!!)
+ *     MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
+ */
 u8 rtw_sitesurvey_cmd(struct adapter  *padapter, struct ndis_802_11_ssid *ssid, int ssid_num,
        struct rtw_ieee80211_channel *ch, int ch_num)
 {
@@ -330,19 +297,17 @@ u8 rtw_sitesurvey_cmd(struct adapter  *padapter, struct ndis_802_11_ssid *ssid,
        struct cmd_priv         *pcmdpriv = &padapter->cmdpriv;
        struct mlme_priv        *pmlmepriv = &padapter->mlmepriv;
 
-       if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+       if (check_fwstate(pmlmepriv, _FW_LINKED))
                rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
-       }
 
-       if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+       if (check_fwstate(pmlmepriv, _FW_LINKED))
                p2p_ps_wk_cmd(padapter, P2P_PS_SCAN, 1);
-       }
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+       ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
        if (!ph2c)
                return _FAIL;
 
-       psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
+       psurveyPara = kzalloc(sizeof(*psurveyPara), GFP_ATOMIC);
        if (!psurveyPara) {
                kfree(ph2c);
                return _FAIL;
@@ -403,13 +368,13 @@ u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset)
        struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
        u8      res = _SUCCESS;
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+       ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
        if (!ph2c) {
                res = _FAIL;
                goto exit;
        }
 
-       pbsetdataratepara = kzalloc(sizeof(struct setdatarate_parm), GFP_ATOMIC);
+       pbsetdataratepara = kzalloc(sizeof(*pbsetdataratepara), GFP_ATOMIC);
        if (!pbsetdataratepara) {
                kfree(ph2c);
                res = _FAIL;
@@ -442,7 +407,7 @@ u8 rtw_createbss_cmd(struct adapter  *padapter)
 
        rtw_led_control(padapter, LED_CTL_START_TO_LINK);
 
-       pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+       pcmd = kzalloc(sizeof(*pcmd), GFP_ATOMIC);
        if (!pcmd) {
                res = _FAIL;
                goto exit;
@@ -479,7 +444,7 @@ u8 rtw_joinbss_cmd(struct adapter  *padapter, struct wlan_network *pnetwork)
 
        rtw_led_control(padapter, LED_CTL_START_TO_LINK);
 
-       pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+       pcmd = kzalloc(sizeof(*pcmd), GFP_ATOMIC);
        if (!pcmd) {
                res = _FAIL;
                goto exit;
@@ -516,15 +481,14 @@ u8 rtw_joinbss_cmd(struct adapter  *padapter, struct wlan_network *pnetwork)
 
        psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->IELength;
 
-       if (psecnetwork->IELength - 12 < 255) {
+       if (psecnetwork->IELength - 12 < 255)
                memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], psecnetwork->IELength - 12);
-       } else {
+       else
                memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], 255);
-       }
 
        psecnetwork->IELength = 0;
        /*  Added by Albert 2009/02/18 */
-       /*  If the the driver wants to use the bssid to create the connection. */
+       /*  If the driver wants to use the bssid to create the connection. */
        /*  If not,  we have to copy the connecting AP's MAC address to it so that */
        /*  the driver just has the bssid information for PMKIDList searching. */
 
@@ -550,9 +514,9 @@ u8 rtw_joinbss_cmd(struct adapter  *padapter, struct wlan_network *pnetwork)
 
        phtpriv->ht_option = false;
        if (pregistrypriv->ht_enable) {
-               /*      Added by Albert 2010/06/23 */
-               /*      For the WEP mode, we will use the bg mode to do the connection to avoid some IOT issue. */
-               /*      Especially for Realtek 8192u SoftAP. */
+               /*      Added by Albert 2010/06/23 */
+               /*      For the WEP mode, we will use the bg mode to do the connection to avoid some IOT issue. */
+               /*      Especially for Realtek 8192u SoftAP. */
                if ((padapter->securitypriv.dot11PrivacyAlgrthm != _WEP40_) &&
                    (padapter->securitypriv.dot11PrivacyAlgrthm != _WEP104_) &&
                    (padapter->securitypriv.dot11PrivacyAlgrthm != _TKIP_)) {
@@ -611,7 +575,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
                res = rtw_enqueue_cmd(cmdpriv, cmdobj);
        } else {
                /* no need to enqueue, do the cmd hdl directly and free cmd parameter */
-               if (H2C_SUCCESS != disconnect_hdl(padapter, (u8 *)param))
+               if (disconnect_hdl(padapter, (u8 *)param) != H2C_SUCCESS)
                        res = _FAIL;
                kfree(param);
        }
@@ -629,12 +593,12 @@ u8 rtw_setopmode_cmd(struct adapter  *padapter, enum ndis_802_11_network_infra n
        struct  cmd_priv   *pcmdpriv = &padapter->cmdpriv;
        u8      res = _SUCCESS;
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       ph2c = kzalloc(sizeof(*ph2c), GFP_KERNEL);
        if (!ph2c) {
                res = false;
                goto exit;
        }
-       psetop = kzalloc(sizeof(struct setopmode_parm), GFP_KERNEL);
+       psetop = kzalloc(sizeof(*psetop), GFP_KERNEL);
 
        if (!psetop) {
                kfree(ph2c);
@@ -664,20 +628,20 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key)
        struct sta_info *sta = (struct sta_info *)psta;
        u8      res = _SUCCESS;
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       ph2c = kzalloc(sizeof(*ph2c), GFP_KERNEL);
        if (!ph2c) {
                res = _FAIL;
                goto exit;
        }
 
-       psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_KERNEL);
+       psetstakey_para = kzalloc(sizeof(*psetstakey_para), GFP_KERNEL);
        if (!psetstakey_para) {
                kfree(ph2c);
                res = _FAIL;
                goto exit;
        }
 
-       psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp), GFP_KERNEL);
+       psetstakey_rsp = kzalloc(sizeof(*psetstakey_rsp), GFP_KERNEL);
        if (!psetstakey_rsp) {
                kfree(ph2c);
                kfree(psetstakey_para);
@@ -723,13 +687,13 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
        if (!enqueue) {
                clear_cam_entry(padapter, entry);
        } else {
-               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+               ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
                if (!ph2c) {
                        res = _FAIL;
                        goto exit;
                }
 
-               psetstakey_para = kzalloc(sizeof(struct set_stakey_parm),
+               psetstakey_para = kzalloc(sizeof(*psetstakey_para),
                                          GFP_ATOMIC);
                if (!psetstakey_para) {
                        kfree(ph2c);
@@ -737,7 +701,7 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
                        goto exit;
                }
 
-               psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp),
+               psetstakey_rsp = kzalloc(sizeof(*psetstakey_rsp),
                                         GFP_ATOMIC);
                if (!psetstakey_rsp) {
                        kfree(ph2c);
@@ -770,13 +734,13 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
        struct addBaReq_parm *paddbareq_parm;
        u8      res = _SUCCESS;
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+       ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
        if (!ph2c) {
                res = _FAIL;
                goto exit;
        }
 
-       paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_ATOMIC);
+       paddbareq_parm = kzalloc(sizeof(*paddbareq_parm), GFP_ATOMIC);
        if (!paddbareq_parm) {
                kfree(ph2c);
                res = _FAIL;
@@ -803,13 +767,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
        struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
        u8      res = _SUCCESS;
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+       ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
        if (!ph2c) {
                res = _FAIL;
                goto exit;
        }
 
-       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
+       pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
        if (!pdrvextra_cmd_parm) {
                kfree(ph2c);
                res = _FAIL;
@@ -844,7 +808,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan)
        }
 
        /* prepare cmd parameter */
-       setChannelPlan_param = kzalloc(sizeof(struct SetChannelPlan_param),
+       setChannelPlan_param = kzalloc(sizeof(*setChannelPlan_param),
                                       GFP_KERNEL);
        if (!setChannelPlan_param) {
                res = _FAIL;
@@ -853,7 +817,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan)
        setChannelPlan_param->channel_plan = chplan;
 
        /* need enqueue, prepare cmd_obj and enqueue */
-       pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       pcmdobj = kzalloc(sizeof(*pcmdobj), GFP_KERNEL);
        if (!pcmdobj) {
                kfree(setChannelPlan_param);
                res = _FAIL;
@@ -983,12 +947,12 @@ static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
                mstatus = 1;/* connect */
                /*  Reset LPS Setting */
                padapter->pwrctrlpriv.LpsIdleCount = 0;
-               SetHwReg8188EU(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
+               rtl8188e_set_FwJoinBssReport_cmd(padapter, mstatus);
                break;
        case LPS_CTRL_DISCONNECT:
                mstatus = 0;/* disconnect */
                LPS_Leave(padapter);
-               SetHwReg8188EU(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
+               rtl8188e_set_FwJoinBssReport_cmd(padapter, mstatus);
                break;
        case LPS_CTRL_SPECIAL_PACKET:
                pwrpriv->DelayLPSLastTimeStamp = jiffies;
@@ -1012,16 +976,16 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
        u8      res = _SUCCESS;
 
        /* if (!pwrctrlpriv->bLeisurePs) */
-       /*      return res; */
+       /*      return res; */
 
        if (enqueue) {
-               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+               ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
                if (!ph2c) {
                        res = _FAIL;
                        goto exit;
                }
 
-               pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
+               pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm),
                                             GFP_ATOMIC);
                if (!pdrvextra_cmd_parm) {
                        kfree(ph2c);
@@ -1047,7 +1011,10 @@ exit:
 
 static void rpt_timer_setting_wk_hdl(struct adapter *padapter, u16 min_time)
 {
-       SetHwReg8188EU(padapter, HW_VAR_RPT_TIMER_SETTING, (u8 *)(&min_time));
+       struct hal_data_8188e *haldata = &padapter->haldata;
+       struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+
+       ODM_RA_Set_TxRPT_Time(odmpriv, min_time);
 }
 
 u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
@@ -1058,13 +1025,13 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
 
        u8      res = _SUCCESS;
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+       ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
        if (!ph2c) {
                res = _FAIL;
                goto exit;
        }
 
-       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
+       pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm),
                                     GFP_ATOMIC);
        if (!pdrvextra_cmd_parm) {
                kfree(ph2c);
@@ -1084,7 +1051,20 @@ exit:
 
 static void antenna_select_wk_hdl(struct adapter *padapter, u8 antenna)
 {
-       SetHwReg8188EU(padapter, HW_VAR_ANTENNA_DIVERSITY_SELECT, (u8 *)(&antenna));
+       struct hal_data_8188e *haldata = &padapter->haldata;
+
+       /* switch current antenna to optimum antenna */
+       if (haldata->CurAntenna != antenna) {
+               ODM_UpdateRxIdleAnt_88E(&haldata->odmpriv, antenna == 2 ? MAIN_ANT : AUX_ANT);
+               haldata->CurAntenna = antenna;
+       }
+}
+
+static bool rtw_antenna_diversity(struct adapter *adapter)
+{
+       struct hal_data_8188e *haldata = &adapter->haldata;
+
+       return haldata->AntDivCfg != 0;
 }
 
 u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
@@ -1092,21 +1072,19 @@ u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
        struct cmd_obj          *ph2c;
        struct drvextra_cmd_parm        *pdrvextra_cmd_parm;
        struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-       u8      support_ant_div;
        u8      res = _SUCCESS;
 
-       GetHalDefVar8188EUsb(padapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &support_ant_div);
-       if (!support_ant_div)
+       if (!rtw_antenna_diversity(padapter))
                return res;
 
        if (enqueue) {
-               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+               ph2c = kzalloc(sizeof(*ph2c), GFP_KERNEL);
                if (!ph2c) {
                        res = _FAIL;
                        goto exit;
                }
 
-               pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
+               pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm),
                                             GFP_KERNEL);
                if (!pdrvextra_cmd_parm) {
                        kfree(ph2c);
@@ -1139,13 +1117,13 @@ u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType)
        if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
                return res;
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+       ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
        if (!ph2c) {
                res = _FAIL;
                goto exit;
        }
 
-       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
+       pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
        if (!pdrvextra_cmd_parm) {
                kfree(ph2c);
                res = _FAIL;
@@ -1153,8 +1131,8 @@ u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType)
        }
 
        pdrvextra_cmd_parm->ec_id = P2P_PROTO_WK_CID;
-       pdrvextra_cmd_parm->type_size = intCmdType;     /*      As the command tppe. */
-       pdrvextra_cmd_parm->pbuf = NULL;                /*      Must be NULL here */
+       pdrvextra_cmd_parm->type_size = intCmdType;     /*      As the command type. */
+       pdrvextra_cmd_parm->pbuf = NULL;                /*      Must be NULL here */
 
        init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
 
@@ -1173,13 +1151,13 @@ u8 rtw_ps_cmd(struct adapter *padapter)
 
        u8      res = _SUCCESS;
 
-       ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+       ppscmd = kzalloc(sizeof(*ppscmd), GFP_ATOMIC);
        if (!ppscmd) {
                res = _FAIL;
                goto exit;
        }
 
-       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
+       pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
        if (!pdrvextra_cmd_parm) {
                kfree(ppscmd);
                res = _FAIL;
@@ -1197,6 +1175,11 @@ exit:
        return res;
 }
 
+static bool rtw_is_hi_queue_empty(struct adapter *adapter)
+{
+       return (rtw_read32(adapter, REG_HGQ_INFORMATION) & 0x0000ff00) == 0;
+}
+
 static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
 {
        int cnt = 0;
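
rtw_is_hi_queue_empty() replaces the GetHwReg8188EU() indirection with a direct read: it masks bits 15..8 of REG_HGQ_INFORMATION and reports the queue empty when they are all zero (the old commented-out code masked the same bits of register 0x414). A user-space sketch of just the mask test, with made-up register values:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static bool hi_queue_empty_demo(uint32_t reg)
{
	return (reg & 0x0000ff00) == 0;	/* same mask as the driver uses */
}

int main(void)
{
	printf("empty: %d, busy: %d\n",
	       hi_queue_empty_demo(0x00000000),
	       hi_queue_empty_demo(0x00000300));
	return 0;
}
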
@@ -1208,12 +1191,7 @@ static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
                return;
 
        if (psta_bmc->sleepq_len == 0) {
-               u8 val = 0;
-
-               /* while ((rtw_read32(padapter, 0x414)&0x00ffff00)!= 0) */
-               /* while ((rtw_read32(padapter, 0x414)&0x0000ff00)!= 0) */
-
-               GetHwReg8188EU(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
+               bool val = rtw_is_hi_queue_empty(padapter);
 
                while (!val) {
                        msleep(100);
@@ -1223,7 +1201,7 @@ static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
                        if (cnt > 10)
                                break;
 
-                       GetHwReg8188EU(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
+                       val = rtw_is_hi_queue_empty(padapter);
                }
 
                if (cnt <= 10) {
@@ -1244,13 +1222,13 @@ u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
        struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
        u8      res = _SUCCESS;
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+       ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
        if (!ph2c) {
                res = _FAIL;
                goto exit;
        }
 
-       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
+       pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
        if (!pdrvextra_cmd_parm) {
                kfree(ph2c);
                res = _FAIL;
@@ -1275,13 +1253,13 @@ u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt)
        struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
        u8      res = _SUCCESS;
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+       ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
        if (!ph2c) {
                res = _FAIL;
                goto exit;
        }
 
-       pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
+       pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
        if (!pdrvextra_cmd_parm) {
                kfree(ph2c);
                res = _FAIL;
@@ -1380,8 +1358,8 @@ u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
                p2p_ps_wk_hdl(padapter, pdrvextra_cmd->type_size);
                break;
        case P2P_PROTO_WK_CID:
-               /*      Commented by Albert 2011/07/01 */
-               /*      I used the type_size as the type command */
+               /*      Commented by Albert 2011/07/01 */
+               /*      I used the type_size as the type command */
                p2p_protocol_wk_hdl(padapter, pdrvextra_cmd->type_size);
                break;
        case CHECK_HIQ_WK_CID:
@@ -1404,11 +1382,8 @@ void rtw_survey_cmd_callback(struct adapter *padapter,  struct cmd_obj *pcmd)
 {
        struct  mlme_priv *pmlmepriv = &padapter->mlmepriv;
 
-       if (pcmd->res == H2C_DROPPED) {
+       if (pcmd->res != H2C_SUCCESS) {
                /* TODO: cancel timer and do timeout handler directly... */
-               /* need to make timeout handlerOS independent */
-               _set_timer(&pmlmepriv->scan_to_timer, 1);
-               } else if (pcmd->res != H2C_SUCCESS) {
                _set_timer(&pmlmepriv->scan_to_timer, 1);
        }
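
The callback collapse works because H2C_DROPPED is just one of the non-success status codes, so a single res != H2C_SUCCESS test reaches the same timer path as before. A tiny sketch of that relationship; the enum values are invented and only the comparison matters:

#include <stdio.h>

enum h2c_res_demo { H2C_SUCCESS_D, H2C_DROPPED_D, H2C_PARA_ERR_D };

static int needs_timeout_handling(enum h2c_res_demo res)
{
	return res != H2C_SUCCESS_D;	/* dropped or any other failure */
}

int main(void)
{
	printf("%d %d %d\n",
	       needs_timeout_handling(H2C_SUCCESS_D),
	       needs_timeout_handling(H2C_DROPPED_D),
	       needs_timeout_handling(H2C_PARA_ERR_D));
	return 0;
}
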
 
@@ -1416,6 +1391,7 @@ void rtw_survey_cmd_callback(struct adapter *padapter,  struct cmd_obj *pcmd)
        rtw_free_cmd_obj(pcmd);
 
 }
+
 void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
 {
        struct  mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1426,8 +1402,10 @@ void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
                spin_unlock_bh(&pmlmepriv->lock);
 
                return;
-       } else /* clear bridge database */
-               nat25_db_cleanup(padapter);
+       }
+
+       /* clear bridge database */
+       nat25_db_cleanup(padapter);
 
        /*  free cmd */
        rtw_free_cmd_obj(pcmd);
@@ -1437,11 +1415,8 @@ void rtw_joinbss_cmd_callback(struct adapter *padapter,  struct cmd_obj *pcmd)
 {
        struct  mlme_priv *pmlmepriv = &padapter->mlmepriv;
 
-       if (pcmd->res == H2C_DROPPED) {
+       if (pcmd->res != H2C_SUCCESS) {
                /* TODO: cancel timer and do timeout handler directly... */
-               /* need to make timeout handlerOS independent */
-               _set_timer(&pmlmepriv->assoc_timer, 1);
-       } else if (pcmd->res != H2C_SUCCESS) {
                _set_timer(&pmlmepriv->assoc_timer, 1);
        }
 
@@ -1474,7 +1449,7 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
                rtw_indicate_connect(padapter);
        } else {
 
-               pwlan = _rtw_alloc_network(pmlmepriv);
+               pwlan = rtw_alloc_network(pmlmepriv);
                spin_lock_bh(&pmlmepriv->scanned_queue.lock);
                if (!pwlan) {
                        pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
index 625d186..0451e51 100644 (file)
@@ -4,51 +4,43 @@
 #include <linux/firmware.h>
 #include "../include/rtw_fw.h"
 
-#define MAX_REG_BOLCK_SIZE     196
+#define MAX_REG_BLOCK_SIZE     196
 #define FW_8188E_START_ADDRESS 0x1000
 #define MAX_PAGE_SIZE          4096
 
 #define IS_FW_HEADER_EXIST(_fwhdr)                             \
-       ((le16_to_cpu(_fwhdr->Signature) & 0xFFF0) == 0x92C0 || \
-       (le16_to_cpu(_fwhdr->Signature) & 0xFFF0) == 0x88C0 ||  \
-       (le16_to_cpu(_fwhdr->Signature) & 0xFFF0) == 0x2300 ||  \
-       (le16_to_cpu(_fwhdr->Signature) & 0xFFF0) == 0x88E0)
-
-/*  This structure must be careful with byte-ordering */
+       ((le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x92C0 || \
+       (le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x88C0 ||  \
+       (le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x2300 ||  \
+       (le16_to_cpu(_fwhdr->signature) & 0xFFF0) == 0x88E0)
 
 struct rt_firmware_hdr {
-       /*  8-byte alinment required */
-       /*  LONG WORD 0 ---- */
-       __le16          Signature;      /* 92C0: test chip; 92C,
-                                        * 88C0: test chip; 88C1: MP A-cut;
-                                        * 92C1: MP A-cut */
-       u8              Category;       /*  AP/NIC and USB/PCI */
-       u8              Function;       /*  Reserved for different FW function
-                                        *  indcation, for further use when
-                                        *  driver needs to download different
-                                        *  FW for different conditions */
-       __le16          Version;        /*  FW Version */
-       u8              Subversion;     /*  FW Subversion, default 0x00 */
-       u16             Rsvd1;
-
-       /*  LONG WORD 1 ---- */
-       u8              Month;  /*  Release time Month field */
-       u8              Date;   /*  Release time Date field */
-       u8              Hour;   /*  Release time Hour field */
-       u8              Minute; /*  Release time Minute field */
-       __le16          RamCodeSize;    /*  The size of RAM code */
-       u8              Foundry;
-       u8              Rsvd2;
-
-       /*  LONG WORD 2 ---- */
-       __le32          SvnIdx; /*  The SVN entry index */
-       u32             Rsvd3;
-
-       /*  LONG WORD 3 ---- */
-       u32             Rsvd4;
-       u32             Rsvd5;
+       __le16  signature;      /* 92C0: test chip; 92C,
+                                * 88C0: test chip; 88C1: MP A-cut;
+                                * 92C1: MP A-cut */
+       u8      category;       /* AP/NIC and USB/PCI */
+       u8      function;       /* Reserved for different FW function
+                                * indication, for further use when
+                                * driver needs to download different
+                                * FW for different conditions */
+       __le16  version;        /* FW Version */
+       u8      subversion;     /* FW Subversion, default 0x00 */
+       u8      rsvd1;
+       u8      month;          /* Release time Month field */
+       u8      date;           /* Release time Date field */
+       u8      hour;           /* Release time Hour field */
+       u8      minute;         /* Release time Minute field */
+       __le16  ramcodesize;    /* The size of RAM code */
+       u8      foundry;
+       u8      rsvd2;
+       __le32  svnidx;         /* The SVN entry index */
+       __le32  rsvd3;
+       __le32  rsvd4;
+       __le32  rsvd5;
 };
 
+static_assert(sizeof(struct rt_firmware_hdr) == 32);
+
 static void fw_download_enable(struct adapter *padapter, bool enable)
 {
        u8 tmp;
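
The renamed header fields keep the same 32-byte little-endian layout, which the new static_assert() pins down. Below is a standalone user-space mirror of that layout, assuming fixed-width types with natural alignment leave no padding; le16toh() stands in for le16_to_cpu() and the sample bytes are invented:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>

struct fw_hdr_demo {
	uint16_t signature;
	uint8_t  category;
	uint8_t  function;
	uint16_t version;
	uint8_t  subversion;
	uint8_t  rsvd1;
	uint8_t  month, date, hour, minute;
	uint16_t ramcodesize;
	uint8_t  foundry;
	uint8_t  rsvd2;
	uint32_t svnidx;
	uint32_t rsvd3, rsvd4, rsvd5;
};

_Static_assert(sizeof(struct fw_hdr_demo) == 32, "header must be 32 bytes");

int main(void)
{
	uint8_t blob[32] = { 0xe1, 0x88 };	/* little-endian signature 0x88e1 */
	struct fw_hdr_demo hdr;

	memcpy(&hdr, blob, sizeof(hdr));
	printf("signature 0x%04x, header present: %d\n",
	       le16toh(hdr.signature),
	       (le16toh(hdr.signature) & 0xFFF0) == 0x88E0);
	return 0;
}
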
@@ -71,53 +63,55 @@ static void fw_download_enable(struct adapter *padapter, bool enable)
        }
 }
 
-static int block_write(struct adapter *padapter, void *buffer, u32 buffSize)
+static int block_write(struct adapter *padapter, u8 *buffer, u32 size)
 {
        int ret = _SUCCESS;
-       u32     blockSize_p1 = 4;       /*  (Default) Phase #1 : PCI muse use 4-byte write to download FW */
-       u32     blockSize_p2 = 8;       /*  Phase #2 : Use 8-byte, if Phase#1 use big size to write FW. */
-       u32     blockSize_p3 = 1;       /*  Phase #3 : Use 1-byte, the remnant of FW image. */
-       u32     blockCount_p1 = 0, blockCount_p2 = 0, blockCount_p3 = 0;
-       u32     remainSize_p1 = 0, remainSize_p2 = 0;
-       u8 *bufferPtr   = (u8 *)buffer;
-       u32     i = 0, offset = 0;
-
-       blockSize_p1 = MAX_REG_BOLCK_SIZE;
-
-       /* 3 Phase #1 */
-       blockCount_p1 = buffSize / blockSize_p1;
-       remainSize_p1 = buffSize % blockSize_p1;
-
-       for (i = 0; i < blockCount_p1; i++) {
-               ret = rtw_writeN(padapter, (FW_8188E_START_ADDRESS + i * blockSize_p1), blockSize_p1, (bufferPtr + i * blockSize_p1));
+       u32 blocks, block_size, remain;
+       u32 i, offset, addr;
+       u8 *data;
+
+       block_size = MAX_REG_BLOCK_SIZE;
+
+       blocks = size / block_size;
+       remain = size % block_size;
+
+       for (i = 0; i < blocks; i++) {
+               addr = FW_8188E_START_ADDRESS + i * block_size;
+               data = buffer + i * block_size;
+
+               ret = rtw_writeN(padapter, addr, block_size, data);
                if (ret == _FAIL)
                        goto exit;
        }
 
-       /* 3 Phase #2 */
-       if (remainSize_p1) {
-               offset = blockCount_p1 * blockSize_p1;
+       if (remain) {
+               offset = blocks * block_size;
+               block_size = 8;
 
-               blockCount_p2 = remainSize_p1 / blockSize_p2;
-               remainSize_p2 = remainSize_p1 % blockSize_p2;
+               blocks = remain / block_size;
+               remain = remain % block_size;
 
-               for (i = 0; i < blockCount_p2; i++) {
-                       ret = rtw_writeN(padapter, (FW_8188E_START_ADDRESS + offset + i * blockSize_p2), blockSize_p2, (bufferPtr + offset + i * blockSize_p2));
+               for (i = 0; i < blocks; i++) {
+                       addr = FW_8188E_START_ADDRESS + offset + i * block_size;
+                       data = buffer + offset + i * block_size;
 
+                       ret = rtw_writeN(padapter, addr, block_size, data);
                        if (ret == _FAIL)
                                goto exit;
                }
        }
 
-       /* 3 Phase #3 */
-       if (remainSize_p2) {
-               offset = (blockCount_p1 * blockSize_p1) + (blockCount_p2 * blockSize_p2);
+       if (remain) {
+               offset += blocks * block_size;
 
-               blockCount_p3 = remainSize_p2 / blockSize_p3;
+               /* block size 1 */
+               blocks = remain;
 
-               for (i = 0; i < blockCount_p3; i++) {
-                       ret = rtw_write8(padapter, (FW_8188E_START_ADDRESS + offset + i), *(bufferPtr + offset + i));
+               for (i = 0; i < blocks; i++) {
+                       addr = FW_8188E_START_ADDRESS + offset + i;
+                       data = buffer + offset + i;
 
+                       ret = rtw_write8(padapter, addr, *data);
                        if (ret == _FAIL)
                                goto exit;
                }
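
The rewritten block_write() keeps the original three-stage split: as many 196-byte (MAX_REG_BLOCK_SIZE) writes as fit, then 8-byte writes for the remainder, then single-byte writes for whatever is left. A standalone sketch of just that arithmetic, with an invented firmware size; the real function issues rtw_writeN()/rtw_write8() for each chunk:

#include <stdio.h>

int main(void)
{
	unsigned int size = 196 * 3 + 8 * 2 + 5;	/* example image size */
	unsigned int big = size / 196;			/* 196-byte chunks */
	unsigned int rest = size % 196;
	unsigned int mid = rest / 8;			/* 8-byte chunks */
	unsigned int small = rest % 8;			/* single bytes */

	printf("%u bytes -> %u x 196 + %u x 8 + %u x 1 (covers %u)\n",
	       size, big, mid, small, big * 196 + mid * 8 + small);
	return 0;
}
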
@@ -127,7 +121,7 @@ exit:
        return ret;
 }
 
-static int page_write(struct adapter *padapter, u32 page, void *buffer, u32 size)
+static int page_write(struct adapter *padapter, u32 page, u8 *buffer, u32 size)
 {
        u8 value8;
        u8 u8Page = (u8)(page & 0x07);
@@ -138,21 +132,20 @@ static int page_write(struct adapter *padapter, u32 page, void *buffer, u32 size
        return block_write(padapter, buffer, size);
 }
 
-static int write_fw(struct adapter *padapter, void *buffer, u32 size)
+static int write_fw(struct adapter *padapter, u8 *buffer, u32 size)
 {
        /*  Since we need to dynamically decide the method of downloading fw, we call this function to get the chip version. */
        /*  We can remove _ReadChipVersion from ReadpadapterInfo8192C later. */
        int ret = _SUCCESS;
        u32     pageNums, remainSize;
        u32     page, offset;
-       u8 *bufferPtr = (u8 *)buffer;
 
        pageNums = size / MAX_PAGE_SIZE;
        remainSize = size % MAX_PAGE_SIZE;
 
        for (page = 0; page < pageNums; page++) {
                offset = page * MAX_PAGE_SIZE;
-               ret = page_write(padapter, page, bufferPtr + offset, MAX_PAGE_SIZE);
+               ret = page_write(padapter, page, buffer + offset, MAX_PAGE_SIZE);
 
                if (ret == _FAIL)
                        goto exit;
@@ -160,7 +153,7 @@ static int write_fw(struct adapter *padapter, void *buffer, u32 size)
        if (remainSize) {
                offset = pageNums * MAX_PAGE_SIZE;
                page = pageNums;
-               ret = page_write(padapter, page, bufferPtr + offset, remainSize);
+               ret = page_write(padapter, page, buffer + offset, remainSize);
 
                if (ret == _FAIL)
                        goto exit;
@@ -247,14 +240,12 @@ int rtl8188e_firmware_download(struct adapter *padapter)
 {
        int ret = _SUCCESS;
        u8 write_fw_retry = 0;
-       u32 fwdl_start_time;
+       unsigned long fwdl_timeout;
        struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
        struct device *device = dvobj_to_dev(dvobj);
        struct rt_firmware_hdr *fwhdr = NULL;
-       u16 fw_version, fw_subversion, fw_signature;
        u8 *fw_data;
        u32 fw_size;
-       static int log_version;
 
        if (!dvobj->firmware.data)
                ret = load_firmware(&dvobj->firmware, device);
@@ -265,21 +256,15 @@ int rtl8188e_firmware_download(struct adapter *padapter)
        fw_data = dvobj->firmware.data;
        fw_size = dvobj->firmware.size;
 
-       /*  To Check Fw header. Added by tynli. 2009.12.04. */
        fwhdr = (struct rt_firmware_hdr *)dvobj->firmware.data;
 
-       fw_version = le16_to_cpu(fwhdr->Version);
-       fw_subversion = fwhdr->Subversion;
-       fw_signature = le16_to_cpu(fwhdr->Signature);
-
-       if (!log_version++)
-               pr_info("%sFirmware Version %d, SubVersion %d, Signature 0x%x\n",
-                       DRIVER_PREFIX, fw_version, fw_subversion, fw_signature);
-
        if (IS_FW_HEADER_EXIST(fwhdr)) {
-               /*  Shift 32 bytes for FW header */
-               fw_data = fw_data + 32;
-               fw_size = fw_size - 32;
+               pr_info_once("R8188EU: Firmware Version %d, SubVersion %d, Signature 0x%x\n",
+                            le16_to_cpu(fwhdr->version), fwhdr->subversion,
+                            le16_to_cpu(fwhdr->signature));
+
+               fw_data = fw_data + sizeof(struct rt_firmware_hdr);
+               fw_size = fw_size - sizeof(struct rt_firmware_hdr);
        }
 
        /*  Suggested by Filen. If 8051 is running in RAM code, driver should inform Fw to reset by itself, */
@@ -290,7 +275,7 @@ int rtl8188e_firmware_download(struct adapter *padapter)
        }
 
        fw_download_enable(padapter, true);
-       fwdl_start_time = jiffies;
+       fwdl_timeout = jiffies + msecs_to_jiffies(500);
        while (1) {
                /* reset the FWDL chksum */
                rtw_write8(padapter, REG_MCUFWDL, rtw_read8(padapter, REG_MCUFWDL) | FWDL_CHKSUM_RPT);
@@ -298,7 +283,7 @@ int rtl8188e_firmware_download(struct adapter *padapter)
                ret = write_fw(padapter, fw_data, fw_size);
 
                if (ret == _SUCCESS ||
-                   (rtw_get_passing_time_ms(fwdl_start_time) > 500 && write_fw_retry++ >= 3))
+                   (time_after(jiffies, fwdl_timeout) && write_fw_retry++ >= 3))
                        break;
        }
        fw_download_enable(padapter, false);
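
The retry loop now computes one deadline up front (jiffies + msecs_to_jiffies(500)) and checks it with time_after(), a comparison that stays correct across counter wrap-around. A user-space sketch of that pattern; the tick values are arbitrary and the signed-difference test only approximates what time_after() does:

#include <stdio.h>
#include <stdbool.h>

static bool after_demo(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;	/* true when a is later than b */
}

int main(void)
{
	unsigned long now = 1000;		/* stand-in for jiffies */
	unsigned long deadline = now + 50;	/* stand-in for fwdl_timeout */

	printf("not expired: %d, expired: %d\n",
	       after_demo(now, deadline),
	       after_demo(now + 100, deadline));
	return 0;
}
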
index 5a0e42e..385a9ed 100644 (file)
@@ -97,16 +97,15 @@ bool        rtw_is_cckratesonly_included(u8 *rate)
 
 int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
 {
-       if (channel > 14) {
+       if (channel > 14)
                return WIRELESS_INVALID;
-       } else {  /*  could be pure B, pure G, or B/G */
-               if (rtw_is_cckratesonly_included(rate))
-                       return WIRELESS_11B;
-               else if (rtw_is_cckrates_included(rate))
-                       return  WIRELESS_11BG;
-               else
-                       return WIRELESS_11G;
-       }
+       /*  could be pure B, pure G, or B/G */
+       if (rtw_is_cckratesonly_included(rate))
+               return WIRELESS_11B;
+       else if (rtw_is_cckrates_included(rate))
+               return  WIRELESS_11BG;
+       else
+               return WIRELESS_11G;
 }
 
 u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len, unsigned char *source,
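
rtw_check_network_type() now takes the early-return shape: channels above 14 are rejected, otherwise the rate set decides between pure-B, mixed B/G and pure-G. A rough user-space sketch of that classification, assuming the usual 802.11 convention of rate bytes in 500 kbps units with the basic-rate bit (0x80) masked off; the W_* constants are placeholders, not the driver's WIRELESS_* values:

#include <stdio.h>
#include <stdbool.h>

enum { W_INVALID, W_11B, W_11BG, W_11G };

static bool is_cck(unsigned char r)
{
	r &= 0x7f;			/* drop the basic-rate flag */
	return r == 2 || r == 4 || r == 11 || r == 22;	/* 1, 2, 5.5, 11 Mbps */
}

static int classify(const unsigned char *rates, int n, int channel)
{
	bool cck = false, ofdm = false;
	int i;

	if (channel > 14)
		return W_INVALID;	/* not a 2.4 GHz B/G rate set */

	for (i = 0; i < n; i++) {
		if (is_cck(rates[i]))
			cck = true;
		else
			ofdm = true;
	}

	if (cck && !ofdm)
		return W_11B;
	return cck ? W_11BG : W_11G;
}

int main(void)
{
	/* 1, 2, 5.5, 11 Mbps (CCK) plus 6, 9 Mbps (OFDM) */
	unsigned char bg[] = { 0x82, 0x84, 0x0b, 0x16, 0x0c, 0x12 };

	printf("class %d\n", classify(bg, (int)sizeof(bg), 6));	/* W_11BG == 2 */
	return 0;
}
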
@@ -160,11 +159,10 @@ u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit)
                if (*p == index) {
                        *len = *(p + 1);
                        return p;
-               } else {
-                       tmp = *(p + 1);
-                       p += (tmp + 2);
-                       i += (tmp + 2);
                }
+               tmp = *(p + 1);
+               p += (tmp + 2);
+               i += (tmp + 2);
                if (i >= limit)
                        break;
        }
@@ -295,10 +293,9 @@ unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
                                goto check_next_ie;
                        *wpa_ie_len = *(pbuf + 1);
                        return pbuf;
-               } else {
-                       *wpa_ie_len = 0;
-                       return NULL;
                }
+               *wpa_ie_len = 0;
+               return NULL;
 
 check_next_ie:
                limit_new = limit - (pbuf - pie) - 2 - len;
@@ -558,9 +555,8 @@ u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
                        cnt += in_ie[cnt + 1] + 2;
 
                        break;
-               } else {
-                       cnt += in_ie[cnt + 1] + 2; /* goto next */
                }
+               cnt += in_ie[cnt + 1] + 2; /* goto next */
        }
        return wpsie_ptr;
 }
@@ -604,9 +600,8 @@ u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_att
                        if (len_attr)
                                *len_attr = attr_len;
                        break;
-               } else {
-                       attr_ptr += attr_len; /* goto next */
                }
+               attr_ptr += attr_len; /* goto next */
        }
        return target_attr_ptr;
 }
@@ -901,9 +896,8 @@ u8 *rtw_get_p2p_ie(u8 *in_ie, int in_len, u8 *p2p_ie, uint *p2p_ielen)
                        if (p2p_ielen)
                                *p2p_ielen = in_ie[cnt + 1] + 2;
                        return p2p_ie_ptr;
-               } else {
-                       cnt += in_ie[cnt + 1] + 2; /* goto next */
                }
+               cnt += in_ie[cnt + 1] + 2; /* goto next */
        }
        return NULL;
 }
@@ -948,9 +942,8 @@ u8 *rtw_get_p2p_attr(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id, u8 *buf_attr
                        if (len_attr)
                                *len_attr = attr_len;
                        break;
-               } else {
-                       attr_ptr += attr_len; /* goto next */
                }
+               attr_ptr += attr_len; /* goto next */
        }
        return target_attr_ptr;
 }
@@ -1058,7 +1051,7 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
        pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12);
 
        if (pbuf && (wpa_ielen > 0)) {
-               if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x)) {
+               if (rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x) == _SUCCESS) {
                        pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
                        pnetwork->BcnInfo.group_cipher = group_cipher;
                        pnetwork->BcnInfo.is_8021x = is8021x;
@@ -1068,7 +1061,7 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
                pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12);
 
                if (pbuf && (wpa_ielen > 0)) {
-                       if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x)) {
+                       if (rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x) == _SUCCESS) {
                                pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
                                pnetwork->BcnInfo.group_cipher = group_cipher;
                                pnetwork->BcnInfo.is_8021x = is8021x;
index 4b78e42..7ba75f7 100644 (file)
@@ -44,7 +44,7 @@ u8 rtw_do_join(struct adapter *padapter)
                    pmlmepriv->to_roaming > 0) {
                        /*  submit site_survey_cmd */
                        ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
-                       if (_SUCCESS != ret)
+                       if (ret != _SUCCESS)
                                pmlmepriv->to_join = false;
                } else {
                        pmlmepriv->to_join = false;
@@ -91,7 +91,7 @@ u8 rtw_do_join(struct adapter *padapter)
                                if (!pmlmepriv->LinkDetectInfo.bBusyTraffic ||
                                    pmlmepriv->to_roaming > 0) {
                                        ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
-                                       if (_SUCCESS != ret)
+                                       if (ret != _SUCCESS)
                                                pmlmepriv->to_join = false;
                                } else {
                                        ret = _FAIL;
index e14e374..af8e84a 100644 (file)
@@ -57,10 +57,10 @@ int rtw_IOL_append_cmds(struct xmit_frame *xmit_frame, u8 *IOL_cmds, u32 cmd_len
 
 bool rtw_IOL_applied(struct adapter  *adapter)
 {
-       if (1 == adapter->registrypriv.fw_iol)
+       if (adapter->registrypriv.fw_iol == 1)
                return true;
 
-       if ((2 == adapter->registrypriv.fw_iol) &&
+       if ((adapter->registrypriv.fw_iol == 2) &&
            (adapter_to_dvobj(adapter)->pusbdev->speed != USB_SPEED_HIGH))
                return true;
 
index ccd43ac..2f30004 100644 (file)
@@ -110,7 +110,7 @@ static void blink_work(struct work_struct *work)
                                pLed->bLedLinkBlinkInProgress = true;
                                pLed->CurrLedState = LED_BLINK_NORMAL;
                                schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
-                       } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+                       } else {
                                pLed->bLedNoLinkBlinkInProgress = true;
                                pLed->CurrLedState = LED_BLINK_SLOWLY;
                                schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
@@ -131,7 +131,7 @@ static void blink_work(struct work_struct *work)
                                pLed->bLedLinkBlinkInProgress = true;
                                pLed->CurrLedState = LED_BLINK_NORMAL;
                                schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
-                       } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+                       } else {
                                pLed->bLedNoLinkBlinkInProgress = true;
                                pLed->CurrLedState = LED_BLINK_SLOWLY;
                                schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
@@ -278,7 +278,7 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction)
                        else
                                pLed->BlinkingLedState = RTW_LED_ON;
                        schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
-                }
+               }
                break;
        case LED_CTL_TX:
        case LED_CTL_RX:
@@ -304,7 +304,7 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction)
                }
                break;
        case LED_CTL_START_WPS: /* wait until xinpin finish */
-                if (!pLed->bLedWPSBlinkInProgress) {
+               if (!pLed->bLedWPSBlinkInProgress) {
                        if (pLed->bLedNoLinkBlinkInProgress) {
                                cancel_delayed_work(&pLed->blink_work);
                                pLed->bLedNoLinkBlinkInProgress = false;
@@ -328,7 +328,7 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction)
                        else
                                pLed->BlinkingLedState = RTW_LED_ON;
                        schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
-                }
+               }
                break;
        case LED_CTL_STOP_WPS:
                if (pLed->bLedNoLinkBlinkInProgress) {
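
The two blink_work() hunks replace "else if (!check_fwstate(pmlmepriv, _FW_LINKED))" with a plain "else", presumably because those branches are only reached once the linked case has already been handled, so the negated re-check can never be false; the other three hunks in this file only fix space-indented lines to use tabs. A hypothetical illustration of the dead-condition pattern (linked() and the start_*_blink() helpers are made-up names):

        /* before: the else-if re-tests a condition whose value is already known */
        if (linked(priv)) {
                start_link_blink(priv);
        } else if (!linked(priv)) {     /* always true when reached */
                start_slow_blink(priv);
        }

        /* after: identical behaviour, redundant test dropped */
        if (linked(priv))
                start_link_blink(priv);
        else
                start_slow_blink(priv);
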
index 6f0bff1..5a81564 100644 (file)
@@ -16,7 +16,6 @@
 #include "../include/usb_osintf.h"
 #include "../include/rtl8188e_dm.h"
 
-extern unsigned char   MCS_rate_2R[16];
 extern unsigned char   MCS_rate_1R[16];
 
 void rtw_set_roaming(struct adapter *adapter, u8 to_roaming)
@@ -31,60 +30,6 @@ u8 rtw_to_roaming(struct adapter *adapter)
        return adapter->mlmepriv.to_roaming;
 }
 
-int _rtw_init_mlme_priv(struct adapter *padapter)
-{
-       int     i;
-       u8      *pbuf;
-       struct wlan_network     *pnetwork;
-       struct mlme_priv                *pmlmepriv = &padapter->mlmepriv;
-       int     res = _SUCCESS;
-
-       /*  We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
-
-       pmlmepriv->nic_hdl = (u8 *)padapter;
-
-       pmlmepriv->pscanned = NULL;
-       pmlmepriv->fw_state = 0;
-       pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown;
-       pmlmepriv->scan_mode = SCAN_ACTIVE;/*  1: active, 0: pasive. Maybe someday we should rename this varable to "active_mode" (Jeff) */
-
-       spin_lock_init(&pmlmepriv->lock);
-       rtw_init_queue(&pmlmepriv->free_bss_pool);
-       rtw_init_queue(&pmlmepriv->scanned_queue);
-
-       set_scanned_network_val(pmlmepriv, 0);
-
-       memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
-
-       pbuf = vzalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
-
-       if (!pbuf) {
-               res = _FAIL;
-               goto exit;
-       }
-       pmlmepriv->free_bss_buf = pbuf;
-
-       pnetwork = (struct wlan_network *)pbuf;
-
-       for (i = 0; i < MAX_BSS_CNT; i++) {
-               INIT_LIST_HEAD(&pnetwork->list);
-
-               list_add_tail(&pnetwork->list, &pmlmepriv->free_bss_pool.queue);
-
-               pnetwork++;
-       }
-
-       /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
-
-       rtw_clear_scan_deny(padapter);
-
-       rtw_init_mlme_timer(padapter);
-
-exit:
-
-       return res;
-}
-
 static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
 {
        kfree(*ppie);
@@ -95,7 +40,6 @@ static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
 void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
 {
        kfree(pmlmepriv->assoc_req);
-       kfree(pmlmepriv->assoc_rsp);
        rtw_free_mlme_ie_data(&pmlmepriv->wps_beacon_ie, &pmlmepriv->wps_beacon_ie_len);
        rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_req_ie, &pmlmepriv->wps_probe_req_ie_len);
        rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_resp_ie, &pmlmepriv->wps_probe_resp_ie_len);
@@ -108,49 +52,6 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
        rtw_free_mlme_ie_data(&pmlmepriv->p2p_assoc_req_ie, &pmlmepriv->p2p_assoc_req_ie_len);
 }
 
-void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
-{
-
-       rtw_free_mlme_priv_ie_data(pmlmepriv);
-
-       if (pmlmepriv) {
-               vfree(pmlmepriv->free_bss_buf);
-       }
-
-}
-
-struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *free_queue) */
-{
-       struct  wlan_network    *pnetwork;
-       struct __queue *free_queue = &pmlmepriv->free_bss_pool;
-       struct list_head *plist = NULL;
-
-       spin_lock_bh(&free_queue->lock);
-
-       if (list_empty(&free_queue->queue)) {
-               pnetwork = NULL;
-               goto exit;
-       }
-       plist = (&free_queue->queue)->next;
-
-       pnetwork = container_of(plist, struct wlan_network, list);
-
-       list_del_init(&pnetwork->list);
-
-       pnetwork->network_type = 0;
-       pnetwork->fixed = false;
-       pnetwork->last_scanned = jiffies;
-       pnetwork->aid = 0;
-       pnetwork->join_res = 0;
-
-       pmlmepriv->num_of_scanned++;
-
-exit:
-       spin_unlock_bh(&free_queue->lock);
-
-       return pnetwork;
-}
-
 void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork, u8 isfreeall)
 {
        u32 curr_time, delta_time;
@@ -194,7 +95,7 @@ void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *
 /*
        return the wlan_network with the matching addr
 
-       Shall be calle under atomic context... to avoid possible racing condition...
+       Shall be called under atomic context... to avoid possible racing condition...
 */
 struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr)
 {
@@ -291,23 +192,92 @@ u8 *rtw_get_beacon_interval_from_ie(u8 *ie)
 
 int rtw_init_mlme_priv(struct adapter *padapter)/* struct      mlme_priv *pmlmepriv) */
 {
-       int     res;
+       int     i;
+       u8      *pbuf;
+       struct wlan_network     *pnetwork;
+       struct mlme_priv                *pmlmepriv = &padapter->mlmepriv;
+       int     res = _SUCCESS;
 
-       res = _rtw_init_mlme_priv(padapter);/*  (pmlmepriv); */
+       /*  We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
+
+       pmlmepriv->nic_hdl = (u8 *)padapter;
+
+       pmlmepriv->pscanned = NULL;
+       pmlmepriv->fw_state = 0;
+       pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown;
+       pmlmepriv->scan_mode = SCAN_ACTIVE;/*  1: active, 0: pasive. Maybe someday we should rename this varable to "active_mode" (Jeff) */
+
+       spin_lock_init(&pmlmepriv->lock);
+       rtw_init_queue(&pmlmepriv->free_bss_pool);
+       rtw_init_queue(&pmlmepriv->scanned_queue);
+
+       set_scanned_network_val(pmlmepriv, 0);
+
+       memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
+
+       pbuf = vzalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
+
+       if (!pbuf) {
+               res = _FAIL;
+               goto exit;
+       }
+       pmlmepriv->free_bss_buf = pbuf;
+
+       pnetwork = (struct wlan_network *)pbuf;
+
+       for (i = 0; i < MAX_BSS_CNT; i++) {
+               INIT_LIST_HEAD(&pnetwork->list);
+
+               list_add_tail(&pnetwork->list, &pmlmepriv->free_bss_pool.queue);
+
+               pnetwork++;
+       }
+
+       /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
+
+       rtw_clear_scan_deny(padapter);
+
+       rtw_init_mlme_timer(padapter);
+
+exit:
 
        return res;
 }
 
 void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
 {
-       _rtw_free_mlme_priv(pmlmepriv);
+       rtw_free_mlme_priv_ie_data(pmlmepriv);
+       vfree(pmlmepriv->free_bss_buf);
 }
 
-static struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
+struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
 {
        struct  wlan_network    *pnetwork;
+       struct __queue *free_queue = &pmlmepriv->free_bss_pool;
+       struct list_head *plist = NULL;
+
+       spin_lock_bh(&free_queue->lock);
+
+       if (list_empty(&free_queue->queue)) {
+               pnetwork = NULL;
+               goto exit;
+       }
+       plist = (&free_queue->queue)->next;
+
+       pnetwork = container_of(plist, struct wlan_network, list);
+
+       list_del_init(&pnetwork->list);
 
-       pnetwork = _rtw_alloc_network(pmlmepriv);
+       pnetwork->network_type = 0;
+       pnetwork->fixed = false;
+       pnetwork->last_scanned = jiffies;
+       pnetwork->aid = 0;
+       pnetwork->join_res = 0;
+
+       pmlmepriv->num_of_scanned++;
+
+exit:
+       spin_unlock_bh(&free_queue->lock);
 
        return pnetwork;
 }
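
In these hunks the bodies of _rtw_init_mlme_priv(), _rtw_free_mlme_priv() and _rtw_alloc_network() move, essentially verbatim, into their single public callers rtw_init_mlme_priv(), rtw_free_mlme_priv() and rtw_alloc_network(), removing one layer of wrappers. The underlying scheme is unchanged: a vzalloc'ed array of struct wlan_network entries is threaded onto a spinlock-protected free list at init time, allocation pops the list head, and teardown is a single vfree(). A condensed, self-contained sketch of that preallocated-pool pattern (bss_pool, bss_entry and the function names are illustrative, not the driver's):

        #include <linux/list.h>
        #include <linux/overflow.h>
        #include <linux/spinlock.h>
        #include <linux/vmalloc.h>

        struct bss_entry {
                struct list_head list;
                /* payload fields would follow */
        };

        struct bss_pool {
                spinlock_t lock;
                struct list_head free;
                struct bss_entry *buf;
        };

        static int bss_pool_init(struct bss_pool *pool, unsigned int count)
        {
                unsigned int i;

                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->free);

                pool->buf = vzalloc(array_size(count, sizeof(*pool->buf)));
                if (!pool->buf)
                        return -ENOMEM;

                /* thread every preallocated entry onto the free list */
                for (i = 0; i < count; i++)
                        list_add_tail(&pool->buf[i].list, &pool->free);

                return 0;
        }

        static struct bss_entry *bss_alloc(struct bss_pool *pool)
        {
                struct bss_entry *e = NULL;

                spin_lock_bh(&pool->lock);
                if (!list_empty(&pool->free)) {
                        e = list_first_entry(&pool->free, struct bss_entry, list);
                        list_del_init(&e->list);
                }
                spin_unlock_bh(&pool->lock);

                return e;       /* NULL when the pool is exhausted */
        }
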
@@ -330,7 +300,7 @@ void rtw_free_network_queue(struct adapter *dev, u8 isfreeall)
 /*
        return the wlan_network with the matching addr
 
-       Shall be calle under atomic context... to avoid possible racing condition...
+       Shall be called under atomic context... to avoid possible racing condition...
 */
 struct wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr)
 {
@@ -465,6 +435,13 @@ static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex
 
 }
 
+u8 rtw_current_antenna(struct adapter *adapter)
+{
+       struct hal_data_8188e *haldata = &adapter->haldata;
+
+       return haldata->CurAntenna;
+}
+
 /*
 Caller must hold pmlmepriv->lock first.
 */
@@ -498,7 +475,8 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
                        /* If there are no more slots, expire the oldest */
                        pnetwork = oldest;
 
-                       GetHalDefVar8188EUsb(adapter, HAL_DEF_CURRENT_ANTENNA, &target->PhyInfo.Optimum_antenna);
+                       target->PhyInfo.Optimum_antenna = rtw_current_antenna(adapter);
+
                        memcpy(&pnetwork->network, target,  get_wlan_bssid_ex_sz(target));
                        /*  variable initialize */
                        pnetwork->fixed = false;
@@ -521,7 +499,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
 
                        bssid_ex_sz = get_wlan_bssid_ex_sz(target);
                        target->Length = bssid_ex_sz;
-                       GetHalDefVar8188EUsb(adapter, HAL_DEF_CURRENT_ANTENNA, &target->PhyInfo.Optimum_antenna);
+                       target->PhyInfo.Optimum_antenna = rtw_current_antenna(adapter);
                        memcpy(&pnetwork->network, target, bssid_ex_sz);
 
                        pnetwork->last_scanned = jiffies;
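
Both scanned-network update sites now call the rtw_current_antenna() accessor introduced just above instead of routing the read through GetHalDefVar8188EUsb(adapter, HAL_DEF_CURRENT_ANTENNA, ...). A typed accessor is self-describing and lets the compiler check the destination type, whereas an enum-keyed getter takes a pointer whose expected type is only a per-ID convention. A generic sketch of the contrast (every name here is illustrative):

        #include <linux/types.h>

        struct hal_data { u8 cur_antenna; };
        struct adapter  { struct hal_data haldata; };

        /* enum-keyed getter: the out pointer's type is not checked against the ID */
        void get_hal_def_var(struct adapter *adapter, int var_id, void *out);

        /* typed accessor: the result type is visible and enforced at every call site */
        static inline u8 current_antenna(struct adapter *adapter)
        {
                return adapter->haldata.cur_antenna;
        }
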
@@ -567,8 +545,8 @@ static void rtw_add_network(struct adapter *adapter,
 
 /* select the desired network based on the capability of the (i)bss. */
 /*  check items:       (1) security */
-/*                     (2) network_type */
-/*                     (3) WMM */
+/*                     (2) network_type */
+/*                     (3) WMM */
 /*                     (4) HT */
 /*                     (5) others */
 static bool rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwork)
@@ -715,15 +693,12 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
                        set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
                        pmlmepriv->to_join = false;
                        s_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
-                       if (_SUCCESS == s_ret) {
-                            _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
-                       } else if (s_ret == 2) { /* there is no need to wait for join */
-                               _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
-                               rtw_indicate_connect(adapter);
+                       if (s_ret == _SUCCESS) {
+                               _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
                        } else {
                                if (rtw_to_roaming(adapter) != 0) {
                                        if (--pmlmepriv->to_roaming == 0 ||
-                                           _SUCCESS != rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0)) {
+                                           rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0) != _SUCCESS) {
                                                rtw_set_roaming(adapter, 0);
                                                rtw_free_assoc_resources(adapter, 1);
                                                rtw_indicate_disconnect(adapter);
@@ -748,14 +723,6 @@ void rtw_surveydone_event_callback(struct adapter  *adapter, u8 *pbuf)
        rtw_os_xmit_schedule(adapter);
 }
 
-void rtw_dummy_event_callback(struct adapter *adapter, u8 *pbuf)
-{
-}
-
-void rtw_fwdbg_event_callback(struct adapter *adapter, u8 *pbuf)
-{
-}
-
 static void free_scanqueue(struct      mlme_priv *pmlmepriv)
 {
        struct __queue *free_queue = &pmlmepriv->free_bss_pool;
@@ -911,9 +878,8 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
                        memset((u8 *)&psta->dot11txpn, 0, sizeof(union pn48));
                        memset((u8 *)&psta->dot11rxpn, 0, sizeof(union pn48));
                }
-               /*      Commented by Albert 2012/07/21 */
-               /*      When doing the WPS, the wps_ie_len won't equal to 0 */
-               /*      And the Wi-Fi driver shouldn't allow the data packet to be tramsmitted. */
+               /*      When doing the WPS, the wps_ie_len won't equal to 0 */
+               /*      And the Wi-Fi driver shouldn't allow the data packet to be transmitted. */
                if (padapter->securitypriv.wps_ie_len != 0) {
                        psta->ieee8021x_blocked = true;
                        padapter->securitypriv.wps_ie_len = 0;
@@ -1071,8 +1037,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
                                rtw_indicate_connect(adapter);
                        }
 
+                       spin_unlock_bh(&pmlmepriv->lock);
                        /* s5. Cancel assoc_timer */
                        del_timer_sync(&pmlmepriv->assoc_timer);
+                       spin_lock_bh(&pmlmepriv->lock);
                } else {
                        spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
                        goto ignore_joinbss_callback;
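
The unlock/relock added around del_timer_sync() is a deadlock guard: del_timer_sync() waits for a running callback to complete, and the join-timeout handler further down in this diff takes pmlmepriv->lock itself, so cancelling the assoc_timer while still holding that lock could block forever. A minimal sketch of the pattern, assuming a timer whose handler acquires the same lock (example_priv and example_connect_done are illustrative names):

        #include <linux/spinlock.h>
        #include <linux/timer.h>

        struct example_priv {
                spinlock_t lock;
                struct timer_list assoc_timer;  /* its handler takes example_priv.lock */
        };

        static void example_connect_done(struct example_priv *priv)
        {
                spin_lock_bh(&priv->lock);
                /* ... state updates protected by the lock ... */

                spin_unlock_bh(&priv->lock);            /* let the handler run to completion */
                del_timer_sync(&priv->assoc_timer);     /* safe: lock is not held here */
                spin_lock_bh(&priv->lock);

                /* ... remaining updates ... */
                spin_unlock_bh(&priv->lock);
        }
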
@@ -1105,6 +1073,11 @@ void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf)
 
 }
 
+void rtw_set_max_rpt_macid(struct adapter *adapter, u8 macid)
+{
+       rtw_write8(adapter, REG_TX_RPT_CTRL + 1, macid + 1);
+}
+
 static u8 search_max_mac_id(struct adapter *padapter)
 {
        u8 mac_id;
@@ -1141,7 +1114,8 @@ void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta,
                return;
 
        macid = search_max_mac_id(adapter);
-       SetHwReg8188EU(adapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&macid);
+       rtw_set_max_rpt_macid(adapter, macid);
+
        /* MACID|OPMODE:1 connect */
        media_status_rpt = (u16)((psta->mac_id << 8) | mstatus);
        SetHwReg8188EU(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status_rpt);
@@ -1299,7 +1273,7 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
 }
 
 /*
-* _rtw_join_timeout_handler - Timeout/faliure handler for CMD JoinBss
+* _rtw_join_timeout_handler - Timeout/failure handler for CMD JoinBss
 * @adapter: pointer to struct adapter structure
 */
 void _rtw_join_timeout_handler (struct adapter *adapter)
@@ -1310,7 +1284,7 @@ void _rtw_join_timeout_handler (struct adapter *adapter)
        if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
                return;
 
-       spin_lock_bh(&pmlmepriv->lock);
+       spin_lock_irq(&pmlmepriv->lock);
 
        if (rtw_to_roaming(adapter) > 0) { /* join timeout caused by roaming */
                while (1) {
@@ -1329,12 +1303,12 @@ void _rtw_join_timeout_handler (struct adapter *adapter)
                rtw_indicate_disconnect(adapter);
                free_scanqueue(pmlmepriv);/*  */
        }
-       spin_unlock_bh(&pmlmepriv->lock);
+       spin_unlock_irq(&pmlmepriv->lock);
 
 }
 
 /*
-* rtw_scan_timeout_handler - Timeout/Faliure handler for CMD SiteSurvey
+* rtw_scan_timeout_handler - Timeout/Failure handler for CMD SiteSurvey
 * @adapter: pointer to struct adapter structure
 */
 void rtw_scan_timeout_handler (struct adapter *adapter)
@@ -1414,6 +1388,7 @@ static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
 {
        int updated = false;
        struct adapter *adapter = container_of(pmlmepriv, struct adapter, mlmepriv);
+       unsigned long scan_res_expire;
 
        /* check bssid, if needed */
        if (pmlmepriv->assoc_by_bssid) {
@@ -1431,8 +1406,9 @@ static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
        if (!rtw_is_desired_network(adapter, competitor))
                goto exit;
 
+       scan_res_expire = competitor->last_scanned + msecs_to_jiffies(RTW_SCAN_RESULT_EXPIRE);
        if (rtw_to_roaming(adapter) > 0) {
-               if (rtw_get_passing_time_ms((u32)competitor->last_scanned) >= RTW_SCAN_RESULT_EXPIRE ||
+               if (time_after(jiffies, scan_res_expire) ||
                    !is_same_ess(&competitor->network, &pmlmepriv->cur_network.network))
                        goto exit;
        }
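
The roaming age check moves from the driver-private rtw_get_passing_time_ms() to standard jiffies arithmetic: the expiry point is computed once with msecs_to_jiffies() and compared with time_after(), which stays correct across jiffies wrap-around. A small self-contained example of the idiom (SCAN_RESULT_EXPIRE_MS and the function name are illustrative):

        #include <linux/jiffies.h>

        #define SCAN_RESULT_EXPIRE_MS   4000    /* illustrative value */

        static bool scan_result_expired(unsigned long last_scanned)
        {
                unsigned long expire = last_scanned +
                                       msecs_to_jiffies(SCAN_RESULT_EXPIRE_MS);

                /* time_after() handles jiffies wrap-around correctly */
                return time_after(jiffies, expire);
        }
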
@@ -1461,7 +1437,6 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
        struct __queue *queue   = &pmlmepriv->scanned_queue;
        struct  wlan_network    *pnetwork = NULL;
        struct  wlan_network    *candidate = NULL;
-       u8      supp_ant_div = false;
 
        spin_lock_bh(&pmlmepriv->scanned_queue.lock);
        phead = get_list_head(queue);
@@ -1488,12 +1463,6 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
                rtw_free_assoc_resources(adapter, 0);
        }
 
-       GetHalDefVar8188EUsb(adapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &supp_ant_div);
-       if (supp_ant_div) {
-               u8 cur_ant;
-               GetHalDefVar8188EUsb(adapter, HAL_DEF_CURRENT_ANTENNA, &cur_ant);
-       }
-
        ret = rtw_joinbss_cmd(adapter, candidate);
 
 exit:
@@ -1509,13 +1478,13 @@ int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
        struct  cmd_priv *pcmdpriv = &adapter->cmdpriv;
        int             res = _SUCCESS;
 
-       pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       pcmd = kzalloc(sizeof(*pcmd), GFP_KERNEL);
        if (!pcmd) {
                res = _FAIL;  /* try again */
                goto exit;
        }
 
-       psetauthparm = kzalloc(sizeof(struct setauth_parm), GFP_KERNEL);
+       psetauthparm = kzalloc(sizeof(*psetauthparm), GFP_KERNEL);
        if (!psetauthparm) {
                kfree(pcmd);
                res = _FAIL;
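
rtw_set_auth() switches both allocations to the kzalloc(sizeof(*ptr), ...) form recommended by the kernel coding style: the size expression follows the pointer's type, so it cannot go stale if the type is ever renamed or changed. A tiny self-contained example (the struct body is a placeholder, not the driver's):

        #include <linux/slab.h>

        struct cmd_obj { int cmd_code; };       /* placeholder */

        static struct cmd_obj *alloc_cmd(void)
        {
                /* sizeof(*pcmd) always matches whatever type pcmd points to */
                struct cmd_obj *pcmd = kzalloc(sizeof(*pcmd), GFP_KERNEL);

                return pcmd;    /* NULL on allocation failure */
        }
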
@@ -1628,38 +1597,22 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
 }
 
 /*  */
-/*  Ported from 8185: IsInPreAuthKeyList(). (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.) */
-/*  Added by Annie, 2006-05-07. */
-/*  */
 /*  Search by BSSID, */
 /*  Return Value: */
-/*             -1              :if there is no pre-auth key in the  table */
-/*             >= 0            :if there is pre-auth key, and   return the entry id */
+/*             -1              :if there is no pre-auth key in the  table */
+/*             >= 0            :if there is pre-auth key, and   return the entry id */
 /*  */
 /*  */
 
 static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
 {
-       struct security_priv *psecuritypriv = &Adapter->securitypriv;
-       int i = 0;
-
-       do {
-               if ((psecuritypriv->PMKIDList[i].bUsed) &&
-                   (!memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN))) {
-                       break;
-               } else {
-                       i++;
-                       /* continue; */
-               }
-
-       } while (i < NUM_PMKID_CACHE);
+       struct security_priv *p = &Adapter->securitypriv;
+       int i;
 
-       if (i == NUM_PMKID_CACHE) {
-               i = -1;/*  Could not find. */
-       } else {
-               /*  There is one Pre-Authentication Key for the specific BSSID. */
-       }
-       return i;
+       for (i = 0; i < NUM_PMKID_CACHE; i++)
+               if (p->PMKIDList[i].bUsed && !memcmp(p->PMKIDList[i].Bssid, bssid, ETH_ALEN))
+                       return i;
+       return -1;
 }
 
 /*  */
@@ -1796,10 +1749,23 @@ void rtw_update_registrypriv_dev_network(struct adapter *adapter)
 
 }
 
+static void rtw_set_threshold(struct adapter *adapter)
+{
+       struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+       struct ht_priv *htpriv = &mlmepriv->htpriv;
+
+       if (htpriv->ht_option && adapter->registrypriv.wifi_spec != 1) {
+               /* validate usb rx aggregation, use init value. */
+               rtw_write8(adapter, REG_RXDMA_AGG_PG_TH, USB_RXAGG_PAGE_COUNT);
+       } else {
+               /* invalidate usb rx aggregation */
+               rtw_write8(adapter, REG_RXDMA_AGG_PG_TH, 1);
+       }
+}
+
 /* the function is at passive_level */
 void rtw_joinbss_reset(struct adapter *padapter)
 {
-       u8      threshold;
        struct mlme_priv        *pmlmepriv = &padapter->mlmepriv;
        struct ht_priv          *phtpriv = &pmlmepriv->htpriv;
 
@@ -1810,18 +1776,7 @@ void rtw_joinbss_reset(struct adapter *padapter)
 
        phtpriv->ampdu_enable = false;/* reset to disabled */
 
-       /*  TH = 1 => means that invalidate usb rx aggregation */
-       /*  TH = 0 => means that validate usb rx aggregation, use init value. */
-       if (phtpriv->ht_option) {
-               if (padapter->registrypriv.wifi_spec == 1)
-                       threshold = 1;
-               else
-                       threshold = 0;
-               SetHwReg8188EU(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
-       } else {
-               threshold = 1;
-               SetHwReg8188EU(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
-       }
+       rtw_set_threshold(padapter);
 }
 
 /* the function is >= passive_level */
@@ -1984,7 +1939,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
                issued = (phtpriv->agg_enable_bitmap >> priority) & 0x1;
                issued |= (phtpriv->candidate_tid_bitmap >> priority) & 0x1;
 
-               if (0 == issued) {
+               if (issued == 0) {
                        psta->htpriv.candidate_tid_bitmap |= BIT((u8)priority);
                        rtw_addbareq_cmd(padapter, (u8)priority, pattrib->ra);
                }
@@ -2011,19 +1966,19 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
        else
                pnetwork = &pmlmepriv->cur_network;
 
-       if (0 < rtw_to_roaming(padapter)) {
+       if (rtw_to_roaming(padapter) > 0) {
                memcpy(&pmlmepriv->assoc_ssid, &pnetwork->network.Ssid, sizeof(struct ndis_802_11_ssid));
 
                pmlmepriv->assoc_by_bssid = false;
 
                while (1) {
                        do_join_r = rtw_do_join(padapter);
-                       if (_SUCCESS == do_join_r) {
+                       if (do_join_r == _SUCCESS) {
                                break;
                        } else {
                                pmlmepriv->to_roaming--;
 
-                               if (0 < pmlmepriv->to_roaming) {
+                               if (pmlmepriv->to_roaming > 0) {
                                        continue;
                                } else {
                                        rtw_indicate_disconnect(padapter);
index 10d5f12..faf23fc 100644 (file)
 #include "../include/rtl8188e_xmit.h"
 #include "../include/rtl8188e_dm.h"
 
-static struct mlme_handler mlme_sta_tbl[] = {
-       {WIFI_ASSOCREQ,         "OnAssocReq",   &OnAssocReq},
-       {WIFI_ASSOCRSP,         "OnAssocRsp",   &OnAssocRsp},
-       {WIFI_REASSOCREQ,       "OnReAssocReq", &OnAssocReq},
-       {WIFI_REASSOCRSP,       "OnReAssocRsp", &OnAssocRsp},
-       {WIFI_PROBEREQ,         "OnProbeReq",   &OnProbeReq},
-       {WIFI_PROBERSP,         "OnProbeRsp",           &OnProbeRsp},
-
-       /*----------------------------------------------------------
-                                       below 2 are reserved
-       -----------------------------------------------------------*/
-       {0,                                     "DoReserved",           &DoReserved},
-       {0,                                     "DoReserved",           &DoReserved},
-       {WIFI_BEACON,           "OnBeacon",             &OnBeacon},
-       {WIFI_ATIM,                     "OnATIM",               &OnAtim},
-       {WIFI_DISASSOC,         "OnDisassoc",           &OnDisassoc},
-       {WIFI_AUTH,                     "OnAuth",               &OnAuthClient},
-       {WIFI_DEAUTH,           "OnDeAuth",             &OnDeAuth},
-       {WIFI_ACTION,           "OnAction",             &OnAction},
-};
-
-static struct action_handler OnAction_tbl[] = {
-       {RTW_WLAN_CATEGORY_SPECTRUM_MGMT,        "ACTION_SPECTRUM_MGMT", on_action_spct},
-       {RTW_WLAN_CATEGORY_QOS, "ACTION_QOS", &OnAction_qos},
-       {RTW_WLAN_CATEGORY_DLS, "ACTION_DLS", &OnAction_dls},
-       {RTW_WLAN_CATEGORY_BACK, "ACTION_BACK", &OnAction_back},
-       {RTW_WLAN_CATEGORY_PUBLIC, "ACTION_PUBLIC", on_action_public},
-       {RTW_WLAN_CATEGORY_RADIO_MEASUREMENT, "ACTION_RADIO_MEASUREMENT", &DoReserved},
-       {RTW_WLAN_CATEGORY_FT, "ACTION_FT",     &DoReserved},
-       {RTW_WLAN_CATEGORY_HT,  "ACTION_HT",    &OnAction_ht},
-       {RTW_WLAN_CATEGORY_SA_QUERY, "ACTION_SA_QUERY", &DoReserved},
-       {RTW_WLAN_CATEGORY_WMM, "ACTION_WMM", &OnAction_wmm},
-       {RTW_WLAN_CATEGORY_P2P, "ACTION_P2P", &OnAction_p2p},
+/* response function for each management frame subtype, do not reorder */
+static mlme_handler mlme_sta_tbl[] = {
+       OnAssocReq,
+       OnAssocRsp,
+       OnAssocReq,
+       OnAssocRsp,
+       OnProbeReq,
+       OnProbeRsp,
+       NULL,
+       NULL,
+       OnBeacon,
+       NULL,
+       OnDisassoc,
+       OnAuthClient,
+       OnDeAuth,
+       OnAction,
 };
 
 static u8 null_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
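
The handler table drops its per-entry frame-type codes and debug strings and becomes a bare array of mlme_handler function pointers indexed by the management subtype, which is why the new comment insists the order must not change; the two reserved subtypes and the ATIM slot hold NULL and are skipped at dispatch time, as the mgt_dispatcher() hunk further down shows. A reduced sketch of this table-driven dispatch, with made-up handler and frame types:

        #include <linux/ieee80211.h>
        #include <linux/kernel.h>

        struct rx_frame;        /* stands in for the driver's recv_frame */
        typedef void (*mgmt_handler)(struct rx_frame *frame);

        static void on_assoc_req(struct rx_frame *frame) { /* ... */ }
        static void on_beacon(struct rx_frame *frame) { /* ... */ }

        /* one slot per management subtype; NULL means "not handled" */
        static const mgmt_handler handlers[] = {
                [IEEE80211_STYPE_ASSOC_REQ >> 4] = on_assoc_req,
                [IEEE80211_STYPE_BEACON >> 4]    = on_beacon,
        };

        static void dispatch(struct rx_frame *frame, __le16 frame_control)
        {
                unsigned int idx;

                idx = (le16_to_cpu(frame_control) & IEEE80211_FCTL_STYPE) >> 4;
                if (idx < ARRAY_SIZE(handlers) && handlers[idx])
                        handlers[idx](frame);
        }
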
@@ -71,7 +54,6 @@ extern unsigned char REALTEK_96B_IE[];
 /********************************************************
 MCS rate definitions
 *********************************************************/
-unsigned char  MCS_rate_2R[16] = {0xff, 0xff, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
 unsigned char  MCS_rate_1R[16] = {0xff, 0x00, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
 
 /********************************************************
@@ -287,11 +269,11 @@ static void init_channel_list(struct adapter *padapter, struct rt_channel_info *
                                continue;
                        }
 
-                       if ((0 == padapter->registrypriv.ht_enable) && (8 == o->inc))
+                       if ((padapter->registrypriv.ht_enable == 0) && (o->inc == 8))
                                continue;
 
-                       if ((0 == (padapter->registrypriv.cbw40_enable & BIT(1))) &&
-                           ((BW40MINUS == o->bw) || (BW40PLUS == o->bw)))
+                       if (((padapter->registrypriv.cbw40_enable & BIT(1)) == 0) &&
+                           ((o->bw == BW40MINUS) || (o->bw == BW40PLUS)))
                                continue;
 
                        if (!reg) {
@@ -320,7 +302,7 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
 
        if (padapter->registrypriv.wireless_mode & WIRELESS_11G) {
                b2_4GBand = true;
-               if (RT_CHANNEL_DOMAIN_REALTEK_DEFINE == ChannelPlan)
+               if (ChannelPlan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
                        Index2G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index2G;
                else
                        Index2G = RTW_ChannelPlanMap[ChannelPlan].Index2G;
@@ -330,14 +312,14 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
                for (index = 0; index < RTW_ChannelPlan2G[Index2G].Len; index++) {
                        channel_set[chanset_size].ChannelNum = RTW_ChannelPlan2G[Index2G].Channel[index];
 
-                       if ((RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN == ChannelPlan) ||/* Channel 1~11 is active, and 12~14 is passive */
-                           (RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G == ChannelPlan)) {
+                       if ((ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN) ||/* Channel 1~11 is active, and 12~14 is passive */
+                           (ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G)) {
                                if (channel_set[chanset_size].ChannelNum >= 1 && channel_set[chanset_size].ChannelNum <= 11)
                                        channel_set[chanset_size].ScanType = SCAN_ACTIVE;
                                else if ((channel_set[chanset_size].ChannelNum  >= 12 && channel_set[chanset_size].ChannelNum  <= 14))
                                        channel_set[chanset_size].ScanType  = SCAN_PASSIVE;
-                       } else if (RT_CHANNEL_DOMAIN_WORLD_WIDE_13 == ChannelPlan ||
-                                  RT_CHANNEL_DOMAIN_2G_WORLD == Index2G) {/*  channel 12~13, passive scan */
+                       } else if (ChannelPlan == RT_CHANNEL_DOMAIN_WORLD_WIDE_13 ||
+                                  Index2G == RT_CHANNEL_DOMAIN_2G_WORLD) {/*  channel 12~13, passive scan */
                                if (channel_set[chanset_size].ChannelNum <= 11)
                                        channel_set[chanset_size].ScanType = SCAN_ACTIVE;
                                else
@@ -352,9 +334,8 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
        return chanset_size;
 }
 
-int    init_mlme_ext_priv(struct adapter *padapter)
+void init_mlme_ext_priv(struct adapter *padapter)
 {
-       int     res = _SUCCESS;
        struct registry_priv *pregistrypriv = &padapter->registrypriv;
        struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -376,8 +357,6 @@ int init_mlme_ext_priv(struct adapter *padapter)
        pmlmeext->mlmeext_init = true;
 
        pmlmeext->active_keep_alive_check = true;
-
-       return res;
 }
 
 void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
@@ -394,45 +373,29 @@ void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
        }
 }
 
-static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, struct recv_frame *precv_frame)
-{
-       u8 *pframe = precv_frame->rx_data;
-
-       if (ptable->func) {
-       /* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
-               if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
-                   !is_broadcast_ether_addr(GetAddr1Ptr(pframe)))
-                       return;
-               ptable->func(padapter, precv_frame);
-       }
-}
-
 void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
 {
        int index;
-       struct mlme_handler *ptable;
+       mlme_handler fct;
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-       u8 *pframe = precv_frame->rx_data;
-       struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(pframe));
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
+       struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, hdr->addr2);
 
-       if (GetFrameType(pframe) != IEEE80211_FTYPE_MGMT)
+       if (!ieee80211_is_mgmt(hdr->frame_control))
                return;
 
        /* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
-       if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
-           !is_broadcast_ether_addr(GetAddr1Ptr(pframe)))
+       if (memcmp(hdr->addr1, myid(&padapter->eeprompriv), ETH_ALEN) &&
+           !is_broadcast_ether_addr(hdr->addr1))
                return;
 
-       ptable = mlme_sta_tbl;
-
-       index = GetFrameSubType(pframe) >> 4;
-
-       if (index > 13)
+       index = (le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE) >> 4;
+       if (index >= ARRAY_SIZE(mlme_sta_tbl))
                return;
-       ptable += index;
+       fct = mlme_sta_tbl[index];
 
        if (psta) {
-               if (GetRetry(pframe)) {
+               if (ieee80211_has_retry(hdr->frame_control)) {
                        if (precv_frame->attrib.seq_num == psta->RxMgmtFrameSeqNum)
                                /* drop the duplicate management frame */
                                return;
@@ -440,13 +403,15 @@ void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
                psta->RxMgmtFrameSeqNum = precv_frame->attrib.seq_num;
        }
 
-       if (GetFrameSubType(pframe) == WIFI_AUTH) {
+       if (ieee80211_is_auth(hdr->frame_control)) {
                if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
-                       ptable->func = &OnAuth;
+                       fct = OnAuth;
                else
-                       ptable->func = &OnAuthClient;
+                       fct = OnAuthClient;
        }
-       _mgt_dispatcher(padapter, ptable, precv_frame);
+
+       if (fct)
+               fct(padapter, precv_frame);
 }
 
 static u32 p2p_listen_state_process(struct adapter *padapter, unsigned char *da)
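
mgt_dispatcher() now treats the receive buffer as a struct ieee80211_hdr and relies on the shared helpers from <linux/ieee80211.h> (ieee80211_is_mgmt(), ieee80211_has_retry(), ieee80211_is_auth()) and on plain addr1/addr2 fields, instead of the driver's GetFrameType/GetFrameSubType/GetAddr*Ptr macros. A small sketch of inspecting a frame with those helpers, assuming buf points at a complete 802.11 header (frame_is_for_us and own_addr are illustrative):

        #include <linux/etherdevice.h>
        #include <linux/ieee80211.h>

        static bool frame_is_for_us(const u8 *buf, const u8 *own_addr)
        {
                const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)buf;

                if (!ieee80211_is_mgmt(hdr->frame_control))
                        return false;

                /* accept frames addressed to us or to broadcast, as the dispatcher does */
                return ether_addr_equal(hdr->addr1, own_addr) ||
                       is_broadcast_ether_addr(hdr->addr1);
        }
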
@@ -482,7 +447,6 @@ unsigned int OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame
        u8 is_valid_p2p_probereq = false;
 
        struct wifidirect_info  *pwdinfo = &padapter->wdinfo;
-       u8 wifi_test_chk_rate = 1;
 
        if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) &&
            !rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE) &&
@@ -497,25 +461,18 @@ unsigned int OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame
 
                /*      Commented by Kurt 2012/10/16 */
                /*      IOT issue: Google Nexus7 use 1M rate to send p2p_probe_req after GO nego completed and Nexus7 is client */
-               if (wifi_test_chk_rate == 1) {
-                       is_valid_p2p_probereq = process_probe_req_p2p_ie(pwdinfo, pframe, len);
-                       if (is_valid_p2p_probereq) {
-                               if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
-                                       /*  FIXME */
-                                       report_survey_event(padapter, precv_frame);
-                                       p2p_listen_state_process(padapter,  get_sa(pframe));
-
-                                       return _SUCCESS;
-                               }
-
-                               if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
-                                       goto _continue;
+               is_valid_p2p_probereq = process_probe_req_p2p_ie(pwdinfo, pframe, len);
+               if (is_valid_p2p_probereq) {
+                       if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
+                               /*  FIXME */
+                               report_survey_event(padapter, precv_frame);
+                               p2p_listen_state_process(padapter,  get_sa(pframe));
+
+                               return _SUCCESS;
                        }
                }
        }
 
-_continue:
-
        if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
                return _SUCCESS;
 
@@ -622,7 +579,7 @@ unsigned int OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame)
                        }
 
                        /* check the vendor of the assoc AP */
-                       pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe + sizeof(struct rtw_ieee80211_hdr_3addr), len - sizeof(struct rtw_ieee80211_hdr_3addr));
+                       pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe + sizeof(struct ieee80211_hdr_3addr), len - sizeof(struct ieee80211_hdr_3addr));
 
                        /* update TSF Value */
                        update_TSF(pmlmeext, pframe, len);
@@ -988,7 +945,7 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
                        status = _STATS_FAILURE_;
        }
 
-       if (_STATS_SUCCESSFUL_ != status)
+       if (status != _STATS_SUCCESSFUL_)
                goto OnAssocReqFail;
 
        /*  check if the supported rate is ok */
@@ -1077,7 +1034,7 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
                wpa_ie_len = 0;
        }
 
-       if (_STATS_SUCCESSFUL_ != status)
+       if (status != _STATS_SUCCESSFUL_)
                goto OnAssocReqFail;
 
        pstat->flags &= ~(WLAN_STA_WPS | WLAN_STA_MAYBE_WPS);
@@ -1272,7 +1229,7 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
        spin_unlock_bh(&pstapriv->asoc_list_lock);
 
        /*  now the station is qualified to join our BSS... */
-       if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (_STATS_SUCCESSFUL_ == status)) {
+       if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (status == _STATS_SUCCESSFUL_)) {
                /* 1 bss_cap_update & sta_info_update */
                bss_cap_update_on_sta_join(padapter, pstat);
                sta_info_update(padapter, pstat);
@@ -1315,7 +1272,6 @@ unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame
        int res;
        unsigned short  status;
        struct ndis_802_11_var_ie *pIE;
-       struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
        /* struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network); */
@@ -1386,11 +1342,6 @@ unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame
        UpdateBrateTbl(padapter, pmlmeinfo->network.SupportedRates);
 
 report_assoc_result:
-       if (res > 0)
-               rtw_buf_update(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len, pframe, pkt_len);
-       else
-               kfree(pmlmepriv->assoc_rsp);
-
        report_join_res(padapter, res);
 
        return _SUCCESS;
@@ -1448,7 +1399,7 @@ unsigned int OnDeAuth(struct adapter *padapter, struct recv_frame *precv_frame)
                    (pmlmeinfo->state & WIFI_FW_ASSOC_STATE)) {
                        if (reason == WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA) {
                                ignore_received_deauth = 1;
-                       } else if (WLAN_REASON_PREV_AUTH_NOT_VALID == reason) {
+                       } else if (reason == WLAN_REASON_PREV_AUTH_NOT_VALID) {
                                // TODO: 802.11r
                                ignore_received_deauth = 1;
                        }
@@ -1508,126 +1459,76 @@ unsigned int OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame
        return _SUCCESS;
 }
 
-unsigned int OnAtim(struct adapter *padapter, struct recv_frame *precv_frame)
-{
-       return _SUCCESS;
-}
-
-unsigned int on_action_spct(struct adapter *padapter, struct recv_frame *precv_frame)
-{
-       unsigned int ret = _FAIL;
-       struct sta_info *psta = NULL;
-       struct sta_priv *pstapriv = &padapter->stapriv;
-       u8 *pframe = precv_frame->rx_data;
-       u8 *frame_body = (u8 *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
-       u8 category;
-       u8 action;
-
-       psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
-
-       if (!psta)
-               goto exit;
-
-       category = frame_body[0];
-       if (category != RTW_WLAN_CATEGORY_SPECTRUM_MGMT)
-               goto exit;
-
-       action = frame_body[1];
-       switch (action) {
-       case RTW_WLAN_ACTION_SPCT_MSR_REQ:
-       case RTW_WLAN_ACTION_SPCT_MSR_RPRT:
-       case RTW_WLAN_ACTION_SPCT_TPC_REQ:
-       case RTW_WLAN_ACTION_SPCT_TPC_RPRT:
-               break;
-       case RTW_WLAN_ACTION_SPCT_CHL_SWITCH:
-               break;
-       default:
-               break;
-       }
-
-exit:
-       return ret;
-}
-
-unsigned int OnAction_qos(struct adapter *padapter, struct recv_frame *precv_frame)
-{
-       return _SUCCESS;
-}
-
-unsigned int OnAction_dls(struct adapter *padapter, struct recv_frame *precv_frame)
-{
-       return _SUCCESS;
-}
-
 unsigned int OnAction_back(struct adapter *padapter, struct recv_frame *precv_frame)
 {
-       u8 *addr;
+       struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
        struct sta_info *psta = NULL;
        struct recv_reorder_ctrl *preorder_ctrl;
        unsigned char           *frame_body;
-       unsigned char           category, action;
-       unsigned short  tid, status;
+       unsigned short  tid;
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
        u8 *pframe = precv_frame->rx_data;
        struct sta_priv *pstapriv = &padapter->stapriv;
        /* check RA matches or not */
-       if (memcmp(myid(&padapter->eeprompriv), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
+       if (memcmp(myid(&padapter->eeprompriv), mgmt->da, ETH_ALEN))/* for if1, sta/ap mode */
                return _SUCCESS;
 
        if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
                if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
                        return _SUCCESS;
 
-       addr = GetAddr2Ptr(pframe);
-       psta = rtw_get_stainfo(pstapriv, addr);
+       psta = rtw_get_stainfo(pstapriv, mgmt->sa);
 
        if (!psta)
                return _SUCCESS;
 
-       frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+       frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
 
-       category = frame_body[0];
-       if (category == RTW_WLAN_CATEGORY_BACK) { /*  representing Block Ack */
-               if (!pmlmeinfo->HT_enable)
-                       return _SUCCESS;
-               action = frame_body[1];
-               switch (action) {
-               case RTW_WLAN_ACTION_ADDBA_REQ: /* ADDBA request */
-                       memcpy(&pmlmeinfo->ADDBA_req, &frame_body[2], sizeof(struct ADDBA_request));
-                       process_addba_req(padapter, (u8 *)&pmlmeinfo->ADDBA_req, addr);
-
-                       if (pmlmeinfo->bAcceptAddbaReq)
-                               issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 0);
-                       else
-                               issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 37);/* reject ADDBA Req */
-                       break;
-               case RTW_WLAN_ACTION_ADDBA_RESP: /* ADDBA response */
-                       status = get_unaligned_le16(&frame_body[3]);
-                       tid = ((frame_body[5] >> 2) & 0x7);
-                       if (status == 0) {      /* successful */
-                               psta->htpriv.agg_enable_bitmap |= 1 << tid;
-                               psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
-                       } else {
-                               psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
-                       }
-                       break;
-               case RTW_WLAN_ACTION_DELBA: /* DELBA */
-                       if ((frame_body[3] & BIT(3)) == 0) {
-                               psta->htpriv.agg_enable_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
-                               psta->htpriv.candidate_tid_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
-                       } else if ((frame_body[3] & BIT(3)) == BIT(3)) {
-                               tid = (frame_body[3] >> 4) & 0x0F;
-                               preorder_ctrl =  &psta->recvreorder_ctrl[tid];
-                               preorder_ctrl->enable = false;
-                               preorder_ctrl->indicate_seq = 0xffff;
-                       }
-                       /* todo: how to notify the host while receiving DELETE BA */
-                       break;
-               default:
-                       break;
+       if (!pmlmeinfo->HT_enable)
+               return _SUCCESS;
+       /* All union members start with an action code, it's ok to use addba_req. */
+       switch (mgmt->u.action.u.addba_req.action_code) {
+       case WLAN_ACTION_ADDBA_REQ:
+               memcpy(&pmlmeinfo->ADDBA_req, &frame_body[2], sizeof(struct ADDBA_request));
+               tid = u16_get_bits(le16_to_cpu(mgmt->u.action.u.addba_req.capab),
+                                  IEEE80211_ADDBA_PARAM_TID_MASK);
+               preorder_ctrl = &psta->recvreorder_ctrl[tid];
+               preorder_ctrl->indicate_seq = 0xffff;
+               preorder_ctrl->enable = pmlmeinfo->bAcceptAddbaReq;
+
+               issue_action_BA(padapter, mgmt->sa, WLAN_ACTION_ADDBA_RESP,
+                               pmlmeinfo->bAcceptAddbaReq ?
+                                       WLAN_STATUS_SUCCESS : WLAN_STATUS_REQUEST_DECLINED);
+               break;
+       case WLAN_ACTION_ADDBA_RESP:
+               tid = u16_get_bits(le16_to_cpu(mgmt->u.action.u.addba_resp.capab),
+                                  IEEE80211_ADDBA_PARAM_TID_MASK);
+               if (mgmt->u.action.u.addba_resp.status == 0) {  /* successful */
+                       psta->htpriv.agg_enable_bitmap |= BIT(tid);
+                       psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
+               } else {
+                       psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
+               }
+               break;
+       case WLAN_ACTION_DELBA:
+               tid = u16_get_bits(le16_to_cpu(mgmt->u.action.u.delba.params),
+                                  IEEE80211_DELBA_PARAM_TID_MASK);
+               if (u16_get_bits(le16_to_cpu(mgmt->u.action.u.delba.params),
+                                IEEE80211_DELBA_PARAM_INITIATOR_MASK) == WLAN_BACK_RECIPIENT) {
+                       psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
+                       psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
+               } else {
+                       preorder_ctrl =  &psta->recvreorder_ctrl[tid];
+                       preorder_ctrl->enable = false;
+                       preorder_ctrl->indicate_seq = 0xffff;
                }
+               /* todo: how to notify the host while receiving DELETE BA */
+               break;
+       default:
+               break;
        }
+
        return _SUCCESS;
 }
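
OnAction_back() is rebuilt on struct ieee80211_mgmt: the action code is read from the union (all members start with it, hence the comment about addba_req), and the TID and initiator bits come out of the little-endian capab/params words via u16_get_bits() with the IEEE80211_ADDBA_PARAM_* and IEEE80211_DELBA_PARAM_* masks instead of hand-rolled shifts against raw frame bytes. A focused example of pulling the TID out of an ADDBA request, using only definitions from <linux/ieee80211.h> and <linux/bitfield.h>:

        #include <linux/bitfield.h>
        #include <linux/ieee80211.h>

        static u8 addba_req_tid(const struct ieee80211_mgmt *mgmt)
        {
                u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);

                /* the TID occupies bits 2..5 of the Block Ack parameter set */
                return u16_get_bits(capab, IEEE80211_ADDBA_PARAM_TID_MASK);
        }
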
 
@@ -1645,7 +1546,7 @@ static int get_reg_classes_full_count(struct p2p_channels *channel_list)
 
 void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
 {
-       unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+       unsigned char category = WLAN_CATEGORY_PUBLIC;
        u8 action = P2P_PUB_ACTION_ACTION;
        __be32 p2poui = cpu_to_be32(P2POUI);
        u8 oui_subtype = P2P_GO_NEGO_REQ;
@@ -1655,7 +1556,7 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
        struct xmit_frame *pmgntframe;
        struct pkt_attrib *pattrib;
        unsigned char *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
@@ -1672,9 +1573,9 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -1685,8 +1586,8 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_ACTION);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
        pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
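
From this point on, every P2P frame builder (issue_p2p_GO_request() above and the response/confirm/invitation/provision and probe helpers below) is converted the same way: the driver-private struct rtw_ieee80211_hdr with its frame_ctl member gives way to the generic struct ieee80211_hdr and frame_control, header lengths use sizeof(struct ieee80211_hdr_3addr), and the public-action category comes from the shared WLAN_CATEGORY_PUBLIC constant. A compact sketch of filling a 3-address management header with the generic type (build_mgmt_hdr and its arguments are illustrative):

        #include <linux/etherdevice.h>
        #include <linux/ieee80211.h>
        #include <linux/string.h>

        static void build_mgmt_hdr(u8 *buf, const u8 *da, const u8 *sa,
                                   const u8 *bssid, u16 seq)
        {
                struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)buf;

                hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                                 IEEE80211_STYPE_ACTION);
                hdr->duration_id = 0;
                memcpy(hdr->addr1, da, ETH_ALEN);
                memcpy(hdr->addr2, sa, ETH_ALEN);
                memcpy(hdr->addr3, bssid, ETH_ALEN);
                hdr->seq_ctrl = cpu_to_le16(IEEE80211_SN_TO_SEQ(seq));
        }
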
@@ -1975,7 +1876,7 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
 
 static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame_body, uint len, u8 result)
 {
-       unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+       unsigned char category = WLAN_CATEGORY_PUBLIC;
        u8 action = P2P_PUB_ACTION_ACTION;
        __be32                  p2poui = cpu_to_be32(P2POUI);
        u8 oui_subtype = P2P_GO_NEGO_RESP;
@@ -1990,7 +1891,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
        struct xmit_frame                       *pmgntframe;
        struct pkt_attrib                       *pattrib;
        unsigned char                                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        struct xmit_priv                        *pxmitpriv = &padapter->xmitpriv;
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
@@ -2007,9 +1908,9 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -2020,8 +1921,8 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_ACTION);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
        pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -2337,7 +2238,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
 
 static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
 {
-       unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+       unsigned char category = WLAN_CATEGORY_PUBLIC;
        u8 action = P2P_PUB_ACTION_ACTION;
        __be32                  p2poui = cpu_to_be32(P2POUI);
        u8 oui_subtype = P2P_GO_NEGO_CONF;
@@ -2347,7 +2248,7 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
        struct xmit_frame                       *pmgntframe;
        struct pkt_attrib                       *pattrib;
        unsigned char                                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        struct xmit_priv                        *pxmitpriv = &padapter->xmitpriv;
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
@@ -2364,9 +2265,9 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -2377,8 +2278,8 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_ACTION);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
        pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -2498,7 +2399,7 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
 
 void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
 {
-       unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+       unsigned char category = WLAN_CATEGORY_PUBLIC;
        u8 action = P2P_PUB_ACTION_ACTION;
        __be32                  p2poui = cpu_to_be32(P2POUI);
        u8 oui_subtype = P2P_INVIT_REQ;
@@ -2509,7 +2410,7 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
        struct xmit_frame                       *pmgntframe;
        struct pkt_attrib                       *pattrib;
        unsigned char                                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        struct xmit_priv                        *pxmitpriv = &padapter->xmitpriv;
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
@@ -2526,9 +2427,9 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -2539,8 +2440,8 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_ACTION);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
        pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -2745,7 +2646,7 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
 
 void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialogToken, u8 status_code)
 {
-       unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+       unsigned char category = WLAN_CATEGORY_PUBLIC;
        u8 action = P2P_PUB_ACTION_ACTION;
        __be32                  p2poui = cpu_to_be32(P2POUI);
        u8 oui_subtype = P2P_INVIT_RESP;
@@ -2755,7 +2656,7 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
        struct xmit_frame                       *pmgntframe;
        struct pkt_attrib                       *pattrib;
        unsigned char                                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        struct xmit_priv                        *pxmitpriv = &padapter->xmitpriv;
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
@@ -2772,9 +2673,9 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -2785,8 +2686,8 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_ACTION);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
        pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -2935,7 +2836,7 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
 
 void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidlen, u8 *pdev_raddr)
 {
-       unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+       unsigned char category = WLAN_CATEGORY_PUBLIC;
        u8 action = P2P_PUB_ACTION_ACTION;
        u8 dialogToken = 1;
        u8 oui_subtype = P2P_PROVISION_DISC_REQ;
@@ -2946,7 +2847,7 @@ void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidle
        struct xmit_frame                       *pmgntframe;
        struct pkt_attrib                       *pattrib;
        unsigned char                                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        struct xmit_priv                        *pxmitpriv = &padapter->xmitpriv;
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
@@ -2963,9 +2864,9 @@ void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidle
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        memcpy(pwlanhdr->addr1, pdev_raddr, ETH_ALEN);
@@ -2976,8 +2877,8 @@ void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidle
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_ACTION);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
        pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -3045,7 +2946,7 @@ void issue_probersp_p2p(struct adapter *padapter, unsigned char *da)
        struct xmit_frame                       *pmgntframe;
        struct pkt_attrib                       *pattrib;
        unsigned char                                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        unsigned char                                   *mac;
        struct xmit_priv        *pxmitpriv = &padapter->xmitpriv;
@@ -3067,11 +2968,11 @@ void issue_probersp_p2p(struct adapter *padapter, unsigned char *da)
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
        mac = myid(&padapter->eeprompriv);
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
        memcpy(pwlanhdr->addr1, da, ETH_ALEN);
        memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
@@ -3083,7 +2984,7 @@ void issue_probersp_p2p(struct adapter *padapter, unsigned char *da)
        pmlmeext->mgnt_seq++;
        SetFrameSubType(fctrl, WIFI_PROBERSP);
 
-       pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
        pattrib->pktlen = pattrib->hdrlen;
        pframe += pattrib->hdrlen;
 
@@ -3291,7 +3192,7 @@ static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack)
        struct xmit_frame               *pmgntframe;
        struct pkt_attrib               *pattrib;
        unsigned char                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        unsigned char                   *mac;
        struct xmit_priv                *pxmitpriv = &padapter->xmitpriv;
@@ -3312,11 +3213,11 @@ static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack)
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
        mac = myid(&padapter->eeprompriv);
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        if (da) {
@@ -3339,8 +3240,8 @@ static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack)
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_PROBEREQ);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ))
                pframe = rtw_set_ie(pframe, _SSID_IE_, pwdinfo->tx_prov_disc_info.ssid.SsidLength, pwdinfo->tx_prov_disc_info.ssid.Ssid, &pattrib->pktlen);
@@ -3614,7 +3515,7 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
        u8      result = P2P_STATUS_SUCCESS;
        u8      empty_addr[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
 
-       frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+       frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
 
        dialogToken = frame_body[7];
 
@@ -3626,7 +3527,7 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
        if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE))
                return _SUCCESS;
 
-       len -= sizeof(struct rtw_ieee80211_hdr_3addr);
+       len -= sizeof(struct ieee80211_hdr_3addr);
 
        switch (frame_body[6]) { /* OUI Subtype */
        case P2P_GO_NEGO_REQ:
@@ -3668,7 +3569,7 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
                        pwdinfo->nego_req_info.benable = false;
                        result = process_p2p_group_negotation_resp(pwdinfo, frame_body, len);
                        issue_p2p_GO_confirm(pwdinfo->padapter, GetAddr2Ptr(pframe), result);
-                       if (P2P_STATUS_SUCCESS == result) {
+                       if (result == P2P_STATUS_SUCCESS) {
                                if (rtw_p2p_role(pwdinfo) == P2P_ROLE_CLIENT) {
                                        pwdinfo->p2p_info.operation_ch[0] = pwdinfo->peer_operating_ch;
                                        pwdinfo->p2p_info.scan_op_ch_only = 1;
@@ -3683,7 +3584,7 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
                break;
        case P2P_GO_NEGO_CONF:
                result = process_p2p_group_negotation_confirm(pwdinfo, frame_body, len);
-               if (P2P_STATUS_SUCCESS == result) {
+               if (result == P2P_STATUS_SUCCESS) {
                        if (rtw_p2p_role(pwdinfo) == P2P_ROLE_CLIENT) {
                                pwdinfo->p2p_info.operation_ch[0] = pwdinfo->peer_operating_ch;
                                pwdinfo->p2p_info.scan_op_ch_only = 1;
@@ -3867,7 +3768,7 @@ static unsigned int on_action_public_vendor(struct recv_frame *precv_frame)
 {
        unsigned int ret = _FAIL;
        u8 *pframe = precv_frame->rx_data;
-       u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+       u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
 
        if (!memcmp(frame_body + 2, P2P_OUI, 4)) {
                ret = on_action_public_p2p(precv_frame);
@@ -3880,7 +3781,7 @@ static unsigned int on_action_public_default(struct recv_frame *precv_frame)
 {
        unsigned int ret = _FAIL;
        u8 *pframe = precv_frame->rx_data;
-       u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+       u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
        u8 token;
 
        token = frame_body[2];
@@ -3898,7 +3799,7 @@ unsigned int on_action_public(struct adapter *padapter, struct recv_frame *precv
 {
        unsigned int ret = _FAIL;
        u8 *pframe = precv_frame->rx_data;
-       u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+       u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
        u8 category, action;
 
        /* check RA matches or not */
@@ -3906,7 +3807,7 @@ unsigned int on_action_public(struct adapter *padapter, struct recv_frame *precv
                goto exit;
 
        category = frame_body[0];
-       if (category != RTW_WLAN_CATEGORY_PUBLIC)
+       if (category != WLAN_CATEGORY_PUBLIC)
                goto exit;
 
        action = frame_body[1];
@@ -3923,16 +3824,6 @@ exit:
        return ret;
 }
 
-unsigned int OnAction_ht(struct adapter *padapter, struct recv_frame *precv_frame)
-{
-       return _SUCCESS;
-}
-
-unsigned int OnAction_wmm(struct adapter *padapter, struct recv_frame *precv_frame)
-{
-       return _SUCCESS;
-}
-
 unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_frame)
 {
        u8 *frame_body;
@@ -3945,7 +3836,7 @@ unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_fra
        if (memcmp(myid(&padapter->eeprompriv), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
                return _SUCCESS;
 
-       frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+       frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
 
        category = frame_body[0];
        if (category != RTW_WLAN_CATEGORY_P2P)
@@ -3954,7 +3845,7 @@ unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_fra
        if (be32_to_cpu(*((__be32 *)(frame_body + 1))) != P2POUI)
                return _SUCCESS;
 
-       len -= sizeof(struct rtw_ieee80211_hdr_3addr);
+       len -= sizeof(struct ieee80211_hdr_3addr);
        OUI_Subtype = frame_body[5];
 
        switch (OUI_Subtype) {
@@ -3975,29 +3866,22 @@ unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_fra
 
 unsigned int OnAction(struct adapter *padapter, struct recv_frame *precv_frame)
 {
-       int i;
-       unsigned char   category;
-       struct action_handler *ptable;
-       unsigned char   *frame_body;
-       u8 *pframe = precv_frame->rx_data;
-
-       frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+       struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
 
-       category = frame_body[0];
-
-       for (i = 0; i < sizeof(OnAction_tbl) / sizeof(struct action_handler); i++) {
-               ptable = &OnAction_tbl[i];
-               if (category == ptable->num)
-                       ptable->func(padapter, precv_frame);
+       switch (mgmt->u.action.category) {
+       case WLAN_CATEGORY_BACK:
+               OnAction_back(padapter, precv_frame);
+               break;
+       case WLAN_CATEGORY_PUBLIC:
+               on_action_public(padapter, precv_frame);
+               break;
+       case RTW_WLAN_CATEGORY_P2P:
+               OnAction_p2p(padapter, precv_frame);
+               break;
        }
        return _SUCCESS;
 }
 
-unsigned int DoReserved(struct adapter *padapter, struct recv_frame *precv_frame)
-{
-       return _SUCCESS;
-}
-
 struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv)
 {
        struct xmit_frame                       *pmgntframe;
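OnAction() above drops the OnAction_tbl lookup in favour of a switch on the category byte read through struct ieee80211_mgmt. A standalone sketch of that dispatch style, using the standard category values (Block Ack = 3, Public = 4) and leaving the driver-specific P2P case out:

#include <stdint.h>
#include <stdio.h>

enum { WLAN_CATEGORY_BACK = 3, WLAN_CATEGORY_PUBLIC = 4 };

static void on_action_back(const uint8_t *body)   { (void)body; puts("block-ack action"); }
static void on_action_public(const uint8_t *body) { (void)body; puts("public action"); }

static void on_action(const uint8_t *frame_body)
{
	switch (frame_body[0]) {	/* first byte of the action body = category */
	case WLAN_CATEGORY_BACK:
		on_action_back(frame_body);
		break;
	case WLAN_CATEGORY_PUBLIC:
		on_action_public(frame_body);
		break;
	default:
		break;			/* unknown categories are ignored */
	}
}

int main(void)
{
	uint8_t body[] = { WLAN_CATEGORY_PUBLIC, 0x09 /* action code */ };
	on_action(body);
	return 0;
}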
@@ -4154,7 +4038,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
        struct xmit_frame       *pmgntframe;
        struct pkt_attrib       *pattrib;
        unsigned char   *pframe;
-       struct rtw_ieee80211_hdr *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        unsigned int    rate_len;
        struct xmit_priv        *pxmitpriv = &padapter->xmitpriv;
@@ -4177,9 +4061,9 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        eth_broadcast_addr(pwlanhdr->addr1);
@@ -4190,8 +4074,8 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
        /* pmlmeext->mgnt_seq++; */
        SetFrameSubType(pframe, WIFI_BEACON);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
                /*  for P2P : Primary Device Type & Device Name */
@@ -4274,8 +4158,8 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
                        u8 *wps_ie;
                        uint wps_ielen;
                        u8 sr = 0;
-                       wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr + TXDESC_OFFSET + sizeof(struct rtw_ieee80211_hdr_3addr) + _BEACON_IE_OFFSET_,
-                               pattrib->pktlen - sizeof(struct rtw_ieee80211_hdr_3addr) - _BEACON_IE_OFFSET_, NULL, &wps_ielen);
+                       wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr + TXDESC_OFFSET + sizeof(struct ieee80211_hdr_3addr) + _BEACON_IE_OFFSET_,
+                               pattrib->pktlen - sizeof(struct ieee80211_hdr_3addr) - _BEACON_IE_OFFSET_, NULL, &wps_ielen);
                        if (wps_ie && wps_ielen > 0)
                                rtw_get_wps_attr_content(wps_ie,  wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
                        if (sr != 0)
@@ -4362,7 +4246,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
        struct xmit_frame                       *pmgntframe;
        struct pkt_attrib                       *pattrib;
        unsigned char                                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        unsigned char                                   *mac, *bssid;
        struct xmit_priv        *pxmitpriv = &padapter->xmitpriv;
@@ -4386,12 +4270,12 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
        mac = myid(&padapter->eeprompriv);
        bssid = cur_network->MacAddress;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
        memcpy(pwlanhdr->addr1, da, ETH_ALEN);
        memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
@@ -4401,7 +4285,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
        pmlmeext->mgnt_seq++;
        SetFrameSubType(fctrl, WIFI_PROBERSP);
 
-       pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
        pattrib->pktlen = pattrib->hdrlen;
        pframe += pattrib->hdrlen;
 
@@ -4511,7 +4395,7 @@ static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *ps
        struct xmit_frame               *pmgntframe;
        struct pkt_attrib               *pattrib;
        unsigned char                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        unsigned char                   *mac;
        unsigned char                   bssrate[NumRates];
@@ -4531,11 +4415,11 @@ static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *ps
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
        mac = myid(&padapter->eeprompriv);
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        if (da) {
@@ -4554,8 +4438,8 @@ static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *ps
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_PROBEREQ);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        if (pssid)
                pframe = rtw_set_ie(pframe, _SSID_IE_, pssid->SsidLength, pssid->Ssid, &pattrib->pktlen);
@@ -4629,7 +4513,7 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
        struct xmit_frame *pmgntframe;
        struct pkt_attrib *pattrib;
        unsigned char *pframe;
-       struct rtw_ieee80211_hdr *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        unsigned int val32;
        u16 val16;
@@ -4650,17 +4534,17 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_AUTH);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        if (psta) {/*  for AP mode */
                memcpy(pwlanhdr->addr1, psta->hwaddr, ETH_ALEN);
@@ -4734,7 +4618,7 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
 
                        SetPrivacy(fctrl);
 
-                       pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+                       pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
 
                        pattrib->encrypt = _WEP40_;
 
@@ -4753,7 +4637,7 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
 void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_info *pstat, int pkt_type)
 {
        struct xmit_frame       *pmgntframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        struct pkt_attrib *pattrib;
        unsigned char   *pbuf, *pframe;
        unsigned short val;
@@ -4778,9 +4662,9 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        memcpy((void *)GetAddr1Ptr(pwlanhdr), pstat->hwaddr, ETH_ALEN);
@@ -4794,7 +4678,7 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
        else
                return;
 
-       pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
        pattrib->pktlen += pattrib->hdrlen;
        pframe += pattrib->hdrlen;
 
@@ -4884,7 +4768,7 @@ void issue_assocreq(struct adapter *padapter)
        struct xmit_frame       *pmgntframe;
        struct pkt_attrib       *pattrib;
        unsigned char           *pframe, *p;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        __le16          le_tmp;
        unsigned int    i, j, ie_len, index = 0;
@@ -4910,9 +4794,9 @@ void issue_assocreq(struct adapter *padapter)
 
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
        memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
        memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
@@ -4922,8 +4806,8 @@ void issue_assocreq(struct adapter *padapter)
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_ASSOCREQ);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        /* caps */
 
@@ -5184,7 +5068,7 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
        struct xmit_frame                       *pmgntframe;
        struct pkt_attrib                       *pattrib;
        unsigned char                                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        struct xmit_priv        *pxmitpriv;
        struct mlme_ext_priv    *pmlmeext;
@@ -5209,9 +5093,9 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)
@@ -5230,8 +5114,8 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_DATA_NULL);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        pattrib->last_txcmdsz = pattrib->pktlen;
 
@@ -5286,7 +5170,7 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
        struct xmit_frame                       *pmgntframe;
        struct pkt_attrib                       *pattrib;
        unsigned char                                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        unsigned short *qc;
        struct xmit_priv                        *pxmitpriv = &padapter->xmitpriv;
@@ -5310,9 +5194,9 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)
@@ -5336,8 +5220,8 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr_qos);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr_qos);
+       pframe += sizeof(struct ieee80211_qos_hdr);
+       pattrib->pktlen = sizeof(struct ieee80211_qos_hdr);
 
        pattrib->last_txcmdsz = pattrib->pktlen;
 
@@ -5390,7 +5274,7 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned s
        struct xmit_frame                       *pmgntframe;
        struct pkt_attrib                       *pattrib;
        unsigned char                                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        struct xmit_priv                        *pxmitpriv = &padapter->xmitpriv;
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
@@ -5416,9 +5300,9 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned s
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        memcpy(pwlanhdr->addr1, da, ETH_ALEN);
@@ -5429,8 +5313,8 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned s
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_DEAUTH);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        le_tmp = cpu_to_le16(reason);
        pframe = rtw_set_fixed_ie(pframe, _RSON_CODE_, (unsigned char *)&le_tmp, &pattrib->pktlen);
@@ -5481,7 +5365,7 @@ exit:
 
 void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned char action, unsigned short status)
 {
-       u8 category = RTW_WLAN_CATEGORY_BACK;
+       u8 category = WLAN_CATEGORY_BACK;
        u16 start_seq;
        u16 BA_para_set;
        u16 reason_code;
@@ -5491,7 +5375,7 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
        struct xmit_frame *pmgntframe;
        struct pkt_attrib *pattrib;
        u8 *pframe;
-       struct rtw_ieee80211_hdr *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
        struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -5511,9 +5395,9 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        /* memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN); */
@@ -5525,8 +5409,8 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_ACTION);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        pframe = rtw_set_fixed_ie(pframe, 1, &(category), &pattrib->pktlen);
        pframe = rtw_set_fixed_ie(pframe, 1, &(action), &pattrib->pktlen);
@@ -5599,7 +5483,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
        struct xmit_frame                       *pmgntframe;
        struct pkt_attrib                       *pattrib;
        unsigned char                           *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        struct  wlan_network    *pnetwork = NULL;
        struct xmit_priv                        *pxmitpriv = &padapter->xmitpriv;
@@ -5615,7 +5499,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
        if (pmlmeinfo->bwmode_updated)
                return;
 
-       category = RTW_WLAN_CATEGORY_PUBLIC;
+       category = WLAN_CATEGORY_PUBLIC;
        action = ACT_PUBLIC_BSSCOEXIST;
 
        pmgntframe = alloc_mgtxmitframe(pxmitpriv);
@@ -5629,9 +5513,9 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
@@ -5642,8 +5526,8 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_ACTION);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
        pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -5759,32 +5643,38 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
 
 unsigned int send_beacon(struct adapter *padapter)
 {
-       u8 bxmitok = false;
+       bool bxmitok = false;
        int     issue = 0;
        int poll = 0;
 
-       u32 start = jiffies;
+       clear_beacon_valid_bit(padapter);
 
-       SetHwReg8188EU(padapter, HW_VAR_BCN_VALID, NULL);
        do {
                issue_beacon(padapter, 100);
                issue++;
                do {
                        yield();
-                       GetHwReg8188EU(padapter, HW_VAR_BCN_VALID, (u8 *)(&bxmitok));
+                       bxmitok = get_beacon_valid_bit(padapter);
                        poll++;
                } while ((poll % 10) != 0 && !bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
        } while (!bxmitok && issue < 100 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
 
-       if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
-               return _FAIL;
-       if (!bxmitok) {
+       if (padapter->bSurpriseRemoved || padapter->bDriverStopped || !bxmitok)
                return _FAIL;
-       } else {
-               rtw_get_passing_time_ms(start);
 
-               return _SUCCESS;
-       }
+       return _SUCCESS;
+}
+
+bool get_beacon_valid_bit(struct adapter *adapter)
+{
+       /* BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2 */
+       return BIT(0) & rtw_read8(adapter, REG_TDECTRL + 2);
+}
+
+void clear_beacon_valid_bit(struct adapter *adapter)
+{
+       /* BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2, write 1 to clear, Clear by sw */
+       rtw_write8(adapter, REG_TDECTRL + 2, rtw_read8(adapter, REG_TDECTRL + 2) | BIT(0));
 }
 
 /****************************************************************************
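send_beacon() now clears the beacon-valid bit itself, queues the beacon, and polls the bit, instead of going through the SetHwReg8188EU/GetHwReg8188EU indirection. A sketch of that confirm loop with the REG_TDECTRL access mocked by a plain variable (the real bit is write-1-to-clear; the mock simply flips it):

#include <stdbool.h>
#include <stdio.h>

static unsigned int fake_reg;			/* stands in for REG_TDECTRL */

static bool get_beacon_valid_bit(void)   { return fake_reg & (1u << 16); }
static void clear_beacon_valid_bit(void) { fake_reg &= ~(1u << 16); }
static void issue_beacon(void)           { fake_reg |= 1u << 16; /* pretend TX completed */ }

static bool send_beacon(void)
{
	int issue = 0, poll = 0;
	bool ok = false;

	clear_beacon_valid_bit();
	do {
		issue_beacon();
		issue++;
		do {
			ok = get_beacon_valid_bit();
			poll++;
		} while ((poll % 10) != 0 && !ok);
	} while (!ok && issue < 100);

	return ok;
}

int main(void)
{
	printf("beacon %s\n", send_beacon() ? "confirmed" : "failed");
	return 0;
}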
@@ -5793,13 +5683,27 @@ Following are some utility functions for WiFi MLME
 
 *****************************************************************************/
 
+static void rtw_set_initial_gain(struct adapter *adapter, u8 gain)
+{
+       struct hal_data_8188e *haldata = &adapter->haldata;
+       struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+       struct rtw_dig *digtable = &odmpriv->DM_DigTable;
+
+       if (gain == 0xff) {
+               /* restore rx gain */
+               ODM_Write_DIG(odmpriv, digtable->BackupIGValue);
+       } else {
+               digtable->BackupIGValue = digtable->CurIGValue;
+               ODM_Write_DIG(odmpriv, gain);
+       }
+}
+
 void site_survey(struct adapter *padapter)
 {
        unsigned char           survey_channel = 0, val8;
        enum rt_scan_type ScanType = SCAN_PASSIVE;
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
-       u32 initialgain = 0;
        struct wifidirect_info *pwdinfo = &padapter->wdinfo;
 
        if ((pwdinfo->rx_invitereq_info.scan_op_ch_only) || (pwdinfo->p2p_info.scan_op_ch_only)) {
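The new rtw_set_initial_gain() helper treats 0xff as "restore the gain saved earlier" and any other argument as "save the current DIG value, then force this gain for scanning". A minimal sketch of that save/restore convention with ODM_Write_DIG() mocked out:

#include <stdint.h>
#include <stdio.h>

static uint8_t cur_gain = 0x20, backup_gain;

static void write_dig(uint8_t gain)
{
	cur_gain = gain;			/* stands in for ODM_Write_DIG() */
	printf("gain -> 0x%02x\n", gain);
}

static void set_initial_gain(uint8_t gain)
{
	if (gain == 0xff) {
		write_dig(backup_gain);		/* restore rx gain */
	} else {
		backup_gain = cur_gain;		/* save, then force the scan gain */
		write_dig(gain);
	}
}

int main(void)
{
	set_initial_gain(0x1e);			/* start of scan */
	set_initial_gain(0xff);			/* scan complete, restore */
	return 0;
}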
@@ -5877,8 +5781,8 @@ void site_survey(struct adapter *padapter)
                        rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_LISTEN);
                        pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
 
-                       initialgain = 0xff; /* restore RX GAIN */
-                       SetHwReg8188EU(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+                       /* restore RX GAIN */
+                       rtw_set_initial_gain(padapter, 0xff);
                        /* turn on dynamic functions */
                        Restore_DM_Func_Flag(padapter);
                        /* Switch_DM_Func(padapter, DYNAMIC_FUNC_DIG|DYNAMIC_FUNC_HP|DYNAMIC_FUNC_SS, true); */
@@ -5911,8 +5815,8 @@ void site_survey(struct adapter *padapter)
                        /* config MSR */
                        Set_MSR(padapter, (pmlmeinfo->state & 0x3));
 
-                       initialgain = 0xff; /* restore RX GAIN */
-                       SetHwReg8188EU(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+                       /* restore RX GAIN */
+                       rtw_set_initial_gain(padapter, 0xff);
                        /* turn on dynamic functions */
                        Restore_DM_Func_Flag(padapter);
                        /* Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true); */
@@ -5950,7 +5854,7 @@ u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, st
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
        __le32 le32_tmp;
 
-       len = packet_len - sizeof(struct rtw_ieee80211_hdr_3addr);
+       len = packet_len - sizeof(struct ieee80211_hdr_3addr);
 
        if (len > MAX_IE_SZ)
                return _FAIL;
@@ -5980,13 +5884,13 @@ u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, st
 
        /* below is to copy the information element */
        bssid->IELength = len;
-       memcpy(bssid->IEs, (pframe + sizeof(struct rtw_ieee80211_hdr_3addr)), bssid->IELength);
+       memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
 
        /* get the signal strength */
        bssid->Rssi = precv_frame->attrib.phy_info.recvpower; /*  in dBM.raw data */
        bssid->PhyInfo.SignalQuality = precv_frame->attrib.phy_info.SignalQuality;/* in percentage */
        bssid->PhyInfo.SignalStrength = precv_frame->attrib.phy_info.SignalStrength;/* in percentage */
-       GetHalDefVar8188EUsb(padapter, HAL_DEF_CURRENT_ANTENNA,  &bssid->PhyInfo.Optimum_antenna);
+       bssid->PhyInfo.Optimum_antenna = rtw_current_antenna(padapter);
 
        /*  checking SSID */
        p = rtw_get_ie(bssid->IEs + ie_offset, _SSID_IE_, &len, bssid->IELength - ie_offset);
@@ -6087,10 +5991,58 @@ u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, st
        return _SUCCESS;
 }
 
+static void rtw_set_bssid(struct adapter *adapter, u8 *bssid)
+{
+       int i;
+
+       for (i = 0; i < ETH_ALEN; i++)
+               rtw_write8(adapter, REG_BSSID + i, bssid[i]);
+}
+
+static void mlme_join(struct adapter *adapter, int type)
+{
+       struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+       u8 retry_limit = 0x30;
+
+       switch (type) {
+       case 0:
+               /* prepare to join */
+               /* enable to rx data frame, accept all data frame */
+               rtw_write16(adapter, REG_RXFLTMAP2, 0xFFFF);
+
+               rtw_write32(adapter, REG_RCR,
+                           rtw_read32(adapter, REG_RCR) | RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+
+               if (check_fwstate(mlmepriv, WIFI_STATION_STATE)) {
+                       retry_limit = 48;
+               } else {
+                       /* ad-hoc mode */
+                       retry_limit = 0x7;
+               }
+               break;
+       case 1:
+               /* joinbss_event call back when join res < 0 */
+               rtw_write16(adapter, REG_RXFLTMAP2, 0x00);
+               break;
+       case 2:
+               /* sta add event call back */
+               /* enable update TSF */
+               rtw_write8(adapter, REG_BCN_CTRL, rtw_read8(adapter, REG_BCN_CTRL) & (~BIT(4)));
+
+               if (check_fwstate(mlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))
+                       retry_limit = 0x7;
+               break;
+       default:
+               break;
+       }
+
+       rtw_write16(adapter, REG_RL,
+                   retry_limit << RETRY_LIMIT_SHORT_SHIFT | retry_limit << RETRY_LIMIT_LONG_SHIFT);
+}
+
 void start_create_ibss(struct adapter *padapter)
 {
        unsigned short  caps;
-       u8 join_type;
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
        struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&pmlmeinfo->network);
@@ -6121,9 +6073,8 @@ void start_create_ibss(struct adapter *padapter)
                        report_join_res(padapter, -1);
                        pmlmeinfo->state = WIFI_FW_NULL_STATE;
                } else {
-                       SetHwReg8188EU(padapter, HW_VAR_BSSID, padapter->registrypriv.dev_network.MacAddress);
-                       join_type = 0;
-                       SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+                       rtw_set_bssid(padapter, padapter->registrypriv.dev_network.MacAddress);
+                       mlme_join(padapter, 0);
 
                        report_join_res(padapter, 1);
                        pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
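mlme_join() above ends by packing the chosen retry limit (48 for station joins, 7 for ad-hoc) into a single 16-bit REG_RL write. A sketch of that packing; the shift constants below are assumed for illustration, only the limit values and the single-register write come from the hunk:

#include <stdint.h>
#include <stdio.h>

#define RETRY_LIMIT_SHORT_SHIFT 8	/* assumed bit positions */
#define RETRY_LIMIT_LONG_SHIFT  0

static uint16_t pack_retry_limit(uint8_t limit)
{
	/* same limit programmed for both short and long retries, as in mlme_join() */
	return (uint16_t)(limit << RETRY_LIMIT_SHORT_SHIFT |
			  limit << RETRY_LIMIT_LONG_SHIFT);
}

int main(void)
{
	printf("station: 0x%04x, adhoc: 0x%04x\n",
	       pack_retry_limit(48), pack_retry_limit(0x7));
	return 0;
}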
@@ -6421,7 +6372,7 @@ void report_survey_event(struct adapter *padapter, struct recv_frame *precv_fram
        pmlmeext = &padapter->mlmeextpriv;
        pcmdpriv = &padapter->cmdpriv;
 
-       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+       pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC);
        if (!pcmd_obj)
                return;
 
@@ -6471,7 +6422,7 @@ void report_surveydone_event(struct adapter *padapter)
        struct mlme_ext_priv            *pmlmeext = &padapter->mlmeextpriv;
        struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
 
-       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_KERNEL);
        if (!pcmd_obj)
                return;
 
@@ -6513,7 +6464,7 @@ void report_join_res(struct adapter *padapter, int res)
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
        struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
 
-       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+       pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC);
        if (!pcmd_obj)
                return;
 
@@ -6610,7 +6561,7 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
        struct mlme_ext_priv            *pmlmeext = &padapter->mlmeextpriv;
        struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
 
-       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_KERNEL);
        if (!pcmd_obj)
                return;
 
@@ -6696,13 +6647,11 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
        struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
        struct sta_priv         *pstapriv = &padapter->stapriv;
-       u8 join_type;
        u16 media_status;
 
        if (join_res < 0) {
-               join_type = 1;
-               SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
-               SetHwReg8188EU(padapter, HW_VAR_BSSID, null_addr);
+               mlme_join(padapter, 1);
+               rtw_set_bssid(padapter, null_addr);
 
                /* restore to initial setting. */
                update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
@@ -6721,7 +6670,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
        }
 
        /* turn on dynamic functions */
-       Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
+       SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_RESET, NULL);
 
        /*  update IOT-releated issue */
        update_IOT_info(padapter);
@@ -6750,13 +6699,13 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
 
                /* set per sta rate after updating HT cap. */
                set_sta_rate(padapter, psta);
-               SetHwReg8188EU(padapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&psta->mac_id);
+               rtw_set_max_rpt_macid(padapter, psta->mac_id);
+
                media_status = (psta->mac_id << 8) | 1; /*   MACID|OPMODE: 1 means connect */
                SetHwReg8188EU(padapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
        }
 
-       join_type = 2;
-       SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+       mlme_join(padapter, 2);
 
        if ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) {
                /*  correcting TSF */
@@ -6769,7 +6718,6 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *p
 {
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
-       u8 join_type;
 
        if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
                if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {/* adhoc master or sta_count>1 */
@@ -6786,9 +6734,7 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *p
                        }
                        pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
                }
-
-               join_type = 2;
-               SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+               mlme_join(padapter, 2);
        }
 
        pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
@@ -6800,14 +6746,27 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *p
        update_sta_info(padapter, psta);
 }
 
+static void mlme_disconnect(struct adapter *adapter)
+{
+       /* Set RCR to not to receive data frame when NO LINK state */
+       /* reject all data frames */
+       rtw_write16(adapter, REG_RXFLTMAP2, 0x00);
+
+       /* reset TSF */
+       rtw_write8(adapter, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+
+       /* disable update TSF */
+       rtw_write8(adapter, REG_BCN_CTRL, rtw_read8(adapter, REG_BCN_CTRL) | BIT(4));
+}
+
 void mlmeext_sta_del_event_callback(struct adapter *padapter)
 {
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
 
        if (is_client_associated_to_ap(padapter) || is_IBSS_empty(padapter)) {
-               SetHwReg8188EU(padapter, HW_VAR_MLME_DISCONNECT, NULL);
-               SetHwReg8188EU(padapter, HW_VAR_BSSID, null_addr);
+               mlme_disconnect(padapter);
+               rtw_set_bssid(padapter, null_addr);
 
                /* restore to initial setting. */
                update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
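mlme_disconnect() and the join path both poke BIT(4) of REG_BCN_CTRL with a read-modify-write, setting it to stop TSF updates on disconnect and clearing it again when a station entry is added. The pattern in isolation, with the register replaced by a plain variable:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static uint8_t bcn_ctrl;			/* stands in for REG_BCN_CTRL */

static uint8_t read8(void)       { return bcn_ctrl; }
static void    write8(uint8_t v) { bcn_ctrl = v; }

int main(void)
{
	write8(read8() | BIT(4));		/* disconnect: disable TSF updates */
	printf("after disconnect: 0x%02x\n", read8());

	write8(read8() & ~BIT(4));		/* join type 2: re-enable TSF updates */
	printf("after join:       0x%02x\n", read8());
	return 0;
}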
@@ -6951,7 +6910,7 @@ void linked_status_chk(struct adapter *padapter)
                        if (pmlmeinfo->FW_sta_info[i].status == 1) {
                                psta = pmlmeinfo->FW_sta_info[i].psta;
 
-                               if (NULL == psta)
+                               if (psta == NULL)
                                        continue;
                                if (pmlmeinfo->FW_sta_info[i].rx_pkt == sta_rx_pkts(psta)) {
                                        if (pmlmeinfo->FW_sta_info[i].retry < 3) {
@@ -6996,11 +6955,11 @@ void survey_timer_hdl(struct adapter *padapter)
                        pmlmeext->scan_abort = false;/* reset */
                }
 
-               ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+               ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
                if (!ph2c)
                        goto exit_survey_timer_hdl;
 
-               psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
+               psurveyPara = kzalloc(sizeof(*psurveyPara), GFP_ATOMIC);
                if (!psurveyPara) {
                        kfree(ph2c);
                        goto exit_survey_timer_hdl;
@@ -7122,7 +7081,7 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
 
                /* disable dynamic functions, such as high power, DIG */
                Save_DM_Func_Flag(padapter);
-               Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
+               SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_CLR, NULL);
 
                /* cancel link timer */
                _cancel_timer_ex(&pmlmeext->link_timer);
@@ -7146,7 +7105,6 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
 
 u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
 {
-       u8 join_type;
        struct ndis_802_11_var_ie *pIE;
        struct registry_priv    *pregpriv = &padapter->registrypriv;
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
@@ -7170,7 +7128,7 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
                /* set MSR to nolink -> infra. mode */
                Set_MSR(padapter, _HW_STATE_STATION_);
 
-               SetHwReg8188EU(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+               mlme_disconnect(padapter);
        }
 
        rtw_antenna_select_cmd(padapter, pparm->network.PhyInfo.Optimum_antenna, false);
@@ -7243,9 +7201,8 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
 
        /* config the initial gain under linking, need to write the BB registers */
 
-       SetHwReg8188EU(padapter, HW_VAR_BSSID, pmlmeinfo->network.MacAddress);
-       join_type = 0;
-       SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+       rtw_set_bssid(padapter, pmlmeinfo->network.MacAddress);
+       mlme_join(padapter, 0);
 
        /* cancel link timer */
        _cancel_timer_ex(&pmlmeext->link_timer);
@@ -7266,8 +7223,8 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
        if (is_client_associated_to_ap(padapter))
                issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms / 100, 100);
 
-       SetHwReg8188EU(padapter, HW_VAR_MLME_DISCONNECT, NULL);
-       SetHwReg8188EU(padapter, HW_VAR_BSSID, null_addr);
+       mlme_disconnect(padapter);
+       rtw_set_bssid(padapter, null_addr);
 
        /* restore to initial setting. */
        update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
@@ -7346,7 +7303,6 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
        struct sitesurvey_parm  *pparm = (struct sitesurvey_parm *)pbuf;
        u8 bdelayscan = false;
        u8 val8;
-       u32     initialgain;
        u32     i;
        struct wifidirect_info *pwdinfo = &padapter->wdinfo;
 
@@ -7391,15 +7347,14 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
        if ((pmlmeext->sitesurvey_res.state == SCAN_START) || (pmlmeext->sitesurvey_res.state == SCAN_TXNULL)) {
                /* disable dynamic functions, such as high power, DIG */
                Save_DM_Func_Flag(padapter);
-               Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
+               SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_CLR, NULL);
 
                /* config the initial gain under scanning, need to write the BB registers */
                if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
-                       initialgain = 0x1E;
+                       rtw_set_initial_gain(padapter, 0x1e);
                else
-                       initialgain = 0x28;
+                       rtw_set_initial_gain(padapter, 0x28);
 
-               SetHwReg8188EU(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
 
                /* set MSR to no link state */
                Set_MSR(padapter, _HW_STATE_NOLINK_);
@@ -7538,13 +7493,13 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
        u8 res = _SUCCESS;
        int len_diff = 0;
 
-       ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
+       ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
        if (!ph2c) {
                res = _FAIL;
                goto exit;
        }
 
-       ptxBeacon_parm = kzalloc(sizeof(struct Tx_Beacon_param), GFP_ATOMIC);
+       ptxBeacon_parm = kzalloc(sizeof(*ptxBeacon_parm), GFP_ATOMIC);
        if (!ptxBeacon_parm) {
                kfree(ph2c);
                res = _FAIL;
index 48500fb..beffe5b 100644 (file)
@@ -111,7 +111,7 @@ static void issue_group_disc_req(struct wifidirect_info *pwdinfo, u8 *da)
        struct xmit_frame                       *pmgntframe;
        struct pkt_attrib                       *pattrib;
        unsigned char                                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        struct adapter *padapter = pwdinfo->padapter;
        struct xmit_priv                        *pxmitpriv = &padapter->xmitpriv;
@@ -132,9 +132,9 @@ static void issue_group_disc_req(struct wifidirect_info *pwdinfo, u8 *da)
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        memcpy(pwlanhdr->addr1, da, ETH_ALEN);
@@ -145,8 +145,8 @@ static void issue_group_disc_req(struct wifidirect_info *pwdinfo, u8 *da)
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_ACTION);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        /* Build P2P action frame header */
        pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
@@ -166,12 +166,12 @@ static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 s
        struct xmit_frame                       *pmgntframe;
        struct pkt_attrib                       *pattrib;
        unsigned char                                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        struct adapter *padapter = pwdinfo->padapter;
        struct xmit_priv                        *pxmitpriv = &padapter->xmitpriv;
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
-       unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+       unsigned char category = WLAN_CATEGORY_PUBLIC;
        u8                      action = P2P_PUB_ACTION_ACTION;
        __be32                  p2poui = cpu_to_be32(P2POUI);
        u8                      oui_subtype = P2P_DEVDISC_RESP;
@@ -189,9 +189,9 @@ static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 s
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        memcpy(pwlanhdr->addr1, da, ETH_ALEN);
@@ -202,8 +202,8 @@ static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 s
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_ACTION);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        /* Build P2P public action frame header */
        pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
@@ -233,7 +233,7 @@ static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 s
 static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr, u8 *frame_body, u16 config_method)
 {
        struct adapter *padapter = pwdinfo->padapter;
-       unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+       unsigned char category = WLAN_CATEGORY_PUBLIC;
        u8                      action = P2P_PUB_ACTION_ACTION;
        u8                      dialogToken = frame_body[7];    /*      The Dialog Token of provisioning discovery request frame. */
        __be32                  p2poui = cpu_to_be32(P2POUI);
@@ -243,7 +243,7 @@ static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr,
        struct xmit_frame                       *pmgntframe;
        struct pkt_attrib                       *pattrib;
        unsigned char                                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        struct xmit_priv                        *pxmitpriv = &padapter->xmitpriv;
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
@@ -259,9 +259,9 @@ static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr,
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
@@ -272,8 +272,8 @@ static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr,
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_ACTION);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
        pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
@@ -311,7 +311,7 @@ static void issue_p2p_presence_resp(struct wifidirect_info *pwdinfo, u8 *da, u8
        struct xmit_frame                       *pmgntframe;
        struct pkt_attrib                       *pattrib;
        unsigned char                                   *pframe;
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        struct adapter *padapter = pwdinfo->padapter;
        struct xmit_priv                        *pxmitpriv = &padapter->xmitpriv;
@@ -334,9 +334,9 @@ static void issue_p2p_presence_resp(struct wifidirect_info *pwdinfo, u8 *da, u8
        memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
 
        pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        memcpy(pwlanhdr->addr1, da, ETH_ALEN);
@@ -347,8 +347,8 @@ static void issue_p2p_presence_resp(struct wifidirect_info *pwdinfo, u8 *da, u8
        pmlmeext->mgnt_seq++;
        SetFrameSubType(pframe, WIFI_ACTION);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        /* Build P2P action frame header */
        pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
@@ -872,7 +872,7 @@ u32 process_assoc_req_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pframe, uint l
                                }
 
                                psta->dev_name_len = 0;
-                               if (WPS_ATTR_DEVICE_NAME == be16_to_cpu(*(__be16 *)pattr_content)) {
+                               if (be16_to_cpu(*(__be16 *)pattr_content) == WPS_ATTR_DEVICE_NAME) {
                                        dev_name_len = be16_to_cpu(*(__be16 *)(pattr_content + 2));
 
                                        psta->dev_name_len = (sizeof(psta->dev_name) < dev_name_len) ? sizeof(psta->dev_name) : dev_name_len;
@@ -900,7 +900,7 @@ u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
        u8 *p2p_ie;
        u32     p2p_ielen = 0;
 
-       frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+       frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
 
        dialogToken = frame_body[7];
        status = P2P_STATUS_FAIL_UNKNOWN_P2PGROUP;
@@ -951,7 +951,7 @@ u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
        /* issue Device Discoverability Response */
        issue_p2p_devdisc_resp(pwdinfo, GetAddr2Ptr(pframe), status, dialogToken);
 
-       return (status == P2P_STATUS_SUCCESS) ? true : false;
+       return status == P2P_STATUS_SUCCESS;
 }
 
 u32 process_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
@@ -967,7 +967,7 @@ u8 process_p2p_provdisc_req(struct wifidirect_info *pwdinfo,  u8 *pframe, uint l
        u16     uconfig_method = 0;
        __be16 be_tmp;
 
-       frame_body = (pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+       frame_body = (pframe + sizeof(struct ieee80211_hdr_3addr));
 
        wpsie = rtw_get_wps_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &wps_ielen);
        if (wpsie) {
@@ -1213,7 +1213,7 @@ u8 process_p2p_group_negotation_resp(struct wifidirect_info *pwdinfo, u8 *pframe
                                if (attr_content == P2P_STATUS_SUCCESS) {
                                        /*      Do nothing. */
                                } else {
-                                       if (P2P_STATUS_FAIL_INFO_UNAVAILABLE == attr_content) {
+                                       if (attr_content == P2P_STATUS_FAIL_INFO_UNAVAILABLE) {
                                                rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INFOR_NOREADY);
                                        } else {
                                                rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
@@ -1401,7 +1401,7 @@ u8 process_p2p_presence_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
        u8 dialogToken = 0;
        u8 status = P2P_STATUS_SUCCESS;
 
-       frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+       frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
 
        dialogToken = frame_body[6];
 
@@ -1602,7 +1602,7 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
        case P2P_PS_DISABLE:
                pwdinfo->p2p_ps_state = p2p_ps_state;
 
-               SetHwReg8188EU(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+               rtl8188e_set_p2p_ps_offload_cmd(padapter, p2p_ps_state);
 
                pwdinfo->noa_index = 0;
                pwdinfo->ctwindow = 0;
@@ -1612,7 +1612,7 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
                if (padapter->pwrctrlpriv.bFwCurrentInPSMode) {
                        if (pwrpriv->smart_ps == 0) {
                                pwrpriv->smart_ps = 2;
-                               SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&padapter->pwrctrlpriv.pwr_mode));
+                               rtw_set_firmware_ps_mode(padapter, pwrpriv->pwr_mode);
                        }
                }
                break;
@@ -1623,10 +1623,10 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
                        if (pwdinfo->ctwindow > 0) {
                                if (pwrpriv->smart_ps != 0) {
                                        pwrpriv->smart_ps = 0;
-                                       SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&padapter->pwrctrlpriv.pwr_mode));
+                                       rtw_set_firmware_ps_mode(padapter, pwrpriv->pwr_mode);
                                }
                        }
-                       SetHwReg8188EU(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+                       rtl8188e_set_p2p_ps_offload_cmd(padapter, p2p_ps_state);
                }
                break;
        case P2P_PS_SCAN:
@@ -1634,7 +1634,7 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
        case P2P_PS_ALLSTASLEEP:
                if (pwdinfo->p2p_ps_mode > P2P_PS_NONE) {
                        pwdinfo->p2p_ps_state = p2p_ps_state;
-                       SetHwReg8188EU(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+                       rtl8188e_set_p2p_ps_offload_cmd(padapter, p2p_ps_state);
                }
                break;
        default:
@@ -1891,7 +1891,7 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
 
        if (role == P2P_ROLE_DEVICE || role == P2P_ROLE_CLIENT || role == P2P_ROLE_GO) {
                /* leave IPS/Autosuspend */
-               if (_FAIL == rtw_pwr_wakeup(padapter)) {
+               if (rtw_pwr_wakeup(padapter) == _FAIL) {
                        ret = _FAIL;
                        goto exit;
                }
@@ -1905,7 +1905,7 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
                init_wifidirect_info(padapter, role);
 
        } else if (role == P2P_ROLE_DISABLE) {
-               if (_FAIL == rtw_pwr_wakeup(padapter)) {
+               if (rtw_pwr_wakeup(padapter) == _FAIL) {
                        ret = _FAIL;
                        goto exit;
                }
index 7beabf8..7b816b8 100644 (file)
@@ -59,7 +59,7 @@ int ips_leave(struct adapter *padapter)
                        pwrpriv->rf_pwrstate = rf_on;
                }
 
-               if ((_WEP40_ == psecuritypriv->dot11PrivacyAlgrthm) || (_WEP104_ == psecuritypriv->dot11PrivacyAlgrthm)) {
+               if ((psecuritypriv->dot11PrivacyAlgrthm == _WEP40_) || (psecuritypriv->dot11PrivacyAlgrthm == _WEP104_)) {
                        set_channel_bwmode(padapter, padapter->mlmeextpriv.cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
                        for (keyid = 0; keyid < 4; keyid++) {
                                if (pmlmepriv->key_mask & BIT(keyid)) {
@@ -133,9 +133,8 @@ void rtw_ps_processor(struct adapter *padapter)
        if (!rtw_pwr_unassociated_idle(padapter))
                goto exit;
 
-       if ((pwrpriv->rf_pwrstate == rf_on) && ((pwrpriv->pwr_state_check_cnts % 4) == 0)) {
+       if (pwrpriv->rf_pwrstate == rf_on) {
                pwrpriv->change_rfpwrstate = rf_off;
-
                ips_enter(padapter);
        }
 exit:
@@ -177,6 +176,19 @@ static bool PS_RDY_CHECK(struct adapter *padapter)
        return true;
 }
 
+void rtw_set_firmware_ps_mode(struct adapter *adapter, u8 mode)
+{
+       struct hal_data_8188e *haldata = &adapter->haldata;
+       struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+
+       /* Force leaving RF low power mode for 1T1R to prevent
+        * a conflicting setting in the firmware power saving sequence.
+        */
+       if (mode != PS_MODE_ACTIVE)
+               ODM_RF_Saving(odmpriv, true);
+       rtl8188e_set_FwPwrMode_cmd(adapter, mode);
+}
+
 void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_ant_mode)
 {
        struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
@@ -186,7 +198,7 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
                return;
 
        if (pwrpriv->pwr_mode == ps_mode) {
-               if (PS_MODE_ACTIVE == ps_mode)
+               if (ps_mode == PS_MODE_ACTIVE)
                        return;
 
                if ((pwrpriv->smart_ps == smart_ps) &&
@@ -194,11 +206,10 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
                        return;
        }
 
-       /* if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) */
        if (ps_mode == PS_MODE_ACTIVE) {
                if (pwdinfo->opp_ps == 0) {
                        pwrpriv->pwr_mode = ps_mode;
-                       SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+                       rtw_set_firmware_ps_mode(padapter, ps_mode);
                        pwrpriv->bFwCurrentInPSMode = false;
                }
        } else {
@@ -207,14 +218,28 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
                        pwrpriv->pwr_mode = ps_mode;
                        pwrpriv->smart_ps = smart_ps;
                        pwrpriv->bcn_ant_mode = bcn_ant_mode;
-                       SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+                       rtw_set_firmware_ps_mode(padapter, ps_mode);
 
                        /*  Set CTWindow after LPS */
                        if (pwdinfo->opp_ps == 1)
                                p2p_ps_wk_cmd(padapter, P2P_PS_ENABLE, 0);
                }
        }
+}
 
+static bool lps_rf_on(struct adapter *adapter)
+{
+       /* When we halt the NIC, we should check whether FW LPS has been left. */
+       if (adapter->pwrctrlpriv.rf_pwrstate == rf_off) {
+       /*  If it is in HW/SW Radio OFF or IPS state, we do not check FW LPS leave, */
+       /*  because the FW is unloaded. */
+               return true;
+       }
+
+       if (rtw_read32(adapter, REG_RCR) & 0x00070000)
+               return false;
+
+       return true;
 }
 
 /*
@@ -223,16 +248,13 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
  *     -1:     Timeout
  *     -2:     Other error
  */
-s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
+static s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
 {
-       u32 start_time;
-       u8 bAwake = false;
+       unsigned long timeout = jiffies + msecs_to_jiffies(delay_ms);
        s32 err = 0;
 
-       start_time = jiffies;
        while (1) {
-               GetHwReg8188EU(padapter, HW_VAR_FWLPS_RF_ON, &bAwake);
-               if (bAwake)
+               if (lps_rf_on(padapter))
                        break;
 
                if (padapter->bSurpriseRemoved) {
@@ -240,7 +262,7 @@ s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
                        break;
                }
 
-               if (rtw_get_passing_time_ms(start_time) > delay_ms) {
+               if (time_after(jiffies, timeout)) {
                        err = -1;
                        break;
                }
@@ -329,13 +351,12 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
        pwrctrlpriv->ips_mode_req = padapter->registrypriv.ips_mode;
 
        pwrctrlpriv->pwr_state_check_interval = RTW_PWR_STATE_CHK_INTERVAL;
-       pwrctrlpriv->pwr_state_check_cnts = 0;
        pwrctrlpriv->bInSuspend = false;
        pwrctrlpriv->bkeepfwalive = false;
 
        pwrctrlpriv->LpsIdleCount = 0;
        pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/*  PS_MODE_MIN; */
-       pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
+       pwrctrlpriv->bLeisurePs = pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE;
 
        pwrctrlpriv->bFwCurrentInPSMode = false;
 
@@ -346,58 +367,38 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
        timer_setup(&pwrctrlpriv->pwr_state_check_timer, pwr_state_check_handler, 0);
 }
 
-/*
-* rtw_pwr_wakeup - Wake the NIC up from: 1)IPS. 2)USB autosuspend
-* @adapter: pointer to struct adapter structure
-* @ips_deffer_ms: the ms wiil prevent from falling into IPS after wakeup
-* Return _SUCCESS or _FAIL
-*/
-
-int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *caller)
+/* Wake the NIC up from: 1)IPS 2)USB autosuspend */
+int rtw_pwr_wakeup(struct adapter *padapter)
 {
        struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+       unsigned long timeout = jiffies + msecs_to_jiffies(3000);
+       unsigned long deny_time;
        int ret = _SUCCESS;
-       u32 start = jiffies;
-
-       if (pwrpriv->ips_deny_time < jiffies + rtw_ms_to_systime(ips_deffer_ms))
-               pwrpriv->ips_deny_time = jiffies + rtw_ms_to_systime(ips_deffer_ms);
-
-       if (pwrpriv->ps_processing) {
-               while (pwrpriv->ps_processing && rtw_get_passing_time_ms(start) <= 3000)
-                       msleep(10);
-       }
 
-       /* System suspend is not allowed to wakeup */
-       if (pwrpriv->bInSuspend) {
-               while (pwrpriv->bInSuspend &&
-                      (rtw_get_passing_time_ms(start) <= 3000 ||
-                      (rtw_get_passing_time_ms(start) <= 500)))
-                               msleep(10);
-       }
+       while (pwrpriv->ps_processing && time_before(jiffies, timeout))
+               msleep(10);
 
        /* I think this should be check in IPS, LPS, autosuspend functions... */
        if (check_fwstate(pmlmepriv, _FW_LINKED)) {
                ret = _SUCCESS;
                goto exit;
        }
-       if (rf_off == pwrpriv->rf_pwrstate) {
-               if (_FAIL ==  ips_leave(padapter)) {
-                       ret = _FAIL;
-                       goto exit;
-               }
+
+       if (pwrpriv->rf_pwrstate == rf_off && ips_leave(padapter) == _FAIL) {
+               ret = _FAIL;
+               goto exit;
        }
 
-       /* TODO: the following checking need to be merged... */
-       if (padapter->bDriverStopped || !padapter->bup ||
-           !padapter->hw_init_completed) {
-               ret = false;
+       if (padapter->bDriverStopped || !padapter->bup || !padapter->hw_init_completed) {
+               ret = _FAIL;
                goto exit;
        }
 
 exit:
-       if (pwrpriv->ips_deny_time < jiffies + rtw_ms_to_systime(ips_deffer_ms))
-               pwrpriv->ips_deny_time = jiffies + rtw_ms_to_systime(ips_deffer_ms);
+       deny_time = jiffies + msecs_to_jiffies(RTW_PWR_STATE_CHK_INTERVAL);
+       if (time_before(pwrpriv->ips_deny_time, deny_time))
+               pwrpriv->ips_deny_time = deny_time;
        return ret;
 }
 
@@ -408,12 +409,12 @@ int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
 
        if (mode < PS_MODE_NUM) {
                if (pwrctrlpriv->power_mgnt != mode) {
-                       if (PS_MODE_ACTIVE == mode)
+                       if (mode == PS_MODE_ACTIVE)
                                LeaveAllPowerSaveMode(padapter);
                        else
                                pwrctrlpriv->LpsIdleCount = 2;
                        pwrctrlpriv->power_mgnt = mode;
-                       pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
+                       pwrctrlpriv->bLeisurePs = pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE;
                }
        } else {
                ret = -EINVAL;
@@ -431,7 +432,7 @@ int rtw_pm_set_ips(struct adapter *padapter, u8 mode)
                return 0;
        } else if (mode == IPS_NONE) {
                rtw_ips_mode_req(pwrctrlpriv, mode);
-               if ((padapter->bSurpriseRemoved == 0) && (_FAIL == rtw_pwr_wakeup(padapter)))
+               if ((padapter->bSurpriseRemoved == 0) && (rtw_pwr_wakeup(padapter) == _FAIL))
                        return -EFAULT;
        } else {
                return -EINVAL;
index 8800ea4..df51843 100644 (file)
@@ -71,7 +71,6 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
 
                list_add_tail(&precvframe->list, &precvpriv->free_recv_queue.queue);
 
-               precvframe->pkt_newalloc = NULL;
                precvframe->pkt = NULL;
 
                precvframe->len = 0;
@@ -81,8 +80,6 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
        }
        precvpriv->rx_pending_cnt = 1;
 
-       sema_init(&precvpriv->allrxreturnevt, 0);
-
        res = rtl8188eu_init_recv_priv(padapter);
 
        timer_setup(&precvpriv->signal_stat_timer, rtw_signal_stat_timer_hdl, 0);
@@ -749,6 +746,7 @@ static int sta2ap_data_frame(struct adapter *adapter,
        struct  sta_priv *pstapriv = &adapter->stapriv;
        struct  mlme_priv *pmlmepriv = &adapter->mlmepriv;
        u8 *ptr = precv_frame->rx_data;
+       __le16 fc = *(__le16 *)ptr;
        unsigned char *mybssid  = get_bssid(pmlmepriv);
        int ret = _SUCCESS;
 
@@ -769,9 +767,8 @@ static int sta2ap_data_frame(struct adapter *adapter,
 
                process_pwrbit_data(adapter, precv_frame);
 
-               if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) {
+               if (ieee80211_is_data_qos(fc))
                        process_wmmps_data(adapter, precv_frame);
-               }
 
                if (GetFrameSubType(ptr) & BIT(6)) {
                        /* No data, will not indicate to upper layer, temporily count it here */
@@ -795,143 +792,135 @@ exit:
        return ret;
 }
 
-static int validate_recv_ctrl_frame(struct adapter *padapter,
-                                   struct recv_frame *precv_frame)
+static void validate_recv_ctrl_frame(struct adapter *padapter,
+                                    struct recv_frame *precv_frame)
 {
        struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
        struct sta_priv *pstapriv = &padapter->stapriv;
-       u8 *pframe = precv_frame->rx_data;
-       /* uint len = precv_frame->len; */
-
-       if (GetFrameType(pframe) != WIFI_CTRL_TYPE)
-               return _FAIL;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
+       struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)hdr;
+       u8 wmmps_ac;
+       struct sta_info *psta;
 
        /* receive the frames that ra(a1) is my address */
-       if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN))
-               return _FAIL;
+       if (memcmp(hdr->addr1, myid(&padapter->eeprompriv), ETH_ALEN))
+               return;
 
        /* only handle ps-poll */
-       if (GetFrameSubType(pframe) == WIFI_PSPOLL) {
-               u16 aid;
-               u8 wmmps_ac = 0;
-               struct sta_info *psta = NULL;
+       if (!ieee80211_is_pspoll(hdr->frame_control))
+               return;
 
-               aid = GetAid(pframe);
-               psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+       psta = rtw_get_stainfo(pstapriv, hdr->addr2);
+       if (!psta || psta->aid != (le16_to_cpu(pspoll->aid) & 0x3FFF))
+               return;
 
-               if (!psta || psta->aid != aid)
-                       return _FAIL;
+       /* for rx pkt statistics */
+       psta->sta_stats.rx_ctrl_pkts++;
 
-               /* for rx pkt statistics */
-               psta->sta_stats.rx_ctrl_pkts++;
+       switch (pattrib->priority) {
+       case 1:
+       case 2:
+               wmmps_ac = psta->uapsd_bk & BIT(0);
+               break;
+       case 4:
+       case 5:
+               wmmps_ac = psta->uapsd_vi & BIT(0);
+               break;
+       case 6:
+       case 7:
+               wmmps_ac = psta->uapsd_vo & BIT(0);
+               break;
+       case 0:
+       case 3:
+       default:
+               wmmps_ac = psta->uapsd_be & BIT(0);
+               break;
+       }
 
-               switch (pattrib->priority) {
-               case 1:
-               case 2:
-                       wmmps_ac = psta->uapsd_bk & BIT(0);
-                       break;
-               case 4:
-               case 5:
-                       wmmps_ac = psta->uapsd_vi & BIT(0);
-                       break;
-               case 6:
-               case 7:
-                       wmmps_ac = psta->uapsd_vo & BIT(0);
-                       break;
-               case 0:
-               case 3:
-               default:
-                       wmmps_ac = psta->uapsd_be & BIT(0);
-                       break;
-               }
+       if (wmmps_ac)
+               return;
 
-               if (wmmps_ac)
-                       return _FAIL;
+       if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
+               psta->expire_to = pstapriv->expire_to;
+               psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
+       }
 
-               if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
-                       psta->expire_to = pstapriv->expire_to;
-                       psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
-               }
+       if ((psta->state & WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap & BIT(psta->aid))) {
+               struct list_head *xmitframe_plist, *xmitframe_phead;
+               struct xmit_frame *pxmitframe = NULL;
+               struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
 
-               if ((psta->state & WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap & BIT(psta->aid))) {
-                       struct list_head *xmitframe_plist, *xmitframe_phead;
-                       struct xmit_frame *pxmitframe = NULL;
-                       struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+               spin_lock_bh(&pxmitpriv->lock);
 
-                       spin_lock_bh(&pxmitpriv->lock);
+               xmitframe_phead = get_list_head(&psta->sleep_q);
+               xmitframe_plist = xmitframe_phead->next;
 
-                       xmitframe_phead = get_list_head(&psta->sleep_q);
-                       xmitframe_plist = xmitframe_phead->next;
+               if (xmitframe_phead != xmitframe_plist) {
+                       pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
 
-                       if (xmitframe_phead != xmitframe_plist) {
-                               pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
+                       xmitframe_plist = xmitframe_plist->next;
 
-                               xmitframe_plist = xmitframe_plist->next;
+                       list_del_init(&pxmitframe->list);
 
-                               list_del_init(&pxmitframe->list);
+                       psta->sleepq_len--;
 
-                               psta->sleepq_len--;
+                       if (psta->sleepq_len > 0)
+                               pxmitframe->attrib.mdata = 1;
+                       else
+                               pxmitframe->attrib.mdata = 0;
 
-                               if (psta->sleepq_len > 0)
-                                       pxmitframe->attrib.mdata = 1;
-                               else
-                                       pxmitframe->attrib.mdata = 0;
+                       pxmitframe->attrib.triggered = 1;
 
-                               pxmitframe->attrib.triggered = 1;
+                       if (psta->sleepq_len == 0) {
+                               pstapriv->tim_bitmap &= ~BIT(psta->aid);
 
-                               if (psta->sleepq_len == 0) {
-                                       pstapriv->tim_bitmap &= ~BIT(psta->aid);
+                               /* update BCN for TIM IE */
+                               /* update_BCNTIM(padapter); */
+                               update_beacon(padapter, _TIM_IE_, NULL, false);
+                       }
+               } else {
+                       if (pstapriv->tim_bitmap & BIT(psta->aid)) {
+                               if (psta->sleepq_len == 0)
+                                       /* issue nulldata with More data bit = 0 to indicate we have no buffered packets */
+                                       issue_nulldata(padapter, psta->hwaddr, 0, 0, 0);
+                               else
+                                       psta->sleepq_len = 0;
 
-                                       /* upate BCN for TIM IE */
-                                       /* update_BCNTIM(padapter); */
-                                       update_beacon(padapter, _TIM_IE_, NULL, false);
-                               }
-                       } else {
-                               if (pstapriv->tim_bitmap & BIT(psta->aid)) {
-                                       if (psta->sleepq_len == 0)
-                                               /* issue nulldata with More data bit = 0 to indicate we have no buffered packets */
-                                               issue_nulldata(padapter, psta->hwaddr, 0, 0, 0);
-                                       else
-                                               psta->sleepq_len = 0;
-
-                                       pstapriv->tim_bitmap &= ~BIT(psta->aid);
-
-                                       /* upate BCN for TIM IE */
-                                       /* update_BCNTIM(padapter); */
-                                       update_beacon(padapter, _TIM_IE_, NULL, false);
-                               }
+                               pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+                               /* update BCN for TIM IE */
+                               /* update_BCNTIM(padapter); */
+                               update_beacon(padapter, _TIM_IE_, NULL, false);
                        }
-                       spin_unlock_bh(&pxmitpriv->lock);
                }
+               spin_unlock_bh(&pxmitpriv->lock);
        }
-
-       return _FAIL;
 }
 
 struct recv_frame *recvframe_chk_defrag(struct adapter *padapter, struct recv_frame *precv_frame);
 
-static int validate_recv_mgnt_frame(struct adapter *padapter,
-                                   struct recv_frame *precv_frame)
+static void validate_recv_mgnt_frame(struct adapter *padapter,
+                                    struct recv_frame *precv_frame)
 {
        struct sta_info *psta;
+       struct ieee80211_hdr *hdr;
 
        precv_frame = recvframe_chk_defrag(padapter, precv_frame);
        if (!precv_frame)
-               return _SUCCESS;
+               return;
 
-       /* for rx pkt statistics */
-       psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(precv_frame->rx_data));
+       hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
+       psta = rtw_get_stainfo(&padapter->stapriv, hdr->addr2);
        if (psta) {
                psta->sta_stats.rx_mgnt_pkts++;
-               if (GetFrameSubType(precv_frame->rx_data) == WIFI_BEACON) {
+               if (ieee80211_is_beacon(hdr->frame_control))
                        psta->sta_stats.rx_beacon_pkts++;
-               } else if (GetFrameSubType(precv_frame->rx_data) == WIFI_PROBEREQ) {
+               else if (ieee80211_is_probe_req(hdr->frame_control))
                        psta->sta_stats.rx_probereq_pkts++;
-               } else if (GetFrameSubType(precv_frame->rx_data) == WIFI_PROBERSP) {
-                       if (!memcmp(padapter->eeprompriv.mac_addr, GetAddr1Ptr(precv_frame->rx_data), ETH_ALEN))
+               else if (ieee80211_is_probe_resp(hdr->frame_control)) {
+                       if (!memcmp(padapter->eeprompriv.mac_addr, hdr->addr1, ETH_ALEN))
                                psta->sta_stats.rx_probersp_pkts++;
-                       else if (is_broadcast_mac_addr(GetAddr1Ptr(precv_frame->rx_data)) ||
-                                is_multicast_mac_addr(GetAddr1Ptr(precv_frame->rx_data)))
+                       else if (is_broadcast_mac_addr(hdr->addr1) || is_multicast_mac_addr(hdr->addr1))
                                psta->sta_stats.rx_probersp_bm_pkts++;
                        else
                                psta->sta_stats.rx_probersp_uo_pkts++;
@@ -939,72 +928,44 @@ static int validate_recv_mgnt_frame(struct adapter *padapter,
        }
 
        mgt_dispatcher(padapter, precv_frame);
-
-       return _SUCCESS;
 }
 
 static int validate_recv_data_frame(struct adapter *adapter,
                                    struct recv_frame *precv_frame)
 {
-       u8 bretry;
-       u8 *psa, *pda, *pbssid;
        struct sta_info *psta = NULL;
        u8 *ptr = precv_frame->rx_data;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
        struct rx_pkt_attrib    *pattrib = &precv_frame->attrib;
        struct security_priv    *psecuritypriv = &adapter->securitypriv;
-       int ret = _SUCCESS;
-
-       bretry = GetRetry(ptr);
-       pda = get_da(ptr);
-       psa = get_sa(ptr);
-       pbssid = get_hdr_bssid(ptr);
+       int ret;
 
-       if (!pbssid) {
-               ret = _FAIL;
-               goto exit;
-       }
+       memcpy(pattrib->dst, ieee80211_get_DA(hdr), ETH_ALEN);
+       memcpy(pattrib->src, ieee80211_get_SA(hdr), ETH_ALEN);
 
-       memcpy(pattrib->dst, pda, ETH_ALEN);
-       memcpy(pattrib->src, psa, ETH_ALEN);
+       /* address4 is used only if both to_ds and from_ds are set */
+       if (ieee80211_has_a4(hdr->frame_control))
+               return _FAIL;
 
-       memcpy(pattrib->bssid, pbssid, ETH_ALEN);
+       memcpy(pattrib->ra, hdr->addr1, ETH_ALEN);
+       memcpy(pattrib->ta, hdr->addr2, ETH_ALEN);
 
-       switch (pattrib->to_fr_ds) {
-       case 0:
-               memcpy(pattrib->ra, pda, ETH_ALEN);
-               memcpy(pattrib->ta, psa, ETH_ALEN);
-               ret = sta2sta_data_frame(adapter, precv_frame, &psta);
-               break;
-       case 1:
-               memcpy(pattrib->ra, pda, ETH_ALEN);
-               memcpy(pattrib->ta, pbssid, ETH_ALEN);
+       if (ieee80211_has_fromds(hdr->frame_control)) {
+               memcpy(pattrib->bssid, hdr->addr2, ETH_ALEN);
                ret = ap2sta_data_frame(adapter, precv_frame, &psta);
-               break;
-       case 2:
-               memcpy(pattrib->ra, pbssid, ETH_ALEN);
-               memcpy(pattrib->ta, psa, ETH_ALEN);
+       } else if (ieee80211_has_tods(hdr->frame_control)) {
+               memcpy(pattrib->bssid, hdr->addr1, ETH_ALEN);
                ret = sta2ap_data_frame(adapter, precv_frame, &psta);
-               break;
-       case 3:
-               memcpy(pattrib->ra, GetAddr1Ptr(ptr), ETH_ALEN);
-               memcpy(pattrib->ta, GetAddr2Ptr(ptr), ETH_ALEN);
-               ret = _FAIL;
-               break;
-       default:
-               ret = _FAIL;
-               break;
+       } else {
+               memcpy(pattrib->bssid, hdr->addr3, ETH_ALEN);
+               ret = sta2sta_data_frame(adapter, precv_frame, &psta);
        }
 
-       if (ret == _FAIL) {
-               goto exit;
-       } else if (ret == RTW_RX_HANDLED) {
-               goto exit;
-       }
+       if (ret == _FAIL || ret == RTW_RX_HANDLED)
+               return ret;
 
-       if (!psta) {
-               ret = _FAIL;
-               goto exit;
-       }
+       if (!psta)
+               return _FAIL;
 
        /* psta->rssi = prxcmd->rssi; */
        /* psta->signal_quality = prxcmd->sq; */
@@ -1014,16 +975,16 @@ static int validate_recv_data_frame(struct adapter *adapter,
        pattrib->ack_policy = 0;
        /* parsing QC field */
        if (pattrib->qos) {
-               pattrib->priority = GetPriority((ptr + 24));
+               pattrib->priority = ieee80211_get_tid(hdr);
                pattrib->ack_policy = GetAckpolicy((ptr + 24));
                pattrib->amsdu = GetAMsdu((ptr + 24));
-               pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 32 : 26;
+               pattrib->hdrlen = 26;
 
                if (pattrib->priority != 0 && pattrib->priority != 3)
                        adapter->recvpriv.bIsAnyNonBEPkts = true;
        } else {
                pattrib->priority = 0;
-               pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 30 : 24;
+               pattrib->hdrlen = 24;
        }
 
        if (pattrib->order)/* HT-CTRL 11n */
@@ -1032,10 +993,9 @@ static int validate_recv_data_frame(struct adapter *adapter,
        precv_frame->preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority];
 
        /*  decache, drop duplicate recv packets */
-       if (recv_decache(precv_frame, bretry, &psta->sta_recvpriv.rxcache) == _FAIL) {
-               ret = _FAIL;
-               goto exit;
-       }
+       if (recv_decache(precv_frame, ieee80211_has_retry(hdr->frame_control),
+                        &psta->sta_recvpriv.rxcache) == _FAIL)
+               return _FAIL;
 
        if (pattrib->privacy) {
                GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, is_multicast_ether_addr(pattrib->ra));
@@ -1047,9 +1007,7 @@ static int validate_recv_data_frame(struct adapter *adapter,
                pattrib->icv_len = 0;
        }
 
-exit:
-
-       return ret;
+       return _SUCCESS;
 }
 
 static int validate_recv_frame(struct adapter *adapter, struct recv_frame *precv_frame)
@@ -1059,11 +1017,8 @@ static int validate_recv_frame(struct adapter *adapter, struct recv_frame *precv
        /* then call check if rx seq/frag. duplicated. */
 
        int retval = _FAIL;
-       u8 bDumpRxPkt;
        struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
-       u8 *ptr = precv_frame->rx_data;
-       __le16 fc = *(__le16 *)ptr;
-       u8  ver = (unsigned char)(*ptr) & 0x3;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)precv_frame->rx_data;
        struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
 
        if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
@@ -1072,32 +1027,26 @@ static int validate_recv_frame(struct adapter *adapter, struct recv_frame *precv
                        pmlmeext->channel_set[ch_set_idx].rx_count++;
        }
 
-       /* add version chk */
-       if (ver != 0)
+       if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_VERS)) != 0)
                return _FAIL;
 
-       pattrib->to_fr_ds = get_tofr_ds(ptr);
-
-       pattrib->frag_num = GetFragNum(ptr);
-       pattrib->seq_num = GetSequence(ptr);
+       pattrib->frag_num = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+       pattrib->seq_num = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
 
-       pattrib->pw_save = GetPwrMgt(ptr);
-       pattrib->mfrag = ieee80211_has_morefrags(fc);
-       pattrib->mdata = ieee80211_has_moredata(fc);
-       pattrib->privacy = ieee80211_has_protected(fc);
-       pattrib->order = ieee80211_has_order(fc);
-
-       /* Dump rx packets */
-       GetHalDefVar8188EUsb(adapter, HAL_DEF_DBG_DUMP_RXPKT, &bDumpRxPkt);
+       pattrib->pw_save = ieee80211_has_pm(hdr->frame_control);
+       pattrib->mfrag = ieee80211_has_morefrags(hdr->frame_control);
+       pattrib->mdata = ieee80211_has_moredata(hdr->frame_control);
+       pattrib->privacy = ieee80211_has_protected(hdr->frame_control);
+       pattrib->order = ieee80211_has_order(hdr->frame_control);
 
        /* We return _SUCCESS only for data frames. */
-       if (ieee80211_is_mgmt(fc))
+       if (ieee80211_is_mgmt(hdr->frame_control))
                validate_recv_mgnt_frame(adapter, precv_frame);
-       else if (ieee80211_is_ctl(fc))
+       else if (ieee80211_is_ctl(hdr->frame_control))
                validate_recv_ctrl_frame(adapter, precv_frame);
-       else if (ieee80211_is_data(fc)) {
+       else if (ieee80211_is_data(hdr->frame_control)) {
                rtw_led_control(adapter, LED_CTL_RX);
-               pattrib->qos = ieee80211_is_data_qos(fc);
+               pattrib->qos = ieee80211_is_data_qos(hdr->frame_control);
                retval = validate_recv_data_frame(adapter, precv_frame);
                if (retval == _FAIL) {
                        struct recv_priv *precvpriv = &adapter->recvpriv;
@@ -1284,8 +1233,9 @@ struct recv_frame *recvframe_chk_defrag(struct adapter *padapter, struct recv_fr
        psta_addr = pfhdr->attrib.ta;
        psta = rtw_get_stainfo(pstapriv, psta_addr);
        if (!psta) {
-               u8 type = GetFrameType(pfhdr->rx_data);
-               if (type != WIFI_DATA_TYPE) {
+               __le16 fc = *(__le16 *)pfhdr->rx_data;
+
+               if (ieee80211_is_data(fc)) {
                        psta = rtw_get_bcmc_stainfo(padapter);
                        pdefrag_q = &psta->sta_recvpriv.defrag_q;
                } else {
@@ -1723,12 +1673,9 @@ static int recv_func_prehandle(struct adapter *padapter, struct recv_frame *rfra
 
        /* check the frame crtl field and decache */
        ret = validate_recv_frame(padapter, rframe);
-       if (ret != _SUCCESS) {
+       if (ret != _SUCCESS)
                rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
-               goto exit;
-       }
 
-exit:
        return ret;
 }
 
index 2cdcdfd..5bba57d 100644 (file)
@@ -63,7 +63,7 @@ void rtw_wep_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
                                arc4_crypt(ctx, payload + length, crc.f1, 4);
 
                                pframe += pxmitpriv->frag_len;
-                               pframe = (u8 *)RND4((size_t)(pframe));
+                               pframe = PTR_ALIGN(pframe, 4);
                        }
                }
        }
@@ -504,7 +504,7 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
                                        arc4_crypt(ctx, payload + length, crc.f1, 4);
 
                                        pframe += pxmitpriv->frag_len;
-                                       pframe = (u8 *)RND4((size_t)(pframe));
+                                       pframe = PTR_ALIGN(pframe, 4);
                                }
                        }
                } else {
@@ -1133,7 +1133,7 @@ u32 rtw_aes_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
 
                                        aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
                                        pframe += pxmitpriv->frag_len;
-                                       pframe = (u8 *)RND4((size_t)(pframe));
+                                       pframe = PTR_ALIGN(pframe, 4);
                                }
                        }
                } else {
index 91ff82f..357f98e 100644 (file)
@@ -470,9 +470,9 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
        spin_unlock_bh(&pacl_node_q->lock);
 
        if (pacl_list->mode == 1)/* accept unless in deny list */
-               res = (match) ? false : true;
+               res = !match;
        else if (pacl_list->mode == 2)/* deny unless in accept list */
-               res = (match) ? true : false;
+               res = match;
        else
                res = true;
 
index 665b077..392a657 100644 (file)
@@ -276,14 +276,6 @@ void Restore_DM_Func_Flag(struct adapter *padapter)
        SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&saveflag));
 }
 
-void Switch_DM_Func(struct adapter *padapter, u32 mode, u8 enable)
-{
-       if (enable)
-               SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_SET, (u8 *)(&mode));
-       else
-               SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_CLR, (u8 *)(&mode));
-}
-
 void Set_MSR(struct adapter *padapter, u8 type)
 {
        u8 val8;
@@ -511,6 +503,31 @@ int WMM_param_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
        return true;
 }
 
+static void set_acm_ctrl(struct adapter *adapter, u8 acm_mask)
+{
+       u8 acmctrl = rtw_read8(adapter, REG_ACMHWCTRL);
+
+       if (acm_mask > 1)
+               acmctrl = acmctrl | 0x1;
+
+       if (acm_mask & BIT(3))
+               acmctrl |= ACMHW_VOQEN;
+       else
+               acmctrl &= (~ACMHW_VOQEN);
+
+       if (acm_mask & BIT(2))
+               acmctrl |= ACMHW_VIQEN;
+       else
+               acmctrl &= (~ACMHW_VIQEN);
+
+       if (acm_mask & BIT(1))
+               acmctrl |= ACMHW_BEQEN;
+       else
+               acmctrl &= (~ACMHW_BEQEN);
+
+       rtw_write8(adapter, REG_ACMHWCTRL, acmctrl);
+}
+
 void WMMOnAssocRsp(struct adapter *padapter)
 {
        u8      ACI, ACM, AIFS, ECWMin, ECWMax, aSifsTime;
@@ -522,6 +539,7 @@ void WMMOnAssocRsp(struct adapter *padapter)
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
        struct xmit_priv                *pxmitpriv = &padapter->xmitpriv;
        struct registry_priv    *pregpriv = &padapter->registrypriv;
+       struct hal_data_8188e *haldata = &padapter->haldata;
 
        if (pmlmeinfo->WMM_enable == 0) {
                padapter->mlmepriv.acm_mask = 0;
@@ -550,7 +568,8 @@ void WMMOnAssocRsp(struct adapter *padapter)
 
                switch (ACI) {
                case 0x0:
-                       SetHwReg8188EU(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acParm));
+                       haldata->AcParam_BE = acParm;
+                       rtw_write32(padapter, REG_EDCA_BE_PARAM, acParm);
                        acm_mask |= (ACM ? BIT(1) : 0);
                        edca[XMIT_BE_QUEUE] = acParm;
                        break;
@@ -572,7 +591,7 @@ void WMMOnAssocRsp(struct adapter *padapter)
        }
 
        if (padapter->registrypriv.acm_method == 1)
-               SetHwReg8188EU(padapter, HW_VAR_ACM_CTRL, (u8 *)(&acm_mask));
+               set_acm_ctrl(padapter, acm_mask);
        else
                padapter->mlmepriv.acm_mask = acm_mask;
 
@@ -743,6 +762,35 @@ void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
        memcpy(&pmlmeinfo->HT_info, pIE->data, pIE->Length);
 }
 
+static void set_min_ampdu_spacing(struct adapter *adapter, u8 spacing)
+{
+       u8 sec_spacing;
+
+       if (spacing <= 7) {
+               switch (adapter->securitypriv.dot11PrivacyAlgrthm) {
+               case _NO_PRIVACY_:
+               case _AES_:
+                       sec_spacing = 0;
+                       break;
+               case _WEP40_:
+               case _WEP104_:
+               case _TKIP_:
+               case _TKIP_WTMIC_:
+                       sec_spacing = 6;
+                       break;
+               default:
+                       sec_spacing = 7;
+                       break;
+               }
+
+               if (spacing < sec_spacing)
+                       spacing = sec_spacing;
+
+               rtw_write8(adapter, REG_AMPDU_MIN_SPACE,
+                          (rtw_read8(adapter, REG_AMPDU_MIN_SPACE) & 0xf8) | spacing);
+       }
+}
+
 void HTOnAssocRsp(struct adapter *padapter)
 {
        unsigned char           max_AMPDU_len;
@@ -767,7 +815,7 @@ void HTOnAssocRsp(struct adapter *padapter)
 
        min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;
 
-       SetHwReg8188EU(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
+       set_min_ampdu_spacing(padapter, min_MPDU_spacing);
 
        SetHwReg8188EU(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
 }
@@ -846,7 +894,7 @@ int rtw_check_bcn_info(struct adapter  *Adapter, u8 *pframe, u32 packet_len)
        if (!is_client_associated_to_ap(Adapter))
                return true;
 
-       len = packet_len - sizeof(struct rtw_ieee80211_hdr_3addr);
+       len = packet_len - sizeof(struct ieee80211_hdr_3addr);
 
        if (len > MAX_IE_SZ)
                return _FAIL;
@@ -867,7 +915,7 @@ int rtw_check_bcn_info(struct adapter  *Adapter, u8 *pframe, u32 packet_len)
 
        /* below is to copy the information element */
        bssid->IELength = len;
-       memcpy(bssid->IEs, (pframe + sizeof(struct rtw_ieee80211_hdr_3addr)), bssid->IELength);
+       memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
 
        /* check bw and channel offset */
        /* parsing HT_CAP_IE */
@@ -916,7 +964,7 @@ int rtw_check_bcn_info(struct adapter  *Adapter, u8 *pframe, u32 packet_len)
        else
                hidden_ssid = false;
 
-       if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) {
+       if (p && (!hidden_ssid && (*(p + 1)))) {
                memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
                bssid->Ssid.SsidLength = *(p + 1);
        } else {
@@ -1275,14 +1323,10 @@ void update_IOT_info(struct adapter *padapter)
        case HT_IOT_PEER_RALINK:
                pmlmeinfo->turboMode_cts2self = 0;
                pmlmeinfo->turboMode_rtsen = 1;
-               /* disable high power */
-               Switch_DM_Func(padapter, (~DYNAMIC_BB_DYNAMIC_TXPWR), false);
                break;
        case HT_IOT_PEER_REALTEK:
                /* rtw_write16(padapter, 0x4cc, 0xffff); */
                /* rtw_write16(padapter, 0x546, 0x01c0); */
-               /* disable high power */
-               Switch_DM_Func(padapter, (~DYNAMIC_BB_DYNAMIC_TXPWR), false);
                break;
        default:
                pmlmeinfo->turboMode_cts2self = 0;
@@ -1291,26 +1335,36 @@ void update_IOT_info(struct adapter *padapter)
        }
 }
 
+static void set_ack_preamble(struct adapter *adapter, bool short_preamble)
+{
+       struct hal_data_8188e *haldata = &adapter->haldata;
+       u8 val8;
+
+       /*  Joseph marked out for Netgear 3500 TKIP channel 7 issue.(Temporarily) */
+       val8 = haldata->nCur40MhzPrimeSC << 5;
+       if (short_preamble)
+               val8 |= 0x80;
+
+       rtw_write8(adapter, REG_RRSR + 2, val8);
+}
+
 void update_capinfo(struct adapter *Adapter, u16 updateCap)
 {
        struct mlme_ext_priv    *pmlmeext = &Adapter->mlmeextpriv;
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
-       bool            ShortPreamble;
 
        /*  Check preamble mode, 2005.01.06, by rcnjko. */
        /*  Mark to update preamble value forever, 2008.03.18 by lanhsin */
 
        if (updateCap & cShortPreamble) { /*  Short Preamble */
                if (pmlmeinfo->preamble_mode != PREAMBLE_SHORT) { /*  PREAMBLE_LONG or PREAMBLE_AUTO */
-                       ShortPreamble = true;
                        pmlmeinfo->preamble_mode = PREAMBLE_SHORT;
-                       SetHwReg8188EU(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+                       set_ack_preamble(Adapter, true);
                }
        } else { /*  Long Preamble */
                if (pmlmeinfo->preamble_mode != PREAMBLE_LONG) {  /*  PREAMBLE_SHORT or PREAMBLE_AUTO */
-                       ShortPreamble = false;
                        pmlmeinfo->preamble_mode = PREAMBLE_LONG;
-                       SetHwReg8188EU(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+                       set_ack_preamble(Adapter, false);
                }
        }
 
@@ -1338,7 +1392,6 @@ void update_capinfo(struct adapter *Adapter, u16 updateCap)
 void update_wireless_mode(struct adapter *padapter)
 {
        int ratelen, network_type = 0;
-       u32 SIFS_Timer;
        struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
        struct wlan_bssid_ex    *cur_network = &pmlmeinfo->network;
@@ -1365,10 +1418,12 @@ void update_wireless_mode(struct adapter *padapter)
 
        pmlmeext->cur_wireless_mode = network_type & padapter->registrypriv.wireless_mode;
 
-       SIFS_Timer = 0x0a0a0808;/* 0x0808 -> for CCK, 0x0a0a -> for OFDM */
-                               /* change this value if having IOT issues. */
-
-       SetHwReg8188EU(padapter, HW_VAR_RESP_SIFS, (u8 *)&SIFS_Timer);
+       /* RESP_SIFS for CCK */
+       rtw_write8(padapter, REG_R2T_SIFS, 0x08);
+       rtw_write8(padapter, REG_R2T_SIFS + 1, 0x08);
+       /* RESP_SIFS for OFDM */
+       rtw_write8(padapter, REG_T2T_SIFS, 0x0a);
+       rtw_write8(padapter, REG_T2T_SIFS + 1, 0x0a);
 
        if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
                update_mgnt_tx_rate(padapter, IEEE80211_CCK_RATE_1MB);
@@ -1411,34 +1466,12 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
        return _SUCCESS;
 }
 
-void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr)
-{
-       struct sta_info *psta;
-       u16 tid;
-       u16 param;
-       struct recv_reorder_ctrl *preorder_ctrl;
-       struct sta_priv *pstapriv = &padapter->stapriv;
-       struct ADDBA_request    *preq = (struct ADDBA_request *)paddba_req;
-       struct mlme_ext_priv    *pmlmeext = &padapter->mlmeextpriv;
-       struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
-
-       psta = rtw_get_stainfo(pstapriv, addr);
-
-       if (psta) {
-               param = le16_to_cpu(preq->BA_para_set);
-               tid = (param >> 2) & 0x0f;
-               preorder_ctrl = &psta->recvreorder_ctrl[tid];
-               preorder_ctrl->indicate_seq = 0xffff;
-               preorder_ctrl->enable = (pmlmeinfo->bAcceptAddbaReq) ? true : false;
-       }
-}
-
 void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
 {
        u8 *pIE;
        __le32 *pbuf;
 
-       pIE = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+       pIE = pframe + sizeof(struct ieee80211_hdr_3addr);
        pbuf = (__le32 *)pIE;
 
        pmlmeext->TSFValue = le32_to_cpu(*(pbuf + 1));
index c2a550e..3d8e9de 100644 (file)
@@ -52,8 +52,8 @@ s32   _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
        sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
 
        /*
-       Please insert all the queue initializaiton using rtw_init_queue below
-       */
+        * Please insert all the queue initialization using rtw_init_queue below
+        */
 
        pxmitpriv->adapter = padapter;
 
@@ -66,10 +66,10 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
        rtw_init_queue(&pxmitpriv->free_xmit_queue);
 
        /*
-       Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
-       and initialize free_xmit_frame below.
-       Please also apply  free_txobj to link_up all the xmit_frames...
-       */
+        * Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
+        * and initialize free_xmit_frame below.
+        * Please also apply free_txobj to link_up all the xmit_frames...
+        */
 
        pxmitpriv->pallocated_frame_buf = vzalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
 
@@ -178,7 +178,12 @@ s32        _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
 
        pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
 
-       rtw_alloc_hwxmits(padapter);
+       res = rtw_alloc_hwxmits(padapter);
+       if (res) {
+               res = _FAIL;
+               goto exit;
+       }
+
        rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
 
        for (i = 0; i < 4; i++)
@@ -399,7 +404,7 @@ static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
 
        pattrib->priority = user_prio;
        pattrib->hdrlen = WLAN_HDR_A3_QOS_LEN;
-       pattrib->subtype = WIFI_QOS_DATA_TYPE;
+       pattrib->subtype = IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA;
 }
 
 static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct pkt_attrib *pattrib)
@@ -448,14 +453,12 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
                _rtw_pktfile_read(&pktfile, &tmp[0], 24);
                pattrib->dhcp_pkt = 0;
                if (pktfile.pkt_len > 282) {/* MINIMUM_DHCP_PACKET_SIZE) { */
-                       if (ETH_P_IP == pattrib->ether_type) {/*  IP header */
-                               if (((tmp[21] == 68) && (tmp[23] == 67)) ||
-                                   ((tmp[21] == 67) && (tmp[23] == 68))) {
-                                       /*  68 : UDP BOOTP client */
-                                       /*  67 : UDP BOOTP server */
-                                       /*  Use low rate to send DHCP packet. */
-                                       pattrib->dhcp_pkt = 1;
-                               }
+                       if (((tmp[21] == 68) && (tmp[23] == 67)) ||
+                           ((tmp[21] == 67) && (tmp[23] == 68))) {
+                               /*  68 : UDP BOOTP client */
+                               /*  67 : UDP BOOTP server */
+                               /*  Use low rate to send DHCP packet. */
+                               pattrib->dhcp_pkt = 1;
                        }
                }
        }
@@ -497,7 +500,7 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
        pattrib->pkt_hdrlen = ETH_HLEN;/* pattrib->ether_type == 0x8100) ? (14 + 4): 14; vlan tag */
 
        pattrib->hdrlen = WLAN_HDR_A3_LEN;
-       pattrib->subtype = WIFI_DATA_TYPE;
+       pattrib->subtype = IEEE80211_FTYPE_DATA;
        pattrib->priority = 0;
 
        if (check_fwstate(pmlmepriv, WIFI_AP_STATE | WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)) {
@@ -642,7 +645,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
                        payload = pframe;
 
                        for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
-                               payload = (u8 *)RND4((size_t)(payload));
+                               payload = PTR_ALIGN(payload, 4);
 
                                payload = payload + pattrib->hdrlen + pattrib->iv_len;
                                if ((curfragnum + 1) == pattrib->nr_frags) {
@@ -696,13 +699,13 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
 {
        u16 *qc;
 
-       struct rtw_ieee80211_hdr *pwlanhdr = (struct rtw_ieee80211_hdr *)hdr;
+       struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
        struct qos_priv *pqospriv = &pmlmepriv->qospriv;
        u8 qos_option = false;
 
        int res = _SUCCESS;
-       __le16 *fctrl = &pwlanhdr->frame_ctl;
+       __le16 *fctrl = &pwlanhdr->frame_control;
 
        struct sta_info *psta;
 
@@ -717,7 +720,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
 
        SetFrameSubType(fctrl, pattrib->subtype);
 
-       if (pattrib->subtype & WIFI_DATA_TYPE) {
+       if (pattrib->subtype & IEEE80211_FTYPE_DATA) {
                if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
                        /* to_ds = 1, fr_ds = 0; */
                        /* Data transfer to AP */
@@ -853,22 +856,19 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, struct pkt_attrib *pat
 }
 
 /*
-
-This sub-routine will perform all the following:
-
-1. remove 802.3 header.
-2. create wlan_header, based on the info in pxmitframe
-3. append sta's iv/ext-iv
-4. append LLC
-5. move frag chunk from pframe to pxmitframe->mem
-6. apply sw-encrypt, if necessary.
-
-*/
+ * This sub-routine will perform all the following:
+ *
+ * 1. remove 802.3 header.
+ * 2. create wlan_header, based on the info in pxmitframe
+ * 3. append sta's iv/ext-iv
+ * 4. append LLC
+ * 5. move frag chunk from pframe to pxmitframe->mem
+ * 6. apply sw-encrypt, if necessary.
+ */
 s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe)
 {
        struct pkt_file pktfile;
        s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz;
-       size_t addr;
        u8 *pframe, *mem_start;
        u8 hw_hdr_offset;
        struct sta_info         *psta;
@@ -985,9 +985,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
                        break;
                }
 
-               addr = (size_t)(pframe);
-
-               mem_start = (unsigned char *)RND4(addr) + hw_hdr_offset;
+               mem_start = PTR_ALIGN(pframe, 4) + hw_hdr_offset;
                memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
        }
 
@@ -1210,24 +1208,22 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
 }
 
 /*
-Calling context:
-1. OS_TXENTRY
-2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
-
-If we turn on USE_RXTHREAD, then, no need for critical section.
-Otherwise, we must use _enter/_exit critical to protect free_xmit_queue...
-
-Must be very very cautious...
-
-*/
-
+ * Calling context:
+ * 1. OS_TXENTRY
+ * 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
+ *
+ * If we turn on USE_RXTHREAD, then, no need for critical section.
+ * Otherwise, we must use _enter/_exit critical to protect free_xmit_queue...
+ *
+ * Must be very very cautious...
+ */
 struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pfree_xmit_queue) */
 {
        /*
-               Please remember to use all the osdep_service api,
-               and lock/unlock or _enter/_exit critical to protect
-               pfree_xmit_queue
-       */
+        * Please remember to use all the osdep_service api,
+        * and lock/unlock or _enter/_exit critical to protect
+        * pfree_xmit_queue
+        */
 
        struct xmit_frame *pxframe = NULL;
        struct list_head *plist, *phead;
@@ -1474,7 +1470,7 @@ exit:
        return res;
 }
 
-void rtw_alloc_hwxmits(struct adapter *padapter)
+int rtw_alloc_hwxmits(struct adapter *padapter)
 {
        struct hw_xmit *hwxmits;
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1482,6 +1478,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
        pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
 
        pxmitpriv->hwxmits = kzalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry, GFP_KERNEL);
+       if (!pxmitpriv->hwxmits)
+               return -ENOMEM;
 
        hwxmits = pxmitpriv->hwxmits;
 
@@ -1498,6 +1496,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
                hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
        } else {
        }
+
+       return 0;
 }
 
 void rtw_free_hwxmits(struct adapter *padapter)
index e7f834b..7901d0a 100644 (file)
@@ -170,7 +170,7 @@ enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
 {
        u32     hex         = 0;
        u32     i           = 0;
-       u32     arraylen    = sizeof(array_agc_tab_1t_8188e) / sizeof(u32);
+       u32     arraylen    = ARRAY_SIZE(array_agc_tab_1t_8188e);
        u32    *array       = array_agc_tab_1t_8188e;
        bool            biol = false;
        struct adapter *adapter =  dm_odm->Adapter;
@@ -446,7 +446,7 @@ enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
 {
        u32     hex         = 0;
        u32     i           = 0;
-       u32     arraylen    = sizeof(array_phy_reg_1t_8188e) / sizeof(u32);
+       u32     arraylen    = ARRAY_SIZE(array_phy_reg_1t_8188e);
        u32    *array       = array_phy_reg_1t_8188e;
        bool    biol = false;
        struct adapter *adapter =  dm_odm->Adapter;
@@ -651,7 +651,7 @@ void ODM_ReadAndConfig_PHY_REG_PG_8188E(struct odm_dm_struct *dm_odm)
 {
        u32  hex;
        u32  i           = 0;
-       u32  arraylen    = sizeof(array_phy_reg_pg_8188e) / sizeof(u32);
+       u32  arraylen    = ARRAY_SIZE(array_phy_reg_pg_8188e);
        u32 *array       = array_phy_reg_pg_8188e;
 
        hex = ODM_ITRF_USB << 8;
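The sizeof(arr) / sizeof(u32) expressions above become ARRAY_SIZE(), which divides by sizeof(arr[0]) and, with GCC/Clang, fails to build when handed a pointer instead of an array. A minimal sketch, assuming only the ARRAY_SIZE() helper from <linux/kernel.h> and a made-up table:

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>

static const u32 demo_tab[] = { 0x800, 0x80040000, 0x804, 0x00000003 };

static u32 demo_tab_len(void)
{
	/* equivalent to sizeof(demo_tab) / sizeof(demo_tab[0]) */
	return ARRAY_SIZE(demo_tab);
}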
index 20ce157..77b2588 100644 (file)
@@ -132,7 +132,7 @@ enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
 
        u32     hex         = 0;
        u32     i;
-       u32     array_len    = sizeof(array_MAC_REG_8188E) / sizeof(u32);
+       u32     array_len    = ARRAY_SIZE(array_MAC_REG_8188E);
        u32    *array       = array_MAC_REG_8188E;
        bool    biol = false;
 
index 9dc888a..08cbfce 100644 (file)
@@ -138,7 +138,7 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
 
        u32     hex         = 0;
        u32     i           = 0;
-       u32     ArrayLen    = sizeof(Array_RadioA_1T_8188E) / sizeof(u32);
+       u32     ArrayLen    = ARRAY_SIZE(Array_RadioA_1T_8188E);
        u32    *Array       = Array_RadioA_1T_8188E;
        bool            biol = false;
        struct adapter *Adapter =  pDM_Odm->Adapter;
index 5b91aec..150ea38 100644 (file)
@@ -1,30 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2007 - 2011 Realtek Corporation. */
 
-/*++
-
-Module Name:
-       HalPwrSeqCmd.c
-
-Abstract:
-       Implement HW Power sequence configuration CMD handling routine for Realtek devices.
-
-Major Change History:
-       When       Who               What
-       ---------- ---------------   -------------------------------
-       2011-10-26 Lucas            Modify to be compatible with SD4-CE driver.
-       2011-07-07 Roger            Create.
-
---*/
-
 #include "../include/HalPwrSeqCmd.h"
 
-/*     Description: */
-/*             This routine deals with the Power Configuration CMDs parsing
- *             for RTL8723/RTL8188E Series IC.
- *     Assumption:
- *             We should follow specific format which was released from HW SD.
- */
 u8 HalPwrSeqCmdParsing(struct adapter *padapter, struct wl_pwr_cfg pwrseqcmd[])
 {
        struct wl_pwr_cfg pwrcfgcmd = {0};
index 06f2a90..910cc07 100644 (file)
@@ -44,7 +44,7 @@ void dump_chip_info(struct HAL_VERSION        chip_vers)
 
        cnt += sprintf((buf + cnt), "1T1R_");
 
-       cnt += sprintf((buf + cnt), "RomVer(%d)\n", chip_vers.ROMVer);
+       cnt += sprintf((buf + cnt), "RomVer(%d)\n", 0);
 
        pr_info("%s", buf);
 }
@@ -267,7 +267,7 @@ static void three_out_pipe(struct adapter *adapter, bool wifi_cfg)
 bool Hal_MappingOutPipe(struct adapter *adapter, u8 numoutpipe)
 {
        struct registry_priv *pregistrypriv = &adapter->registrypriv;
-       bool  wifi_cfg = (pregistrypriv->wifi_spec) ? true : false;
+       bool wifi_cfg = pregistrypriv->wifi_spec;
        bool result = true;
 
        switch (numoutpipe) {
index 87e9a52..54cc3d7 100644 (file)
@@ -65,13 +65,13 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
 
        struct phy_status_rpt *pPhyStaRpt = (struct phy_status_rpt *)pPhyStatus;
 
-       isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
+       isCCKrate = pPktinfo->Rate >= DESC92C_RATE1M && pPktinfo->Rate <= DESC92C_RATE11M;
 
        if (isCCKrate) {
                u8 cck_agc_rpt;
 
                /*  (1)Hardware does not provide RSSI for CCK */
-               /*  (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */
+               /*  (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
 
                cck_highpwr = dm_odm->bCckHighPower;
 
@@ -170,7 +170,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
                        /* Get Rx snr value in DB */
                        dm_odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i] / 2);
                }
-               /*  (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */
+               /*  (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
                rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f) - 110;
 
                PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
@@ -234,7 +234,7 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
        if ((!pPktinfo->bPacketMatchBSSID))
                return;
 
-       isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
+       isCCKrate = pPktinfo->Rate >= DESC92C_RATE1M && pPktinfo->Rate <= DESC92C_RATE11M;
 
        /* Smart Antenna Debug Message------------------  */
        if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
index f1464e4..475650d 100644 (file)
@@ -199,16 +199,16 @@ void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt)
 
 static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
 {
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        u32 rate_len, pktlen;
        struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
        struct wlan_bssid_ex            *cur_network = &pmlmeinfo->network;
 
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
 
        eth_broadcast_addr(pwlanhdr->addr1);
@@ -218,8 +218,8 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
        SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
        SetFrameSubType(pframe, WIFI_BEACON);
 
-       pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
-       pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pframe += sizeof(struct ieee80211_hdr_3addr);
+       pktlen = sizeof(struct ieee80211_hdr_3addr);
 
        /* timestamp will be inserted by hardware */
        pframe += 8;
@@ -281,15 +281,15 @@ _ConstructBeacon:
 
 static void ConstructPSPoll(struct adapter *adapt, u8 *pframe, u32 *pLength)
 {
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
        __le16 *fctrl;
 
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
        /*  Frame control. */
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
        SetPwrMgt(fctrl);
        SetFrameSubType(pframe, WIFI_PSPOLL);
@@ -314,7 +314,7 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
        u8 bEosp,
        u8 bForcePowerSave)
 {
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        u32 pktlen;
        struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
@@ -322,9 +322,9 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
        struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
 
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
        if (bForcePowerSave)
                SetPwrMgt(fctrl);
@@ -353,19 +353,19 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
        SetSeqNum(pwlanhdr, 0);
 
        if (bQoS) {
-               struct rtw_ieee80211_hdr_3addr_qos *pwlanqoshdr;
+               struct ieee80211_qos_hdr *pwlanqoshdr;
 
                SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
 
-               pwlanqoshdr = (struct rtw_ieee80211_hdr_3addr_qos *)pframe;
-               SetPriority(&pwlanqoshdr->qc, AC);
-               SetEOSP(&pwlanqoshdr->qc, bEosp);
+               pwlanqoshdr = (struct ieee80211_qos_hdr *)pframe;
+               SetPriority(&pwlanqoshdr->qos_ctrl, AC);
+               SetEOSP(&pwlanqoshdr->qos_ctrl, bEosp);
 
-               pktlen = sizeof(struct rtw_ieee80211_hdr_3addr_qos);
+               pktlen = sizeof(struct ieee80211_qos_hdr);
        } else {
                SetFrameSubType(pframe, WIFI_DATA_NULL);
 
-               pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+               pktlen = sizeof(struct ieee80211_hdr_3addr);
        }
 
        *pLength = pktlen;
@@ -373,7 +373,7 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
 
 static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u8 *StaAddr, bool bHideSSID)
 {
-       struct rtw_ieee80211_hdr        *pwlanhdr;
+       struct ieee80211_hdr *pwlanhdr;
        __le16 *fctrl;
        u8 *mac, *bssid;
        u32 pktlen;
@@ -381,12 +381,12 @@ static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u
        struct mlme_ext_info    *pmlmeinfo = &pmlmeext->mlmext_info;
        struct wlan_bssid_ex    *cur_network = &pmlmeinfo->network;
 
-       pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+       pwlanhdr = (struct ieee80211_hdr *)pframe;
 
        mac = myid(&adapt->eeprompriv);
        bssid = cur_network->MacAddress;
 
-       fctrl = &pwlanhdr->frame_ctl;
+       fctrl = &pwlanhdr->frame_control;
        *(fctrl) = 0;
        memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
        memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
@@ -395,7 +395,7 @@ static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u
        SetSeqNum(pwlanhdr, 0);
        SetFrameSubType(fctrl, WIFI_PROBERSP);
 
-       pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+       pktlen = sizeof(struct ieee80211_hdr_3addr);
        pframe += pktlen;
 
        if (cur_network->IELength > MAX_IE_SZ)
@@ -557,8 +557,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
                rtw_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl & (~BIT(6))));
                haldata->RegFwHwTxQCtrl &= (~BIT(6));
 
-               /*  Clear beacon valid check bit. */
-               SetHwReg8188EU(adapt, HW_VAR_BCN_VALID, NULL);
+               clear_beacon_valid_bit(adapt);
                DLBcnCount = 0;
                poll = 0;
                do {
@@ -569,7 +568,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
                                yield();
                                /* mdelay(10); */
                                /*  check rsvd page download OK. */
-                               GetHwReg8188EU(adapt, HW_VAR_BCN_VALID, (u8 *)(&bcn_valid));
+                               bcn_valid = get_beacon_valid_bit(adapt);
                                poll++;
                        } while (!bcn_valid && (poll % 10) != 0 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
                } while (!bcn_valid && DLBcnCount <= 100 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
@@ -597,7 +596,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
 
                /*  Update RSVD page location H2C to Fw. */
                if (bcn_valid)
-                       SetHwReg8188EU(adapt, HW_VAR_BCN_VALID, NULL);
+                       clear_beacon_valid_bit(adapt);
 
                /*  Do not enable HW DMA BCN or it will cause Pcie interface hang by timing issue. 2011.11.24. by tynli. */
                /*  Clear CR[8] or beacon packet will not be send to TxBuf anymore. */
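The HW_VAR_BCN_VALID set/get pair is folded into dedicated helpers here. A plausible shape for them, inferred from the register accesses removed further down in this diff (BCN_VALID is BIT(16) of REG_TDECTRL, i.e. BIT(0) of REG_TDECTRL + 2, and is write-1-to-clear); the names below are illustrative only, the real helpers live elsewhere in the driver:

/* Hedged sketch; assumes the driver's rtw_read8()/rtw_write8() accessors and
 * the REG_TDECTRL definition from rtl8188e_hal.h. */
static inline void demo_clear_beacon_valid_bit(struct adapter *adapt)
{
	/* write-1-to-clear */
	rtw_write8(adapt, REG_TDECTRL + 2,
		   rtw_read8(adapt, REG_TDECTRL + 2) | BIT(0));
}

static inline bool demo_get_beacon_valid_bit(struct adapter *adapt)
{
	return rtw_read8(adapt, REG_TDECTRL + 2) & BIT(0);
}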
index 6811be9..e17375a 100644 (file)
@@ -33,17 +33,16 @@ static s32 iol_execute(struct adapter *padapter, u8 control)
 {
        s32 status = _FAIL;
        u8 reg_0x88 = 0;
-       u32 start = 0, passing_time = 0;
+       unsigned long timeout;
 
        control = control & 0x0f;
        reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0);
        rtw_write8(padapter, REG_HMEBOX_E0,  reg_0x88 | control);
 
-       start = jiffies;
+       timeout = jiffies + msecs_to_jiffies(1000);
        while ((reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0)) & control &&
-              (passing_time = rtw_get_passing_time_ms(start)) < 1000) {
+               time_before(jiffies, timeout))
                ;
-       }
 
        reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0);
        status = (reg_0x88 & control) ? _FAIL : _SUCCESS;
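The open-coded rtw_get_passing_time_ms() polling is replaced by the standard jiffies pattern: record a deadline with msecs_to_jiffies() and test it with time_before(), which is wrap-around safe. A compact sketch of the pattern, assuming the driver's rtw_read8() accessor and REG_HMEBOX_E0 from the hunk above:

#include <linux/errno.h>
#include <linux/jiffies.h>

static int demo_wait_ctrl_clear(struct adapter *padapter, u8 mask)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	while ((rtw_read8(padapter, REG_HMEBOX_E0) & mask) &&
	       time_before(jiffies, timeout))
		cpu_relax();	/* busy-poll, as iol_execute() does above */

	return (rtw_read8(padapter, REG_HMEBOX_E0) & mask) ? -ETIMEDOUT : 0;
}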
@@ -187,8 +186,8 @@ static void efuse_read_phymap_from_txpktbuf(
        u16 *size       /* for efuse content: the max byte to read. will update to byte read */
        )
 {
+       unsigned long timeout;
        u16 dbg_addr = 0;
-       u32 start  = 0, passing_time = 0;
        __le32 lo32 = 0, hi32 = 0;
        u16 len = 0, count = 0;
        int i = 0;
@@ -207,9 +206,8 @@ static void efuse_read_phymap_from_txpktbuf(
                rtw_write16(adapter, REG_PKTBUF_DBG_ADDR, dbg_addr + i);
 
                rtw_write8(adapter, REG_TXPKTBUF_DBG, 0);
-               start = jiffies;
-               while (!rtw_read8(adapter, REG_TXPKTBUF_DBG) &&
-                      (passing_time = rtw_get_passing_time_ms(start)) < 1000)
+               timeout = jiffies + msecs_to_jiffies(1000);
+               while (!rtw_read8(adapter, REG_TXPKTBUF_DBG) && time_before(jiffies, timeout))
                        rtw_usleep_os(100);
 
                /* data from EEPROM needs to be in LE */
@@ -505,7 +503,6 @@ void rtl8188e_read_chip_version(struct adapter *padapter)
 
        ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
        ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK) >> CHIP_VER_RTL_SHIFT; /*  IC version (CUT) */
-       ChipVersion.ROMVer = 0; /*  ROM code version. */
 
        dump_chip_info(ChipVersion);
 
index ea75ff1..4864daf 100644 (file)
@@ -378,10 +378,10 @@ phy_InitBBRFRegisterDefinition(
        /*  Tx AGC Gain Stage (same for all path. Should we remove this?) */
        pHalData->PHYRegDef.rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
 
-       /*  Tranceiver A~D HSSI Parameter-1 */
+       /*  Transceiver A~D HSSI Parameter-1 */
        pHalData->PHYRegDef.rfHSSIPara1 = rFPGA0_XA_HSSIParameter1;  /* wire control parameter1 */
 
-       /*  Tranceiver A~D HSSI Parameter-2 */
+       /*  Transceiver A~D HSSI Parameter-2 */
        pHalData->PHYRegDef.rfHSSIPara2 = rFPGA0_XA_HSSIParameter2;  /* wire control parameter2 */
 
        /*  RF switch Control */
@@ -405,10 +405,10 @@ phy_InitBBRFRegisterDefinition(
        /*  Tx AFE control 2 */
        pHalData->PHYRegDef.rfTxAFE = rOFDM0_XATxAFE;
 
-       /*  Tranceiver LSSI Readback SI mode */
+       /*  Transceiver LSSI Readback SI mode */
        pHalData->PHYRegDef.rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
 
-       /*  Tranceiver LSSI Readback PI mode */
+       /*  Transceiver LSSI Readback PI mode */
        pHalData->PHYRegDef.rfLSSIReadBackPi = TransceiverA_HSPI_Readback;
 }
 
index 9bf7a92..dff0cba 100644 (file)
@@ -113,12 +113,13 @@ void update_recvframe_phyinfo_88e(struct recv_frame *precvframe, struct phy_stat
        struct hal_data_8188e *pHalData = &padapter->haldata;
        struct phy_info *pPHYInfo  = &pattrib->phy_info;
        u8 *wlanhdr = precvframe->rx_data;
+       __le16 fc = *(__le16 *)wlanhdr;
        struct odm_per_pkt_info pkt_info;
        u8 *sa = NULL;
        struct sta_priv *pstapriv;
        struct sta_info *psta;
 
-       pkt_info.bPacketMatchBSSID = ((!IsFrameTypeCtrl(wlanhdr)) &&
+       pkt_info.bPacketMatchBSSID = ((!ieee80211_is_ctl(fc)) &&
                !pattrib->icv_err && !pattrib->crc_err &&
                !memcmp(get_hdr_bssid(wlanhdr),
                 get_bssid(&padapter->mlmepriv), ETH_ALEN));
@@ -127,9 +128,7 @@ void update_recvframe_phyinfo_88e(struct recv_frame *precvframe, struct phy_stat
                                 (!memcmp(get_da(wlanhdr),
                                  myid(&padapter->eeprompriv), ETH_ALEN));
 
-       pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID &&
-                                (GetFrameSubType(wlanhdr) == WIFI_BEACON);
-
+       pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID && ieee80211_is_beacon(fc);
        if (pkt_info.bPacketBeacon) {
                if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE))
                        sa = padapter->mlmepriv.cur_network.network.MacAddress;
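IsFrameTypeCtrl() and GetFrameSubType() on the raw header give way to the <linux/ieee80211.h> helpers, which operate on the little-endian frame_control word. A small sketch of the pattern, using only helpers that exist in that header:

#include <linux/ieee80211.h>

static bool demo_is_beacon_from_hdr(const u8 *wlanhdr)
{
	/* frame_control is the first __le16 of every 802.11 header */
	__le16 fc = *(const __le16 *)wlanhdr;

	return !ieee80211_is_ctl(fc) && ieee80211_is_beacon(fc);
}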
index 55032d7..bdfa519 100644 (file)
@@ -347,7 +347,7 @@ static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
 
                mem_addr += w_sz;
 
-               mem_addr = (u8 *)RND4(((size_t)(mem_addr)));
+               mem_addr = PTR_ALIGN(mem_addr, 4);
        }
 
        rtw_free_xmitframe(pxmitpriv, pxmitframe);
@@ -437,7 +437,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
        pfirstframe = pxmitframe;
        len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);
        pbuf_tail = len;
-       pbuf = _RND8(pbuf_tail);
+       pbuf = round_up(pbuf_tail, 8);
 
        /*  check pkt amount in one bulk */
        desc_cnt = 0;
@@ -488,7 +488,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
 
                len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
 
-               if (_RND8(pbuf + len) > MAX_XMITBUF_SZ) {
+               if (pbuf + len > MAX_XMITBUF_SZ) {
                        pxmitframe->agg_num = 1;
                        pxmitframe->pkt_offset = 1;
                        break;
@@ -511,7 +511,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
 
                /*  handle pointer and stop condition */
                pbuf_tail = pbuf + len;
-               pbuf = _RND8(pbuf_tail);
+               pbuf = round_up(pbuf_tail, 8);
 
                pfirstframe->agg_num++;
                if (MAX_TX_AGG_PACKET_NUMBER == pfirstframe->agg_num)
index a927743..a217272 100644 (file)
@@ -123,7 +123,7 @@ static void _InitQueueReservedPage(struct adapter *Adapter)
                if (haldata->OutEpQueueSel & TX_SELE_LQ)
                        numLQ = 0x1C;
 
-               /*  NOTE: This step shall be proceed before writting REG_RQPN. */
+               /*  NOTE: This step shall be done before writing REG_RQPN. */
                if (haldata->OutEpQueueSel & TX_SELE_NQ)
                        numNQ = 0x1C;
                value8 = (u8)_NPQ(numNQ);
@@ -539,10 +539,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
        /*  Save target channel */
        haldata->CurrentChannel = 6;/* default set to 6 */
 
-       if (pwrctrlpriv->reg_rfoff) {
-               pwrctrlpriv->rf_pwrstate = rf_off;
-       }
-
        /*  2010/08/09 MH We need to check if we need to turnon or off RF after detecting */
        /*  HW GPIO pin. Before PHY_RFConfig8192C. */
        /*  2010/08/26 MH If Efuse does not support sective suspend then disable the function. */
@@ -942,17 +938,6 @@ static void hw_var_set_opmode(struct adapter *Adapter, u8 *val)
        }
 }
 
-static void hw_var_set_bssid(struct adapter *Adapter, u8 *val)
-{
-       u8 idx = 0;
-       u32 reg_bssid;
-
-       reg_bssid = REG_BSSID;
-
-       for (idx = 0; idx < 6; idx++)
-               rtw_write8(Adapter, (reg_bssid + idx), val[idx]);
-}
-
 void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
 {
        struct hal_data_8188e *haldata = &Adapter->haldata;
@@ -963,9 +948,6 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
        case HW_VAR_SET_OPMODE:
                hw_var_set_opmode(Adapter, val);
                break;
-       case HW_VAR_BSSID:
-               hw_var_set_bssid(Adapter, val);
-               break;
        case HW_VAR_BASIC_RATE:
                {
                        u16 BrateCfg = 0;
@@ -1024,17 +1006,6 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
                                ResumeTxBeacon(Adapter);
                }
                break;
-       case HW_VAR_MLME_DISCONNECT:
-               /* Set RCR to not to receive data frame when NO LINK state */
-               /* reject all data frames */
-               rtw_write16(Adapter, REG_RXFLTMAP2, 0x00);
-
-               /* reset TSF */
-               rtw_write8(Adapter, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
-
-               /* disable update TSF */
-               rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) | BIT(4));
-               break;
        case HW_VAR_MLME_SITESURVEY:
                if (*((u8 *)val)) { /* under sitesurvey */
                        /* config RCR to receive different BSSID & not to receive data frame */
@@ -1065,36 +1036,6 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
                        rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR) | RCR_CBSSID_BCN);
                }
                break;
-       case HW_VAR_MLME_JOIN:
-               {
-                       u8 RetryLimit = 0x30;
-                       u8 type = *((u8 *)val);
-                       struct mlme_priv        *pmlmepriv = &Adapter->mlmepriv;
-
-                       if (type == 0) { /*  prepare to join */
-                               /* enable to rx data frame.Accept all data frame */
-                               rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
-
-                               rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR) | RCR_CBSSID_DATA | RCR_CBSSID_BCN);
-
-                               if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
-                                       RetryLimit = 48;
-                               else /*  Ad-hoc Mode */
-                                       RetryLimit = 0x7;
-                       } else if (type == 1) {
-                               /* joinbss_event call back when join res < 0 */
-                               rtw_write16(Adapter, REG_RXFLTMAP2, 0x00);
-                       } else if (type == 2) {
-                               /* sta add event call back */
-                               /* enable update TSF */
-                               rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL) & (~BIT(4)));
-
-                               if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))
-                                       RetryLimit = 0x7;
-                       }
-                       rtw_write16(Adapter, REG_RL, RetryLimit << RETRY_LIMIT_SHORT_SHIFT | RetryLimit << RETRY_LIMIT_LONG_SHIFT);
-               }
-               break;
        case HW_VAR_SLOT_TIME:
                {
                        u8 u1bAIFS, aSifsTime;
@@ -1119,26 +1060,6 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
                        }
                }
                break;
-       case HW_VAR_RESP_SIFS:
-               /* RESP_SIFS for CCK */
-               rtw_write8(Adapter, REG_R2T_SIFS, val[0]); /*  SIFS_T2T_CCK (0x08) */
-               rtw_write8(Adapter, REG_R2T_SIFS + 1, val[1]); /* SIFS_R2T_CCK(0x08) */
-               /* RESP_SIFS for OFDM */
-               rtw_write8(Adapter, REG_T2T_SIFS, val[2]); /* SIFS_T2T_OFDM (0x0a) */
-               rtw_write8(Adapter, REG_T2T_SIFS + 1, val[3]); /* SIFS_R2T_OFDM(0x0a) */
-               break;
-       case HW_VAR_ACK_PREAMBLE:
-               {
-                       u8 regTmp;
-                       u8 bShortPreamble = *((bool *)val);
-                       /*  Joseph marked out for Netgear 3500 TKIP channel 7 issue.(Temporarily) */
-                       regTmp = (haldata->nCur40MhzPrimeSC) << 5;
-                       if (bShortPreamble)
-                               regTmp |= 0x80;
-
-                       rtw_write8(Adapter, REG_RRSR + 2, regTmp);
-               }
-               break;
        case HW_VAR_DM_FLAG:
                podmpriv->SupportAbility = *((u8 *)val);
                break;
@@ -1148,73 +1069,11 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
                else
                        podmpriv->SupportAbility = podmpriv->BK_SupportAbility;
                break;
-       case HW_VAR_DM_FUNC_SET:
-               if (*((u32 *)val) == DYNAMIC_ALL_FUNC_ENABLE) {
-                       podmpriv->SupportAbility =      pdmpriv->InitODMFlag;
-               } else {
-                       podmpriv->SupportAbility |= *((u32 *)val);
-               }
+       case HW_VAR_DM_FUNC_RESET:
+               podmpriv->SupportAbility = pdmpriv->InitODMFlag;
                break;
        case HW_VAR_DM_FUNC_CLR:
-               podmpriv->SupportAbility &= *((u32 *)val);
-               break;
-       case HW_VAR_AC_PARAM_BE:
-               haldata->AcParam_BE = ((u32 *)(val))[0];
-               rtw_write32(Adapter, REG_EDCA_BE_PARAM, ((u32 *)(val))[0]);
-               break;
-       case HW_VAR_ACM_CTRL:
-               {
-                       u8 acm_ctrl = *((u8 *)val);
-                       u8 AcmCtrl = rtw_read8(Adapter, REG_ACMHWCTRL);
-
-                       if (acm_ctrl > 1)
-                               AcmCtrl = AcmCtrl | 0x1;
-
-                       if (acm_ctrl & BIT(3))
-                               AcmCtrl |= AcmHw_VoqEn;
-                       else
-                               AcmCtrl &= (~AcmHw_VoqEn);
-
-                       if (acm_ctrl & BIT(2))
-                               AcmCtrl |= AcmHw_ViqEn;
-                       else
-                               AcmCtrl &= (~AcmHw_ViqEn);
-
-                       if (acm_ctrl & BIT(1))
-                               AcmCtrl |= AcmHw_BeqEn;
-                       else
-                               AcmCtrl &= (~AcmHw_BeqEn);
-
-                       rtw_write8(Adapter, REG_ACMHWCTRL, AcmCtrl);
-               }
-               break;
-       case HW_VAR_AMPDU_MIN_SPACE:
-               {
-                       u8 MinSpacingToSet;
-                       u8 SecMinSpace;
-
-                       MinSpacingToSet = *((u8 *)val);
-                       if (MinSpacingToSet <= 7) {
-                               switch (Adapter->securitypriv.dot11PrivacyAlgrthm) {
-                               case _NO_PRIVACY_:
-                               case _AES_:
-                                       SecMinSpace = 0;
-                                       break;
-                               case _WEP40_:
-                               case _WEP104_:
-                               case _TKIP_:
-                               case _TKIP_WTMIC_:
-                                       SecMinSpace = 6;
-                                       break;
-                               default:
-                                       SecMinSpace = 7;
-                                       break;
-                               }
-                               if (MinSpacingToSet < SecMinSpace)
-                                       MinSpacingToSet = SecMinSpace;
-                               rtw_write8(Adapter, REG_AMPDU_MIN_SPACE, (rtw_read8(Adapter, REG_AMPDU_MIN_SPACE) & 0xf8) | MinSpacingToSet);
-                       }
-               }
+               podmpriv->SupportAbility = 0;
                break;
        case HW_VAR_AMPDU_FACTOR:
                {
@@ -1242,221 +1101,15 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
                        }
                }
                break;
-       case HW_VAR_RXDMA_AGG_PG_TH:
-               {
-                       u8 threshold = *((u8 *)val);
-                       if (threshold == 0)
-                               threshold = USB_RXAGG_PAGE_COUNT;
-                       rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH, threshold);
-               }
-               break;
-       case HW_VAR_H2C_FW_PWRMODE:
-               {
-                       u8 psmode = (*(u8 *)val);
-
-                       /*  Forece leave RF low power mode for 1T1R to prevent conficting setting in Fw power */
-                       /*  saving sequence. 2010.06.07. Added by tynli. Suggested by SD3 yschang. */
-                       if (psmode != PS_MODE_ACTIVE)
-                               ODM_RF_Saving(podmpriv, true);
-                       rtl8188e_set_FwPwrMode_cmd(Adapter, psmode);
-               }
-               break;
-       case HW_VAR_H2C_FW_JOINBSSRPT:
-               {
-                       u8 mstatus = (*(u8 *)val);
-                       rtl8188e_set_FwJoinBssReport_cmd(Adapter, mstatus);
-               }
-               break;
-       case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
-               {
-                       u8 p2p_ps_state = (*(u8 *)val);
-                       rtl8188e_set_p2p_ps_offload_cmd(Adapter, p2p_ps_state);
-               }
-               break;
-       case HW_VAR_INITIAL_GAIN:
-               {
-                       struct rtw_dig *pDigTable = &podmpriv->DM_DigTable;
-                       u32 rx_gain = ((u32 *)(val))[0];
-
-                       if (rx_gain == 0xff) {/* restore rx gain */
-                               ODM_Write_DIG(podmpriv, pDigTable->BackupIGValue);
-                       } else {
-                               pDigTable->BackupIGValue = pDigTable->CurIGValue;
-                               ODM_Write_DIG(podmpriv, rx_gain);
-                       }
-               }
-               break;
-       case HW_VAR_RPT_TIMER_SETTING:
-               {
-                       u16 min_rpt_time = (*(u16 *)val);
-                       ODM_RA_Set_TxRPT_Time(podmpriv, min_rpt_time);
-               }
-               break;
-       case HW_VAR_ANTENNA_DIVERSITY_SELECT:
-               {
-                       u8 Optimum_antenna = (*(u8 *)val);
-                       u8 Ant;
-                       /* switch antenna to Optimum_antenna */
-                       if (haldata->CurAntenna !=  Optimum_antenna) {
-                               Ant = (Optimum_antenna == 2) ? MAIN_ANT : AUX_ANT;
-                               ODM_UpdateRxIdleAnt_88E(&haldata->odmpriv, Ant);
-
-                               haldata->CurAntenna = Optimum_antenna;
-                       }
-               }
-               break;
-       case HW_VAR_FIFO_CLEARN_UP:
-               {
-                       struct pwrctrl_priv *pwrpriv = &Adapter->pwrctrlpriv;
-                       u8 trycnt = 100;
-
-                       /* pause tx */
-                       rtw_write8(Adapter, REG_TXPAUSE, 0xff);
-
-                       /* keep sn */
-                       Adapter->xmitpriv.nqos_ssn = rtw_read16(Adapter, REG_NQOS_SEQ);
-
-                       if (!pwrpriv->bkeepfwalive) {
-                               /* RX DMA stop */
-                               rtw_write32(Adapter, REG_RXPKT_NUM, (rtw_read32(Adapter, REG_RXPKT_NUM) | RW_RELEASE_EN));
-                               do {
-                                       if (!(rtw_read32(Adapter, REG_RXPKT_NUM) & RXDMA_IDLE))
-                                               break;
-                               } while (trycnt--);
-
-                               /* RQPN Load 0 */
-                               rtw_write16(Adapter, REG_RQPN_NPQ, 0x0);
-                               rtw_write32(Adapter, REG_RQPN, 0x80000000);
-                               mdelay(10);
-                       }
-               }
-               break;
-       case HW_VAR_TX_RPT_MAX_MACID:
-               {
-                       u8 maxMacid = *val;
-                       rtw_write8(Adapter, REG_TX_RPT_CTRL + 1, maxMacid + 1);
-               }
-               break;
        case HW_VAR_H2C_MEDIA_STATUS_RPT:
                rtl8188e_set_FwMediaStatus_cmd(Adapter, (*(__le16 *)val));
                break;
-       case HW_VAR_BCN_VALID:
-               /* BCN_VALID, BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2, write 1 to clear, Clear by sw */
-               rtw_write8(Adapter, REG_TDECTRL + 2, rtw_read8(Adapter, REG_TDECTRL + 2) | BIT(0));
-               break;
-       default:
-               break;
-       }
-
-}
-
-void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
-{
-       struct hal_data_8188e *haldata = &Adapter->haldata;
-       struct odm_dm_struct *podmpriv = &haldata->odmpriv;
-
-       switch (variable) {
-       case HW_VAR_BCN_VALID:
-               /* BCN_VALID, BIT(16) of REG_TDECTRL = BIT(0) of REG_TDECTRL+2 */
-               val[0] = (BIT(0) & rtw_read8(Adapter, REG_TDECTRL + 2)) ? true : false;
-               break;
-       case HW_VAR_DM_FLAG:
-               val[0] = podmpriv->SupportAbility;
-               break;
-       case HW_VAR_FWLPS_RF_ON:
-               {
-                       /* When we halt NIC, we should check if FW LPS is leave. */
-                       if (Adapter->pwrctrlpriv.rf_pwrstate == rf_off) {
-                               /*  If it is in HW/SW Radio OFF or IPS state, we do not check Fw LPS Leave, */
-                               /*  because Fw is unload. */
-                               val[0] = true;
-                       } else {
-                               u32 valRCR;
-                               valRCR = rtw_read32(Adapter, REG_RCR);
-                               valRCR &= 0x00070000;
-                               if (valRCR)
-                                       val[0] = false;
-                               else
-                                       val[0] = true;
-                       }
-               }
-               break;
-       case HW_VAR_CHK_HI_QUEUE_EMPTY:
-               *val = ((rtw_read32(Adapter, REG_HGQ_INFORMATION) & 0x0000ff00) == 0) ? true : false;
-               break;
        default:
                break;
        }
 
 }
 
-/* Query setting of specified variable. */
-void GetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
-{
-       struct hal_data_8188e *haldata = &Adapter->haldata;
-
-       switch (eVariable) {
-       case HAL_DEF_IS_SUPPORT_ANT_DIV:
-               *((u8 *)pValue) = (haldata->AntDivCfg == 0) ? false : true;
-               break;
-       case HAL_DEF_CURRENT_ANTENNA:
-               *((u8 *)pValue) = haldata->CurAntenna;
-               break;
-       case HAL_DEF_DBG_DM_FUNC:
-               *((u32 *)pValue) = haldata->odmpriv.SupportAbility;
-               break;
-       case HAL_DEF_DBG_DUMP_RXPKT:
-               *((u8 *)pValue) = haldata->bDumpRxPkt;
-               break;
-       case HAL_DEF_DBG_DUMP_TXPKT:
-               *((u8 *)pValue) = haldata->bDumpTxPkt;
-               break;
-       default:
-               break;
-       }
-}
-
-/* Change default setting of specified variable. */
-void SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
-{
-       struct hal_data_8188e *haldata = &Adapter->haldata;
-
-       switch (eVariable) {
-       case HAL_DEF_DBG_DM_FUNC:
-               {
-                       u8 dm_func = *((u8 *)pValue);
-                       struct odm_dm_struct *podmpriv = &haldata->odmpriv;
-
-                       if (dm_func == 0) { /* disable all dynamic func */
-                               podmpriv->SupportAbility = DYNAMIC_FUNC_DISABLE;
-                       } else if (dm_func == 1) {/* disable DIG */
-                               podmpriv->SupportAbility  &= (~DYNAMIC_BB_DIG);
-                       } else if (dm_func == 2) {/* disable High power */
-                               podmpriv->SupportAbility  &= (~DYNAMIC_BB_DYNAMIC_TXPWR);
-                       } else if (dm_func == 3) {/* disable tx power tracking */
-                               podmpriv->SupportAbility  &= (~DYNAMIC_RF_CALIBRATION);
-                       } else if (dm_func == 5) {/* disable antenna diversity */
-                               podmpriv->SupportAbility  &= (~DYNAMIC_BB_ANT_DIV);
-                       } else if (dm_func == 6) {/* turn on all dynamic func */
-                               if (!(podmpriv->SupportAbility  & DYNAMIC_BB_DIG)) {
-                                       struct rtw_dig *pDigTable = &podmpriv->DM_DigTable;
-                                       pDigTable->CurIGValue = rtw_read8(Adapter, 0xc50);
-                               }
-                               podmpriv->SupportAbility = DYNAMIC_ALL_FUNC_ENABLE;
-                       }
-               }
-               break;
-       case HAL_DEF_DBG_DUMP_RXPKT:
-               haldata->bDumpRxPkt = *((u8 *)pValue);
-               break;
-       case HAL_DEF_DBG_DUMP_TXPKT:
-               haldata->bDumpTxPkt = *((u8 *)pValue);
-               break;
-       default:
-               break;
-       }
-}
-
 void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
 {
        u8 init_rate = 0;
index 673c30e..d5e6745 100644 (file)
@@ -16,7 +16,7 @@ static int usb_read(struct intf_hdl *intf, u16 value, void *data, u8 size)
        int status;
        u8 io_buf[4];
 
-       if (adapt->bSurpriseRemoved || adapt->pwrctrlpriv.pnp_bstop_trx)
+       if (adapt->bSurpriseRemoved)
                return -EPERM;
 
        status = usb_control_msg_recv(udev, 0, REALTEK_USB_VENQT_CMD_REQ,
@@ -59,7 +59,7 @@ static int usb_write(struct intf_hdl *intf, u16 value, void *data, u8 size)
        int status;
        u8 io_buf[VENDOR_CMD_MAX_DATA_LEN];
 
-       if (adapt->bSurpriseRemoved || adapt->pwrctrlpriv.pnp_bstop_trx)
+       if (adapt->bSurpriseRemoved)
                return -EPERM;
 
        memcpy(io_buf, data, size);
@@ -260,7 +260,6 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
 
                pkt_copy = netdev_alloc_skb(adapt->pnetdev, alloc_sz);
                if (pkt_copy) {
-                       pkt_copy->dev = adapt->pnetdev;
                        precvframe->pkt = pkt_copy;
                        precvframe->rx_head = pkt_copy->data;
                        precvframe->rx_end = pkt_copy->data + alloc_sz;
@@ -288,7 +287,7 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
 
                recvframe_put(precvframe, skb_len);
 
-               pkt_offset = (u16)_RND128(pkt_offset);
+               pkt_offset = (u16)round_up(pkt_offset, 128);
 
                if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */
                        if (pattrib->physt)
@@ -415,8 +414,7 @@ u32 rtw_read_port(struct adapter *adapter, u8 *rmem)
        size_t alignment = 0;
        u32 ret = _SUCCESS;
 
-       if (adapter->bDriverStopped || adapter->bSurpriseRemoved ||
-           adapter->pwrctrlpriv.pnp_bstop_trx)
+       if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
                return _FAIL;
 
        if (!precvbuf)
index 2bc18ea..7a530c7 100644 (file)
@@ -25,7 +25,6 @@ struct HAL_VERSION {
        enum HAL_CHIP_TYPE      ChipType;
        enum HAL_CUT_VERSION    CUTVersion;
        enum HAL_VENDOR         VendorType;
-       u8                      ROMVer;
 };
 
 /*  Get element */
@@ -34,10 +33,10 @@ struct HAL_VERSION {
 
 /* HAL_CHIP_TYPE_E */
 #define IS_NORMAL_CHIP(version)                                \
-       ((GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP) ? true : false)
+       (GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP)
 
 /* HAL_VENDOR_E */
 #define IS_CHIP_VENDOR_TSMC(version)                   \
-       ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC) ? true : false)
+       (GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC)
 
 #endif
index d82b217..ffb2117 100644 (file)
@@ -4,9 +4,6 @@
 #ifndef __BASIC_TYPES_H__
 #define __BASIC_TYPES_H__
 
-#define SUCCESS        0
-#define FAIL   (-1)
-
 #include <linux/types.h>
 #define NDIS_OID uint
 
@@ -14,9 +11,6 @@ typedef void (*proc_t)(void *);
 
 #define FIELD_OFFSET(s, field) ((ssize_t)&((s *)(0))->field)
 
-#define MEM_ALIGNMENT_OFFSET   (sizeof(size_t))
-#define MEM_ALIGNMENT_PADDING  (sizeof(size_t) - 1)
-
 /* port from fw */
 /*  TODO: Macros Below are Sync from SD7-Driver. It is necessary
  * to check correctness */
@@ -31,86 +25,21 @@ typedef void (*proc_t)(void *);
 /* Convert little data endian to host ordering */
 #define EF1BYTE(_val)          \
        ((u8)(_val))
-#define EF2BYTE(_val)          \
-       (le16_to_cpu(_val))
-#define EF4BYTE(_val)          \
-       (le32_to_cpu(_val))
-
-/* Read data from memory */
-#define READEF1BYTE(_ptr)      \
-       EF1BYTE(*((u8 *)(_ptr)))
-/* Read le16 data from memory and convert to host ordering */
-#define READEF2BYTE(_ptr)      \
-       EF2BYTE(*(_ptr))
-#define READEF4BYTE(_ptr)      \
-       EF4BYTE(*(_ptr))
 
-/* Write data to memory */
-#define WRITEEF1BYTE(_ptr, _val)                       \
-       do {                                            \
-               (*((u8 *)(_ptr))) = EF1BYTE(_val)       \
-       } while (0)
-/* Write le data to memory in host ordering */
-#define WRITEEF2BYTE(_ptr, _val)                       \
-       do {                                            \
-               (*((u16 *)(_ptr))) = EF2BYTE(_val)      \
-       } while (0)
-
-#define WRITEEF4BYTE(_ptr, _val)                       \
-       do {                                            \
-               (*((u32 *)(_ptr))) = EF2BYTE(_val)      \
-       } while (0)
-
-/* Create a bit mask
- * Examples:
- * BIT_LEN_MASK_32(0) => 0x00000000
- * BIT_LEN_MASK_32(1) => 0x00000001
- * BIT_LEN_MASK_32(2) => 0x00000003
- * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
- */
-#define BIT_LEN_MASK_32(__bitlen)       \
-       (0xFFFFFFFF >> (32 - (__bitlen)))
-#define BIT_LEN_MASK_16(__bitlen)       \
-       (0xFFFF >> (16 - (__bitlen)))
+/* Create a bit mask  */
 #define BIT_LEN_MASK_8(__bitlen) \
        (0xFF >> (8 - (__bitlen)))
 
-/* Create an offset bit mask
- * Examples:
- * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
- * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
- */
-#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
-       (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
-#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
-       (BIT_LEN_MASK_16(__bitlen) << (__bitoffset))
-#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
-       (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
-
 /*Description:
  * Return 4-byte value in host byte ordering from
  * 4-byte pointer in little-endian system.
  */
-#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
-       (EF4BYTE(*((__le32 *)(__pstart))))
-#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
-       (EF2BYTE(*((__le16 *)(__pstart))))
 #define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
        (EF1BYTE(*((u8 *)(__pstart))))
 
 /*Description:
 Translate subfield (continuous bits in little-endian) of 4-byte
 value to host byte ordering.*/
-#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
-       ( \
-               (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset))  & \
-               BIT_LEN_MASK_32(__bitlen) \
-       )
-#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
-       ( \
-               (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
-               BIT_LEN_MASK_16(__bitlen) \
-       )
 #define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
        ( \
                (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
index 09fc270..bba88a0 100644 (file)
@@ -26,7 +26,6 @@
 #include "rtw_eeprom.h"
 #include "sta_info.h"
 #include "rtw_mlme.h"
-#include "rtw_debug.h"
 #include "rtw_rf.h"
 #include "rtw_event.h"
 #include "rtw_led.h"
@@ -35,6 +34,7 @@
 #include "rtw_ap.h"
 #include "rtw_br_ext.h"
 #include "rtl8188e_hal.h"
+#include "rtw_fw.h"
 
 #define DRIVERVERSION  "v4.1.4_6773.20130222"
 
@@ -116,11 +116,6 @@ struct registry_priv {
 
 #define MAX_CONTINUAL_URB_ERR          4
 
-struct rt_firmware {
-       u8 *data;
-       u32 size;
-};
-
 struct dvobj_priv {
        struct adapter *if1;
 
index 3cededa..a56f3d6 100644 (file)
 
 enum hw_variables {
        HW_VAR_SET_OPMODE,
-       HW_VAR_BSSID,
        HW_VAR_BASIC_RATE,
        HW_VAR_CORRECT_TSF,
-       HW_VAR_MLME_DISCONNECT,
        HW_VAR_MLME_SITESURVEY,
-       HW_VAR_MLME_JOIN,
        HW_VAR_SLOT_TIME,
-       HW_VAR_RESP_SIFS,
-       HW_VAR_ACK_PREAMBLE,
-       HW_VAR_BCN_VALID,
        HW_VAR_DM_FLAG,
        HW_VAR_DM_FUNC_OP,
-       HW_VAR_DM_FUNC_SET,
+       HW_VAR_DM_FUNC_RESET,
        HW_VAR_DM_FUNC_CLR,
-       HW_VAR_AC_PARAM_BE,
-       HW_VAR_ACM_CTRL,
-       HW_VAR_AMPDU_MIN_SPACE,
        HW_VAR_AMPDU_FACTOR,
-       HW_VAR_RXDMA_AGG_PG_TH,
-       HW_VAR_H2C_FW_PWRMODE,
-       HW_VAR_H2C_FW_JOINBSSRPT,
-       HW_VAR_FWLPS_RF_ON,
-       HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
-       HW_VAR_INITIAL_GAIN,
-       HW_VAR_ANTENNA_DIVERSITY_SELECT,
-       HW_VAR_FIFO_CLEARN_UP,
-       HW_VAR_RPT_TIMER_SETTING,
-       HW_VAR_TX_RPT_MAX_MACID,
        HW_VAR_H2C_MEDIA_STATUS_RPT,
-       HW_VAR_CHK_HI_QUEUE_EMPTY,
-};
-
-enum hal_def_variable {
-       HAL_DEF_IS_SUPPORT_ANT_DIV,
-       HAL_DEF_CURRENT_ANTENNA,
-       HAL_DEF_DBG_DUMP_RXPKT,/* for dbg */
-       HAL_DEF_DBG_DM_FUNC,/* for dbg */
-       HAL_DEF_DBG_DUMP_TXPKT,
 };
 
 typedef s32 (*c2h_id_filter)(u8 id);
@@ -70,13 +42,9 @@ void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level);
 int rtl8188e_IOL_exec_cmds_sync(struct adapter *adapter,
                                struct xmit_frame *xmit_frame, u32 max_wating_ms, u32 bndy_cnt);
 
-void SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue);
-void GetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue);
-
 unsigned int rtl8188eu_inirp_init(struct adapter *Adapter);
 
 void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val);
-void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val);
 
 uint rtw_hal_init(struct adapter *padapter);
 uint rtw_hal_deinit(struct adapter *padapter);
index 8c20363..15636a8 100644 (file)
@@ -123,24 +123,6 @@ enum NETWORK_TYPE {
        WIRELESS_11BG_24N = (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N),
 };
 
-#define SUPPORTED_24G_NETTYPE_MSK                              \
-        (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N)
-
-#define IsSupported24G(NetType)                                        \
-       ((NetType) & SUPPORTED_24G_NETTYPE_MSK ? true : false)
-
-#define IsEnableHWCCK(NetType)                                 \
-       IsSupported24G(NetType)
-
-#define IsSupportedRxCCK(NetType) IsEnableHWCCK(NetType)
-
-#define IsSupportedTxCCK(NetType)                              \
-       ((NetType) & (WIRELESS_11B) ? true : false)
-#define IsSupportedTxOFDM(NetType)                             \
-       ((NetType) & (WIRELESS_11G) ? true : false)
-#define IsSupportedTxMCS(NetType)                              \
-       ((NetType) & (WIRELESS_11_24N) ? true : false)
-
 struct ieee_param {
        u32 cmd;
        u8 sta_addr[ETH_ALEN];
@@ -196,35 +178,6 @@ struct ieee_param {
 /* this is stolen from ipw2200 driver */
 #define IEEE_IBSS_MAC_HASH_SIZE 31
 
-struct rtw_ieee80211_hdr {
-       __le16 frame_ctl;
-       __le16 duration_id;
-       u8 addr1[ETH_ALEN];
-       u8 addr2[ETH_ALEN];
-       u8 addr3[ETH_ALEN];
-       u16 seq_ctl;
-       u8 addr4[ETH_ALEN];
-} __packed;
-
-struct rtw_ieee80211_hdr_3addr {
-       __le16 frame_ctl;
-       __le16 duration_id;
-       u8 addr1[ETH_ALEN];
-       u8 addr2[ETH_ALEN];
-       u8 addr3[ETH_ALEN];
-       u16 seq_ctl;
-} __packed;
-
-struct rtw_ieee80211_hdr_3addr_qos {
-       __le16 frame_ctl;
-       __le16 duration_id;
-       u8 addr1[ETH_ALEN];
-       u8 addr2[ETH_ALEN];
-       u8 addr3[ETH_ALEN];
-       u16 seq_ctl;
-       u16     qc;
-}  __packed;
-
 #define IEEE80211_3ADDR_LEN 24
 #define IEEE80211_4ADDR_LEN 30
 #define IEEE80211_FCS_LEN    4
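The private rtw_ieee80211_hdr* structures removed above duplicate definitions that already exist in <linux/ieee80211.h>: roughly, frame_ctl maps to frame_control, seq_ctl to seq_ctrl, and the QoS variant's qc field to qos_ctrl in struct ieee80211_qos_hdr. A short sketch of filling the standard 3-address header, assuming nothing beyond mainline helpers:

#include <linux/etherdevice.h>	/* eth_broadcast_addr(), ETH_ALEN */
#include <linux/ieee80211.h>
#include <linux/string.h>

static void demo_fill_beacon_hdr(struct ieee80211_hdr_3addr *hdr, const u8 *bssid)
{
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					 IEEE80211_STYPE_BEACON);
	hdr->duration_id = 0;
	eth_broadcast_addr(hdr->addr1);		/* DA: broadcast */
	memcpy(hdr->addr2, bssid, ETH_ALEN);	/* SA */
	memcpy(hdr->addr3, bssid, ETH_ALEN);	/* BSSID */
	hdr->seq_ctrl = 0;
}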
@@ -636,24 +589,8 @@ static inline int is_broadcast_mac_addr(const u8 *addr)
 
 #define MAXTID 16
 
-#define IEEE_A     (1<<0)
-#define IEEE_B     (1<<1)
-#define IEEE_G     (1<<2)
-#define IEEE_MODE_MASK    (IEEE_A|IEEE_B|IEEE_G)
-
 /* Action category code */
 enum rtw_ieee80211_category {
-       RTW_WLAN_CATEGORY_SPECTRUM_MGMT = 0,
-       RTW_WLAN_CATEGORY_QOS = 1,
-       RTW_WLAN_CATEGORY_DLS = 2,
-       RTW_WLAN_CATEGORY_BACK = 3,
-       RTW_WLAN_CATEGORY_PUBLIC = 4, /* IEEE 802.11 public action frames */
-       RTW_WLAN_CATEGORY_RADIO_MEASUREMENT  = 5,
-       RTW_WLAN_CATEGORY_FT = 6,
-       RTW_WLAN_CATEGORY_HT = 7,
-       RTW_WLAN_CATEGORY_SA_QUERY = 8,
-       RTW_WLAN_CATEGORY_TDLS = 12,
-       RTW_WLAN_CATEGORY_WMM = 17,
        RTW_WLAN_CATEGORY_P2P = 0x7f,/* P2P action frames */
 };
 
index 1902aa4..f131e17 100644 (file)
@@ -98,22 +98,6 @@ struct odm_per_pkt_info {
        bool    bPacketBeacon;
 };
 
-enum odm_ability {
-       /*  BB Team */
-       ODM_DIG                 = 0x00000001,
-       ODM_HIGH_POWER          = 0x00000002,
-       ODM_CCK_CCA_TH          = 0x00000004,
-       ODM_FA_STATISTICS       = 0x00000008,
-       ODM_RAMASK              = 0x00000010,
-       ODM_RSSI_MONITOR        = 0x00000020,
-       ODM_SW_ANTDIV           = 0x00000040,
-       ODM_HW_ANTDIV           = 0x00000080,
-       ODM_BB_PWRSV            = 0x00000100,
-       ODM_2TPATHDIV           = 0x00000200,
-       ODM_1TPATHDIV           = 0x00000400,
-       ODM_PSD2AFH             = 0x00000800
-};
-
 /*  2011/10/20 MH Define Common info enum for all team. */
 
 enum odm_common_info_def {
@@ -137,19 +121,6 @@ enum odm_ability_def {
 
 # define ODM_ITRF_USB 0x2
 
-/*  ODM_CMNINFO_OP_MODE */
-enum odm_operation_mode {
-       ODM_NO_LINK             = BIT(0),
-       ODM_LINK                = BIT(1),
-       ODM_SCAN                = BIT(2),
-       ODM_POWERSAVE           = BIT(3),
-       ODM_AP_MODE             = BIT(4),
-       ODM_CLIENT_MODE         = BIT(5),
-       ODM_AD_HOC              = BIT(6),
-       ODM_WIFI_DIRECT         = BIT(7),
-       ODM_WIFI_DISPLAY        = BIT(8),
-};
-
 /*  ODM_CMNINFO_WM_MODE */
 enum odm_wireless_mode {
        ODM_WM_UNKNOW   = 0x0,
index fca8f3d..f1a7036 100644 (file)
@@ -77,10 +77,6 @@ void *rtw_malloc2d(int h, int w, int size);
                spin_lock_init(&((q)->lock));                   \
        } while (0)
 
-u32  rtw_systime_to_ms(u32 systime);
-u32  rtw_ms_to_systime(u32 ms);
-s32  rtw_get_passing_time_ms(u32 start);
-
 void rtw_usleep_os(int us);
 
 static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer)
@@ -94,49 +90,6 @@ static inline void flush_signals_thread(void)
                flush_signals(current);
 }
 
-#define _RND(sz, r) ((((sz)+((r)-1))/(r))*(r))
-#define RND4(x)        (((x >> 2) + (((x & 3) == 0) ?  0: 1)) << 2)
-
-static inline u32 _RND4(u32 sz)
-{
-       u32     val;
-
-       val = ((sz >> 2) + ((sz & 3) ? 1: 0)) << 2;
-       return val;
-}
-
-static inline u32 _RND8(u32 sz)
-{
-       u32     val;
-
-       val = ((sz >> 3) + ((sz & 7) ? 1: 0)) << 3;
-       return val;
-}
-
-static inline u32 _RND128(u32 sz)
-{
-       u32     val;
-
-       val = ((sz >> 7) + ((sz & 127) ? 1: 0)) << 7;
-       return val;
-}
-
-static inline u32 _RND256(u32 sz)
-{
-       u32     val;
-
-       val = ((sz >> 8) + ((sz & 255) ? 1: 0)) << 8;
-       return val;
-}
-
-static inline u32 _RND512(u32 sz)
-{
-       u32     val;
-
-       val = ((sz >> 9) + ((sz & 511) ? 1: 0)) << 9;
-       return val;
-}
-
 struct rtw_netdev_priv_indicator {
        void *priv;
        u32 sizeof_priv;
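The removed _RND4/_RND8/_RND128/_RND256/_RND512 helpers all round an integer up to a power-of-two multiple, which is exactly round_up() from <linux/math.h>; the pointer flavour used in the xmit paths earlier in this diff is PTR_ALIGN() from <linux/align.h>. A minimal sketch of the correspondence, assuming only those mainline macros:

#include <linux/align.h>	/* PTR_ALIGN() */
#include <linux/math.h>		/* round_up() */
#include <linux/types.h>

static void demo_rounding(void)
{
	u32 sz = 61;
	u8 buf[16];

	/* old _RND8(sz): ((sz >> 3) + ((sz & 7) ? 1 : 0)) << 3 == 64 */
	u32 sz8 = round_up(sz, 8);

	/* old RND4((size_t)p), cast back to a pointer */
	u8 *p4 = PTR_ALIGN(&buf[3], 4);

	(void)sz8;
	(void)p4;
}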
index 82cb4f7..d2a069d 100644 (file)
@@ -160,9 +160,6 @@ struct hal_data_8188e {
        u8      AntDivCfg;
        u8      TRxAntDivType;
 
-       u8      bDumpRxPkt;/* for debug */
-       u8      bDumpTxPkt;/* for debug */
-
        u8      OutEpQueueSel;
        u8      OutEpNumber;
 
index edae053..ef42c4b 100644 (file)
@@ -998,13 +998,9 @@ Current IOREG MAP
 #define STOP_BCNQ              BIT(6)
 
 /* 2 ACMHWCTRL */
-#define        AcmHw_HwEn              BIT(0)
-#define        AcmHw_BeqEn             BIT(1)
-#define        AcmHw_ViqEn             BIT(2)
-#define        AcmHw_VoqEn             BIT(3)
-#define        AcmHw_BeqStatus         BIT(4)
-#define        AcmHw_ViqStatus         BIT(5)
-#define        AcmHw_VoqStatus         BIT(6)
+#define ACMHW_BEQEN            BIT(1)
+#define ACMHW_VIQEN            BIT(2)
+#define ACMHW_VOQEN            BIT(3)
 
 /*     0x0600h ~ 0x07FFh       WMAC Configuration */
 /* 2APSD_CTRL */
diff --git a/drivers/staging/r8188eu/include/rtw_debug.h b/drivers/staging/r8188eu/include/rtw_debug.h
deleted file mode 100644 (file)
index 01a7d98..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTW_DEBUG_H__
-#define __RTW_DEBUG_H__
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-#define _drv_always_                   1
-#define _drv_emerg_                    2
-#define _drv_alert_                    3
-#define _drv_crit_                     4
-#define _drv_err_                      5
-#define        _drv_warning_                   6
-#define _drv_notice_                   7
-#define _drv_info_                     8
-#define        _drv_debug_                     9
-
-#define _module_rtl871x_xmit_c_                BIT(0)
-#define _module_xmit_osdep_c_          BIT(1)
-#define _module_rtl871x_recv_c_                BIT(2)
-#define _module_recv_osdep_c_          BIT(3)
-#define _module_rtl871x_mlme_c_                BIT(4)
-#define _module_mlme_osdep_c_          BIT(5)
-#define _module_rtl871x_sta_mgt_c_     BIT(6)
-#define _module_rtl871x_cmd_c_         BIT(7)
-#define _module_cmd_osdep_c_           BIT(8)
-#define _module_rtl871x_io_c_          BIT(9)
-#define _module_io_osdep_c_            BIT(10)
-#define _module_os_intfs_c_            BIT(11)
-#define _module_rtl871x_security_c_    BIT(12)
-#define _module_rtl871x_eeprom_c_      BIT(13)
-#define _module_hal_init_c_            BIT(14)
-#define _module_hci_hal_init_c_                BIT(15)
-#define _module_rtl871x_ioctl_c_       BIT(16)
-#define _module_rtl871x_ioctl_set_c_   BIT(17)
-#define _module_rtl871x_ioctl_query_c_ BIT(18)
-#define _module_rtl871x_pwrctrl_c_     BIT(19)
-#define _module_hci_intfs_c_           BIT(20)
-#define _module_hci_ops_c_             BIT(21)
-#define _module_osdep_service_c_       BIT(22)
-#define _module_mp_                    BIT(23)
-#define _module_hci_ops_os_c_          BIT(24)
-#define _module_rtl871x_ioctl_os_c     BIT(25)
-#define _module_rtl8712_cmd_c_         BIT(26)
-#define        _module_rtl8192c_xmit_c_        BIT(27)
-#define _module_hal_xmit_c_            BIT(28)
-#define _module_efuse_                 BIT(29)
-#define _module_rtl8712_recv_c_                BIT(30)
-#define _module_rtl8712_led_c_         BIT(31)
-
-#define DRIVER_PREFIX  "R8188EU: "
-
-#endif /* __RTW_DEBUG_H__ */
index 3e8d3bb..d8d48ac 100644 (file)
 
 struct eeprom_priv {
        u8              bautoload_fail_flag;
-       u8              bloadfile_fail_flag;
-       u8              bloadmac_fail_flag;
        u8              mac_addr[ETH_ALEN] __aligned(2); /* PermanentAddress */
-       u16             channel_plan;
        u8              EepromOrEfuse;
        u8              efuse_eeprom_data[HWSET_MAX_SIZE_512] __aligned(4);
 };
index c4b1a83..8f74157 100644 (file)
@@ -4,6 +4,11 @@
 #ifndef __RTW_FW_H__
 #define __RTW_FW_H__
 
+struct rt_firmware {
+       u8 *data;
+       u32 size;
+};
+
 #include "drv_types.h"
 
 int rtl8188e_firmware_download(struct adapter *padapter);
index a36bd73..c704f30 100644 (file)
@@ -7,86 +7,7 @@
 #include "osdep_service.h"
 #include "drv_types.h"
 
-#ifndef OID_802_11_CAPABILITY
-       #define OID_802_11_CAPABILITY   0x0d010122
-#endif
-
-#ifndef OID_802_11_PMKID
-       #define OID_802_11_PMKID        0x0d010123
-#endif
-
-/*  For DDK-defined OIDs */
-#define OID_NDIS_SEG1  0x00010100
-#define OID_NDIS_SEG2  0x00010200
-#define OID_NDIS_SEG3  0x00020100
-#define OID_NDIS_SEG4  0x01010100
-#define OID_NDIS_SEG5  0x01020100
-#define OID_NDIS_SEG6  0x01020200
-#define OID_NDIS_SEG7  0xFD010100
-#define OID_NDIS_SEG8  0x0D010100
-#define OID_NDIS_SEG9  0x0D010200
-#define OID_NDIS_SEG10 0x0D020200
-
-#define SZ_OID_NDIS_SEG1       23
-#define SZ_OID_NDIS_SEG2       3
-#define SZ_OID_NDIS_SEG3       6
-#define SZ_OID_NDIS_SEG4       6
-#define SZ_OID_NDIS_SEG5       4
-#define SZ_OID_NDIS_SEG6       8
-#define SZ_OID_NDIS_SEG7       7
-#define SZ_OID_NDIS_SEG8       36
-#define SZ_OID_NDIS_SEG9       24
-#define SZ_OID_NDIS_SEG10      19
-
-/*  For Realtek-defined OIDs */
-#define OID_MP_SEG1            0xFF871100
-#define OID_MP_SEG2            0xFF818000
-
-#define OID_MP_SEG3            0xFF818700
-#define OID_MP_SEG4            0xFF011100
-
-enum oid_type {
-       QUERY_OID,
-       SET_OID
-};
-
-struct oid_funs_node {
-       unsigned int oid_start; /* the starting number for OID */
-       unsigned int oid_end; /* the ending number for OID */
-       struct oid_obj_priv *node_array;
-       unsigned int array_sz; /* the size of node_array */
-       int query_counter; /* count the number of query hits for this segment */
-       int set_counter; /* count the number of set hits for this segment */
-};
-
-struct oid_par_priv {
-       void            *adapter_context;
-       NDIS_OID        oid;
-       void            *information_buf;
-       u32             information_buf_len;
-       u32             *bytes_rw;
-       u32             *bytes_needed;
-       enum oid_type   type_of_oid;
-       u32             dbg;
-};
-
-struct oid_obj_priv {
-       unsigned char   dbg; /*  0: without OID debug message
-                             *  1: with OID debug message */
-       int (*oidfuns)(struct oid_par_priv *poid_par_priv);
-};
-
 extern struct iw_handler_def  rtw_handlers_def;
-
-int drv_query_info(struct  net_device *miniportadaptercontext, NDIS_OID oid,
-                  void *informationbuffer, u32 informationbufferlength,
-                  u32 *byteswritten, u32 *bytesneeded);
-
-int drv_set_info(struct  net_device *MiniportAdapterContext,
-                NDIS_OID oid, void *informationbuffer,
-                u32 informationbufferlength, u32 *bytesread,
-                u32 *bytesneeded);
-
 extern int ui_pid[3];
 
 #endif /*  #ifndef __INC_CEINFO_ */
index 42d850f..d816684 100644 (file)
@@ -363,8 +363,6 @@ struct mlme_priv {
 
        u8 *assoc_req;
        u32 assoc_req_len;
-       u8 *assoc_rsp;
-       u32 assoc_rsp_len;
 
        /* Number of associated Non-ERP stations (i.e., stations using 802.11b
         * in 802.11g BSS) */
@@ -558,13 +556,9 @@ void rtw_scan_timeout_handler(struct adapter *adapter);
 #define rtw_set_scan_deny_timer_hdl(adapter) do {} while (0)
 #define rtw_set_scan_deny(adapter, ms) do {} while (0)
 
-int _rtw_init_mlme_priv(struct adapter *padapter);
-
 void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv);
 
-void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv);
-
- struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv);
+struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv);
 
 void _rtw_free_network(struct mlme_priv *pmlmepriv,
                       struct wlan_network *pnetwork, u8 isfreeall);
@@ -596,7 +590,10 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network);
 void rtw_set_roaming(struct adapter *adapter, u8 to_roaming);
 u8 rtw_to_roaming(struct adapter *adapter);
 
+void rtw_set_max_rpt_macid(struct adapter *adapter, u8 macid);
 void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta,
                              u32 mstatus);
 
+u8 rtw_current_antenna(struct adapter *adapter);
+
 #endif /* __RTL871X_MLME_H_ */
index 0c555ea..573d65b 100644 (file)
 
 #define REAUTH_LIMIT   (4)
 #define REASSOC_LIMIT  (4)
-#define READDBA_LIMIT  (2)
-
-#define ROAMING_LIMIT  8
 
 #define        DYNAMIC_FUNC_DISABLE                    (0x0)
 
 /*  ====== ODM_ABILITY_E ======== */
 /*  BB ODM section BIT 0-15 */
 #define        DYNAMIC_BB_DIG                          BIT(0)
-#define        DYNAMIC_BB_RA_MASK                      BIT(1)
-#define        DYNAMIC_BB_DYNAMIC_TXPWR        BIT(2)
-#define        DYNAMIC_BB_BB_FA_CNT                    BIT(3)
-
-#define                DYNAMIC_BB_RSSI_MONITOR         BIT(4)
-#define                DYNAMIC_BB_CCK_PD                       BIT(5)
-#define                DYNAMIC_BB_ANT_DIV                      BIT(6)
-#define                DYNAMIC_BB_PWR_SAVE                     BIT(7)
-#define                DYNAMIC_BB_PWR_TRA                      BIT(8)
-#define                DYNAMIC_BB_RATE_ADAPTIVE                BIT(9)
-#define                DYNAMIC_BB_PATH_DIV                     BIT(10)
-#define                DYNAMIC_BB_PSD                          BIT(11)
-
-/*  MAC DM section BIT 16-23 */
-#define                DYNAMIC_MAC_EDCA_TURBO          BIT(16)
-#define                DYNAMIC_MAC_EARLY_MODE          BIT(17)
-
-/*  RF ODM section BIT 24-31 */
-#define                DYNAMIC_RF_TX_PWR_TRACK         BIT(24)
-#define                DYNAMIC_RF_RX_GAIN_TRACK                BIT(25)
-#define                DYNAMIC_RF_CALIBRATION          BIT(26)
 
 #define                DYNAMIC_ALL_FUNC_ENABLE         0xFFFFFFF
 
@@ -208,17 +184,7 @@ enum SCAN_STATE {
        SCAN_STATE_MAX,
 };
 
-struct mlme_handler {
-       unsigned int   num;
-       char *str;
-       unsigned int (*func)(struct adapter *adapt, struct recv_frame *frame);
-};
-
-struct action_handler {
-       unsigned int   num;
-       char* str;
-       unsigned int (*func)(struct adapter *adapt, struct recv_frame *frame);
-};
+typedef unsigned int (*mlme_handler)(struct adapter *adapt, struct recv_frame *frame);
 
 struct ss_res {
        int     state;
@@ -419,7 +385,7 @@ struct mlme_ext_priv {
        u8 active_keep_alive_check;
 };
 
-int init_mlme_ext_priv(struct adapter *adapter);
+void init_mlme_ext_priv(struct adapter *adapter);
 int init_hw_mlme_ext(struct adapter *padapter);
 void free_mlme_ext_priv (struct mlme_ext_priv *pmlmeext);
 extern void init_mlme_ext_timer(struct adapter *padapter);
@@ -434,7 +400,6 @@ void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen);
 
 void Save_DM_Func_Flag(struct adapter *padapter);
 void Restore_DM_Func_Flag(struct adapter *padapter);
-void Switch_DM_Func(struct adapter *padapter, u32 mode, u8 enable);
 
 void Set_MSR(struct adapter *padapter, u8 type);
 
@@ -563,6 +528,8 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr,
                     unsigned char action, unsigned short status);
 unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr);
 unsigned int send_beacon(struct adapter *padapter);
+bool get_beacon_valid_bit(struct adapter *adapter);
+void clear_beacon_valid_bit(struct adapter *adapter);
 
 void start_clnt_assoc(struct adapter *padapter);
 void start_clnt_auth(struct adapter *padapter);
@@ -594,20 +561,10 @@ unsigned int OnDeAuth(struct adapter *padapter,
 unsigned int OnAction(struct adapter *padapter,
                      struct recv_frame *precv_frame);
 
-unsigned int on_action_spct(struct adapter *padapter,
-                           struct recv_frame *precv_frame);
-unsigned int OnAction_qos(struct adapter *padapter,
-                         struct recv_frame *precv_frame);
-unsigned int OnAction_dls(struct adapter *padapter,
-                         struct recv_frame *precv_frame);
 unsigned int OnAction_back(struct adapter *padapter,
                           struct recv_frame *precv_frame);
 unsigned int on_action_public(struct adapter *padapter,
                              struct recv_frame *precv_frame);
-unsigned int OnAction_ht(struct adapter *padapter,
-                        struct recv_frame *precv_frame);
-unsigned int OnAction_wmm(struct adapter *padapter,
-                         struct recv_frame *precv_frame);
 unsigned int OnAction_p2p(struct adapter *padapter,
                          struct recv_frame *precv_frame);
 
@@ -635,8 +592,6 @@ void addba_timer_hdl(struct sta_info *psta);
 bool cckrates_included(unsigned char *rate, int ratelen);
 bool cckratesonly_included(unsigned char *rate, int ratelen);
 
-void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr);
-
 void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len);
 void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext);
 
@@ -769,9 +724,6 @@ struct C2HEvent_Header {
        unsigned int rsvd;
 };
 
-void rtw_dummy_event_callback(struct adapter *adapter, u8 *pbuf);
-void rtw_fwdbg_event_callback(struct adapter *adapter, u8 *pbuf);
-
 enum rtw_c2h_event {
        GEN_EVT_CODE(_Read_MACREG) = 0, /*0*/
        GEN_EVT_CODE(_Read_BBREG),
@@ -806,7 +758,7 @@ enum rtw_c2h_event {
 #ifdef _RTW_MLME_EXT_C_
 
 static struct fwevent wlanevents[] = {
-       {0, rtw_dummy_event_callback},  /*0*/
+       {0, NULL},      /*0*/
        {0, NULL},
        {0, NULL},
        {0, NULL},
@@ -820,12 +772,12 @@ static struct fwevent wlanevents[] = {
        {sizeof(struct stassoc_event), &rtw_stassoc_event_callback},
        {sizeof(struct stadel_event), &rtw_stadel_event_callback},
        {0, NULL},
-       {0, rtw_dummy_event_callback},
+       {0, NULL},
        {0, NULL},      /*15*/
        {0, NULL},
        {0, NULL},
        {0, NULL},
-       {0, rtw_fwdbg_event_callback},
+       {0, NULL},
        {0, NULL},       /*20*/
        {0, NULL},
        {0, NULL},
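
Aside: the two hunks above leave NULL in the event table where the dummy and fwdbg callbacks used to sit, which assumes the event dispatcher skips NULL entries. A minimal standalone sketch of that table-driven pattern; the types and names below are illustrative stand-ins, not the driver's own:

#include <stdio.h>

struct adapter;                         /* opaque stand-in */

struct fwevent {
        unsigned int parmsize;
        void (*event_callback)(struct adapter *adapter, unsigned char *pbuf);
};

static void stassoc_cb(struct adapter *adapter, unsigned char *pbuf)
{
        (void)adapter; (void)pbuf;
        printf("station associated\n");
}

/* Unhandled event codes simply carry a NULL callback. */
static const struct fwevent events[] = {
        { 0, NULL },                    /* 0: formerly a dummy callback */
        { 16, stassoc_cb },             /* 1: a handled event */
};

static void dispatch(struct adapter *adapter, unsigned int code, unsigned char *buf)
{
        if (code >= sizeof(events) / sizeof(events[0]))
                return;
        if (!events[code].event_callback)       /* dispatcher must tolerate NULL */
                return;
        events[code].event_callback(adapter, buf);
}

int main(void)
{
        dispatch(NULL, 0, NULL);        /* silently ignored */
        dispatch(NULL, 1, NULL);        /* runs the handler */
        return 0;
}
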
index 7c3cb89..6e9fdd6 100644 (file)
@@ -47,16 +47,8 @@ struct pwrctrl_priv {
        u8      smart_ps;
        u8      bcn_ant_mode;
 
-       u32     alives;
-       struct work_struct cpwm_event;
        bool    bpower_saving;
 
-       u8      reg_rfoff;
-       u8      reg_pdnmode; /* powerdown mode */
-
-       /* RF OFF Level */
-       u32     cur_ps_level;
-       u32     reg_rfps_level;
        uint    ips_enter_cnts;
        uint    ips_leave_cnts;
 
@@ -64,7 +56,7 @@ struct pwrctrl_priv {
        u8      ips_mode_req;   /*  used to accept the mode setting request,
                                 *  will update to ipsmode later */
        uint bips_processing;
-       u32 ips_deny_time; /* will deny IPS when system time less than this */
+       unsigned long ips_deny_time; /* will deny IPS when system time less than this */
        u8 ps_processing; /* temp used to mark whether in rtw_ps_processor */
 
        u8      bLeisurePs;
@@ -72,21 +64,15 @@ struct pwrctrl_priv {
        u8      power_mgnt;
        u8      bFwCurrentInPSMode;
        u32     DelayLPSLastTimeStamp;
-       s32             pnp_current_pwr_state;
-       u8              pnp_bstop_trx;
 
        u8              bInSuspend;
        u8              bSupportRemoteWakeup;
        struct timer_list pwr_state_check_timer;
        int             pwr_state_check_interval;
-       u8              pwr_state_check_cnts;
-
-       int             ps_flag;
 
        enum rt_rf_power_state  rf_pwrstate;/* cur power state */
        enum rt_rf_power_state  change_rfpwrstate;
 
-       u8              wepkeymask;
        u8              bkeepfwalive;
 };
 
@@ -109,6 +95,7 @@ struct pwrctrl_priv {
 
 void rtw_init_pwrctrl_priv(struct adapter *adapter);
 
+void rtw_set_firmware_ps_mode(struct adapter *adapter, u8 mode);
 void rtw_set_ps_mode(struct adapter *adapter, u8 ps_mode, u8 smart_ps,
                     u8 bcn_ant_mode);
 void LeaveAllPowerSaveMode(struct adapter *adapter);
@@ -117,14 +104,10 @@ int ips_leave(struct adapter *padapter);
 
 void rtw_ps_processor(struct adapter *padapter);
 
-s32 LPS_RF_ON_check(struct adapter *adapter, u32 delay_ms);
 void LPS_Enter(struct adapter *adapter);
 void LPS_Leave(struct adapter *adapter);
 
-int _rtw_pwr_wakeup(struct adapter *adapter, u32 ips_defer_ms,
-                   const char *caller);
-#define rtw_pwr_wakeup(adapter)                                                \
-        _rtw_pwr_wakeup(adapter, RTW_PWR_STATE_CHK_INTERVAL, __func__)
+int rtw_pwr_wakeup(struct adapter *adapter);
 int rtw_pm_set_ips(struct adapter *adapter, u8 mode);
 int rtw_pm_set_lps(struct adapter *adapter, u8 mode);
 
index 4ac4e6b..66d240a 100644 (file)
@@ -80,7 +80,6 @@ struct rx_pkt_attrib {
        u8      drvinfo_sz;
        u8      shift_sz;
        u8      hdrlen; /* the WLAN Header Len */
-       u8      to_fr_ds;
        u8      amsdu;
        bool    qos;
        u8      priority;
@@ -167,7 +166,6 @@ struct recv_priv {
        uint  rx_largepacket_crcerr;
        uint  rx_smallpacket_crcerr;
        uint  rx_middlepacket_crcerr;
-       struct semaphore allrxreturnevt;
        u8      rx_pending_cnt;
 
        struct tasklet_struct recv_tasklet;
@@ -230,7 +228,6 @@ struct recv_buf {
 struct recv_frame {
        struct list_head list;
        struct sk_buff   *pkt;
-       struct sk_buff   *pkt_newalloc;
        struct adapter  *adapter;
        u8 fragcnt;
        int frame_tag;
index b2df148..034a9f8 100644 (file)
@@ -198,7 +198,7 @@ struct xmit_buf {
        u32  len;
        struct submit_ctx *sctx;
        u32     ff_hwaddr;
-       struct urb *pxmit_urb[8];
+       struct urb *pxmit_urb;
        dma_addr_t dma_transfer_addr;   /* (in) dma addr for transfer_buffer */
        u8 bpending[8];
        int last[8];
@@ -341,7 +341,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
 void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
 s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
 void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits(struct adapter *padapter);
+int rtw_alloc_hwxmits(struct adapter *padapter);
 void rtw_free_hwxmits(struct adapter *padapter);
 s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
 
index b7e6b1f..4112c83 100644 (file)
@@ -48,7 +48,6 @@ struct        stainfo_stats   {
        u64 rx_ctrl_pkts;
        u64 rx_data_pkts;
 
-       u64     last_rx_mgnt_pkts;
        u64 last_rx_beacon_pkts;
        u64 last_rx_probereq_pkts;
        u64 last_rx_probersp_pkts;
@@ -230,7 +229,6 @@ struct sta_info {
 
 #define sta_update_last_rx_pkts(sta) \
 do { \
-       sta->sta_stats.last_rx_mgnt_pkts = sta->sta_stats.rx_mgnt_pkts; \
        sta->sta_stats.last_rx_beacon_pkts = sta->sta_stats.rx_beacon_pkts; \
        sta->sta_stats.last_rx_probereq_pkts = sta->sta_stats.rx_probereq_pkts; \
        sta->sta_stats.last_rx_probersp_pkts = sta->sta_stats.rx_probersp_pkts; \
index 14526fc..ddc46cb 100644 (file)
  * @return true:
  * @return false:
  */
-static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
+static inline bool rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
 {
-       int ret = false;
-       int value;
-       value = atomic_inc_return(&dvobj->continual_urb_error);
+       int value = atomic_inc_return(&dvobj->continual_urb_error);
+
        if (value > MAX_CONTINUAL_URB_ERR)
-               ret = true;
+               return true;
 
-       return ret;
+       return false;
 }
 
 /*
@@ -47,19 +46,14 @@ static inline void rtw_reset_continual_urb_error(struct dvobj_priv *dvobj)
 #define USB_HIGH_SPEED_BULK_SIZE       512
 #define USB_FULL_SPEED_BULK_SIZE       64
 
-static inline u8 rtw_usb_bulk_size_boundary(struct adapter *padapter,
-                                           int buf_len)
+static inline bool rtw_usb_bulk_size_boundary(struct adapter *padapter, int buf_len)
 {
-       u8 rst = true;
        struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
 
        if (pdvobjpriv->pusbdev->speed == USB_SPEED_HIGH)
-               rst = (0 == (buf_len) % USB_HIGH_SPEED_BULK_SIZE) ?
-                     true : false;
+               return buf_len % USB_HIGH_SPEED_BULK_SIZE == 0;
        else
-               rst = (0 == (buf_len) % USB_FULL_SPEED_BULK_SIZE) ?
-                     true : false;
-       return rst;
+               return buf_len % USB_FULL_SPEED_BULK_SIZE == 0;
 }
 
 #endif /* __USB_OPS_H_ */
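
The two converted helpers above now return bool and hand back the comparison directly. In isolation the idiom looks like this (constants and names trimmed down for the example):

#include <stdbool.h>
#include <stdio.h>

#define USB_HIGH_SPEED_BULK_SIZE 512

/* Return the comparison itself; no intermediate flag, no ?: true : false. */
static bool is_bulk_aligned(int buf_len)
{
        return buf_len % USB_HIGH_SPEED_BULK_SIZE == 0;
}

int main(void)
{
        printf("%d %d\n", is_bulk_aligned(1024), is_bulk_aligned(1000));  /* 1 0 */
        return 0;
}
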
index 3e777ca..f271e93 100644 (file)
@@ -6,16 +6,12 @@
 
 #include "osdep_service.h"
 #include "drv_types.h"
-#include "usb_vendor_req.h"
 
 extern char *rtw_initmac;
 extern int rtw_mc2u_disable;
 
 #define USBD_HALTED(Status) ((u32)(Status) >> 30 == 3)
 
-u8 usbvendorrequest(struct dvobj_priv *pdvobjpriv, enum bt_usb_request brequest,
-                   enum rt_usb_wvalue wvalue, u8 windex, void *data,
-                   u8 datalen, u8 isdirectionin);
 void netdev_br_init(struct net_device *netdev);
 void dhcp_flag_bcast(struct adapter *priv, struct sk_buff *skb);
 void *scdb_findEntry(struct adapter *priv, unsigned char *ipAddr);
diff --git a/drivers/staging/r8188eu/include/usb_vendor_req.h b/drivers/staging/r8188eu/include/usb_vendor_req.h
deleted file mode 100644 (file)
index 7337b1b..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef _USB_VENDOR_REQUEST_H_
-#define _USB_VENDOR_REQUEST_H_
-
-/* 4   Set/Get Register related wIndex/Data */
-#define        RT_USB_RESET_MASK_OFF           0
-#define        RT_USB_RESET_MASK_ON            1
-#define        RT_USB_SLEEP_MASK_OFF           0
-#define        RT_USB_SLEEP_MASK_ON            1
-#define        RT_USB_LDO_ON                           1
-#define        RT_USB_LDO_OFF                          0
-
-/* 4   Set/Get SYSCLK related  wValue or Data */
-#define        RT_USB_SYSCLK_32KHZ             0
-#define        RT_USB_SYSCLK_40MHZ             1
-#define        RT_USB_SYSCLK_60MHZ             2
-
-enum bt_usb_request {
-       RT_USB_SET_REGISTER             = 1,
-       RT_USB_SET_SYSCLK               = 2,
-       RT_USB_GET_SYSCLK               = 3,
-       RT_USB_GET_REGISTER             = 4
-};
-
-enum rt_usb_wvalue {
-       RT_USB_RESET_MASK       =       1,
-       RT_USB_SLEEP_MASK       =       2,
-       RT_USB_USB_HRCPWM       =       3,
-       RT_USB_LDO                      =       4,
-       RT_USB_BOOT_TYPE        =       5
-};
-
-#endif
index c331be1..0254310 100644 (file)
@@ -4,25 +4,14 @@
 #ifndef _WIFI_H_
 #define _WIFI_H_
 
+#include <linux/bits.h>
 #include <linux/ieee80211.h>
 
-#ifdef BIT
-/* error       "BIT define occurred earlier elsewhere!\n" */
-#undef BIT
-#endif
-#define BIT(x) (1 << (x))
-
 #define WLAN_ETHHDR_LEN                14
 #define WLAN_HDR_A3_LEN                24
 #define WLAN_HDR_A3_QOS_LEN    26
 #define WLAN_SSID_MAXLEN       32
 
-enum WIFI_FRAME_TYPE {
-       WIFI_CTRL_TYPE =        (BIT(2)),
-       WIFI_DATA_TYPE =        (BIT(3)),
-       WIFI_QOS_DATA_TYPE      = (BIT(7)|BIT(3)),      /*  QoS Data */
-};
-
 enum WIFI_FRAME_SUBTYPE {
        /*  below is for mgt frame */
        WIFI_ASSOCREQ       = (0 | IEEE80211_FTYPE_MGMT),
@@ -39,24 +28,15 @@ enum WIFI_FRAME_SUBTYPE {
        WIFI_ACTION         = (BIT(7) | BIT(6) | BIT(4) | IEEE80211_FTYPE_MGMT),
 
        /*  below is for control frame */
-       WIFI_PSPOLL         = (BIT(7) | BIT(5) | WIFI_CTRL_TYPE),
-       WIFI_RTS            = (BIT(7) | BIT(5) | BIT(4) | WIFI_CTRL_TYPE),
-       WIFI_CTS            = (BIT(7) | BIT(6) | WIFI_CTRL_TYPE),
-       WIFI_ACK            = (BIT(7) | BIT(6) | BIT(4) | WIFI_CTRL_TYPE),
-       WIFI_CFEND          = (BIT(7) | BIT(6) | BIT(5) | WIFI_CTRL_TYPE),
-       WIFI_CFEND_CFACK    = (BIT(7) | BIT(6) | BIT(5) | BIT(4) |
-       WIFI_CTRL_TYPE),
+       WIFI_PSPOLL         = (BIT(7) | BIT(5) | IEEE80211_FTYPE_CTL),
 
        /*  below is for data frame */
-       WIFI_DATA           = (0 | WIFI_DATA_TYPE),
-       WIFI_DATA_CFACK     = (BIT(4) | WIFI_DATA_TYPE),
-       WIFI_DATA_CFPOLL    = (BIT(5) | WIFI_DATA_TYPE),
-       WIFI_DATA_CFACKPOLL = (BIT(5) | BIT(4) | WIFI_DATA_TYPE),
-       WIFI_DATA_NULL      = (BIT(6) | WIFI_DATA_TYPE),
-       WIFI_CF_ACK         = (BIT(6) | BIT(4) | WIFI_DATA_TYPE),
-       WIFI_CF_POLL        = (BIT(6) | BIT(5) | WIFI_DATA_TYPE),
-       WIFI_CF_ACKPOLL     = (BIT(6) | BIT(5) | BIT(4) | WIFI_DATA_TYPE),
-       WIFI_QOS_DATA_NULL      = (BIT(6) | WIFI_QOS_DATA_TYPE),
+       WIFI_DATA           = (0 | IEEE80211_FTYPE_DATA),
+       WIFI_DATA_CFACK     = (BIT(4) | IEEE80211_FTYPE_DATA),
+       WIFI_DATA_CFPOLL    = (BIT(5) | IEEE80211_FTYPE_DATA),
+       WIFI_DATA_CFACKPOLL = (BIT(5) | BIT(4) | IEEE80211_FTYPE_DATA),
+       WIFI_DATA_NULL      = (BIT(6) | IEEE80211_FTYPE_DATA),
+       WIFI_QOS_DATA_NULL      = (BIT(6) | IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA),
 };
 
 enum WIFI_REASON_CODE  {
@@ -172,8 +152,6 @@ enum WIFI_REG_DOMAIN {
 
 #define GetFrDs(pbuf)  (((*(__le16 *)(pbuf)) & cpu_to_le16(_FROM_DS_)) != 0)
 
-#define get_tofr_ds(pframe)    ((GetToDs(pframe) << 1) | GetFrDs(pframe))
-
 #define SetMFrag(pbuf) \
        *(__le16 *)(pbuf) |= cpu_to_le16(_MORE_FRAG_)
 
@@ -209,12 +187,6 @@ enum WIFI_REG_DOMAIN {
                *(__le16 *)(pbuf) |= cpu_to_le16(type); \
        } while (0)
 
-#define GetSequence(pbuf)                      \
-       (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 22)) >> 4)
-
-#define GetFragNum(pbuf)                       \
-       (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 22)) & 0x0f)
-
 #define GetTupleCache(pbuf)                    \
        (cpu_to_le16(*(unsigned short *)((size_t)(pbuf) + 22)))
 
@@ -239,8 +211,6 @@ enum WIFI_REG_DOMAIN {
 #define SetPriority(pbuf, tid) \
        *(__le16 *)(pbuf) |= cpu_to_le16(tid & 0xf)
 
-#define GetPriority(pbuf)      ((le16_to_cpu(*(__le16 *)(pbuf))) & 0xf)
-
 #define SetEOSP(pbuf, eosp)    \
                *(__le16 *)(pbuf) |= cpu_to_le16((eosp & 1) << 4)
 
@@ -254,8 +224,6 @@ enum WIFI_REG_DOMAIN {
 #define SetAMsdu(pbuf, amsdu)  \
        *(__le16 *)(pbuf) |= cpu_to_le16((amsdu & 1) << 7)
 
-#define GetAid(pbuf)   (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 2)) & 0x3fff)
-
 #define GetTid(pbuf)   (le16_to_cpu(*(__le16 *)((size_t)(pbuf) +       \
                        (((GetToDs(pbuf)<<1) | GetFrDs(pbuf)) == 3 ?    \
                        30 : 24))) & 0x000f)
@@ -270,10 +238,7 @@ enum WIFI_REG_DOMAIN {
 
 static inline bool IS_MCAST(unsigned char *da)
 {
-       if ((*da) & 0x01)
-               return true;
-       else
-               return false;
+       return (*da) & 0x01;
 }
 
 static inline unsigned char *get_da(unsigned char *pframe)
@@ -345,13 +310,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
        return sa;
 }
 
-static inline bool IsFrameTypeCtrl(unsigned char *pframe)
-{
-       if (WIFI_CTRL_TYPE == GetFrameType(pframe))
-               return true;
-       else
-               return false;
-}
 /*-----------------------------------------------------------------------------
                        Below is for the security related definition
 ------------------------------------------------------------------------------*/
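
For context on the wifi.h rework above: with BIT() coming from <linux/bits.h> and the frame types from <linux/ieee80211.h>, the remaining driver values are plain 802.11 frame-control type/subtype compositions. A small userspace check, with the constant values written out to mirror what linux/ieee80211.h defines (treat them as assumptions of the example, not part of the patch):

#include <stdio.h>

#define BIT(x)                   (1U << (x))
#define IEEE80211_FTYPE_CTL      0x0004
#define IEEE80211_FTYPE_DATA     0x0008
#define IEEE80211_STYPE_PSPOLL   0x00a0
#define IEEE80211_STYPE_QOS_DATA 0x0080

int main(void)
{
        unsigned int pspoll   = BIT(7) | BIT(5) | IEEE80211_FTYPE_CTL;
        unsigned int qos_null = BIT(6) | IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA;

        /* Both compositions reduce to the standard type|subtype encodings. */
        printf("PS-Poll  fc = 0x%04x (expect 0x%04x)\n",
               pspoll, IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
        printf("QoS-Null fc = 0x%04x\n", qos_null);
        return 0;
}
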
index eb9375b..1b09462 100644 (file)
@@ -4,7 +4,6 @@
 #include "../include/osdep_service.h"
 #include "../include/drv_types.h"
 #include "../include/wlan_bssdef.h"
-#include "../include/rtw_debug.h"
 #include "../include/wifi.h"
 #include "../include/rtw_mlme.h"
 #include "../include/rtw_mlme_ext.h"
@@ -1131,9 +1130,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
                                                break;
                                        }
                                        sec_len = *(pos++); len -= 1;
-                                       if (sec_len > 0 && sec_len <= len) {
+                                       if (sec_len > 0 &&
+                                           sec_len <= len &&
+                                           sec_len <= 32) {
                                                ssid[ssid_index].SsidLength = sec_len;
-                                               memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
+                                               memcpy(ssid[ssid_index].Ssid, pos, sec_len);
                                                ssid_index++;
                                        }
                                        pos += sec_len;
@@ -1886,88 +1887,6 @@ static int rtw_wx_get_nick(struct net_device *dev,
        return 0;
 }
 
-static int rtw_wx_read32(struct net_device *dev,
-                           struct iw_request_info *info,
-                           union iwreq_data *wrqu, char *extra)
-{
-       struct adapter *padapter;
-       struct iw_point *p;
-       u16 len;
-       u32 addr;
-       u32 data32;
-       u32 bytes;
-       u8 *ptmp;
-       int ret;
-
-       padapter = (struct adapter *)rtw_netdev_priv(dev);
-       p = &wrqu->data;
-       len = p->length;
-       ptmp = memdup_user(p->pointer, len);
-       if (IS_ERR(ptmp))
-               return PTR_ERR(ptmp);
-
-       bytes = 0;
-       addr = 0;
-       sscanf(ptmp, "%d,%x", &bytes, &addr);
-
-       switch (bytes) {
-       case 1:
-               data32 = rtw_read8(padapter, addr);
-               sprintf(extra, "0x%02X", data32);
-               break;
-       case 2:
-               data32 = rtw_read16(padapter, addr);
-               sprintf(extra, "0x%04X", data32);
-               break;
-       case 4:
-               data32 = rtw_read32(padapter, addr);
-               sprintf(extra, "0x%08X", data32);
-               break;
-       default:
-               ret = -EINVAL;
-               goto err_free_ptmp;
-       }
-
-       kfree(ptmp);
-       return 0;
-
-err_free_ptmp:
-       kfree(ptmp);
-       return ret;
-}
-
-static int rtw_wx_write32(struct net_device *dev,
-                           struct iw_request_info *info,
-                           union iwreq_data *wrqu, char *extra)
-{
-       struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
-       u32 addr;
-       u32 data32;
-       u32 bytes;
-
-       bytes = 0;
-       addr = 0;
-       data32 = 0;
-       sscanf(extra, "%d,%x,%x", &bytes, &addr, &data32);
-
-       switch (bytes) {
-       case 1:
-               rtw_write8(padapter, addr, (u8)data32);
-               break;
-       case 2:
-               rtw_write16(padapter, addr, (u16)data32);
-               break;
-       case 4:
-               rtw_write32(padapter, addr, data32);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
 static int rtw_wx_read_rf(struct net_device *dev,
                            struct iw_request_info *info,
                            union iwreq_data *wrqu, char *extra)
@@ -2363,114 +2282,6 @@ static void rtw_p2p_setDN(struct net_device *dev,
        pwdinfo->device_name_len = wrqu->data.length - 1;
 }
 
-static void rtw_p2p_get_status(struct net_device *dev,
-                              struct iw_request_info *info,
-                              union iwreq_data *wrqu, char *extra)
-{
-       struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-       struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
-       /*      Commented by Albert 2010/10/12 */
-       /*      Because of the output size limitation, I had removed the "Role" information. */
-       /*      About the "Role" information, we will use the new private IOCTL to get the "Role" information. */
-       sprintf(extra, "\n\nStatus =%.2d\n", rtw_p2p_state(pwdinfo));
-       wrqu->data.length = strlen(extra);
-}
-
-/*     Commented by Albert 20110520 */
-/*     This function will return the config method description */
-/*     This config method description will show us which config method the remote P2P device is intended to use */
-/*     by sending the provisioning discovery request frame. */
-
-static void rtw_p2p_get_req_cm(struct net_device *dev,
-                              struct iw_request_info *info,
-                              union iwreq_data *wrqu, char *extra)
-{
-       struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-       struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
-       sprintf(extra, "\n\nCM =%s\n", pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req);
-       wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_role(struct net_device *dev,
-                            struct iw_request_info *info,
-                            union iwreq_data *wrqu, char *extra)
-{
-       struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-       struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
-       sprintf(extra, "\n\nRole =%.2d\n", rtw_p2p_role(pwdinfo));
-       wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_peer_ifaddr(struct net_device *dev,
-                                   struct iw_request_info *info,
-                                   union iwreq_data *wrqu, char *extra)
-{
-       struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-       struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
-       sprintf(extra, "\nMAC %pM",
-               pwdinfo->p2p_peer_interface_addr);
-       wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_peer_devaddr(struct net_device *dev,
-                                    struct iw_request_info *info,
-                                    union iwreq_data *wrqu, char *extra)
-
-{
-       struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-       struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
-       sprintf(extra, "\n%pM",
-               pwdinfo->rx_prov_disc_info.peerDevAddr);
-       wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_peer_devaddr_by_invitation(struct net_device *dev,
-                                                  struct iw_request_info *info,
-                                                  union iwreq_data *wrqu,
-                                                  char *extra)
-
-{
-       struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-       struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
-       sprintf(extra, "\nMAC %pM",
-               pwdinfo->p2p_peer_device_addr);
-       wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_groupid(struct net_device *dev,
-                               struct iw_request_info *info,
-                               union iwreq_data *wrqu, char *extra)
-
-{
-       struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-       struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
-       sprintf(extra, "\n%.2X:%.2X:%.2X:%.2X:%.2X:%.2X %s",
-               pwdinfo->groupid_info.go_device_addr[0], pwdinfo->groupid_info.go_device_addr[1],
-               pwdinfo->groupid_info.go_device_addr[2], pwdinfo->groupid_info.go_device_addr[3],
-               pwdinfo->groupid_info.go_device_addr[4], pwdinfo->groupid_info.go_device_addr[5],
-               pwdinfo->groupid_info.ssid);
-       wrqu->data.length = strlen(extra);
-}
-
-static void rtw_p2p_get_op_ch(struct net_device *dev,
-                             struct iw_request_info *info,
-                             union iwreq_data *wrqu, char *extra)
-
-{
-       struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-       struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-
-       sprintf(extra, "\n\nOp_ch =%.2d\n", pwdinfo->operating_channel);
-       wrqu->data.length = strlen(extra);
-}
-
 static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
@@ -3229,32 +3040,6 @@ static int rtw_p2p_set(struct net_device *dev,
        return ret;
 }
 
-static int rtw_p2p_get(struct net_device *dev,
-                              struct iw_request_info *info,
-                              union iwreq_data *wrqu, char *extra)
-{
-       if (!memcmp(wrqu->data.pointer, "status", 6)) {
-               rtw_p2p_get_status(dev, info, wrqu, extra);
-       } else if (!memcmp(wrqu->data.pointer, "role", 4)) {
-               rtw_p2p_get_role(dev, info, wrqu, extra);
-       } else if (!memcmp(wrqu->data.pointer, "peer_ifa", 8)) {
-               rtw_p2p_get_peer_ifaddr(dev, info, wrqu, extra);
-       } else if (!memcmp(wrqu->data.pointer, "req_cm", 6)) {
-               rtw_p2p_get_req_cm(dev, info, wrqu, extra);
-       } else if (!memcmp(wrqu->data.pointer, "peer_deva", 9)) {
-               /*      Get the P2P device address when receiving the provision discovery request frame. */
-               rtw_p2p_get_peer_devaddr(dev, info, wrqu, extra);
-       } else if (!memcmp(wrqu->data.pointer, "group_id", 8)) {
-               rtw_p2p_get_groupid(dev, info, wrqu, extra);
-       } else if (!memcmp(wrqu->data.pointer, "peer_deva_inv", 9)) {
-               /*      Get the P2P device address when receiving the P2P Invitation request frame. */
-               rtw_p2p_get_peer_devaddr_by_invitation(dev, info, wrqu, extra);
-       } else if (!memcmp(wrqu->data.pointer, "op_ch", 5)) {
-               rtw_p2p_get_op_ch(dev, info, wrqu, extra);
-       }
-       return 0;
-}
-
 static int rtw_p2p_get2(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
@@ -3389,6 +3174,34 @@ static void rf_reg_dump(struct adapter *padapter)
        }
 }
 
+static void rtw_set_dynamic_functions(struct adapter *adapter, u8 dm_func)
+{
+       struct hal_data_8188e *haldata = &adapter->haldata;
+       struct odm_dm_struct *odmpriv = &haldata->odmpriv;
+
+       switch (dm_func) {
+       case 0:
+               /* disable all dynamic func */
+               odmpriv->SupportAbility = DYNAMIC_FUNC_DISABLE;
+               break;
+       case 1:
+               /* disable DIG */
+               odmpriv->SupportAbility &= (~DYNAMIC_BB_DIG);
+               break;
+       case 6:
+               /* turn on all dynamic func */
+               if (!(odmpriv->SupportAbility & DYNAMIC_BB_DIG)) {
+                       struct rtw_dig *digtable = &odmpriv->DM_DigTable;
+
+                       digtable->CurIGValue = rtw_read8(adapter, 0xc50);
+               }
+               odmpriv->SupportAbility = DYNAMIC_ALL_FUNC_ENABLE;
+               break;
+       default:
+               break;
+       }
+}
+
 static int rtw_dbg_port(struct net_device *dev,
                               struct iw_request_info *info,
                               union iwreq_data *wrqu, char *extra)
@@ -3620,9 +3433,7 @@ static int rtw_dbg_port(struct net_device *dev,
                        break;
                case 0x06:
                        {
-                               u32     ODMFlag;
-                               GetHwReg8188EU(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
-                               ODMFlag = (u32)(0x0f & arg);
+                               u32 ODMFlag = (u32)(0x0f & arg);
                                SetHwReg8188EU(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
                        }
                        break;
@@ -3632,13 +3443,6 @@ static int rtw_dbg_port(struct net_device *dev,
                        break;
                case 0x09:
                        break;
-               case 0x0c:/* dump rx/tx packet */
-                       if (arg == 0) {
-                               SetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DUMP_RXPKT, &extra_arg);
-                       } else if (arg == 1) {
-                               SetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DUMP_TXPKT, &extra_arg);
-                       }
-                       break;
                case 0x15:
                        break;
                case 0x10:/*  driver version display */
@@ -3683,23 +3487,14 @@ static int rtw_dbg_port(struct net_device *dev,
                                rf_reg_dump(padapter);
                        break;
                case 0xee:/* turn on/off dynamic funcs */
-                       {
-                               u32 odm_flag;
-
-                               if (0xf == extra_arg) {
-                                       GetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
-                               } else {
-                                       /*      extra_arg = 0  - disable all dynamic func
-                                               extra_arg = 1  - disable DIG
-                                               extra_arg = 2  - disable tx power tracking
-                                               extra_arg = 3  - turn on all dynamic func
-                                       */
-                                       SetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DM_FUNC, &extra_arg);
-                                       GetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
-                               }
+                       if (extra_arg != 0xf) {
+                               /* extra_arg = 0  - disable all dynamic func
+                                * extra_arg = 1  - disable DIG
+                                * extra_arg = 6  - turn on all dynamic func
+                                */
+                               rtw_set_dynamic_functions(padapter, extra_arg);
                        }
                        break;
-
                case 0xfd:
                        rtw_write8(padapter, 0xc50, arg);
                        rtw_write8(padapter, 0xc58, arg);
@@ -3895,8 +3690,8 @@ static const struct iw_priv_args rtw_private_args[] = {
 };
 
 static iw_handler rtw_private_handler[] = {
-rtw_wx_write32,                                /* 0x00 */
-rtw_wx_read32,                         /* 0x01 */
+       NULL,                           /* 0x00 */
+       NULL,                           /* 0x01 */
        NULL,                           /* 0x02 */
 NULL,                                  /* 0x03 */
 /*  for MM DTV platform */
@@ -3919,7 +3714,7 @@ NULL,                                     /* 0x03 */
        NULL,                           /* 0x0F */
 
        rtw_p2p_set,                    /* 0x10 */
-       rtw_p2p_get,                    /* 0x11 */
+       NULL,                           /* 0x11 */
        rtw_p2p_get2,                   /* 0x12 */
 
        NULL,                           /* 0x13 */
@@ -3958,10 +3753,10 @@ static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
 
 struct iw_handler_def rtw_handlers_def = {
        .standard = rtw_handlers,
-       .num_standard = sizeof(rtw_handlers) / sizeof(iw_handler),
+       .num_standard = ARRAY_SIZE(rtw_handlers),
        .private = rtw_private_handler,
        .private_args = (struct iw_priv_args *)rtw_private_args,
-       .num_private = sizeof(rtw_private_handler) / sizeof(iw_handler),
-       .num_private_args = sizeof(rtw_private_args) / sizeof(struct iw_priv_args),
+       .num_private = ARRAY_SIZE(rtw_private_handler),
+       .num_private_args = ARRAY_SIZE(rtw_private_args),
        .get_wireless_stats = rtw_get_wireless_stats,
 };
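
The last hunk swaps the open-coded sizeof divisions for ARRAY_SIZE(). A userspace equivalent of that macro, for reference (the kernel version additionally rejects non-array arguments at compile time):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
        const char *handlers[] = { "set", "get", "dump" };

        /* Stays correct even if the element type of handlers[] changes,
         * unlike sizeof(handlers) / sizeof(const char *).
         */
        printf("%zu entries\n", ARRAY_SIZE(handlers));
        return 0;
}
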
index 72ad970..899d8e9 100644 (file)
@@ -66,7 +66,6 @@ void rtw_reset_securitypriv(struct adapter *adapter)
                /*  We have to backup the PMK information for WiFi PMK Caching test item. */
                /*  Backup the btkip_countermeasure information. */
                /*  When the countermeasure is trigger, the driver have to disconnect with AP for 60 seconds. */
-               memset(&backup_pmkid[0], 0x00, sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
                memcpy(&backup_pmkid[0], &adapter->securitypriv.PMKIDList[0], sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
                backup_index = adapter->securitypriv.PMKIDIndex;
                backup_counter = adapter->securitypriv.btkip_countermeasure;
index 550721e..891c85b 100644 (file)
@@ -441,7 +441,6 @@ static void rtw_init_default_value(struct adapter *padapter)
 u8 rtw_reset_drv_sw(struct adapter *padapter)
 {
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-       struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
 
        /* hal_priv */
        rtl8188eu_init_default_value(padapter);
@@ -457,8 +456,6 @@ u8 rtw_reset_drv_sw(struct adapter *padapter)
 
        _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING);
 
-       pwrctrlpriv->pwr_state_check_cnts = 0;
-
        /* mlmeextpriv */
        padapter->mlmeextpriv.sitesurvey_res.state = SCAN_DISABLE;
 
@@ -490,10 +487,7 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
        init_wifidirect_info(padapter, P2P_ROLE_DISABLE);
        reset_global_wifidirect_info(padapter);
 
-       if (init_mlme_ext_priv(padapter) == _FAIL) {
-               dev_err(dvobj_to_dev(padapter->dvobj), "init_mlme_ext_priv failed\n");
-               goto free_mlme_priv;
-       }
+       init_mlme_ext_priv(padapter);
 
        if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter) == _FAIL) {
                dev_err(dvobj_to_dev(padapter->dvobj), "_rtw_init_xmit_priv failed\n");
@@ -534,7 +528,6 @@ free_xmit_priv:
 free_mlme_ext:
        free_mlme_ext_priv(&padapter->mlmeextpriv);
 
-free_mlme_priv:
        rtw_free_mlme_priv(&padapter->mlmepriv);
 
 free_evt_priv:
@@ -632,12 +625,6 @@ int _netdev_open(struct net_device *pnetdev)
 {
        uint status;
        struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
-       struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
-
-       if (pwrctrlpriv->ps_flag) {
-               padapter->net_closed = false;
-               goto netdev_open_normal_process;
-       }
 
        if (!padapter->bup) {
                padapter->bDriverStopped = false;
@@ -681,7 +668,6 @@ int _netdev_open(struct net_device *pnetdev)
 
        netdev_br_init(pnetdev);
 
-netdev_open_normal_process:
        return 0;
 
 netdev_open_error:
@@ -750,9 +736,36 @@ void rtw_ips_pwr_down(struct adapter *padapter)
        padapter->bCardDisableWOHSM = false;
 }
 
+static void rtw_fifo_cleanup(struct adapter *adapter)
+{
+       struct pwrctrl_priv *pwrpriv = &adapter->pwrctrlpriv;
+       u8 trycnt = 100;
+
+       /* pause tx */
+       rtw_write8(adapter, REG_TXPAUSE, 0xff);
+
+       /* keep sn */
+       adapter->xmitpriv.nqos_ssn = rtw_read16(adapter, REG_NQOS_SEQ);
+
+       if (!pwrpriv->bkeepfwalive) {
+               /* RX DMA stop */
+               rtw_write32(adapter, REG_RXPKT_NUM,
+                           (rtw_read32(adapter, REG_RXPKT_NUM) | RW_RELEASE_EN));
+               do {
+                       if (!(rtw_read32(adapter, REG_RXPKT_NUM) & RXDMA_IDLE))
+                               break;
+               } while (trycnt--);
+
+               /* RQPN Load 0 */
+               rtw_write16(adapter, REG_RQPN_NPQ, 0x0);
+               rtw_write32(adapter, REG_RQPN, 0x80000000);
+               mdelay(10);
+       }
+}
+
 void rtw_ips_dev_unload(struct adapter *padapter)
 {
-       SetHwReg8188EU(padapter, HW_VAR_FIFO_CLEARN_UP, NULL);
+       rtw_fifo_cleanup(padapter);
 
        if (padapter->intf_stop)
                padapter->intf_stop(padapter);
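
The new rtw_fifo_cleanup() above polls a status register a bounded number of times instead of going through SetHwReg8188EU(). Shown generically, the bounded-poll shape is the following; read_status() here is a made-up stand-in for a register read such as rtw_read32():

#include <stdbool.h>
#include <stdio.h>

/* Fake register read: pretend the idle bit appears on the fourth poll. */
static unsigned int read_status(void)
{
        static unsigned int calls;

        return (++calls > 3) ? 0x1 : 0x0;
}

/* Poll for a status bit with a retry cap so a wedged device cannot
 * stall the caller forever.
 */
static bool wait_for_bit(unsigned int mask, unsigned int tries)
{
        while (tries--) {
                if (read_status() & mask)
                        return true;
        }
        return false;
}

int main(void)
{
        printf("idle: %s\n", wait_for_bit(0x1, 100) ? "yes" : "no");
        return 0;
}
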
index 7a6fcc9..812acd5 100644 (file)
@@ -42,22 +42,6 @@ Otherwise, there will be racing condition.
 Caller must check if the list is empty before calling rtw_list_delete
 */
 
-inline u32 rtw_systime_to_ms(u32 systime)
-{
-       return systime * 1000 / HZ;
-}
-
-inline u32 rtw_ms_to_systime(u32 ms)
-{
-       return ms * HZ / 1000;
-}
-
-/*  the input parameter start use the same unit as jiffies */
-inline s32 rtw_get_passing_time_ms(u32 start)
-{
-       return rtw_systime_to_ms(jiffies - start);
-}
-
 void rtw_usleep_os(int us)
 {
        if (1 < (us / 1000))
@@ -116,19 +100,10 @@ void rtw_free_netdev(struct net_device *netdev)
 {
        struct rtw_netdev_priv_indicator *pnpi;
 
-       if (!netdev)
-               goto RETURN;
-
        pnpi = netdev_priv(netdev);
 
-       if (!pnpi->priv)
-               goto RETURN;
-
        vfree(pnpi->priv);
        free_netdev(netdev);
-
-RETURN:
-       return;
 }
 
 int rtw_change_ifname(struct adapter *padapter, const char *ifname)
@@ -220,7 +195,7 @@ keep_ori:
  */
 inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
 {
-       return (cbuf->write == cbuf->read) ? true : false;
+       return cbuf->write == cbuf->read;
 }
 
 /**
index ffd727f..68869c5 100644 (file)
@@ -8,7 +8,6 @@
 #include "../include/xmit_osdep.h"
 #include "../include/hal_intf.h"
 #include "../include/osdep_intf.h"
-#include "../include/usb_vendor_req.h"
 #include "../include/usb_ops.h"
 #include "../include/usb_osintf.h"
 #include "../include/rtw_ioctl.h"
@@ -200,8 +199,6 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
        struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
 
-       int ret = 0;
-
        if ((!padapter->bup) || (padapter->bDriverStopped) ||
            (padapter->bSurpriseRemoved))
                goto exit;
@@ -240,7 +237,7 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
                rtw_indicate_disconnect(padapter);
 
 exit:
-               return ret;
+               return 0;
 }
 
 static int rtw_resume(struct usb_interface *pusb_intf)
index c4b6dbc..0269e60 100644 (file)
@@ -106,8 +106,7 @@ u32 rtw_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
        struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data;
        struct usb_device *pusbd = pdvobj->pusbdev;
 
-       if ((padapter->bDriverStopped) || (padapter->bSurpriseRemoved) ||
-           (padapter->pwrctrlpriv.pnp_bstop_trx)) {
+       if (padapter->bDriverStopped || padapter->bSurpriseRemoved) {
                rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_TX_DENY);
                goto exit;
        }
@@ -141,7 +140,7 @@ u32 rtw_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
 
        spin_unlock_irqrestore(&pxmitpriv->lock, irqL);
 
-       purb    = pxmitbuf->pxmit_urb[0];
+       purb    = pxmitbuf->pxmit_urb;
 
        /* translate DMA FIFO addr to pipehandle */
        pipe = ffaddr2pipehdl(pdvobj, addr);
@@ -179,25 +178,21 @@ exit:
 
 void rtw_write_port_cancel(struct adapter *padapter)
 {
-       int i, j;
+       int i;
        struct xmit_buf *pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmitbuf;
 
        padapter->bWritePortCancel = true;
 
        for (i = 0; i < NR_XMITBUFF; i++) {
-               for (j = 0; j < 8; j++) {
-                       if (pxmitbuf->pxmit_urb[j])
-                               usb_kill_urb(pxmitbuf->pxmit_urb[j]);
-               }
+               if (pxmitbuf->pxmit_urb)
+                       usb_kill_urb(pxmitbuf->pxmit_urb);
                pxmitbuf++;
        }
 
        pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmit_extbuf;
        for (i = 0; i < NR_XMIT_EXTBUFF; i++) {
-               for (j = 0; j < 8; j++) {
-                       if (pxmitbuf->pxmit_urb[j])
-                               usb_kill_urb(pxmitbuf->pxmit_urb[j]);
-               }
+               if (pxmitbuf->pxmit_urb)
+                       usb_kill_urb(pxmitbuf->pxmit_urb);
                pxmitbuf++;
        }
 }
index a6012cf..e430c64 100644 (file)
@@ -67,8 +67,6 @@ bool rtw_endofpktfile(struct pkt_file *pfile)
 
 int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz)
 {
-       int i;
-
        pxmitbuf->pallocated_buf = kzalloc(alloc_sz, GFP_KERNEL);
        if (!pxmitbuf->pallocated_buf)
                return _FAIL;
@@ -76,21 +74,17 @@ int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitb
        pxmitbuf->pbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
        pxmitbuf->dma_transfer_addr = 0;
 
-       for (i = 0; i < 8; i++) {
-               pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
-               if (!pxmitbuf->pxmit_urb[i])
-                       return _FAIL;
-       }
+       pxmitbuf->pxmit_urb = usb_alloc_urb(0, GFP_KERNEL);
+       if (!pxmitbuf->pxmit_urb)
+               return _FAIL;
+
        return _SUCCESS;
 }
 
 void rtw_os_xmit_resource_free(struct adapter *padapter,
                               struct xmit_buf *pxmitbuf, u32 free_sz)
 {
-       int i;
-
-       for (i = 0; i < 8; i++)
-               usb_free_urb(pxmitbuf->pxmit_urb[i]);
+       usb_free_urb(pxmitbuf->pxmit_urb);
 
        kfree(pxmitbuf->pallocated_buf);
 }
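
With pxmit_urb[8] collapsed to a single pointer, the allocate, cancel and free paths above all lose their inner loops. A rough kernel-style sketch of that single-URB lifecycle; the my_* names are invented for the example and the error handling is simplified relative to the driver:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/usb.h>

struct my_xmit_buf {
        u8 *pallocated_buf;
        struct urb *pxmit_urb;
};

static int my_xmit_buf_init(struct my_xmit_buf *buf, u32 alloc_sz)
{
        buf->pallocated_buf = kzalloc(alloc_sz, GFP_KERNEL);
        if (!buf->pallocated_buf)
                return -ENOMEM;

        buf->pxmit_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!buf->pxmit_urb) {
                kfree(buf->pallocated_buf);
                return -ENOMEM;
        }
        return 0;
}

static void my_xmit_buf_release(struct my_xmit_buf *buf)
{
        usb_kill_urb(buf->pxmit_urb);   /* no-op if never submitted */
        usb_free_urb(buf->pxmit_urb);
        kfree(buf->pallocated_buf);
}
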
index 52eeb56..4abec7b 100644 (file)
@@ -185,10 +185,10 @@ void rtl92e_set_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
        for (index = 0; index < 6; index++) {
                writeVal = (u32)(priv->MCSTxPowerLevelOriginalOffset[index] +
                           ((index < 2) ? powerBase0 : powerBase1));
-               byte0 = (u8)(writeVal & 0x7f);
-               byte1 = (u8)((writeVal & 0x7f00)>>8);
-               byte2 = (u8)((writeVal & 0x7f0000)>>16);
-               byte3 = (u8)((writeVal & 0x7f000000)>>24);
+               byte0 = writeVal & 0x7f;
+               byte1 = (writeVal & 0x7f00) >> 8;
+               byte2 = (writeVal & 0x7f0000) >> 16;
+               byte3 = (writeVal & 0x7f000000) >> 24;
                if (byte0 > 0x24)
                        byte0 = 0x24;
                if (byte1 > 0x24)
index c5e44bb..cd8bbc3 100644 (file)
@@ -58,7 +58,7 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
                        memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
                        memset(pTxFwInfo, 0x12, 8);
                } else {
-                       tcb_desc->txbuf_size = (u16)frag_length;
+                       tcb_desc->txbuf_size = frag_length;
                }
 
                seg_ptr = skb_put(skb, frag_length);
index 7f9dee4..4b92491 100644 (file)
@@ -221,7 +221,7 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
                         &priv->rtllib->current_network.qos_data.parameters;
                u8 pAcParam = *val;
                u32 eACI = pAcParam;
-               union aci_aifsn *pAciAifsn = (union aci_aifsn *) &
+               union aci_aifsn *pAciAifsn = (union aci_aifsn *)&
                                              (qos_parameters->aifs[0]);
                u8 acm = pAciAifsn->f.acm;
                u8 AcmCtrl = rtl92e_readb(dev, AcmHwCtrl);
@@ -320,8 +320,8 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
                priv->eeprom_did = rtl92e_eeprom_read(dev, EEPROM_DID >> 1);
 
                usValue = rtl92e_eeprom_read(dev,
-                                            (u16)(EEPROM_Customer_ID>>1)) >> 8;
-               priv->eeprom_CustomerID = (u8)(usValue & 0xff);
+                                            (EEPROM_Customer_ID >> 1)) >> 8;
+               priv->eeprom_CustomerID = usValue & 0xff;
                usValue = rtl92e_eeprom_read(dev,
                                             EEPROM_ICVersion_ChannelPlan>>1);
                priv->eeprom_ChannelPlan = usValue&0xff;
@@ -399,9 +399,9 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
                        priv->EEPROMLegacyHTTxPowerDiff);
 
                if (!priv->AutoloadFailFlag)
-                       priv->EEPROMThermalMeter = (u8)(((rtl92e_eeprom_read(dev,
+                       priv->EEPROMThermalMeter = ((rtl92e_eeprom_read(dev,
                                                   (EEPROM_ThermalMeter>>1))) &
-                                                  0xff00)>>8);
+                                                  0xff00) >> 8;
                else
                        priv->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
                RT_TRACE(COMP_INIT, "ThermalMeter = %d\n",
@@ -413,8 +413,8 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
                                usValue = rtl92e_eeprom_read(dev,
                                          EEPROM_TxPwDiff_CrystalCap >> 1);
                                priv->EEPROMAntPwDiff = usValue & 0x0fff;
-                               priv->EEPROMCrystalCap = (u8)((usValue & 0xf000)
-                                                        >> 12);
+                               priv->EEPROMCrystalCap = (usValue & 0xf000)
+                                                        >> 12;
                        } else {
                                priv->EEPROMAntPwDiff =
                                         EEPROM_Default_AntTxPowerDiff;
@@ -811,7 +811,7 @@ start:
 
        rtl92e_config_mac(dev);
 
-       if (priv->card_8192_version > (u8) VERSION_8190_BD) {
+       if (priv->card_8192_version > VERSION_8190_BD) {
                rtl92e_get_tx_power(dev);
                rtl92e_set_tx_power(dev, priv->chan);
        }
@@ -894,9 +894,8 @@ start:
 
                        for (i = 0; i < TxBBGainTableLength; i++) {
                                if (tmpRegA == dm_tx_bb_gain[i]) {
-                                       priv->rfa_txpowertrackingindex = (u8)i;
-                                       priv->rfa_txpowertrackingindex_real =
-                                                (u8)i;
+                                       priv->rfa_txpowertrackingindex = i;
+                                       priv->rfa_txpowertrackingindex_real = i;
                                        priv->rfa_txpowertracking_default =
                                                 priv->rfa_txpowertrackingindex;
                                        break;
@@ -908,7 +907,7 @@ start:
 
                        for (i = 0; i < CCKTxBBGainTableLength; i++) {
                                if (TempCCk == dm_cck_tx_bb_gain[i][0]) {
-                                       priv->CCKPresentAttentuation_20Mdefault = (u8)i;
+                                       priv->CCKPresentAttentuation_20Mdefault = i;
                                        break;
                                }
                        }
@@ -1176,7 +1175,7 @@ void  rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
        pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
        memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
        pTxFwInfo->TxHT = (cb_desc->data_rate & 0x80) ? 1 : 0;
-       pTxFwInfo->TxRate = _rtl92e_rate_mgn_to_hw((u8)cb_desc->data_rate);
+       pTxFwInfo->TxRate = _rtl92e_rate_mgn_to_hw(cb_desc->data_rate);
        pTxFwInfo->EnableCPUDur = cb_desc->bTxEnableFwCalcDur;
        pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
                                                  pTxFwInfo->TxRate, cb_desc);
@@ -1195,7 +1194,7 @@ void  rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
        pTxFwInfo->CtsEnable = (cb_desc->bCTSEnable) ? 1 : 0;
        pTxFwInfo->RtsSTBC = (cb_desc->bRTSSTBC) ? 1 : 0;
        pTxFwInfo->RtsHT = (cb_desc->rts_rate&0x80) ? 1 : 0;
-       pTxFwInfo->RtsRate = _rtl92e_rate_mgn_to_hw((u8)cb_desc->rts_rate);
+       pTxFwInfo->RtsRate = _rtl92e_rate_mgn_to_hw(cb_desc->rts_rate);
        pTxFwInfo->RtsBandwidth = 0;
        pTxFwInfo->RtsSubcarrier = cb_desc->RTSSC;
        pTxFwInfo->RtsShort = (pTxFwInfo->RtsHT == 0) ?
@@ -1226,7 +1225,7 @@ void  rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
        pdesc->LINIP = 0;
        pdesc->CmdInit = 1;
        pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
-       pdesc->PktSize = (u16)skb->len-sizeof(struct tx_fwinfo_8190pci);
+       pdesc->PktSize = skb->len - sizeof(struct tx_fwinfo_8190pci);
 
        pdesc->SecCAMID = 0;
        pdesc->RATid = cb_desc->RATRIndex;
@@ -1299,11 +1298,10 @@ void  rtl92e_fill_tx_cmd_desc(struct net_device *dev, struct tx_desc_cmd *entry,
 
                entry_tmp->CmdInit = DESC_PACKET_TYPE_NORMAL;
                entry_tmp->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
-               entry_tmp->PktSize = (u16)(cb_desc->pkt_size +
-                                     entry_tmp->Offset);
+               entry_tmp->PktSize = cb_desc->pkt_size + entry_tmp->Offset;
                entry_tmp->QueueSelect = QSLT_CMD;
                entry_tmp->TxFWInfoSize = 0x08;
-               entry_tmp->RATid = (u8)DESC_PACKET_TYPE_INIT;
+               entry_tmp->RATid = DESC_PACKET_TYPE_INIT;
        }
        entry->TxBufferSize = skb->len;
        entry->TxBuffAddr = mapping;
@@ -1613,9 +1611,8 @@ static void _rtl92e_query_rxphystatus(
                                total_rssi += RSSI;
 
                        if (bpacket_match_bssid) {
-                               pstats->RxMIMOSignalStrength[i] = (u8) RSSI;
-                               precord_stats->RxMIMOSignalStrength[i] =
-                                                               (u8) RSSI;
+                               pstats->RxMIMOSignalStrength[i] = RSSI;
+                               precord_stats->RxMIMOSignalStrength[i] = RSSI;
                        }
                }
 
@@ -1661,14 +1658,14 @@ static void _rtl92e_query_rxphystatus(
 
        if (is_cck_rate) {
                pstats->SignalStrength = precord_stats->SignalStrength =
-                                        (u8)(_rtl92e_signal_scale_mapping(priv,
-                                        (long)pwdb_all));
+                                        _rtl92e_signal_scale_mapping(priv,
+                                        (long)pwdb_all);
 
        } else {
                if (rf_rx_num != 0)
                        pstats->SignalStrength = precord_stats->SignalStrength =
-                                        (u8)(_rtl92e_signal_scale_mapping(priv,
-                                        (long)(total_rssi /= rf_rx_num)));
+                                        _rtl92e_signal_scale_mapping(priv,
+                                        (long)(total_rssi /= rf_rx_num));
        }
 }
 
@@ -1709,8 +1706,7 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
                slide_rssi_index = 0;
 
        tmp_val = priv->stats.slide_rssi_total/slide_rssi_statistics;
-       priv->stats.signal_strength = rtl92e_translate_to_dbm(priv,
-                                                             (u8)tmp_val);
+       priv->stats.signal_strength = rtl92e_translate_to_dbm(priv, tmp_val);
        curr_st->rssi = priv->stats.signal_strength;
        if (!prev_st->bPacketMatchBSSID) {
                if (!prev_st->bToSelfBA)
@@ -2036,7 +2032,7 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
        pDrvInfo = (struct rx_fwinfo *)(skb->data + stats->RxBufShift);
 
        stats->rate = _rtl92e_rate_hw_to_mgn((bool)pDrvInfo->RxHT,
-                                            (u8)pDrvInfo->RxRate);
+                                            pDrvInfo->RxRate);
        stats->bShortPreamble = pDrvInfo->SPLCP;
 
        _rtl92e_update_received_rate_histogram_stats(dev, stats);
index 9b025b9..38110fa 100644 (file)
@@ -34,8 +34,7 @@ static bool _rtl92e_fw_boot_cpu(struct net_device *dev)
        netdev_dbg(dev, "Download Firmware: Put code ok!\n");
 
        CPU_status = rtl92e_readl(dev, CPU_GEN);
-       rtl92e_writeb(dev, CPU_GEN,
-                     (u8)((CPU_status|CPU_GEN_PWR_STB_CPU)&0xff));
+       rtl92e_writeb(dev, CPU_GEN, (CPU_status | CPU_GEN_PWR_STB_CPU) & 0xff);
        mdelay(1);
 
        if (!_rtl92e_wait_for_fw(dev, CPU_GEN_BOOT_RDY, 200)) {
index 4111381..f925510 100644 (file)
@@ -919,7 +919,7 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
                                continue;
                        switch (CurrentCmd->CmdID) {
                        case CmdID_SetTxPowerLevel:
-                               if (priv->IC_Cut > (u8)VERSION_8190_BD)
+                               if (priv->IC_Cut > VERSION_8190_BD)
                                        _rtl92e_set_tx_power_level(dev,
                                                                   channel);
                                break;
@@ -929,11 +929,11 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
                                break;
                        case CmdID_WritePortUshort:
                                rtl92e_writew(dev, CurrentCmd->Para1,
-                                             (u16)CurrentCmd->Para2);
+                                             CurrentCmd->Para2);
                                break;
                        case CmdID_WritePortUchar:
                                rtl92e_writeb(dev, CurrentCmd->Para1,
-                                             (u8)CurrentCmd->Para2);
+                                             CurrentCmd->Para2);
                                break;
                        case CmdID_RF_WriteReg:
                                for (eRFPath = 0; eRFPath <
@@ -1299,17 +1299,17 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
                            DIG_ALGO_BY_FALSE_ALARM)
                                rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
                        priv->initgain_backup.xaagccore1 =
-                                (u8)rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1,
-                                                      BitMask);
+                                rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1,
+                                                  BitMask);
                        priv->initgain_backup.xbagccore1 =
-                                (u8)rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1,
-                                                      BitMask);
+                                rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1,
+                                                  BitMask);
                        priv->initgain_backup.xcagccore1 =
-                                (u8)rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1,
-                                                      BitMask);
+                                rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1,
+                                                  BitMask);
                        priv->initgain_backup.xdagccore1 =
-                                (u8)rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1,
-                                                      BitMask);
+                                rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1,
+                                                  BitMask);
                        BitMask = bMaskByte2;
                        priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev,
                                                    rCCK0_CCA, BitMask);
index 756d8db..d58800d 100644 (file)
@@ -633,7 +633,7 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
        rtl92e_writeb(dev, FW_Busy_Flag, 0);
        priv->rtllib->bdynamic_txpower_enable = false;
 
-       powerlevelOFDM24G = (u8)(priv->Pwr_Track>>24);
+       powerlevelOFDM24G = priv->Pwr_Track >> 24;
        RF_Type = priv->rf_type;
        Value = (RF_Type<<8) | powerlevelOFDM24G;
 
@@ -833,7 +833,7 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
                                            bMaskDWord);
                for (i = 0; i < OFDM_Table_Length; i++) {
                        if (tmpRegA == OFDMSwingTable[i]) {
-                               priv->OFDM_index[0] = (u8)i;
+                               priv->OFDM_index[0] = i;
                                RT_TRACE(COMP_POWER_TRACKING,
                                         "Initial reg0x%x = 0x%x, OFDM_index = 0x%x\n",
                                         rOFDM0_XATxIQImbalance, tmpRegA,
@@ -844,7 +844,7 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
                TempCCk = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1, bMaskByte2);
                for (i = 0; i < CCK_Table_length; i++) {
                        if (TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0]) {
-                               priv->CCK_index = (u8) i;
+                               priv->CCK_index = i;
                                RT_TRACE(COMP_POWER_TRACKING,
                                         "Initial reg0x%x = 0x%x, CCK_index = 0x%x\n",
                                         rCCK0_TxFilter1, TempCCk,
@@ -1041,7 +1041,7 @@ static void _rtl92e_dm_cck_tx_power_adjust_tssi(struct net_device *dev,
 {
        u32 TempVal;
        struct r8192_priv *priv = rtllib_priv(dev);
-       u8 attenuation = (u8)priv->CCKPresentAttentuation;
+       u8 attenuation = priv->CCKPresentAttentuation;
 
        TempVal = 0;
        if (!bInCH14) {
@@ -1245,10 +1245,10 @@ void rtl92e_dm_backup_state(struct net_device *dev)
                return;
 
        rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
-       priv->initgain_backup.xaagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1, bit_mask);
-       priv->initgain_backup.xbagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1, bit_mask);
-       priv->initgain_backup.xcagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1, bit_mask);
-       priv->initgain_backup.xdagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask);
+       priv->initgain_backup.xaagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1, bit_mask);
+       priv->initgain_backup.xbagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1, bit_mask);
+       priv->initgain_backup.xcagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1, bit_mask);
+       priv->initgain_backup.xdagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask);
        bit_mask  = bMaskByte2;
        priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev, rCCK0_CCA, bit_mask);
 
@@ -1535,7 +1535,7 @@ static void _rtl92e_dm_initial_gain(struct net_device *dev)
 
        if ((dm_digtable.pre_ig_value != dm_digtable.cur_ig_value)
            || !initialized || force_write) {
-               initial_gain = (u8)dm_digtable.cur_ig_value;
+               initial_gain = dm_digtable.cur_ig_value;
                rtl92e_writeb(dev, rOFDM0_XAAGCCore1, initial_gain);
                rtl92e_writeb(dev, rOFDM0_XBAGCCore1, initial_gain);
                rtl92e_writeb(dev, rOFDM0_XCAGCCore1, initial_gain);
@@ -2513,5 +2513,5 @@ static void _rtl92e_dm_send_rssi_to_fw(struct net_device *dev)
 {
        struct r8192_priv *priv = rtllib_priv(dev);
 
-       rtl92e_writeb(dev, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb);
+       rtl92e_writeb(dev, DRIVER_RSSI, priv->undecorated_smoothed_pwdb);
 }
index 97afea4..7d04966 100644 (file)
@@ -238,7 +238,7 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
                             skb->data, skb->len);
 #endif
 
-       req = (struct rtllib_hdr_3addr *) skb->data;
+       req = (struct rtllib_hdr_3addr *)skb->data;
        tag = (u8 *)req;
        dst = (u8 *)(&req->addr2[0]);
        tag += sizeof(struct rtllib_hdr_3addr);
@@ -343,7 +343,6 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
                goto OnADDBARsp_Reject;
        }
 
-
        if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
                   (u8)(pBaParamSet->field.tid), TX_DIR, false)) {
                netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__);
@@ -355,7 +354,6 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
        pPendingBA = &pTS->TxPendingBARecord;
        pAdmittedBA = &pTS->TxAdmittedBARecord;
 
-
        if (pAdmittedBA->b_valid) {
                netdev_dbg(ieee->dev, "%s(): ADDBA response already admitted\n",
                           __func__);
@@ -374,7 +372,6 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
                DeActivateBAEntry(ieee, pPendingBA);
        }
 
-
        if (*pStatusCode == ADDBA_STATUS_SUCCESS) {
                if (pBaParamSet->field.ba_policy == BA_POLICY_DELAYED) {
                        pTS->bAddBaReqDelayed = true;
index c985e4e..0ecd81a 100644 (file)
@@ -1585,7 +1585,7 @@ struct rtllib_device {
        short sta_sleep;
        int ps_timeout;
        int ps_period;
-       struct tasklet_struct ps_task;
+       struct work_struct ps_task;
        u64 ps_time;
        bool polling;
 
index ed968c0..a8d22da 100644 (file)
@@ -103,7 +103,7 @@ static int ccmp_init_iv_and_aad(struct rtllib_hdr_4addr *hdr,
        if (a4_included)
                aad_len += 6;
        if (qc_included) {
-               pos = (u8 *) &hdr->addr4;
+               pos = (u8 *)&hdr->addr4;
                if (a4_included)
                        pos += 6;
                qc = *pos & 0x0f;
@@ -130,13 +130,13 @@ static int ccmp_init_iv_and_aad(struct rtllib_hdr_4addr *hdr,
         * A4 (if present)
         * QC (if present)
         */
-       pos = (u8 *) hdr;
+       pos = (u8 *)hdr;
        aad[0] = pos[0] & 0x8f;
        aad[1] = pos[1] & 0xc7;
        memcpy(&aad[2], &hdr->addr1, ETH_ALEN);
        memcpy(&aad[8], &hdr->addr2, ETH_ALEN);
        memcpy(&aad[14], &hdr->addr3, ETH_ALEN);
-       pos = (u8 *) &hdr->seq_ctl;
+       pos = (u8 *)&hdr->seq_ctl;
        aad[20] = pos[0] & 0x0f;
        aad[21] = 0; /* all bits masked */
        memset(aad + 22, 0, 8);
@@ -186,7 +186,7 @@ static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        *pos++ = key->tx_pn[1];
        *pos++ = key->tx_pn[0];
 
-       hdr = (struct rtllib_hdr_4addr *) skb->data;
+       hdr = (struct rtllib_hdr_4addr *)skb->data;
        if (!tcb_desc->bHwSec) {
                struct aead_request *req;
                struct scatterlist sg[2];
@@ -235,7 +235,7 @@ static int rtllib_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
                return -1;
        }
 
-       hdr = (struct rtllib_hdr_4addr *) skb->data;
+       hdr = (struct rtllib_hdr_4addr *)skb->data;
        pos = skb->data + hdr_len;
        keyidx = pos[3];
        if (!(keyidx & (1 << 5))) {
index 4a760ec..8bc9565 100644 (file)
@@ -136,7 +136,7 @@ static inline u16 Hi16(u32 val)
 
 static inline u16 Mk16(u8 hi, u8 lo)
 {
-       return lo | (((u16) hi) << 8);
+       return lo | (hi << 8);
 }
 
 
@@ -220,7 +220,7 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
        /* Make temporary area overlap WEP seed so that the final copy can be
         * avoided on little endian hosts.
         */
-       u16 *PPK = (u16 *) &WEPSeed[4];
+       u16 *PPK = (u16 *)&WEPSeed[4];
 
        /* Step 1 - make copy of TTAK and bring in TSC */
        PPK[0] = TTAK[0];
@@ -231,15 +231,15 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
        PPK[5] = TTAK[4] + IV16;
 
        /* Step 2 - 96-bit bijective mixing using S-box */
-       PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *) &TK[0]));
-       PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *) &TK[2]));
-       PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *) &TK[4]));
-       PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *) &TK[6]));
-       PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *) &TK[8]));
-       PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *) &TK[10]));
-
-       PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *) &TK[12]));
-       PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *) &TK[14]));
+       PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *)&TK[0]));
+       PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *)&TK[2]));
+       PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *)&TK[4]));
+       PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *)&TK[6]));
+       PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *)&TK[8]));
+       PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *)&TK[10]));
+
+       PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *)&TK[12]));
+       PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *)&TK[14]));
        PPK[2] += RotR1(PPK[1]);
        PPK[3] += RotR1(PPK[2]);
        PPK[4] += RotR1(PPK[3]);
@@ -251,7 +251,7 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
        WEPSeed[0] = Hi8(IV16);
        WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
        WEPSeed[2] = Lo8(IV16);
-       WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *) &TK[0])) >> 1);
+       WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *)&TK[0])) >> 1);
 
 #ifdef __BIG_ENDIAN
        {
@@ -280,7 +280,7 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
            skb->len < hdr_len)
                return -1;
 
-       hdr = (struct rtllib_hdr_4addr *) skb->data;
+       hdr = (struct rtllib_hdr_4addr *)skb->data;
 
        if (!tcb_desc->bHwSec) {
                if (!tkey->tx_phase1_done) {
@@ -357,7 +357,7 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        if (skb->len < hdr_len + 8 + 4)
                return -1;
 
-       hdr = (struct rtllib_hdr_4addr *) skb->data;
+       hdr = (struct rtllib_hdr_4addr *)skb->data;
        pos = skb->data + hdr_len;
        keyidx = pos[3];
        if (!(keyidx & (1 << 5))) {
@@ -485,7 +485,7 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
 {
        struct rtllib_hdr_4addr *hdr11;
 
-       hdr11 = (struct rtllib_hdr_4addr *) skb->data;
+       hdr11 = (struct rtllib_hdr_4addr *)skb->data;
        switch (le16_to_cpu(hdr11->frame_ctl) &
                (RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS)) {
        case RTLLIB_FCTL_TODS:
@@ -518,7 +518,7 @@ static int rtllib_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
        u8 *pos;
        struct rtllib_hdr_4addr *hdr;
 
-       hdr = (struct rtllib_hdr_4addr *) skb->data;
+       hdr = (struct rtllib_hdr_4addr *)skb->data;
 
        if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
                netdev_dbg(skb->dev,
@@ -558,7 +558,7 @@ static void rtllib_michael_mic_failure(struct net_device *dev,
        ether_addr_copy(ev.src_addr.sa_data, hdr->addr2);
        memset(&wrqu, 0, sizeof(wrqu));
        wrqu.data.length = sizeof(ev);
-       wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev);
+       wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev);
 }
 
 static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
@@ -568,7 +568,7 @@ static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
        u8 mic[8];
        struct rtllib_hdr_4addr *hdr;
 
-       hdr = (struct rtllib_hdr_4addr *) skb->data;
+       hdr = (struct rtllib_hdr_4addr *)skb->data;
 
        if (!tkey->key_set)
                return -1;
@@ -584,7 +584,7 @@ static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
        if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
                struct rtllib_hdr_4addr *hdr;
 
-               hdr = (struct rtllib_hdr_4addr *) skb->data;
+               hdr = (struct rtllib_hdr_4addr *)skb->data;
                netdev_dbg(skb->dev,
                           "Michael MIC verification failed for MSDU from %pM keyidx=%d\n",
                           hdr->addr2, keyidx);
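
The Mk16() change near the top of this file drops a (u16) cast that C's integer promotions already make redundant: both u8 operands are promoted to int before the shift and OR, and the result is narrowed back to u16 on return. The other (u8)/(u16) casts removed across this series are similar, relying on the ordinary implicit conversion at the assignment or call site, provided the value fits the destination type as it does here. A standalone illustration (userspace types and a hypothetical helper, not driver code):

    #include <assert.h>
    #include <stdint.h>

    /* Same shape as Mk16(): 'hi' is promoted to int before '<<',
     * so no explicit (u16) cast is needed.
     */
    static inline uint16_t mk16(uint8_t hi, uint8_t lo)
    {
            return lo | (hi << 8);
    }

    int main(void)
    {
            assert(mk16(0xAB, 0xCD) == 0xABCD);
            return 0;
    }
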
index eb904b4..abe5c15 100644 (file)
@@ -250,7 +250,7 @@ static int rtllib_is_eapol_frame(struct rtllib_device *ieee,
        if (skb->len < 24)
                return 0;
 
-       hdr = (struct rtllib_hdr_4addr *) skb->data;
+       hdr = (struct rtllib_hdr_4addr *)skb->data;
        fc = le16_to_cpu(hdr->frame_ctl);
 
        /* check that the frame is unicast frame to us */
@@ -299,7 +299,7 @@ rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
                        tcb_desc->bHwSec = 0;
        }
 
-       hdr = (struct rtllib_hdr_4addr *) skb->data;
+       hdr = (struct rtllib_hdr_4addr *)skb->data;
        hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
 
        atomic_inc(&crypt->refcnt);
@@ -339,7 +339,7 @@ rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
                        tcb_desc->bHwSec = 0;
        }
 
-       hdr = (struct rtllib_hdr_4addr *) skb->data;
+       hdr = (struct rtllib_hdr_4addr *)skb->data;
        hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
 
        atomic_inc(&crypt->refcnt);
@@ -936,7 +936,7 @@ static int rtllib_rx_check_duplicate(struct rtllib_device *ieee,
        } else {
                struct rx_ts_record *pRxTS = NULL;
 
-               if (GetTs(ieee, (struct ts_common_info **) &pRxTS, hdr->addr2,
+               if (GetTs(ieee, (struct ts_common_info **)&pRxTS, hdr->addr2,
                        (u8)Frame_QoSTID((u8 *)(skb->data)), RX_DIR, true)) {
                        if ((fc & (1<<11)) && (frag == pRxTS->rx_last_frag_num) &&
                            (WLAN_GET_SEQ_SEQ(sc) == pRxTS->rx_last_seq_num))
@@ -1100,7 +1100,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
                return -1;
        }
 
-       hdr = (struct rtllib_hdr_4addr *) skb->data;
+       hdr = (struct rtllib_hdr_4addr *)skb->data;
        if ((frag != 0 || (fc & RTLLIB_FCTL_MOREFRAGS))) {
                int flen;
                struct sk_buff *frag_skb = rtllib_frag_cache_get(ieee, hdr);
@@ -1152,7 +1152,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
                 * delivered, so remove skb from fragment cache
                 */
                skb = frag_skb;
-               hdr = (struct rtllib_hdr_4addr *) skb->data;
+               hdr = (struct rtllib_hdr_4addr *)skb->data;
                rtllib_frag_cache_invalidate(ieee, hdr);
        }
 
@@ -1165,7 +1165,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
                return -1;
        }
 
-       hdr = (struct rtllib_hdr_4addr *) skb->data;
+       hdr = (struct rtllib_hdr_4addr *)skb->data;
        if (crypt && !(fc & RTLLIB_FCTL_WEP) && !ieee->open_wep) {
                if (/*ieee->ieee802_1x &&*/
                    rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
@@ -1397,13 +1397,13 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
                goto rx_exit;
 
        /* Get TS for Rx Reorder  */
-       hdr = (struct rtllib_hdr_4addr *) skb->data;
+       hdr = (struct rtllib_hdr_4addr *)skb->data;
        if (ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
                && !is_multicast_ether_addr(hdr->addr1)
                && (!bToOtherSTA)) {
                TID = Frame_QoSTID(skb->data);
                SeqNum = WLAN_GET_SEQ_SEQ(sc);
-               GetTs(ieee, (struct ts_common_info **) &pTS, hdr->addr2, TID,
+               GetTs(ieee, (struct ts_common_info **)&pTS, hdr->addr2, TID,
                      RX_DIR, true);
                if (TID != 0 && TID != 3)
                        ieee->bis_any_nonbepkts = true;
@@ -2053,7 +2053,7 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
                        }
 
                        network->ssid_len = min(info_element->len,
-                                               (u8) IW_ESSID_MAX_SIZE);
+                                               (u8)IW_ESSID_MAX_SIZE);
                        memcpy(network->ssid, info_element->data,
                               network->ssid_len);
                        if (network->ssid_len < IW_ESSID_MAX_SIZE)
@@ -2721,7 +2721,7 @@ static void rtllib_rx_mgt(struct rtllib_device *ieee,
                if (ieee->sta_sleep || (ieee->ps != RTLLIB_PS_DISABLED &&
                    ieee->iw_mode == IW_MODE_INFRA &&
                    ieee->state == RTLLIB_LINKED))
-                       tasklet_schedule(&ieee->ps_task);
+                       schedule_work(&ieee->ps_task);
 
                break;
 
index 4b6c229..b5f4d35 100644 (file)
@@ -202,7 +202,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
        unsigned long flags;
        short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
        struct rtllib_hdr_3addr  *header =
-               (struct rtllib_hdr_3addr  *) skb->data;
+               (struct rtllib_hdr_3addr  *)skb->data;
 
        struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);
 
@@ -279,7 +279,7 @@ softmac_ps_mgmt_xmit(struct sk_buff *skb,
 {
        short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
        struct rtllib_hdr_3addr  *header =
-               (struct rtllib_hdr_3addr  *) skb->data;
+               (struct rtllib_hdr_3addr  *)skb->data;
        u16 fc, type, stype;
        struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + 8);
 
@@ -651,9 +651,9 @@ static void rtllib_beacons_stop(struct rtllib_device *ieee)
        spin_lock_irqsave(&ieee->beacon_lock, flags);
 
        ieee->beacon_txing = 0;
-       del_timer_sync(&ieee->beacon_timer);
 
        spin_unlock_irqrestore(&ieee->beacon_lock, flags);
+       del_timer_sync(&ieee->beacon_timer);
 
 }
 
@@ -856,9 +856,9 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
        encrypt = ieee->host_encrypt && crypt && crypt->ops &&
                ((strcmp(crypt->ops->name, "R-WEP") == 0 || wpa_ie_len));
        if (ieee->pHTInfo->bCurrentHTSupport) {
-               tmp_ht_cap_buf = (u8 *) &(ieee->pHTInfo->SelfHTCap);
+               tmp_ht_cap_buf = (u8 *)&(ieee->pHTInfo->SelfHTCap);
                tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
-               tmp_ht_info_buf = (u8 *) &(ieee->pHTInfo->SelfHTInfo);
+               tmp_ht_info_buf = (u8 *)&(ieee->pHTInfo->SelfHTInfo);
                tmp_ht_info_len = sizeof(ieee->pHTInfo->SelfHTInfo);
                HTConstructCapabilityElement(ieee, tmp_ht_cap_buf,
                                             &tmp_ht_cap_len, encrypt, false);
@@ -912,7 +912,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
        beacon_buf->info_element[0].id = MFIE_TYPE_SSID;
        beacon_buf->info_element[0].len = ssid_len;
 
-       tag = (u8 *) beacon_buf->info_element[0].data;
+       tag = (u8 *)beacon_buf->info_element[0].data;
 
        memcpy(tag, ssid, ssid_len);
 
@@ -1303,7 +1303,7 @@ rtllib_association_req(struct rtllib_network *beacon,
                        0x00};
                struct octet_string osCcxRmCap;
 
-               osCcxRmCap.Octet = (u8 *) CcxRmCapBuf;
+               osCcxRmCap.Octet = (u8 *)CcxRmCapBuf;
                osCcxRmCap.Length = sizeof(CcxRmCapBuf);
                tag = skb_put(skb, ccxrm_ie_len);
                *tag++ = MFIE_TYPE_GENERIC;
@@ -1764,7 +1764,7 @@ static void rtllib_softmac_check_all_nets(struct rtllib_device *ieee)
        spin_unlock_irqrestore(&ieee->lock, flags);
 }
 
-static inline u16 auth_parse(struct net_device *dev, struct sk_buff *skb,
+static inline int auth_parse(struct net_device *dev, struct sk_buff *skb,
                             u8 **challenge, int *chlen)
 {
        struct rtllib_authentication *a;
@@ -1773,10 +1773,10 @@ static inline u16 auth_parse(struct net_device *dev, struct sk_buff *skb,
        if (skb->len <  (sizeof(struct rtllib_authentication) -
            sizeof(struct rtllib_info_element))) {
                netdev_dbg(dev, "invalid len in auth resp: %d\n", skb->len);
-               return 0xcafe;
+               return -EINVAL;
        }
        *challenge = NULL;
-       a = (struct rtllib_authentication *) skb->data;
+       a = (struct rtllib_authentication *)skb->data;
        if (skb->len > (sizeof(struct rtllib_authentication) + 3)) {
                t = skb->data + sizeof(struct rtllib_authentication);
 
@@ -1787,7 +1787,13 @@ static inline u16 auth_parse(struct net_device *dev, struct sk_buff *skb,
                                return -ENOMEM;
                }
        }
-       return le16_to_cpu(a->status);
+
+       if (a->status) {
+               netdev_dbg(dev, "auth_parse() failed\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 static int auth_rq_parse(struct net_device *dev, struct sk_buff *skb, u8 *dest)
@@ -1799,7 +1805,7 @@ static int auth_rq_parse(struct net_device *dev, struct sk_buff *skb, u8 *dest)
                netdev_dbg(dev, "invalid len in auth request: %d\n", skb->len);
                return -1;
        }
-       a = (struct rtllib_authentication *) skb->data;
+       a = (struct rtllib_authentication *)skb->data;
 
        ether_addr_copy(dest, a->header.addr2);
 
@@ -1817,7 +1823,7 @@ static short probe_rq_parse(struct rtllib_device *ieee, struct sk_buff *skb,
        u8 *ssid = NULL;
        u8 ssidlen = 0;
        struct rtllib_hdr_3addr   *header =
-               (struct rtllib_hdr_3addr   *) skb->data;
+               (struct rtllib_hdr_3addr   *)skb->data;
        bool bssid_match;
 
        if (skb->len < sizeof(struct rtllib_hdr_3addr))
@@ -1865,7 +1871,7 @@ static int assoc_rq_parse(struct net_device *dev, struct sk_buff *skb, u8 *dest)
                return -1;
        }
 
-       a = (struct rtllib_assoc_request_frame *) skb->data;
+       a = (struct rtllib_assoc_request_frame *)skb->data;
 
        ether_addr_copy(dest, a->header.addr2);
 
@@ -1884,7 +1890,7 @@ static inline u16 assoc_parse(struct rtllib_device *ieee, struct sk_buff *skb,
                return 0xcafe;
        }
 
-       response_head = (struct rtllib_assoc_response_frame *) skb->data;
+       response_head = (struct rtllib_assoc_response_frame *)skb->data;
        *aid = le16_to_cpu(response_head->aid) & 0x3fff;
 
        status_code = le16_to_cpu(response_head->status);
@@ -2042,13 +2048,15 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
 
 }
 
-static inline void rtllib_sta_ps(struct tasklet_struct *t)
+static inline void rtllib_sta_ps(struct work_struct *work)
 {
-       struct rtllib_device *ieee = from_tasklet(ieee, t, ps_task);
+       struct rtllib_device *ieee;
        u64 time;
        short sleep;
        unsigned long flags, flags2;
 
+       ieee = container_of(work, struct rtllib_device, ps_task);
+
        spin_lock_irqsave(&ieee->lock, flags);
 
        if ((ieee->ps == RTLLIB_PS_DISABLED ||
@@ -2167,7 +2175,7 @@ EXPORT_SYMBOL(rtllib_ps_tx_ack);
 static void rtllib_process_action(struct rtllib_device *ieee,
                                  struct sk_buff *skb)
 {
-       struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+       struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
        u8 *act = rtllib_get_payload((struct rtllib_hdr *)header);
        u8 category = 0;
 
@@ -2206,7 +2214,7 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
        int aid;
        u8 *ies;
        struct rtllib_assoc_response_frame *assoc_resp;
-       struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+       struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
        u16 frame_ctl = le16_to_cpu(header->frame_ctl);
 
        netdev_dbg(ieee->dev, "received [RE]ASSOCIATION RESPONSE (%d)\n",
@@ -2278,7 +2286,7 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
 
 static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
 {
-       u16 errcode;
+       int errcode;
        u8 *challenge;
        int chlen = 0;
        bool bSupportNmode = true, bHalfSupportNmode = false;
@@ -2288,8 +2296,7 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
        if (errcode) {
                ieee->softmac_stats.rx_auth_rs_err++;
                netdev_info(ieee->dev,
-                           "Authentication response status code 0x%x",
-                           errcode);
+                           "Authentication response status code %d", errcode);
                rtllib_associate_abort(ieee);
                return;
        }
@@ -2351,7 +2358,7 @@ rtllib_rx_auth(struct rtllib_device *ieee, struct sk_buff *skb,
 static inline int
 rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb)
 {
-       struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+       struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
        u16 frame_ctl;
 
        if (memcmp(header->addr3, ieee->current_network.bssid, ETH_ALEN) != 0)
@@ -2391,7 +2398,7 @@ inline int rtllib_rx_frame_softmac(struct rtllib_device *ieee,
                                   struct rtllib_rx_stats *rx_stats, u16 type,
                                   u16 stype)
 {
-       struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
+       struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *)skb->data;
        u16 frame_ctl;
 
        if (!ieee->proto_started)
@@ -2811,7 +2818,7 @@ static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
        if (!skb)
                return NULL;
 
-       b = (struct rtllib_probe_response *) skb->data;
+       b = (struct rtllib_probe_response *)skb->data;
        b->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_BEACON);
 
        return skb;
@@ -2827,7 +2834,7 @@ struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee)
        if (!skb)
                return NULL;
 
-       b = (struct rtllib_probe_response *) skb->data;
+       b = (struct rtllib_probe_response *)skb->data;
        b->header.seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
 
        if (ieee->seq_ctrl[0] == 0xFFF)
@@ -3028,7 +3035,7 @@ int rtllib_softmac_init(struct rtllib_device *ieee)
        spin_lock_init(&ieee->mgmt_tx_lock);
        spin_lock_init(&ieee->beacon_lock);
 
-       tasklet_setup(&ieee->ps_task, rtllib_sta_ps);
+       INIT_WORK(&ieee->ps_task, rtllib_sta_ps);
 
        return 0;
 }
@@ -3050,8 +3057,8 @@ void rtllib_softmac_free(struct rtllib_device *ieee)
        cancel_work_sync(&ieee->associate_complete_wq);
        cancel_work_sync(&ieee->ips_leave_wq);
        cancel_work_sync(&ieee->wx_sync_scan_wq);
+       cancel_work_sync(&ieee->ps_task);
        mutex_unlock(&ieee->wx_mutex);
-       tasklet_kill(&ieee->ps_task);
 }
 
 static inline struct sk_buff *
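
This file, together with the rtllib.h hunk above, converts the power-save deferral from a tasklet to a work item: the handler now runs in process context via schedule_work() and is flushed with cancel_work_sync() on teardown. A minimal sketch of the pattern, with a trimmed-down struct standing in for rtllib_device:

    #include <linux/workqueue.h>

    struct rtllib_dev_sketch {
            struct work_struct ps_task;
            /* ... */
    };

    static void sta_ps_work(struct work_struct *work)
    {
            struct rtllib_dev_sketch *ieee =
                    container_of(work, struct rtllib_dev_sketch, ps_task);

            /* power-save handling; runs in process context here */
    }

    /* setup:    INIT_WORK(&ieee->ps_task, sta_ps_work);
     * schedule: schedule_work(&ieee->ps_task);
     * teardown: cancel_work_sync(&ieee->ps_task);
     */

The same file also switches auth_parse() to the usual 0 / negative-errno convention, which is why rtllib_rx_auth_resp() now keeps errcode as an int and simply tests if (errcode).
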
index 57a6d11..70a62ca 100644 (file)
@@ -41,8 +41,8 @@ int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
 
        /* if setting by freq convert to channel */
        if (fwrq->e == 1) {
-               if ((fwrq->m >= (int) 2.412e8 &&
-                    fwrq->m <= (int) 2.487e8)) {
+               if ((fwrq->m >= (int)2.412e8 &&
+                    fwrq->m <= (int)2.487e8)) {
                        int f = fwrq->m / 100000;
                        int c = 0;
 
index 0d67d58..cf9a240 100644 (file)
@@ -660,7 +660,7 @@ int rtllib_wx_set_mlme(struct rtllib_device *ieee,
 {
        u8 i = 0;
        bool deauth = false;
-       struct iw_mlme *mlme = (struct iw_mlme *) extra;
+       struct iw_mlme *mlme = (struct iw_mlme *)extra;
 
        if (ieee->state != RTLLIB_LINKED)
                return -ENOLINK;
index 68c0bf9..b577f9c 100644 (file)
@@ -1790,7 +1790,7 @@ struct ieee80211_device {
        short sta_sleep;
        int ps_timeout;
        int ps_period;
-       struct tasklet_struct ps_task;
+       struct work_struct ps_task;
        u32 ps_th;
        u32 ps_tl;
 
index 101c282..f17d07d 100644 (file)
@@ -362,7 +362,7 @@ static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
        struct ieee80211_ccmp_data *data = priv;
 
        if (len < CCMP_TK_LEN)
-               return -1;
+               return 0;
 
        if (!data->key_set)
                return 0;
index 689d884..7b120b8 100644 (file)
@@ -637,7 +637,7 @@ static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv)
        struct ieee80211_tkip_data *tkey = priv;
 
        if (len < TKIP_KEY_LEN)
-               return -1;
+               return 0;
 
        if (!tkey->key_set)
                return 0;
index 8a51ea1..a2cdf3b 100644 (file)
@@ -201,7 +201,7 @@ static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
        struct prism2_wep_data *wep = priv;
 
        if (len < wep->key_len)
-               return -1;
+               return 0;
 
        memcpy(key, wep->key, wep->key_len);
 
index 1a43979..92001cb 100644 (file)
@@ -528,9 +528,9 @@ static void ieee80211_beacons_stop(struct ieee80211_device *ieee)
        spin_lock_irqsave(&ieee->beacon_lock, flags);
 
        ieee->beacon_txing = 0;
-       del_timer_sync(&ieee->beacon_timer);
 
        spin_unlock_irqrestore(&ieee->beacon_lock, flags);
+       del_timer_sync(&ieee->beacon_timer);
 }
 
 void ieee80211_stop_send_beacons(struct ieee80211_device *ieee)
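
The hunk above (and its rtllib twin earlier in the pull) moves del_timer_sync() out from under beacon_lock. del_timer_sync() waits for a running timer callback to finish, so it must not be called while holding a lock that the callback itself may take; the beacon callback in these drivers appears to take beacon_lock, hence the reordering. A minimal sketch of the constraint, with hypothetical names rather than the real handler:

    #include <linux/spinlock.h>
    #include <linux/timer.h>

    struct beacon_dev_sketch {
            spinlock_t beacon_lock;
            struct timer_list beacon_timer;
    };

    /* Hypothetical callback: it takes beacon_lock, so waiting for it in
     * del_timer_sync() while that lock is held could deadlock.
     */
    static void beacon_cb_sketch(struct timer_list *t)
    {
            struct beacon_dev_sketch *dev = from_timer(dev, t, beacon_timer);
            unsigned long flags;

            spin_lock_irqsave(&dev->beacon_lock, flags);
            /* ... queue the next beacon ... */
            spin_unlock_irqrestore(&dev->beacon_lock, flags);
    }
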
@@ -1461,13 +1461,13 @@ void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
        spin_unlock_irqrestore(&ieee->lock, flags);
 }
 
-static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
+static inline int auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
 {
        struct ieee80211_authentication *a;
        u8 *t;
        if (skb->len < (sizeof(struct ieee80211_authentication) - sizeof(struct ieee80211_info_element))) {
                IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
-               return 0xcafe;
+               return -EINVAL;
        }
        *challenge = NULL;
        a = (struct ieee80211_authentication *)skb->data;
@@ -1482,7 +1482,12 @@ static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
                }
        }
 
-       return le16_to_cpu(a->status);
+       if (a->status) {
+               IEEE80211_DEBUG_MGMT("auth_parse() failed\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 static int auth_rq_parse(struct sk_buff *skb, u8 *dest)
@@ -1687,14 +1692,15 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
        return 1;
 }
 
-static inline void ieee80211_sta_ps(struct tasklet_struct *t)
+static inline void ieee80211_sta_ps(struct work_struct *work)
 {
-       struct ieee80211_device *ieee = from_tasklet(ieee, t, ps_task);
+       struct ieee80211_device *ieee;
        u32 th, tl;
        short sleep;
-
        unsigned long flags, flags2;
 
+       ieee = container_of(work, struct ieee80211_device, ps_task);
+
        spin_lock_irqsave(&ieee->lock, flags);
 
        if ((ieee->ps == IEEE80211_PS_DISABLED ||
@@ -1826,7 +1832,7 @@ static void ieee80211_check_auth_response(struct ieee80211_device *ieee,
 {
        /* default support N mode, disable halfNmode */
        bool bSupportNmode = true, bHalfSupportNmode = false;
-       u16 errcode;
+       int errcode;
        u8 *challenge;
        int chlen = 0;
        u32 iotAction;
@@ -1875,7 +1881,7 @@ static void ieee80211_check_auth_response(struct ieee80211_device *ieee,
                }
        } else {
                ieee->softmac_stats.rx_auth_rs_err++;
-               IEEE80211_DEBUG_MGMT("Auth response status code 0x%x", errcode);
+               IEEE80211_DEBUG_MGMT("Auth response status code %d\n", errcode);
                ieee80211_associate_abort(ieee);
        }
 }
@@ -1897,7 +1903,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
        if (ieee->sta_sleep || (ieee->ps != IEEE80211_PS_DISABLED &&
                                ieee->iw_mode == IW_MODE_INFRA &&
                                ieee->state == IEEE80211_LINKED))
-               tasklet_schedule(&ieee->ps_task);
+               schedule_work(&ieee->ps_task);
 
        if (WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_PROBE_RESP &&
            WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_BEACON)
@@ -2602,7 +2608,7 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
        spin_lock_init(&ieee->mgmt_tx_lock);
        spin_lock_init(&ieee->beacon_lock);
 
-       tasklet_setup(&ieee->ps_task, ieee80211_sta_ps);
+       INIT_WORK(&ieee->ps_task, ieee80211_sta_ps);
 }
 
 void ieee80211_softmac_free(struct ieee80211_device *ieee)
@@ -2613,7 +2619,7 @@ void ieee80211_softmac_free(struct ieee80211_device *ieee)
        del_timer_sync(&ieee->associate_timer);
 
        cancel_delayed_work(&ieee->associate_retry_wq);
-
+       cancel_work_sync(&ieee->ps_task);
        mutex_unlock(&ieee->wx_mutex);
 }
 
index 78cc8f3..d6829cf 100644 (file)
@@ -470,7 +470,9 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
                return 0;
        }
        len = crypt->ops->get_key(keybuf, SCM_KEY_LEN, NULL, crypt->priv);
-       erq->length = (len >= 0 ? len : 0);
+       if (len < 0)
+               len = 0;
+       erq->length = len;
 
        erq->flags |= IW_ENCODE_ENABLED;
 
@@ -686,9 +688,9 @@ int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
        } else {
                if (strcmp(crypt->ops->name, "WEP") == 0)
                        ext->alg = IW_ENCODE_ALG_WEP;
-               else if (strcmp(crypt->ops->name, "TKIP"))
+               else if (strcmp(crypt->ops->name, "TKIP") == 0)
                        ext->alg = IW_ENCODE_ALG_TKIP;
-               else if (strcmp(crypt->ops->name, "CCMP"))
+               else if (strcmp(crypt->ops->name, "CCMP") == 0)
                        ext->alg = IW_ENCODE_ALG_CCMP;
                else
                        return -EINVAL;
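
The two strcmp() comparisons above are a straight bug fix: strcmp() returns 0 on a match, so the old "else if (strcmp(...))" branches were taken when the names did not match. A small userspace reminder of the semantics:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *alg = "TKIP";

            if (strcmp(alg, "TKIP") == 0)
                    printf("matches TKIP\n");             /* intended test */
            if (strcmp(alg, "CCMP"))
                    printf("non-zero: strings differ\n"); /* what the old code keyed on */
            return 0;
    }
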
index dba3f2d..a93f090 100644 (file)
@@ -480,7 +480,7 @@ void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u
        }
        memset(posHTCap, 0, *len);
        if (pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC) {
-               u8      EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};       // For 11n EWC definition, 2007.07.17, by Emily
+               static const u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
 
                memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap));
                pCapELE = (struct ht_capability_ele *)&posHTCap[4];
@@ -940,10 +940,8 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
                        else
                                pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_64K;
                } else {
-                       if (pPeerHTCap->MaxRxAMPDUFactor < HT_AGG_SIZE_32K)
-                               pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
-                       else
-                               pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_32K;
+                       pHTInfo->CurrentAMPDUFactor = min_t(u32, pPeerHTCap->MaxRxAMPDUFactor,
+                                                           HT_AGG_SIZE_32K);
                }
        }
 
@@ -951,10 +949,9 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
         * <2> Set AMPDU Minimum MPDU Start Spacing
         * 802.11n 3.0 section 9.7d.3
         */
-       if (pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
-               pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
-       else
-               pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity;
+       pHTInfo->CurrentMPDUDensity = max_t(u32, pHTInfo->MPDU_Density,
+                                           pPeerHTCap->MPDUDensity);
+
        if (ieee->pairwise_key_type != KEY_TYPE_NA)
                pHTInfo->CurrentMPDUDensity     = 7; // 8us
        // Force TX AMSDU
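
The two open-coded if/else comparisons above collapse onto the min_t()/max_t() helpers from include/linux/minmax.h, which cast both operands to the named type before comparing. A short sketch of the idiom (hypothetical wrapper, not driver code):

    #include <linux/minmax.h>
    #include <linux/types.h>

    /* Cap a peer-advertised aggregation factor at a local limit; both
     * sides are evaluated as u32 regardless of the source field widths.
     */
    static u32 cap_ampdu_factor(u32 peer_factor, u32 local_limit)
    {
            return min_t(u32, peer_factor, local_limit);
    }
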
index ce807c9..2ca925f 100644 (file)
@@ -2537,7 +2537,7 @@ static short rtl8192_init(struct net_device *dev)
        }
 #else
        {
-               const u8 queuetopipe[] = {3, 2, 1, 0, 4, 4, 0, 4, 4};
+               static const u8 queuetopipe[] = {3, 2, 1, 0, 4, 4, 0, 4, 4};
 
                memcpy(priv->txqueue_to_outpipemap, queuetopipe, 9);
        }
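
Marking the lookup table static const (here and for EWC11NHTCap above) lets the compiler keep it in .rodata once instead of rebuilding it on the stack on every call. Sketch with a hypothetical accessor:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static const u8 queue_to_pipe_sketch[] = {3, 2, 1, 0, 4, 4, 0, 4, 4};

    static u8 pipe_for_queue(unsigned int queue)
    {
            return queue_to_pipe_sketch[queue % ARRAY_SIZE(queue_to_pipe_sketch)];
    }
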
index a44d04e..76ac798 100644 (file)
@@ -157,12 +157,11 @@ struct _adapter {
        struct iw_statistics iwstats;
        int pid; /*process id from UI*/
        struct work_struct wk_filter_rx_ff0;
-       u8 blnEnableRxFF0Filter;
-       spinlock_t lock_rx_ff0_filter;
        const struct firmware *fw;
        struct usb_interface *pusb_intf;
        struct mutex mutex_start;
        struct completion rtl8712_fw_ready;
+       struct completion rx_filter_ready;
 };
 
 static inline u8 *myid(struct eeprom_priv *peepriv)
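
The blnEnableRxFF0Filter flag and its spinlock are replaced by a completion; r8712_sitesurvey_cmd() later in this series signals it with complete(&padapter->rx_filter_ready), and the rx-filter side (not visible in these hunks) presumably blocks on it instead of polling the flag. Generic shape of the pattern, with trimmed-down names:

    #include <linux/completion.h>

    struct adapter_sketch {
            struct completion rx_filter_ready;
    };

    static void adapter_init(struct adapter_sketch *a)
    {
            init_completion(&a->rx_filter_ready);
    }

    static void on_sitesurvey(struct adapter_sketch *a)
    {
            complete(&a->rx_filter_ready);             /* replaces clearing the flag */
    }

    static void rx_filter_wait(struct adapter_sketch *a)
    {
            wait_for_completion(&a->rx_filter_ready);  /* replaces polling the flag */
    }
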
index f926809..7d8f1a2 100644 (file)
@@ -162,13 +162,13 @@ int r8712_generate_ie(struct registry_priv *registrypriv)
        uint sz = 0;
        struct wlan_bssid_ex *dev_network = &registrypriv->dev_network;
        u8 *ie = dev_network->IEs;
-       u16 beaconPeriod = (u16)dev_network->Configuration.BeaconPeriod;
+       u16 beacon_period = (u16)dev_network->Configuration.BeaconPeriod;
 
        /*timestamp will be inserted by hardware*/
        sz += 8;
        ie += sz;
        /*beacon interval : 2bytes*/
-       *(__le16 *)ie = cpu_to_le16(beaconPeriod);
+       *(__le16 *)ie = cpu_to_le16(beacon_period);
        sz += 2;
        ie += 2;
        /*capability info*/
index d15d52c..003e972 100644 (file)
@@ -332,7 +332,6 @@ void r8712_free_drv_sw(struct _adapter *padapter)
        r8712_free_evt_priv(&padapter->evtpriv);
        r8712_DeInitSwLeds(padapter);
        r8712_free_mlme_priv(&padapter->mlmepriv);
-       r8712_free_io_queue(padapter);
        _free_xmit_priv(&padapter->xmitpriv);
        _r8712_free_sta_priv(&padapter->stapriv);
        _r8712_free_recv_priv(&padapter->recvpriv);
index e125c72..68bdec0 100644 (file)
@@ -91,6 +91,5 @@
 #define        _BCNSPACE_MSK                   0x0FFF
 #define        _BCNSPACE_SHT                   0
 
-
 #endif /* __RTL8712_CMDCTRL_BITDEF_H__*/
 
index 4969d30..2e1ea9d 100644 (file)
@@ -15,8 +15,8 @@
 
 #define GET_EFUSE_OFFSET(header)       ((header & 0xF0) >> 4)
 #define GET_EFUSE_WORD_EN(header)      (header & 0x0F)
-#define MAKE_EFUSE_HEADER(offset, word_en)     (((offset & 0x0F) << 4) | \
-                                               (word_en & 0x0F))
+#define MAKE_EFUSE_HEADER(offset, word_en)     ((((offset) & 0x0F) << 4) | \
+                                               ((word_en) & 0x0F))
 /*--------------------------------------------------------------------------*/
 struct PGPKT_STRUCT {
        u8 offset;
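
MAKE_EFUSE_HEADER() gains parentheses around its arguments, the usual guard against expression arguments re-associating with operators in the macro body. A runnable before/after comparison with illustrative macro names:

    #include <assert.h>

    #define HDR_OLD(offset, word_en)  (((offset & 0x0F) << 4) | (word_en & 0x0F))
    #define HDR_NEW(offset, word_en)  ((((offset) & 0x0F) << 4) | ((word_en) & 0x0F))

    int main(void)
    {
            /* With an expression argument, '&' binds tighter than '|',
             * so the old form masks only part of the expression.
             */
            assert(HDR_NEW(0x10 | 0x02, 0) == 0x20);   /* (0x12 & 0x0F) << 4             */
            assert(HDR_OLD(0x10 | 0x02, 0) == 0x120);  /* 0x10 | (0x02 & 0x0F), then << 4 */
            return 0;
    }
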
index 3d9f40f..46d758d 100644 (file)
@@ -7,7 +7,6 @@
 #ifndef __RTL8712_MACSETTING_BITDEF_H__
 #define __RTL8712_MACSETTING_BITDEF_H__
 
-
 /*MACID*/
 /*BSSID*/
 
@@ -28,7 +27,5 @@
 
 /*BUILDUSER*/
 
-
-
 #endif /* __RTL8712_MACSETTING_BITDEF_H__*/
 
index e8cb2ee..64740d9 100644 (file)
@@ -16,7 +16,5 @@
 #define BUILDTIME                      (RTL8712_MACIDSETTING_ + 0x0024)
 #define BUILDUSER                      (RTL8712_MACIDSETTING_ + 0x0028)
 
-
-
 #endif /*__RTL8712_MACSETTING_REGDEF_H__*/
 
index a3eaee0..9ed5653 100644 (file)
@@ -39,6 +39,5 @@
 #define MCS_TXAGC7                     (RTL8712_RATECTRL_ + 0x67)
 #define CCK_TXAGC                      (RTL8712_RATECTRL_ + 0x68)
 
-
 #endif /*__RTL8712_RATECTRL_REGDEF_H__*/
 
index 0ffb30f..7f1fdd0 100644 (file)
@@ -56,7 +56,7 @@ void r8712_init_recv_priv(struct recv_priv *precvpriv,
                precvbuf->ref_cnt = 0;
                precvbuf->adapter = padapter;
                list_add_tail(&precvbuf->list,
-                             &(precvpriv->free_recv_buf_queue.queue));
+                             &precvpriv->free_recv_buf_queue.queue);
                precvbuf++;
        }
        precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
@@ -123,8 +123,8 @@ void r8712_free_recvframe(union recv_frame *precvframe,
                precvframe->u.hdr.pkt = NULL;
        }
        spin_lock_irqsave(&pfree_recv_queue->lock, irqL);
-       list_del_init(&(precvframe->u.hdr.list));
-       list_add_tail(&(precvframe->u.hdr.list), &pfree_recv_queue->queue);
+       list_del_init(&precvframe->u.hdr.list);
+       list_add_tail(&precvframe->u.hdr.list, &pfree_recv_queue->queue);
        if (padapter) {
                if (pfree_recv_queue == &precvpriv->free_recv_queue)
                        precvpriv->free_recvframe_cnt++;
@@ -319,7 +319,7 @@ static void amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
        struct rx_pkt_attrib *pattrib;
        _pkt *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
        struct recv_priv *precvpriv = &padapter->recvpriv;
-       struct  __queue *pfree_recv_queue = &(precvpriv->free_recv_queue);
+       struct  __queue *pfree_recv_queue = &precvpriv->free_recv_queue;
 
        nr_subframes = 0;
        pattrib = &prframe->u.hdr.attrib;
@@ -485,8 +485,8 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
                else
                        break;
        }
-       list_del_init(&(prframe->u.hdr.list));
-       list_add_tail(&(prframe->u.hdr.list), plist);
+       list_del_init(&prframe->u.hdr.list);
+       list_add_tail(&prframe->u.hdr.list, plist);
        return true;
 }
 
@@ -520,7 +520,7 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
                pattrib = &prframe->u.hdr.attrib;
                if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
                        plist = plist->next;
-                       list_del_init(&(prframe->u.hdr.list));
+                       list_del_init(&prframe->u.hdr.list);
                        if (SN_EQUAL(preorder_ctrl->indicate_seq,
                                     pattrib->seq_num))
                                preorder_ctrl->indicate_seq =
@@ -980,7 +980,7 @@ static void recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
        union recv_frame *precvframe = NULL;
        struct recv_priv *precvpriv = &padapter->recvpriv;
 
-       pfree_recv_queue = &(precvpriv->free_recv_queue);
+       pfree_recv_queue = &precvpriv->free_recv_queue;
        pbuf = pskb->data;
        prxstat = (struct recv_stat *)pbuf;
        pkt_cnt = (le32_to_cpu(prxstat->rxdw2) >> 16) & 0xff;
index 1c26a7e..44275ef 100644 (file)
@@ -30,6 +30,5 @@
 #define        _RXUSEDK                                        BIT(1)
 #define        _TXUSEDK                                        BIT(0)
 
-
 #endif /*__RTL8712_SECURITY_BITDEF_H__*/
 
index c0bab4c..613a410 100644 (file)
@@ -30,7 +30,6 @@
 
 #define RTL8712_IOBASE_FF      0x10300000 /*IOBASE_FIFO 0x1031000~0x103AFFFF*/
 
-
 /*IOREG Offset for 8712*/
 #define RTL8712_SYSCFG_                RTL8712_IOBASE_IOREG
 #define RTL8712_CMDCTRL_       (RTL8712_IOBASE_IOREG + 0x40)
@@ -47,7 +46,6 @@
 #define RTL8712_DEBUGCTRL_     (RTL8712_IOBASE_IOREG + 0x310)
 #define RTL8712_OFFLOAD_       (RTL8712_IOBASE_IOREG + 0x2D0)
 
-
 /*FIFO for 8712*/
 #define RTL8712_DMA_BCNQ       (RTL8712_IOBASE_FF + 0x10000)
 #define RTL8712_DMA_MGTQ       (RTL8712_IOBASE_FF + 0x20000)
@@ -60,7 +58,6 @@
 #define RTL8712_DMA_H2CCMD     (RTL8712_IOBASE_FF + 0x90000)
 #define RTL8712_DMA_C2HCMD     (RTL8712_IOBASE_FF + 0xA0000)
 
-
 /*------------------------------*/
 
 /*BIT 16 15*/
index a328ca9..d92df3f 100644 (file)
                                                       * Block's Bandgap.
                                                       */
 
-
 /*--------------------------------------------------------------------------*/
 /*       SPS1_CTRL bits                                (Offset 0x18-1E, 56bits)*/
 /*--------------------------------------------------------------------------*/
 #define        SPS1_SWEN               BIT(1)  /* Enable vsps18 SW Macro Block.*/
 #define        SPS1_LDEN               BIT(0)  /* Enable VSPS12 LDO Macro block.*/
 
-
 /*----------------------------------------------------------------------------*/
 /*       LDOA15_CTRL bits              (Offset 0x20, 8bits)*/
 /*----------------------------------------------------------------------------*/
 #define        LDA15_EN                BIT(0)  /* Enable LDOA15 Macro Block*/
 
-
 /*----------------------------------------------------------------------------*/
 /*       8192S LDOV12D_CTRL bit                (Offset 0x21, 8bits)*/
 /*----------------------------------------------------------------------------*/
 /*CLK_PS_CTRL*/
 #define        _CLK_GATE_EN            BIT(0)
 
-
 /* EFUSE_CTRL*/
 #define EF_FLAG                        BIT(31)         /* Access Flag, Write:1;
                                                 *              Read:0
index e95eb58..da5efcd 100644 (file)
@@ -14,7 +14,6 @@
 #ifndef __RTL8712_SYSCFG_REGDEF_H__
 #define __RTL8712_SYSCFG_REGDEF_H__
 
-
 #define SYS_ISO_CTRL           (RTL8712_SYSCFG_ + 0x0000)
 #define SYS_FUNC_EN            (RTL8712_SYSCFG_ + 0x0002)
 #define PMC_FSM                        (RTL8712_SYSCFG_ + 0x0004)
@@ -39,6 +38,5 @@
 #define RCLK_MON               (RTL8712_SYSCFG_ + 0x003E)
 #define EFUSE_CLK_CTRL         (RTL8712_SYSCFG_ + 0x02F8)
 
-
 #endif /*__RTL8712_SYSCFG_REGDEF_H__*/
 
index d3b45c6..ea164e4 100644 (file)
@@ -45,6 +45,5 @@
 #define        _RPT_CNT_MSK                    0x000FFFFF
 #define        _RPT_CNT_SHT                    0
 
-
 #endif /*__RTL8712_WMAC_BITDEF_H__*/
 
index acda930..4be96df 100644 (file)
@@ -202,7 +202,7 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
        mod_timer(&pmlmepriv->scan_to_timer,
                  jiffies + msecs_to_jiffies(SCANNING_TIMEOUT));
        padapter->ledpriv.LedControlHandler(padapter, LED_CTL_SITE_SURVEY);
-       padapter->blnEnableRxFF0Filter = 0;
+       complete(&padapter->rx_filter_ready);
        return _SUCCESS;
 }
 
@@ -536,7 +536,7 @@ void r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key)
                return;
        }
        init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
-       ph2c->rsp = (u8 *) psetstakey_rsp;
+       ph2c->rsp = (u8 *)psetstakey_rsp;
        ph2c->rspsz = sizeof(struct set_stakey_rsp);
        ether_addr_copy(psetstakey_para->addr, sta->hwaddr);
        if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
index 95e9ea5..8453d8d 100644 (file)
@@ -66,7 +66,6 @@ struct        evt_priv {
        u8      *evt_buf;       /*shall be non-paged, and 4 bytes aligned*/
        u8      *evt_allocated_buf;
        u32     evt_done_cnt;
-       struct tasklet_struct event_tasklet;
 };
 
 #define init_h2fwcmd_w_parm_no_rsp(pcmd, pparm, code) \
@@ -316,7 +315,6 @@ enum _RT_CHANNEL_DOMAIN {
        RT_CHANNEL_DOMAIN_MAX,
 };
 
-
 struct SetChannelPlan_param {
        enum _RT_CHANNEL_DOMAIN ChannelPlan;
 };
@@ -338,7 +336,6 @@ struct getdatarate_rsp {
        u8 datarates[NumRates];
 };
 
-
 /*
  *     Caller Mode: Any
  *     AP: AP can use the info for the contents of beacon frame
index 634e674..d6332a8 100644 (file)
@@ -13,7 +13,6 @@
        #define OID_802_11_PMKID                        0x0d010123
 #endif
 
-
 /* For DDK-defined OIDs*/
 #define OID_NDIS_SEG1  0x00010100
 #define OID_NDIS_SEG2  0x00010200
index 3b69266..36f6904 100644 (file)
@@ -82,9 +82,9 @@ static inline void handle_pairwise_key(struct sta_info *psta,
               (param->u.crypt. key_len > 16 ? 16 : param->u.crypt.key_len));
        if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
                memcpy(psta->tkiptxmickey. skey,
-                      &(param->u.crypt.key[16]), 8);
+                      &param->u.crypt.key[16], 8);
                memcpy(psta->tkiprxmickey. skey,
-                      &(param->u.crypt.key[24]), 8);
+                      &param->u.crypt.key[24], 8);
                padapter->securitypriv. busetkipkey = false;
                mod_timer(&padapter->securitypriv.tkip_timer,
                          jiffies + msecs_to_jiffies(50));
@@ -600,7 +600,7 @@ static int r8711_wx_get_name(struct net_device *dev,
        u32 ht_ielen = 0;
        char *p;
        u8 ht_cap = false;
-       struct  mlme_priv       *pmlmepriv = &(padapter->mlmepriv);
+       struct  mlme_priv       *pmlmepriv = &padapter->mlmepriv;
        struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
        u8 *prates;
 
@@ -659,8 +659,8 @@ static int r8711_wx_set_freq(struct net_device *dev,
 
 /* If setting by frequency, convert to a channel */
        if ((fwrq->e == 1) &&
-         (fwrq->m >= (int) 2.412e8) &&
-         (fwrq->m <= (int) 2.487e8)) {
+         (fwrq->m >= 241200000) &&
+         (fwrq->m <= 248700000)) {
                int f = fwrq->m / 100000;
                int c = 0;
 
@@ -1494,7 +1494,7 @@ static int r8711_wx_set_enc(struct net_device *dev,
        u32 keyindex_provided;
        struct NDIS_802_11_WEP   wep;
        enum NDIS_802_11_AUTHENTICATION_MODE authmode;
-       struct iw_point *erq = &(wrqu->encoding);
+       struct iw_point *erq = &wrqu->encoding;
        struct _adapter *padapter = netdev_priv(dev);
 
        key = erq->flags & IW_ENCODE_INDEX;
@@ -1589,8 +1589,8 @@ static int r8711_wx_get_enc(struct net_device *dev,
 {
        uint key;
        struct _adapter *padapter = netdev_priv(dev);
-       struct iw_point *erq = &(wrqu->encoding);
-       struct  mlme_priv       *pmlmepriv = &(padapter->mlmepriv);
+       struct iw_point *erq = &wrqu->encoding;
+       struct  mlme_priv       *pmlmepriv = &padapter->mlmepriv;
        union Keytype *dk = padapter->securitypriv.DefKey;
 
        if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
@@ -1670,7 +1670,7 @@ static int r871x_wx_set_auth(struct net_device *dev,
                                union iwreq_data *wrqu, char *extra)
 {
        struct _adapter *padapter = netdev_priv(dev);
-       struct iw_param *param = (struct iw_param *)&(wrqu->param);
+       struct iw_param *param = (struct iw_param *)&wrqu->param;
        int paramid;
        int paramval;
        int ret = 0;
@@ -1964,7 +1964,7 @@ static int r871x_get_ap_info(struct net_device *dev,
                return -EINVAL;
        data[32] = 0;
 
-       spin_lock_irqsave(&(pmlmepriv->scanned_queue.lock), irqL);
+       spin_lock_irqsave(&pmlmepriv->scanned_queue.lock, irqL);
        phead = &queue->queue;
        plist = phead->next;
        while (1) {
@@ -1974,7 +1974,7 @@ static int r871x_get_ap_info(struct net_device *dev,
                if (!mac_pton(data, bssid)) {
                        netdev_info(dev, "r8712u: Invalid BSSID '%s'.\n",
                                    (u8 *)data);
-                       spin_unlock_irqrestore(&(pmlmepriv->scanned_queue.lock),
+                       spin_unlock_irqrestore(&pmlmepriv->scanned_queue.lock,
                                               irqL);
                        return -EINVAL;
                }
@@ -1996,7 +1996,7 @@ static int r871x_get_ap_info(struct net_device *dev,
                }
                plist = plist->next;
        }
-       spin_unlock_irqrestore(&(pmlmepriv->scanned_queue.lock), irqL);
+       spin_unlock_irqrestore(&pmlmepriv->scanned_queue.lock, irqL);
        if (pdata->length >= 34) {
                if (copy_to_user((u8 __user *)pdata->pointer + 32,
                    (u8 *)&pdata->flags, 1))
index b78101a..2b53933 100644 (file)
@@ -367,7 +367,6 @@ uint oid_rt_get_scan_in_progress_hdl(struct oid_par_priv *poid_par_priv)
        return RNDIS_STATUS_SUCCESS;
 }
 
-
 uint oid_rt_forced_data_rate_hdl(struct oid_par_priv *poid_par_priv)
 {
        return RNDIS_STATUS_SUCCESS;
index 6cdc6f1..34c9a52 100644 (file)
@@ -22,7 +22,6 @@
 #include "usb_osintf.h"
 #include "usb_ops.h"
 
-
 static u8 validate_ssid(struct ndis_802_11_ssid *ssid)
 {
        u8 i;
@@ -76,7 +75,7 @@ static u8 do_join(struct _adapter *padapter)
                         * acquired by caller...
                         */
                        struct wlan_bssid_ex *pdev_network =
-                               &(padapter->registrypriv.dev_network);
+                               &padapter->registrypriv.dev_network;
                        pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
                        pibss = padapter->registrypriv.dev_network.MacAddress;
                        memcpy(&pdev_network->Ssid,
index 92b7c9c..63e12b1 100644 (file)
@@ -431,8 +431,7 @@ static int is_desired_network(struct _adapter *adapter,
                bselected = false;
        if (check_fwstate(&adapter->mlmepriv, WIFI_ADHOC_STATE)) {
                if (pnetwork->network.InfrastructureMode !=
-                       adapter->mlmepriv.cur_network.network.
-                       InfrastructureMode)
+                       adapter->mlmepriv.cur_network.network.InfrastructureMode)
                        bselected = false;
        }
        return bselected;
@@ -539,8 +538,7 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf)
                                        struct wlan_bssid_ex *pdev_network =
                                          &(adapter->registrypriv.dev_network);
                                        u8 *pibss =
-                                                adapter->registrypriv.
-                                                       dev_network.MacAddress;
+                                                adapter->registrypriv.dev_network.MacAddress;
                                        pmlmepriv->fw_state ^= _FW_UNDER_SURVEY;
                                        memcpy(&pdev_network->Ssid,
                                                &pmlmepriv->assoc_ssid,
@@ -688,11 +686,9 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
        pnetwork->network.Configuration.DSConfig =
                 le32_to_cpu(pnetwork->network.Configuration.DSConfig);
        pnetwork->network.Configuration.FHConfig.DwellTime =
-                le32_to_cpu(pnetwork->network.Configuration.FHConfig.
-                            DwellTime);
+                le32_to_cpu(pnetwork->network.Configuration.FHConfig.DwellTime);
        pnetwork->network.Configuration.FHConfig.HopPattern =
-                le32_to_cpu(pnetwork->network.Configuration.
-                            FHConfig.HopPattern);
+                le32_to_cpu(pnetwork->network.Configuration.FHConfig.HopPattern);
        pnetwork->network.Configuration.FHConfig.HopSet =
                 le32_to_cpu(pnetwork->network.Configuration.FHConfig.HopSet);
        pnetwork->network.Configuration.FHConfig.Length =
@@ -717,36 +713,29 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
                        if (check_fwstate(pmlmepriv, _FW_LINKED)) {
                                if (the_same_macaddr) {
                                        ptarget_wlan =
-                                           r8712_find_network(&pmlmepriv->
-                                           scanned_queue,
+                                           r8712_find_network(&pmlmepriv->scanned_queue,
                                            cur_network->network.MacAddress);
                                } else {
                                        pcur_wlan =
-                                            r8712_find_network(&pmlmepriv->
-                                            scanned_queue,
+                                            r8712_find_network(&pmlmepriv->scanned_queue,
                                             cur_network->network.MacAddress);
                                        if (pcur_wlan)
                                                pcur_wlan->fixed = false;
 
                                        pcur_sta = r8712_get_stainfo(pstapriv,
                                             cur_network->network.MacAddress);
-                                       spin_lock_irqsave(&pstapriv->
-                                               sta_hash_lock, irqL2);
+                                       spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL2);
                                        r8712_free_stainfo(adapter, pcur_sta);
-                                       spin_unlock_irqrestore(&(pstapriv->
-                                               sta_hash_lock), irqL2);
+                                       spin_unlock_irqrestore(&(pstapriv->sta_hash_lock), irqL2);
 
                                        ptarget_wlan =
-                                                r8712_find_network(&pmlmepriv->
-                                                scanned_queue,
-                                                pnetwork->network.
-                                                MacAddress);
+                                                r8712_find_network(&pmlmepriv->scanned_queue,
+                                                pnetwork->network.MacAddress);
                                        if (ptarget_wlan)
                                                ptarget_wlan->fixed = true;
                                }
                        } else {
-                               ptarget_wlan = r8712_find_network(&pmlmepriv->
-                                               scanned_queue,
+                               ptarget_wlan = r8712_find_network(&pmlmepriv->scanned_queue,
                                                pnetwork->network.MacAddress);
                                if (ptarget_wlan)
                                        ptarget_wlan->fixed = true;
@@ -779,39 +768,25 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
                                        ptarget_sta->aid = pnetwork->join_res;
                                        ptarget_sta->qos_option = 1;
                                        ptarget_sta->mac_id = 5;
-                                       if (adapter->securitypriv.
-                                           AuthAlgrthm == 2) {
-                                               adapter->securitypriv.
-                                                       binstallGrpkey =
-                                                        false;
-                                               adapter->securitypriv.
-                                                       busetkipkey =
-                                                        false;
-                                               adapter->securitypriv.
-                                                       bgrpkey_handshake =
-                                                        false;
-                                               ptarget_sta->ieee8021x_blocked
-                                                        = true;
-                                               ptarget_sta->XPrivacy =
-                                                        adapter->securitypriv.
-                                                        PrivacyAlgrthm;
-                                               memset((u8 *)&ptarget_sta->
-                                                        x_UncstKey,
+                                       if (adapter->securitypriv.AuthAlgrthm == 2) {
+                                               adapter->securitypriv.binstallGrpkey = false;
+                                               adapter->securitypriv.busetkipkey = false;
+                                               adapter->securitypriv.bgrpkey_handshake = false;
+                                               ptarget_sta->ieee8021x_blocked = true;
+                                               ptarget_sta->XPrivacy =
+                                                       adapter->securitypriv.PrivacyAlgrthm;
+                                               memset((u8 *)&ptarget_sta->x_UncstKey,
                                                         0,
                                                         sizeof(union Keytype));
-                                               memset((u8 *)&ptarget_sta->
-                                                        tkiprxmickey,
+                                               memset((u8 *)&ptarget_sta->tkiprxmickey,
                                                         0,
                                                         sizeof(union Keytype));
-                                               memset((u8 *)&ptarget_sta->
-                                                        tkiptxmickey,
+                                               memset((u8 *)&ptarget_sta->tkiptxmickey,
                                                         0,
                                                         sizeof(union Keytype));
-                                               memset((u8 *)&ptarget_sta->
-                                                        txpn, 0,
+                                               memset((u8 *)&ptarget_sta->txpn, 0,
                                                         sizeof(union pn48));
-                                               memset((u8 *)&ptarget_sta->
-                                                        rxpn, 0,
+                                               memset((u8 *)&ptarget_sta->rxpn, 0,
                                                         sizeof(union pn48));
                                        }
                                } else {
@@ -942,8 +917,7 @@ void r8712_stadel_event_callback(struct _adapter *adapter, u8 *pbuf)
                        pdev_network = &(adapter->registrypriv.dev_network);
                        pibss = adapter->registrypriv.dev_network.MacAddress;
                        memcpy(pdev_network, &tgt_network->network,
-                               r8712_get_wlan_bssid_ex_sz(&tgt_network->
-                                                       network));
+                               r8712_get_wlan_bssid_ex_sz(&tgt_network->network));
                        memcpy(&pdev_network->Ssid,
                                &pmlmepriv->assoc_ssid,
                                sizeof(struct ndis_802_11_ssid));
@@ -1092,8 +1066,7 @@ int r8712_select_and_join_from_scan(struct mlme_priv *pmlmepriv)
                        src_ssid = pmlmepriv->assoc_bssid;
                        if (!memcmp(dst_ssid, src_ssid, ETH_ALEN)) {
                                if (check_fwstate(pmlmepriv, _FW_LINKED)) {
-                                       if (is_same_network(&pmlmepriv->
-                                           cur_network.network,
+                                       if (is_same_network(&pmlmepriv->cur_network.network,
                                            &pnetwork->network)) {
                                                _clr_fwstate_(pmlmepriv,
                                                        _FW_UNDER_LINKING);
@@ -1284,26 +1257,13 @@ int r8712_restruct_wmm_ie(struct _adapter *adapter, u8 *in_ie, u8 *out_ie,
  */
 static int SecIsInPMKIDList(struct _adapter *Adapter, u8 *bssid)
 {
-       struct security_priv *psecuritypriv = &Adapter->securitypriv;
-       int i = 0;
-
-       do {
-               if (psecuritypriv->PMKIDList[i].bUsed &&
-                  (!memcmp(psecuritypriv->PMKIDList[i].Bssid,
-                           bssid, ETH_ALEN)))
-                       break;
-               i++;
-
-       } while (i < NUM_PMKID_CACHE);
+       struct security_priv *p = &Adapter->securitypriv;
+       int i;
 
-       if (i == NUM_PMKID_CACHE) {
-               i = -1; /* Could not find. */
-       } else {
-               ; /* There is one Pre-Authentication Key for the
-                  * specific BSSID.
-                  */
-       }
-       return i;
+       for (i = 0; i < NUM_PMKID_CACHE; i++)
+               if (p->PMKIDList[i].bUsed && !memcmp(p->PMKIDList[i].Bssid, bssid, ETH_ALEN))
+                       return i;
+       return -1;
 }
 
 sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
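The rewritten SecIsInPMKIDList() above now returns the cache index directly, or -1 when the BSSID has no cached entry. A caller would presumably use it along these lines (sketch only; the RSN-IE construction code is outside this hunk):

        int idx = SecIsInPMKIDList(adapter, bssid);

        if (idx >= 0) {
                /* a pre-authentication key is cached for this BSSID:
                 * append the PMKID of entry idx to the RSN IE being built
                 */
        }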
index 9820449..aa4d5ce 100644 (file)
@@ -148,7 +148,6 @@ extern struct oid_obj_priv oid_rtl_seg_87_12_00[32];
 
 #endif /* _RTL871X_MP_IOCTL_C_ */
 
-
 enum MP_MODE {
        MP_START_MODE,
        MP_STOP_MODE,
index ca5072e..a08c5d2 100644 (file)
@@ -26,7 +26,6 @@
 #ifndef __RTL871X_MP_PHY_REGDEF_H
 #define __RTL871X_MP_PHY_REGDEF_H
 
-
 /*--------------------------Define Parameters-------------------------------*/
 
 /*============================================================
 #define        ANTENNA_C       0x4
 #define        ANTENNA_D       0x8
 
-
 /* accept all physical address */
 #define RCR_AAP                BIT(0)
 #define RCR_APM                BIT(1)          /* accept physical match */
 
 /*--------------------------Define Parameters-------------------------------*/
 
-
 #endif /*__INC_HAL8192SPHYREG_H */
 
index 66cc50f..de9a568 100644 (file)
@@ -455,7 +455,6 @@ static sint validate_recv_mgnt_frame(struct _adapter *adapter,
        return _FAIL;
 }
 
-
 static sint validate_recv_data_frame(struct _adapter *adapter,
                                     union recv_frame *precv_frame)
 {
index e0a1c30..e46a5db 100644 (file)
@@ -381,7 +381,6 @@ void seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len, u8 *mic_code,
 #define P1K_SIZE         10    /*  80-bit Phase1 key                */
 #define RC4_KEY_SIZE     16    /* 128-bit RC4KEY (104 bits unknown) */
 
-
 /* 2-unsigned char by 2-unsigned char subset of the full AES S-box table */
 static const unsigned short Sbox1[2][256] = {/* Sbox for hash (can be in ROM) */
        {
index 9b7e5ff..6286c62 100644 (file)
@@ -21,7 +21,6 @@
 #define NUM_STA 32
 #define NUM_ACL 64
 
-
 /* if mode ==0, then the sta is allowed once the addr is hit.
  * if mode ==1, then the sta is rejected once the addr is non-hit.
  */
index ee4c61f..37364d3 100644 (file)
@@ -265,6 +265,7 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter)
 
 static void r8712_usb_dvobj_deinit(struct _adapter *padapter)
 {
+       r8712_free_io_queue(padapter);
 }
 
 void rtl871x_intf_stop(struct _adapter *padapter)
@@ -302,9 +303,6 @@ void r871x_dev_unload(struct _adapter *padapter)
                        rtl8712_hal_deinit(padapter);
                }
 
-               /*s6.*/
-               if (padapter->dvobj_deinit)
-                       padapter->dvobj_deinit(padapter);
                padapter->bup = false;
        }
 }
@@ -538,13 +536,13 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
                } else {
                        AutoloadFail = false;
                }
-               if (((mac[0] == 0xff) && (mac[1] == 0xff) &&
+               if ((!AutoloadFail) ||
+                   ((mac[0] == 0xff) && (mac[1] == 0xff) &&
                     (mac[2] == 0xff) && (mac[3] == 0xff) &&
                     (mac[4] == 0xff) && (mac[5] == 0xff)) ||
                    ((mac[0] == 0x00) && (mac[1] == 0x00) &&
                     (mac[2] == 0x00) && (mac[3] == 0x00) &&
-                    (mac[4] == 0x00) && (mac[5] == 0x00)) ||
-                    (!AutoloadFail)) {
+                    (mac[4] == 0x00) && (mac[5] == 0x00))) {
                        mac[0] = 0x00;
                        mac[1] = 0xe0;
                        mac[2] = 0x4c;
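The reordered test above is the open-coded form of checks that <linux/etherdevice.h> already provides; an equivalent sketch (the driver keeps its byte-by-byte comparison, eeprom_mac_unusable() is an illustrative name, and these helpers require mac[] to be u16-aligned):

        #include <linux/etherdevice.h>

        static bool eeprom_mac_unusable(bool autoload_fail, const u8 *mac)
        {
                /* same condition as above: no autoload, all-ff, or all-zero address */
                return !autoload_fail || is_broadcast_ether_addr(mac) ||
                       is_zero_ether_addr(mac);
        }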
@@ -568,7 +566,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
        /* step 6. Load the firmware asynchronously */
        if (rtl871x_load_fw(padapter))
                goto deinit_drv_sw;
-       spin_lock_init(&padapter->lock_rx_ff0_filter);
+       init_completion(&padapter->rx_filter_ready);
        mutex_init(&padapter->mutex_start);
        return 0;
 
@@ -607,6 +605,8 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
        /* Stop driver mlme relation timer */
        r8712_stop_drv_timers(padapter);
        r871x_dev_unload(padapter);
+       if (padapter->dvobj_deinit)
+               padapter->dvobj_deinit(padapter);
        r8712_free_drv_sw(padapter);
        free_netdev(pnetdev);
 
index e64845e..af9966d 100644 (file)
@@ -29,7 +29,8 @@ static u8 usb_read8(struct intf_hdl *intfhdl, u32 addr)
        u16 wvalue;
        u16 index;
        u16 len;
-       __le32 data;
+       int status;
+       __le32 data = 0;
        struct intf_priv *intfpriv = intfhdl->pintfpriv;
 
        request = 0x05;
@@ -37,8 +38,10 @@ static u8 usb_read8(struct intf_hdl *intfhdl, u32 addr)
        index = 0;
        wvalue = (u16)(addr & 0x0000ffff);
        len = 1;
-       r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
-                               requesttype);
+       status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
+                                        &data, len, requesttype);
+       if (status < 0)
+               return 0;
        return (u8)(le32_to_cpu(data) & 0x0ff);
 }
 
@@ -49,7 +52,8 @@ static u16 usb_read16(struct intf_hdl *intfhdl, u32 addr)
        u16 wvalue;
        u16 index;
        u16 len;
-       __le32 data;
+       int status;
+       __le32 data = 0;
        struct intf_priv *intfpriv = intfhdl->pintfpriv;
 
        request = 0x05;
@@ -57,8 +61,10 @@ static u16 usb_read16(struct intf_hdl *intfhdl, u32 addr)
        index = 0;
        wvalue = (u16)(addr & 0x0000ffff);
        len = 2;
-       r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
-                               requesttype);
+       status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
+                                        &data, len, requesttype);
+       if (status < 0)
+               return 0;
        return (u16)(le32_to_cpu(data) & 0xffff);
 }
 
@@ -69,7 +75,8 @@ static u32 usb_read32(struct intf_hdl *intfhdl, u32 addr)
        u16 wvalue;
        u16 index;
        u16 len;
-       __le32 data;
+       int status;
+       __le32 data = 0;
        struct intf_priv *intfpriv = intfhdl->pintfpriv;
 
        request = 0x05;
@@ -77,8 +84,10 @@ static u32 usb_read32(struct intf_hdl *intfhdl, u32 addr)
        index = 0;
        wvalue = (u16)(addr & 0x0000ffff);
        len = 4;
-       r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
-                               requesttype);
+       status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
+                                        &data, len, requesttype);
+       if (status < 0)
+               return 0;
        return le32_to_cpu(data);
 }
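The three readers above now initialise the buffer and bail out on a failed transfer, so a failure reads back as 0 instead of stack garbage. They share the same shape; a hypothetical common helper (not part of the driver, shown only to make the pattern explicit, using the request 0x05 and read request type 0x01 seen in the surrounding code) would be:

        static u32 usb_read_common(struct intf_hdl *intfhdl, u32 addr, u16 len)
        {
                __le32 data = 0;        /* stays well-defined even if the transfer fails */
                int status;

                status = r8712_usbctrl_vendorreq(intfhdl->pintfpriv, 0x05,
                                                 (u16)(addr & 0x0000ffff), 0,
                                                 &data, len, 0x01);
                if (status < 0)
                        return 0;
                return le32_to_cpu(data);
        }

        /* usb_read8() would then reduce to (u8)(usb_read_common(intfhdl, addr, 1) & 0xff), and so on. */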
 
index f984a5a..b2181e1 100644 (file)
@@ -495,14 +495,21 @@ int r8712_usbctrl_vendorreq(struct intf_priv *pintfpriv, u8 request, u16 value,
        }
        status = usb_control_msg(udev, pipe, request, reqtype, value, index,
                                 pIo_buf, len, 500);
-       if (status > 0) {  /* Success this control transfer. */
-               if (requesttype == 0x01) {
-                       /* For Control read transfer, we have to copy the read
-                        * data from pIo_buf to pdata.
-                        */
-                       memcpy(pdata, pIo_buf,  status);
-               }
+       if (status < 0)
+               goto free;
+       if (status != len) {
+               status = -EREMOTEIO;
+               goto free;
+       }
+       /* The control transfer succeeded. */
+       if (requesttype == 0x01) {
+               /* For Control read transfer, we have to copy the read
+                * data from pIo_buf to pdata.
+                */
+               memcpy(pdata, pIo_buf, status);
        }
+
+free:
        kfree(palloc_buf);
        return status;
 }
index b8acb9c..498e6de 100644 (file)
@@ -186,7 +186,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
 #define _CAPABILITY_                   2
 #define _TIMESTAMP_                            8
 
-
 /*-----------------------------------------------------------------------------
  *                     Below is the definition for WMM
  *------------------------------------------------------------------------------
index 90d34cf..4a93839 100644 (file)
@@ -95,18 +95,12 @@ void r8712_SetFilter(struct work_struct *work)
        struct _adapter *adapter = container_of(work, struct _adapter,
                                                wk_filter_rx_ff0);
        u8  oldvalue = 0x00, newvalue = 0x00;
-       unsigned long irqL;
 
        oldvalue = r8712_read8(adapter, 0x117);
        newvalue = oldvalue & 0xfe;
        r8712_write8(adapter, 0x117, newvalue);
 
-       spin_lock_irqsave(&adapter->lock_rx_ff0_filter, irqL);
-       adapter->blnEnableRxFF0Filter = 1;
-       spin_unlock_irqrestore(&adapter->lock_rx_ff0_filter, irqL);
-       do {
-               msleep(100);
-       } while (adapter->blnEnableRxFF0Filter == 1);
+       wait_for_completion(&adapter->rx_filter_ready);
        r8712_write8(adapter, 0x117, oldvalue);
 }
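The 100 ms polling loop above becomes a blocking wait on a completion, which implies that the code path that used to clear blnEnableRxFF0Filter now signals the work item instead. That call site is outside this hunk, but it would reduce to something like (sketch, assuming the rx-filter event path and <linux/completion.h>):

        /* in the path that previously cleared blnEnableRxFF0Filter: */
        complete(&adapter->rx_filter_ready);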
 
index 5478188..d30d6e6 100644 (file)
@@ -520,12 +520,12 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
 
                /*  B0 Config LDPC Coding Capability */
                if (TEST_FLAG(phtpriv_ap->ldpc_cap, LDPC_HT_ENABLE_TX) &&
-                             GET_HT_CAPABILITY_ELE_LDPC_CAP((u8 *)(&phtpriv_sta->ht_cap)))
+                   GET_HT_CAPABILITY_ELE_LDPC_CAP((u8 *)(&phtpriv_sta->ht_cap)))
                        SET_FLAG(cur_ldpc_cap, (LDPC_HT_ENABLE_TX | LDPC_HT_CAP_TX));
 
                /*  B7 B8 B9 Config STBC setting */
                if (TEST_FLAG(phtpriv_ap->stbc_cap, STBC_HT_ENABLE_TX) &&
-                             GET_HT_CAPABILITY_ELE_RX_STBC((u8 *)(&phtpriv_sta->ht_cap)))
+                   GET_HT_CAPABILITY_ELE_RX_STBC((u8 *)(&phtpriv_sta->ht_cap)))
                        SET_FLAG(cur_stbc_cap, (STBC_HT_ENABLE_TX | STBC_HT_CAP_TX));
        } else {
                phtpriv_sta->ampdu_enable = false;
@@ -1065,10 +1065,12 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf,  int len)
                );
 
                if ((psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_CCMP) ||
-                    (psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP)) {
-                       pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY & (0x07 << 2));
+                   (psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP)) {
+                       pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY &
+                                                      (0x07 << 2));
                } else {
-                       pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY & 0x00);
+                       pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY &
+                                                      0x00);
                }
 
                rtw_hal_get_def_var(
@@ -1116,7 +1118,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf,  int len)
        pmlmepriv->htpriv.ht_option = false;
 
        if ((psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_TKIP) ||
-            (psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_TKIP)) {
+           (psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_TKIP)) {
                /* todo: */
                /* ht_cap = false; */
        }
@@ -1725,7 +1727,7 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
                        pmlmepriv->num_sta_no_short_preamble--;
 
                        if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
-                               (pmlmepriv->num_sta_no_short_preamble == 0)) {
+                           (pmlmepriv->num_sta_no_short_preamble == 0)) {
                                beacon_updated = true;
                                update_beacon(padapter, 0xFF, NULL, true);
                        }
@@ -1763,7 +1765,7 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
                        pmlmepriv->num_sta_no_short_slot_time++;
 
                        if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
-                                (pmlmepriv->num_sta_no_short_slot_time == 1)) {
+                           (pmlmepriv->num_sta_no_short_slot_time == 1)) {
                                beacon_updated = true;
                                update_beacon(padapter, 0xFF, NULL, true);
                        }
@@ -1775,7 +1777,7 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
                        pmlmepriv->num_sta_no_short_slot_time--;
 
                        if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
-                                (pmlmepriv->num_sta_no_short_slot_time == 0)) {
+                           (pmlmepriv->num_sta_no_short_slot_time == 0)) {
                                beacon_updated = true;
                                update_beacon(padapter, 0xFF, NULL, true);
                        }
@@ -2024,7 +2026,7 @@ void rtw_ap_restore_network(struct adapter *padapter)
        start_bss_network(padapter);
 
        if ((padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_) ||
-               (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
+           (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
                /* restore group key, WEP keys is restored in ips_leave() */
                rtw_set_key(
                        padapter,
@@ -2062,7 +2064,7 @@ void rtw_ap_restore_network(struct adapter *padapter)
                        /* pairwise key */
                        /* per sta pairwise key and settings */
                        if ((psecuritypriv->dot11PrivacyAlgrthm == _TKIP_) ||
-                               (psecuritypriv->dot11PrivacyAlgrthm == _AES_)) {
+                           (psecuritypriv->dot11PrivacyAlgrthm == _AES_)) {
                                rtw_setstakey_cmd(padapter, psta, true, false);
                        }
                }
index 14d37b3..b4170f6 100644 (file)
@@ -1238,7 +1238,7 @@ u8 traffic_status_watchdog(struct adapter *padapter, u8 from_timer)
                /*&& !MgntInitAdapterInProgress(pMgntInfo)*/) {
                /*  if we raise bBusyTraffic in last watchdog, using lower threshold. */
                if (pmlmepriv->LinkDetectInfo.bBusyTraffic)
-                               BusyThreshold = BusyThresholdLow;
+                       BusyThreshold = BusyThresholdLow;
 
                if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > BusyThreshold ||
                        pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > BusyThreshold) {
@@ -1885,11 +1885,8 @@ void rtw_survey_cmd_callback(struct adapter *padapter,  struct cmd_obj *pcmd)
 {
        struct  mlme_priv *pmlmepriv = &padapter->mlmepriv;
 
-       if (pcmd->res == H2C_DROPPED) {
+       if (pcmd->res != H2C_SUCCESS) {
                /* TODO: cancel timer and do timeout handler directly... */
-               /* need to make timeout handlerOS independent */
-               _set_timer(&pmlmepriv->scan_to_timer, 1);
-       } else if (pcmd->res != H2C_SUCCESS) {
                _set_timer(&pmlmepriv->scan_to_timer, 1);
        }
 
@@ -1916,11 +1913,8 @@ void rtw_joinbss_cmd_callback(struct adapter *padapter,  struct cmd_obj *pcmd)
 {
        struct  mlme_priv *pmlmepriv = &padapter->mlmepriv;
 
-       if (pcmd->res == H2C_DROPPED) {
+       if (pcmd->res != H2C_SUCCESS) {
                /* TODO: cancel timer and do timeout handler directly... */
-               /* need to make timeout handlerOS independent */
-               _set_timer(&pmlmepriv->assoc_timer, 1);
-       } else if (pcmd->res != H2C_SUCCESS) {
                _set_timer(&pmlmepriv->assoc_timer, 1);
        }
 
index 3d3c772..06e727c 100644 (file)
@@ -100,7 +100,7 @@ u8 PwrState)
 u16
 Efuse_GetCurrentSize(
        struct adapter *padapter,
-       u8      efuseType,
+       u8      efuseType,
        bool            bPseudoTest)
 {
        return padapter->HalFunc.EfuseGetCurrentSize(padapter, efuseType,
@@ -124,29 +124,29 @@ Efuse_CalculateWordCnts(u8 word_en)
 }
 
 /*  */
-/*     Description: */
-/*             1. Execute E-Fuse read byte operation according as map offset and */
-/*                 save to E-Fuse table. */
-/*             2. Referred from SD1 Richard. */
+/* Description: */
+/*             1. Execute E-Fuse read byte operation according as map offset and */
+/*                     save to E-Fuse table. */
+/*             2. Referred from SD1 Richard. */
 /*  */
-/*     Assumption: */
-/*             1. Boot from E-Fuse and successfully auto-load. */
-/*             2. PASSIVE_LEVEL (USB interface) */
+/* Assumption: */
+/*             1. Boot from E-Fuse and successfully auto-load. */
+/*             2. PASSIVE_LEVEL (USB interface) */
 /*  */
-/*     Created by Roger, 2008.10.21. */
+/* Created by Roger, 2008.10.21. */
 /*  */
-/*     2008/12/12 MH   1. Reorganize code flow and reserve bytes. and add description. */
-/*                                     2. Add efuse utilization collect. */
-/*     2008/12/22 MH   Read Efuse must check if we write section 1 data again!!! Sec1 */
-/*                                     write addr must be after sec5. */
+/* 2008/12/12 MH       1. Reorganize code flow and reserve bytes. and add description. */
+/*                                     2. Add efuse utilization collect. */
+/* 2008/12/22 MH       Read Efuse must check if we write section 1 data again!!! Sec1 */
+/*                                     write addr must be after sec5. */
 /*  */
 
 void
 efuse_ReadEFuse(
        struct adapter *Adapter,
        u8 efuseType,
-       u16     _offset,
-       u16     _size_byte,
+       u16             _offset,
+       u16             _size_byte,
        u8 *pbuf,
 bool   bPseudoTest
        );
@@ -154,8 +154,8 @@ void
 efuse_ReadEFuse(
        struct adapter *Adapter,
        u8 efuseType,
-       u16     _offset,
-       u16     _size_byte,
+       u16             _offset,
+       u16             _size_byte,
        u8 *pbuf,
 bool   bPseudoTest
        )
@@ -168,7 +168,7 @@ EFUSE_GetEfuseDefinition(
        struct adapter *padapter,
        u8 efuseType,
        u8 type,
-       void    *pOut,
+       void    *pOut,
        bool            bPseudoTest
        )
 {
@@ -194,7 +194,7 @@ EFUSE_GetEfuseDefinition(
 u8
 EFUSE_Read1Byte(
 struct adapter *Adapter,
-u16    Address)
+u16            Address)
 {
        u8 Bytetemp = {0x00};
        u8 temp = {0x00};
@@ -235,8 +235,8 @@ u16         Address)
 u8
 efuse_OneByteRead(
 struct adapter *padapter,
-u16            addr,
-u8     *data,
+u16    addr,
+u8     *data,
 bool           bPseudoTest)
 {
        u32 tmpidx = 0;
@@ -324,8 +324,8 @@ u8 efuse_OneByteWrite(struct adapter *padapter, u16 addr, u8 data, bool bPseudoT
 
 int
 Efuse_PgPacketRead(struct adapter *padapter,
-                               u8      offset,
-                               u8      *data,
+                               u8      offset,
+                               u8      *data,
                                bool            bPseudoTest)
 {
        return padapter->HalFunc.Efuse_PgPacketRead(padapter, offset, data,
@@ -334,9 +334,9 @@ Efuse_PgPacketRead(struct adapter *padapter,
 
 int
 Efuse_PgPacketWrite(struct adapter *padapter,
-                               u8      offset,
-                               u8      word_en,
-                               u8      *data,
+                               u8      offset,
+                               u8      word_en,
+                               u8      *data,
                                bool            bPseudoTest)
 {
        return padapter->HalFunc.Efuse_PgPacketWrite(padapter, offset, word_en,
@@ -386,7 +386,7 @@ efuse_WordEnableDataRead(u8 word_en,
 
 u8
 Efuse_WordEnableDataWrite(struct adapter *padapter,
-                                               u16     efuse_addr,
+                                               u16             efuse_addr,
                                                u8 word_en,
                                                u8 *data,
                                                bool            bPseudoTest)
index b449be5..68e41d9 100644 (file)
@@ -94,16 +94,14 @@ bool rtw_is_cckratesonly_included(u8 *rate)
 
 int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
 {
-       if (channel > 14) {
+       if (channel > 14)
                return WIRELESS_INVALID;
-       } else { /*  could be pure B, pure G, or B/G */
-               if (rtw_is_cckratesonly_included(rate))
-                       return WIRELESS_11B;
-               else if (rtw_is_cckrates_included(rate))
-                       return  WIRELESS_11BG;
-               else
-                       return WIRELESS_11G;
-       }
+       /* could be pure B, pure G, or B/G */
+       if (rtw_is_cckratesonly_included(rate))
+               return WIRELESS_11B;
+       if (rtw_is_cckrates_included(rate))
+               return WIRELESS_11BG;
+       return WIRELESS_11G;
 }
 
 u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len, unsigned char *source,
@@ -151,11 +149,10 @@ u8 *rtw_get_ie(u8 *pbuf, signed int index, signed int *len, signed int limit)
                if (*p == index) {
                        *len = *(p + 1);
                        return p;
-               } else {
-                       tmp = *(p + 1);
-                       p += (tmp + 2);
-                       i += (tmp + 2);
                }
+               tmp = *(p + 1);
+               p += (tmp + 2);
+               i += (tmp + 2);
                if (i >= limit)
                        break;
        }
@@ -199,9 +196,8 @@ u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, u
                                *ielen = in_ie[cnt+1]+2;
 
                        break;
-               } else {
-                       cnt += in_ie[cnt+1]+2; /* goto next */
                }
+               cnt += in_ie[cnt+1]+2; /* goto next */
        }
 
        return target_ie;
@@ -339,9 +335,8 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv)
 
        ie = rtw_set_ie(ie, WLAN_EID_IBSS_PARAMS, 2, (u8 *)&(pdev_network->configuration.atim_window), &sz);
 
-       if (rateLen > 8) {
+       if (rateLen > 8)
                ie = rtw_set_ie(ie, WLAN_EID_EXT_SUPP_RATES, (rateLen - 8), (pdev_network->supported_rates + 8), &sz);
-       }
 
        /* HT Cap. */
        if ((pregistrypriv->wireless_mode & WIRELESS_11_24N) &&
@@ -370,9 +365,8 @@ unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
 
                if (pbuf) {
                        /* check if oui matches... */
-                       if (memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type))) {
+                       if (memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type)))
                                goto check_next_ie;
-                       }
 
                        /* check version... */
                        memcpy((u8 *)&le_tmp, (pbuf + 6), sizeof(val16));
@@ -497,9 +491,8 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
        if (is_8021x) {
                if (left >= 6) {
                        pos += 2;
-                       if (!memcmp(pos, SUITE_1X, 4)) {
+                       if (!memcmp(pos, SUITE_1X, 4))
                                *is_8021x = 1;
-                       }
                }
        }
 
@@ -518,9 +511,8 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi
                return _FAIL;
        }
 
-       if ((*rsn_ie != WLAN_EID_RSN) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2))) {
+       if ((*rsn_ie != WLAN_EID_RSN) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2)))
                return _FAIL;
-       }
 
        pos = rsn_ie;
        pos += 4;
@@ -697,9 +689,8 @@ u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
                        cnt += in_ie[cnt+1]+2;
 
                        break;
-               } else {
-                       cnt += in_ie[cnt+1]+2; /* goto next */
                }
+               cnt += in_ie[cnt+1]+2; /* goto next */
        }
 
        return wpsie_ptr;
@@ -748,9 +739,8 @@ u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_att
                                *len_attr = attr_len;
 
                        break;
-               } else {
-                       attr_ptr += attr_len; /* goto next */
                }
+               attr_ptr += attr_len; /* goto next */
        }
 
        return target_attr_ptr;
index ed2d3b7..f2242cf 100644 (file)
@@ -751,7 +751,9 @@ void rtw_surveydone_event_callback(struct adapter   *adapter, u8 *pbuf)
        }
 
        if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
+               spin_unlock_bh(&pmlmepriv->lock);
                del_timer_sync(&pmlmepriv->scan_to_timer);
+               spin_lock_bh(&pmlmepriv->lock);
                _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
        }
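Dropping pmlmepriv->lock around del_timer_sync() here (and around the assoc_timer further down) avoids a potential deadlock: del_timer_sync() waits for a running handler to finish, and the scan/assoc timeout handlers take the same lock, as the rtw_scan_timeout_handler hunk below shows. In outline, the hazardous sequence being removed was:

        spin_lock_bh(&pmlmepriv->lock);
        del_timer_sync(&pmlmepriv->scan_to_timer);      /* waits for a running handler ... */
        /* ... while that handler is itself spinning on pmlmepriv->lock */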
 
@@ -792,7 +794,7 @@ void rtw_surveydone_event_callback(struct adapter   *adapter, u8 *pbuf)
                        set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
                        pmlmepriv->to_join = false;
                        s_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
-                       if (_SUCCESS == s_ret) {
+                       if (s_ret == _SUCCESS) {
                             _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
                        } else if (s_ret == 2) {/* there is no need to wait for join */
                                _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
@@ -1238,8 +1240,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
 
                        spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
 
+                       spin_unlock_bh(&pmlmepriv->lock);
                        /* s5. Cancel assoc_timer */
                        del_timer_sync(&pmlmepriv->assoc_timer);
+                       spin_lock_bh(&pmlmepriv->lock);
                } else {
                        spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
                }
@@ -1545,7 +1549,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
        if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
                return;
 
-       spin_lock_bh(&pmlmepriv->lock);
+       spin_lock_irq(&pmlmepriv->lock);
 
        if (rtw_to_roam(adapter) > 0) { /* join timeout caused by roaming */
                while (1) {
@@ -1554,7 +1558,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
                                int do_join_r;
 
                                do_join_r = rtw_do_join(adapter);
-                               if (_SUCCESS != do_join_r) {
+                               if (do_join_r != _SUCCESS) {
                                        continue;
                                }
                                break;
@@ -1573,7 +1577,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
 
        }
 
-       spin_unlock_bh(&pmlmepriv->lock);
+       spin_unlock_irq(&pmlmepriv->lock);
 }
 
 /*
@@ -1586,11 +1590,11 @@ void rtw_scan_timeout_handler(struct timer_list *t)
                                                  mlmepriv.scan_to_timer);
        struct  mlme_priv *pmlmepriv = &adapter->mlmepriv;
 
-       spin_lock_bh(&pmlmepriv->lock);
+       spin_lock_irq(&pmlmepriv->lock);
 
        _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
 
-       spin_unlock_bh(&pmlmepriv->lock);
+       spin_unlock_irq(&pmlmepriv->lock);
 
        rtw_indicate_scan_done(adapter, true);
 }
@@ -2036,28 +2040,14 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
 
 static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
 {
-       struct security_priv *psecuritypriv = &Adapter->securitypriv;
-       int i = 0;
-
-       do {
-               if ((psecuritypriv->PMKIDList[i].bUsed) &&
-                               (!memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN))) {
-                       break;
-               } else {
-                       i++;
-                       /* continue; */
-               }
-
-       } while (i < NUM_PMKID_CACHE);
-
-       if (i == NUM_PMKID_CACHE) {
-               i = -1;/*  Could not find. */
-       } else {
-               /*  There is one Pre-Authentication Key for the specific BSSID. */
-       }
-
-       return i;
+       struct security_priv *p = &Adapter->securitypriv;
+       int i;
 
+       for (i = 0; i < NUM_PMKID_CACHE; i++)
+               if ((p->PMKIDList[i].bUsed) &&
+                               (!memcmp(p->PMKIDList[i].Bssid, bssid, ETH_ALEN)))
+                       return i;
+       return -1;
 }
 
 /*  */
@@ -2558,7 +2548,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
                issued = (phtpriv->agg_enable_bitmap>>priority)&0x1;
                issued |= (phtpriv->candidate_tid_bitmap>>priority)&0x1;
 
-               if (0 == issued) {
+               if (issued == 0) {
                        psta->htpriv.candidate_tid_bitmap |= BIT((u8)priority);
                        rtw_addbareq_cmd(padapter, (u8) priority, pattrib->ra);
                }
@@ -2608,30 +2598,20 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
 {
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
        struct wlan_network *cur_network = &pmlmepriv->cur_network;
-       int do_join_r;
 
-       if (0 < rtw_to_roam(padapter)) {
+       if (rtw_to_roam(padapter) > 0) {
                memcpy(&pmlmepriv->assoc_ssid, &cur_network->network.ssid, sizeof(struct ndis_802_11_ssid));
 
                pmlmepriv->assoc_by_bssid = false;
 
-               while (1) {
-                       do_join_r = rtw_do_join(padapter);
-                       if (_SUCCESS == do_join_r) {
+               while (rtw_do_join(padapter) != _SUCCESS) {
+                       rtw_dec_to_roam(padapter);
+                       if (rtw_to_roam(padapter) <= 0) {
+                               rtw_indicate_disconnect(padapter);
                                break;
-                       } else {
-                               rtw_dec_to_roam(padapter);
-
-                               if (rtw_to_roam(padapter) > 0) {
-                                       continue;
-                               } else {
-                                       rtw_indicate_disconnect(padapter);
-                                       break;
-                               }
                        }
                }
        }
-
 }
 
 signed int rtw_linked_check(struct adapter *padapter)
index 49a3f45..1bdbd09 100644 (file)
@@ -271,11 +271,9 @@ static int has_channel(struct rt_channel_info *channel_set,
 {
        int i;
 
-       for (i = 0; i < chanset_size; i++) {
-               if (channel_set[i].ChannelNum == chan) {
+       for (i = 0; i < chanset_size; i++)
+               if (channel_set[i].ChannelNum == chan)
                        return 1;
-               }
-       }
 
        return 0;
 }
@@ -311,11 +309,11 @@ static void init_channel_list(struct adapter *padapter, struct rt_channel_info *
                        if (!has_channel(channel_set, chanset_size, ch))
                                continue;
 
-                       if ((0 == padapter->registrypriv.ht_enable) && (8 == o->inc))
+                       if ((padapter->registrypriv.ht_enable == 0) && (o->inc == 8))
                                continue;
 
                        if ((0 < (padapter->registrypriv.bw_mode & 0xf0)) &&
-                               ((BW40MINUS == o->bw) || (BW40PLUS == o->bw)))
+                               ((o->bw == BW40MINUS) || (o->bw == BW40PLUS)))
                                continue;
 
                        if (!reg) {
@@ -345,7 +343,7 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
 
        if (is_supported_24g(padapter->registrypriv.wireless_mode)) {
                b2_4GBand = true;
-               if (RT_CHANNEL_DOMAIN_REALTEK_DEFINE == ChannelPlan)
+               if (ChannelPlan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
                        Index2G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index2G;
                else
                        Index2G = RTW_ChannelPlanMap[ChannelPlan].Index2G;
@@ -355,14 +353,14 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
                for (index = 0; index < RTW_ChannelPlan2G[Index2G].Len; index++) {
                        channel_set[chanset_size].ChannelNum = RTW_ChannelPlan2G[Index2G].Channel[index];
 
-                       if ((RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN == ChannelPlan) ||/* Channel 1~11 is active, and 12~14 is passive */
-                               (RT_CHANNEL_DOMAIN_GLOBAL_NULL == ChannelPlan)) {
+                       if ((ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN) ||/* Channel 1~11 is active, and 12~14 is passive */
+                               (ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_NULL)) {
                                if (channel_set[chanset_size].ChannelNum >= 1 && channel_set[chanset_size].ChannelNum <= 11)
                                        channel_set[chanset_size].ScanType = SCAN_ACTIVE;
                                else if ((channel_set[chanset_size].ChannelNum  >= 12 && channel_set[chanset_size].ChannelNum  <= 14))
                                        channel_set[chanset_size].ScanType  = SCAN_PASSIVE;
-                       } else if (RT_CHANNEL_DOMAIN_WORLD_WIDE_13 == ChannelPlan ||
-                               RT_CHANNEL_DOMAIN_2G_WORLD == Index2G) { /*  channel 12~13, passive scan */
+                       } else if (ChannelPlan == RT_CHANNEL_DOMAIN_WORLD_WIDE_13 ||
+                                Index2G == RT_CHANNEL_DOMAIN_2G_WORLD) { /*  channel 12~13, passive scan */
                                if (channel_set[chanset_size].ChannelNum <= 11)
                                        channel_set[chanset_size].ScanType = SCAN_ACTIVE;
                                else
@@ -649,9 +647,8 @@ unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
                        if (psta) {
                                /* update WMM, ERP in the beacon */
                                /* todo: the timer is used instead of the number of the beacon received */
-                               if ((sta_rx_pkts(psta) & 0xf) == 0) {
+                               if ((sta_rx_pkts(psta) & 0xf) == 0)
                                        update_beacon_info(padapter, pframe, len, psta);
-                               }
                        } else {
                                /* allocate a new CAM entry for IBSS station */
                                cam_idx = allocate_fw_sta_entry(padapter);
@@ -911,16 +908,14 @@ unsigned int OnAuthClient(struct adapter *padapter, union recv_frame *precv_fram
                        set_link_timer(pmlmeext, REAUTH_TO);
 
                        return _SUCCESS;
-               } else {
-                       /*  open system */
-                       go2asoc = 1;
                }
+               /* open system */
+               go2asoc = 1;
        } else if (seq == 4) {
-               if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) {
+               if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
                        go2asoc = 1;
-               } else {
+               else
                        goto authclnt_fail;
-               }
        } else {
                /*  this is also illegal */
                goto authclnt_fail;
@@ -1331,7 +1326,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
        spin_unlock_bh(&pstapriv->asoc_list_lock);
 
        /*  now the station is qualified to join our BSS... */
-       if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (WLAN_STATUS_SUCCESS == status)) {
+       if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (status == WLAN_STATUS_SUCCESS)) {
                /* 1 bss_cap_update & sta_info_update */
                bss_cap_update_on_sta_join(padapter, pstat);
                sta_info_update(padapter, pstat);
@@ -1455,11 +1450,10 @@ unsigned int OnAssocRsp(struct adapter *padapter, union recv_frame *precv_frame)
        UpdateBrateTbl(padapter, pmlmeinfo->network.supported_rates);
 
 report_assoc_result:
-       if (res > 0) {
+       if (res > 0)
                rtw_buf_update(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len, pframe, pkt_len);
-       } else {
+       else
                rtw_buf_free(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len);
-       }
 
        report_join_res(padapter, res);
 
@@ -1473,6 +1467,7 @@ unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
        struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
        struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
        u8 *pframe = precv_frame->u.hdr.rx_data;
+       int ignore_received_deauth = 0;
 
        /* check A3 */
        if (memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN))
@@ -1508,36 +1503,33 @@ unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
 
 
                return _SUCCESS;
-       } else {
-               int     ignore_received_deauth = 0;
-
-               /*      Commented by Albert 20130604 */
-               /*      Before sending the auth frame to start the STA/GC mode connection with AP/GO, */
-               /*      we will send the deauth first. */
-               /*      However, the Win8.1 with BRCM Wi-Fi will send the deauth with reason code 6 to us after receieving our deauth. */
-               /*      Added the following code to avoid this case. */
-               if ((pmlmeinfo->state & WIFI_FW_AUTH_STATE) ||
-                       (pmlmeinfo->state & WIFI_FW_ASSOC_STATE)) {
-                       if (reason == WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA) {
-                               ignore_received_deauth = 1;
-                       } else if (WLAN_REASON_PREV_AUTH_NOT_VALID == reason) {
-                               /*  TODO: 802.11r */
-                               ignore_received_deauth = 1;
-                       }
-               }
-
-               netdev_dbg(padapter->pnetdev,
-                          "sta recv deauth reason code(%d) sta:%pM, ignore = %d\n",
-                          reason, GetAddr3Ptr(pframe),
-                          ignore_received_deauth);
+       }
 
-               if (0 == ignore_received_deauth) {
-                       receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+       /*      Commented by Albert 20130604 */
+       /*      Before sending the auth frame to start the STA/GC mode connection with AP/GO, */
+       /*      we will send the deauth first. */
+       /*      However, the Win8.1 with BRCM Wi-Fi will send the deauth with reason code 6 to us after receiving our deauth. */
+       /*      Added the following code to avoid this case. */
+       if ((pmlmeinfo->state & WIFI_FW_AUTH_STATE) ||
+           (pmlmeinfo->state & WIFI_FW_ASSOC_STATE)) {
+               if (reason == WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA) {
+                       ignore_received_deauth = 1;
+               } else if (reason == WLAN_REASON_PREV_AUTH_NOT_VALID) {
+                       /*  TODO: 802.11r */
+                       ignore_received_deauth = 1;
                }
        }
+
+       netdev_dbg(padapter->pnetdev,
+                  "sta recv deauth reason code(%d) sta:%pM, ignore = %d\n",
+                  reason, GetAddr3Ptr(pframe),
+                  ignore_received_deauth);
+
+       if (ignore_received_deauth == 0)
+               receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+
        pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
        return _SUCCESS;
-
 }
 
 unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
@@ -1581,13 +1573,13 @@ unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
                }
 
                return _SUCCESS;
-       } else {
-               netdev_dbg(padapter->pnetdev,
-                          "sta recv disassoc reason code(%d) sta:%pM\n",
-                          reason, GetAddr3Ptr(pframe));
-
-               receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
        }
+       netdev_dbg(padapter->pnetdev,
+                  "sta recv disassoc reason code(%d) sta:%pM\n",
+                  reason, GetAddr3Ptr(pframe));
+
+       receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+
        pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
        return _SUCCESS;
 
@@ -1674,11 +1666,10 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
                        /* process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), GetAddr3Ptr(pframe)); */
                        process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), addr);
 
-                       if (pmlmeinfo->accept_addba_req) {
+                       if (pmlmeinfo->accept_addba_req)
                                issue_action_BA(padapter, addr, WLAN_ACTION_ADDBA_RESP, 0);
-                       } else {
+                       else
                                issue_action_BA(padapter, addr, WLAN_ACTION_ADDBA_RESP, 37);/* reject ADDBA Req */
-                       }
 
                        break;
 
@@ -1774,9 +1765,8 @@ static unsigned int on_action_public_vendor(union recv_frame *precv_frame)
        u8 *pframe = precv_frame->u.hdr.rx_data;
        u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
 
-       if (!memcmp(frame_body + 2, P2P_OUI, 4)) {
+       if (!memcmp(frame_body + 2, P2P_OUI, 4))
                ret = on_action_public_p2p(precv_frame);
-       }
 
        return ret;
 }
@@ -2187,9 +2177,8 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
 
                        wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
                                pattrib->pktlen-sizeof(struct ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
-                       if (wps_ie && wps_ielen > 0) {
+                       if (wps_ie && wps_ielen > 0)
                                rtw_get_wps_attr_content(wps_ie,  wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
-                       }
                        if (sr != 0)
                                set_fwstate(pmlmepriv, WIFI_UNDER_WPS);
                        else
@@ -2245,9 +2234,8 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
 
 
        /*  EXTERNDED SUPPORTED RATE */
-       if (rate_len > 8) {
+       if (rate_len > 8)
                pframe = rtw_set_ie(pframe, WLAN_EID_EXT_SUPP_RATES, (rate_len - 8), (cur_network->supported_rates + 8), &pattrib->pktlen);
-       }
 
 
        /* todo:HT for adhoc */
@@ -2400,7 +2388,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
                                pframe += ssid_ielen_diff;
                                pattrib->pktlen += ssid_ielen_diff;
                        }
-                       kfree (buf);
+                       kfree(buf);
                }
        } else {
                /* timestamp will be inserted by hardware */
@@ -2447,9 +2435,8 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
 
 
                /*  EXTERNDED SUPPORTED RATE */
-               if (rate_len > 8) {
+               if (rate_len > 8)
                        pframe = rtw_set_ie(pframe, WLAN_EID_EXT_SUPP_RATES, (rate_len - 8), (cur_network->supported_rates + 8), &pattrib->pktlen);
-               }
 
 
                /* todo:HT for adhoc */
@@ -2674,9 +2661,8 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
 
                /*  setting auth algo number */
                val16 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) ? 1 : 0;/*  0:OPEN System, 1:Shared key */
-               if (val16) {
+               if (val16)
                        use_shared_key = 1;
-               }
                le_tmp = cpu_to_le16(val16);
 
                /* setting IV for auth seq #3 */
@@ -2831,16 +2817,14 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
                                break;
                        }
 
-                       if (!pbuf || ie_len == 0) {
+                       if (!pbuf || ie_len == 0)
                                break;
-                       }
                }
 
        }
 
-       if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK) {
+       if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
                pframe = rtw_set_ie(pframe, WLAN_EID_VENDOR_SPECIFIC, 6, REALTEK_96B_IE, &(pattrib->pktlen));
-       }
 
        /* add WPS IE ie for wps 2.0 */
        if (pmlmepriv->wps_assoc_resp_ie && pmlmepriv->wps_assoc_resp_ie_len > 0) {
@@ -3301,9 +3285,8 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da,
        __le16 le_tmp;
 
        pmgntframe = alloc_mgtxmitframe(pxmitpriv);
-       if (!pmgntframe) {
+       if (!pmgntframe)
                goto exit;
-       }
 
        /* update attribute */
        pattrib = &pmgntframe->attrib;
@@ -3552,13 +3535,13 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
                                rtw_hal_get_def_var(padapter,
                                                    HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
 
-                       if (IEEE80211_HT_MAX_AMPDU_64K == max_rx_ampdu_factor)
+                       if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_64K)
                                BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
-                       else if (IEEE80211_HT_MAX_AMPDU_32K == max_rx_ampdu_factor)
+                       else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_32K)
                                BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0800); /* 32 buffer size */
-                       else if (IEEE80211_HT_MAX_AMPDU_16K == max_rx_ampdu_factor)
+                       else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_16K)
                                BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0400); /* 16 buffer size */
-                       else if (IEEE80211_HT_MAX_AMPDU_8K == max_rx_ampdu_factor)
+                       else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_8K)
                                BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0200); /* 8 buffer size */
                        else
                                BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
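
Editor's note (illustration only, not part of the patch): the hunk above only reorders the comparisons; the masks themselves follow the IEEE 802.11 Block Ack parameter-set layout, where bits 0-5 carry A-MSDU support, BA policy and TID (kept by the & 0x3f) and bits 6-15 carry the buffer size, so ORing 0x1000/0x0800/0x0400/0x0200 encodes 64/32/16/8 MPDUs as the in-line comments say. A standalone C sketch of that decoding; the helper below is invented for the note and is not in the driver:

#include <stdio.h>

/* illustrative helper: buffer size occupies bits 6..15 of BA_para_set */
static unsigned int ba_buffer_size(unsigned int ba_para_set)
{
	return (ba_para_set >> 6) & 0x3ff;
}

int main(void)
{
	printf("%u %u %u %u\n",
	       ba_buffer_size(0x1000), ba_buffer_size(0x0800),
	       ba_buffer_size(0x0400), ba_buffer_size(0x0200));
	/* prints: 64 32 16 8 */
	return 0;
}
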
@@ -3627,9 +3610,8 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
        action = ACT_PUBLIC_BSSCOEXIST;
 
        pmgntframe = alloc_mgtxmitframe(pxmitpriv);
-       if (!pmgntframe) {
+       if (!pmgntframe)
                return;
-       }
 
        /* update attribute */
        pattrib = &pmgntframe->attrib;
@@ -3802,10 +3784,8 @@ unsigned int send_beacon(struct adapter *padapter)
 
        } while (false == bxmitok && issue < 100 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
 
-       if (padapter->bSurpriseRemoved || padapter->bDriverStopped) {
+       if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
                return _FAIL;
-       }
-
 
        if (!bxmitok)
                return _FAIL;
@@ -4388,9 +4368,8 @@ static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid
                        }
 
                        /*  skip AP 2.4G channel plan */
-                       while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14)) {
+                       while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14))
                                j++;
-                       }
                }
 
                pmlmeext->update_channel_plan_by_ap_done = 1;
@@ -4402,9 +4381,8 @@ static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid
        i = 0;
        while ((i < MAX_CHANNEL_NUM) && (chplan_new[i].ChannelNum != 0)) {
                if (chplan_new[i].ChannelNum == channel) {
-                       if (chplan_new[i].ScanType == SCAN_PASSIVE) {
+                       if (chplan_new[i].ScanType == SCAN_PASSIVE)
                                chplan_new[i].ScanType = SCAN_ACTIVE;
-                       }
                        break;
                }
                i++;
@@ -4629,9 +4607,8 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
        struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
 
        pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
-       if (!pcmd_obj) {
+       if (!pcmd_obj)
                return;
-       }
 
        cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
        pevtcmd = rtw_zmalloc(cmdsz);
@@ -5086,7 +5063,7 @@ void linked_status_chk(struct adapter *padapter)
                        if (pmlmeinfo->FW_sta_info[i].status == 1) {
                                psta = pmlmeinfo->FW_sta_info[i].psta;
 
-                               if (NULL == psta)
+                               if (psta == NULL)
                                        continue;
 
                                if (pmlmeinfo->FW_sta_info[i].rx_pkt == sta_rx_pkts(psta)) {
@@ -5124,9 +5101,8 @@ void survey_timer_hdl(struct timer_list *t)
 
        /* issue rtw_sitesurvey_cmd */
        if (pmlmeext->sitesurvey_res.state > SCAN_START) {
-               if (pmlmeext->sitesurvey_res.state ==  SCAN_PROCESS) {
+               if (pmlmeext->sitesurvey_res.state ==  SCAN_PROCESS)
                        pmlmeext->sitesurvey_res.channel_idx++;
-               }
 
                if (pmlmeext->scan_abort) {
                        pmlmeext->sitesurvey_res.channel_idx = pmlmeext->sitesurvey_res.ch_num;
@@ -5135,24 +5111,18 @@ void survey_timer_hdl(struct timer_list *t)
                }
 
                ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
-               if (!ph2c) {
-                       goto exit_survey_timer_hdl;
-               }
+               if (!ph2c)
+                       return;
 
                psurveyPara = rtw_zmalloc(sizeof(struct sitesurvey_parm));
                if (!psurveyPara) {
                        kfree(ph2c);
-                       goto exit_survey_timer_hdl;
+                       return;
                }
 
                init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));
                rtw_enqueue_cmd(pcmdpriv, ph2c);
        }
-
-
-exit_survey_timer_hdl:
-
-       return;
 }
 
 void link_timer_hdl(struct timer_list *t)
@@ -5173,17 +5143,9 @@ void link_timer_hdl(struct timer_list *t)
        } else if (pmlmeinfo->state & WIFI_FW_AUTH_STATE) {
                /* re-auth timer */
                if (++pmlmeinfo->reauth_count > REAUTH_LIMIT) {
-                       /* if (pmlmeinfo->auth_algo != dot11AuthAlgrthm_Auto) */
-                       /*  */
-                               pmlmeinfo->state = 0;
-                               report_join_res(padapter, -1);
-                               return;
-                       /*  */
-                       /* else */
-                       /*  */
-                       /*      pmlmeinfo->auth_algo = dot11AuthAlgrthm_Shared; */
-                       /*      pmlmeinfo->reauth_count = 0; */
-                       /*  */
+                       pmlmeinfo->state = 0;
+                       report_join_res(padapter, -1);
+                       return;
                }
 
                pmlmeinfo->auth_seq = 1;
@@ -5348,9 +5310,8 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
 
        /* check already connecting to AP or not */
        if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
-               if (pmlmeinfo->state & WIFI_FW_STATION_STATE) {
+               if (pmlmeinfo->state & WIFI_FW_STATION_STATE)
                        issue_deauth_ex(padapter, pnetwork->mac_address, WLAN_REASON_DEAUTH_LEAVING, 1, 100);
-               }
                pmlmeinfo->state = WIFI_FW_NULL_STATE;
 
                /* clear CAM */
@@ -5485,9 +5446,8 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
        struct wlan_bssid_ex            *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
        u8 val8;
 
-       if (is_client_associated_to_ap(padapter)) {
+       if (is_client_associated_to_ap(padapter))
                        issue_deauth_ex(padapter, pnetwork->mac_address, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms/100, 100);
-       }
 
        if (((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)) {
                /* Stop BCN */
@@ -6073,7 +6033,7 @@ u8 run_in_thread_hdl(struct adapter *padapter, u8 *pbuf)
        struct RunInThread_param *p;
 
 
-       if (NULL == pbuf)
+       if (pbuf == NULL)
                return H2C_PARAMETERS_ERROR;
        p = (struct RunInThread_param *)pbuf;
 
index 96eb8ca..4f120c8 100644 (file)
@@ -8,47 +8,27 @@
 #include <drv_types.h>
 #include <linux/kernel.h>
 
-
-struct ch_freq {
-       u32 channel;
-       u32 frequency;
-};
-
-static struct ch_freq ch_freq_map[] = {
-       {1, 2412}, {2, 2417}, {3, 2422}, {4, 2427}, {5, 2432},
-       {6, 2437}, {7, 2442}, {8, 2447}, {9, 2452}, {10, 2457},
-       {11, 2462}, {12, 2467}, {13, 2472}, {14, 2484},
-       /*  UNII */
-       {36, 5180}, {40, 5200}, {44, 5220}, {48, 5240}, {52, 5260},
-       {56, 5280}, {60, 5300}, {64, 5320}, {149, 5745}, {153, 5765},
-       {157, 5785}, {161, 5805}, {165, 5825}, {167, 5835}, {169, 5845},
-       {171, 5855}, {173, 5865},
-       /* HiperLAN2 */
-       {100, 5500}, {104, 5520}, {108, 5540}, {112, 5560}, {116, 5580},
-       {120, 5600}, {124, 5620}, {128, 5640}, {132, 5660}, {136, 5680},
-       {140, 5700},
-       /* Japan MMAC */
-       {34, 5170}, {38, 5190}, {42, 5210}, {46, 5230},
-       /*  Japan */
-       {184, 4920}, {188, 4940}, {192, 4960}, {196, 4980},
-       {208, 5040},/* Japan, means J08 */
-       {212, 5060},/* Japan, means J12 */
-       {216, 5080},/* Japan, means J16 */
+static const u32 ch_freq_map[] = {
+       2412,
+       2417,
+       2422,
+       2427,
+       2432,
+       2437,
+       2442,
+       2447,
+       2452,
+       2457,
+       2462,
+       2467,
+       2472,
+       2484
 };
 
 u32 rtw_ch2freq(u32 channel)
 {
-       u8 i;
-       u32 freq = 0;
-
-       for (i = 0; i < ARRAY_SIZE(ch_freq_map); i++) {
-               if (channel == ch_freq_map[i].channel) {
-                       freq = ch_freq_map[i].frequency;
-                               break;
-               }
-       }
-       if (i == ARRAY_SIZE(ch_freq_map))
-               freq = 2412;
+       if (channel == 0 || channel > ARRAY_SIZE(ch_freq_map))
+               return 2412;
 
-       return freq;
+       return ch_freq_map[channel - 1];
 }
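
Editor's note (illustration only): the hunk above replaces the channel/frequency pair table with a flat 2.4 GHz array indexed by channel - 1, returning 2412 MHz for anything out of range, the same fallback the old loop used when no table entry matched. A standalone model of the new lookup; the table name and main() below are local to this sketch:

#include <stdio.h>

static const unsigned int freq_24ghz[] = {
	2412, 2417, 2422, 2427, 2432, 2437, 2442,
	2447, 2452, 2457, 2462, 2467, 2472, 2484,
};

static unsigned int ch2freq(unsigned int channel)
{
	/* channels outside 1..14 fall back to 2412 MHz */
	if (channel == 0 || channel > sizeof(freq_24ghz) / sizeof(freq_24ghz[0]))
		return 2412;
	return freq_24ghz[channel - 1];
}

int main(void)
{
	printf("%u %u %u\n", ch2freq(1), ch2freq(14), ch2freq(36));
	/* prints: 2412 2484 2412 */
	return 0;
}
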
index af50674..9091f2f 100644 (file)
@@ -68,16 +68,6 @@ enum btc_chip_interface {
        BTC_INTF_MAX
 };
 
-enum {
-       BTC_CHIP_UNDEF          = 0,
-       BTC_CHIP_CSR_BC4        = 1,
-       BTC_CHIP_CSR_BC8        = 2,
-       BTC_CHIP_RTL8723A       = 3,
-       BTC_CHIP_RTL8821        = 4,
-       BTC_CHIP_RTL8723B       = 5,
-       BTC_CHIP_MAX
-};
-
 /*  following is for wifi link status */
 #define WIFI_STA_CONNECTED                             BIT0
 #define WIFI_AP_CONNECTED                              BIT1
@@ -87,7 +77,6 @@ enum {
 
 struct btc_board_info {
        /*  The following is some board information */
-       u8 btChipType;
        u8 pgAntNum;    /*  pg ant number */
        u8 btdmAntNum;  /*  ant number for btdm */
        u8 btdmAntPos;          /* Bryant Add to indicate Antenna Position for (pgAntNum = 2) && (btdmAntNum = 1)  (DPDT+1Ant case) */
index f4b3e8b..9acd493 100644 (file)
@@ -1113,11 +1113,6 @@ void EXhalbtcoutsrc_Periodical(struct btc_coexist *pBtCoexist)
 /*     halbtcoutsrc_NormalLowPower(pBtCoexist); */
 }
 
-void EXhalbtcoutsrc_SetChipType(u8 chipType)
-{
-       GLBtCoexist.boardInfo.btChipType = BTC_CHIP_RTL8723B;
-}
-
 void EXhalbtcoutsrc_SetAntNum(u8 type, u8 antNum)
 {
        if (BT_COEX_ANT_TYPE_PG == type) {
@@ -1188,9 +1183,6 @@ void hal_btcoex_SetChipType(struct adapter *padapter, u8 chipType)
 
 
        pHalData = GET_HAL_DATA(padapter);
-       pHalData->bt_coexist.btChipType = chipType;
-
-       EXhalbtcoutsrc_SetChipType(chipType);
 }
 
 void hal_btcoex_SetPgAntNum(struct adapter *padapter, u8 antNum)
index a545832..107f427 100644 (file)
@@ -811,17 +811,14 @@ static struct recv_buf *sd_recv_rxfifo(struct adapter *adapter, u32 size)
                SIZE_PTR alignment = 0;
 
                recvbuf->pskb = rtw_skb_alloc(MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
-
-               if (recvbuf->pskb) {
-                       recvbuf->pskb->dev = adapter->pnetdev;
-
-                       tmpaddr = (SIZE_PTR)recvbuf->pskb->data;
-                       alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
-                       skb_reserve(recvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
-               }
-
                if (!recvbuf->pskb)
                        return NULL;
+
+               recvbuf->pskb->dev = adapter->pnetdev;
+
+               tmpaddr = (SIZE_PTR)recvbuf->pskb->data;
+               alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
+               skb_reserve(recvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
        }
 
        /* 3 3. read data from rxfifo */
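
Editor's note (illustration only): the reshuffled allocation path above keeps the alignment arithmetic unchanged and only turns the allocation-failure case into an early return. The headroom computation can be modelled in standalone C; the 8-byte alignment value below is an assumption for the sketch, standing in for the driver's RECVBUFF_ALIGN_SZ:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_SZ 8	/* assumed stand-in for RECVBUFF_ALIGN_SZ */

/* mirrors: alignment = addr & (ALIGN_SZ - 1); skb_reserve(skb, ALIGN_SZ - alignment); */
static size_t headroom(uintptr_t data_addr)
{
	size_t misalign = data_addr & (ALIGN_SZ - 1);

	return ALIGN_SZ - misalign;
}

int main(void)
{
	printf("%zu %zu\n", headroom(0x1003), headroom(0x1000));
	/* prints: 5 8  (an already-aligned buffer still skips one full block) */
	return 0;
}
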
index 8f654a4..d0ce21c 100644 (file)
@@ -9,16 +9,7 @@
 
 /*  hal_ic_type_e */
 enum hal_ic_type_e { /* tag_HAL_IC_Type_Definition */
-       CHIP_8192S      =       0,
-       CHIP_8188C      =       1,
-       CHIP_8192C      =       2,
-       CHIP_8192D      =       3,
-       CHIP_8723A      =       4,
-       CHIP_8188E      =       5,
-       CHIP_8812       =       6,
-       CHIP_8821       =       7,
        CHIP_8723B      =       8,
-       CHIP_8192E      =       9,
 };
 
 /* hal_chip_type_e */
@@ -58,7 +49,6 @@ struct hal_version { /* tag_HAL_VERSION */
        u8                      ROMVer;
 };
 
-/* VERSION_8192C                       VersionID; */
 /* hal_version                 VersionID; */
 
 /*  Get element */
index 0ce08c2..0bbbdeb 100644 (file)
@@ -42,7 +42,6 @@
 #include <rtw_mlme.h>
 #include <mlme_osdep.h>
 #include <rtw_io.h>
-#include <rtw_ioctl.h>
 #include <rtw_ioctl_set.h>
 #include <osdep_intf.h>
 #include <rtw_eeprom.h>
index 8213dcf..d8d0375 100644 (file)
 #define REG_MULTI_FUNC_CTRL                    0x0068 /*  RTL8723 WIFI/BT/GPS Multi-Function control source. */
 #define REG_GSSR                                               0x006c
 #define REG_AFE_XTAL_CTRL_EXT                  0x0078 /* RTL8188E */
-#define REG_XCK_OUT_CTRL                               0x007c /* RTL8188E */
 #define REG_MCUFWDL                                    0x0080
-#define REG_WOL_EVENT                                  0x0081 /* RTL8188E */
 #define REG_MCUTSTCFG                                  0x0084
 #define REG_FDHM0                                              0x0088
-#define REG_HOST_SUSP_CNT                              0x00BC  /*  RTL8192C Host suspend counter on FPGA platform */
-#define REG_SYSTEM_ON_CTRL                     0x00CC  /*  For 8723AE Reset after S3 */
 #define REG_EFUSE_ACCESS                               0x00CF  /*  Efuse access protection for RTL8723 */
 #define REG_BIST_SCAN                                  0x00D0
 #define REG_BIST_RPT                                   0x00D4
 #define REG_FWIMR                                              0x0130
 #define REG_FWISR                                              0x0134
 #define REG_FTIMR                                              0x0138
-#define REG_FTISR                                              0x013C /* RTL8192C */
 #define REG_PKTBUF_DBG_CTRL                    0x0140
 #define REG_RXPKTBUF_CTRL                              (REG_PKTBUF_DBG_CTRL+2)
 #define REG_PKTBUF_DBG_DATA_L                  0x0144
 #define REG_MBIST_START                                0x0174
 #define REG_MBIST_DONE                                 0x0178
 #define REG_MBIST_FAIL                                 0x017C
-#define REG_32K_CTRL                                   0x0194 /* RTL8188E */
 #define REG_C2HEVT_MSG_NORMAL          0x01A0
 #define REG_C2HEVT_CLEAR                               0x01AF
 #define REG_MCUTST_1                                   0x01c0
-#define REG_MCUTST_WOWLAN                      0x01C7  /*  Defined after 8188E series. */
 #define REG_FMETHR                                             0x01C8
 #define REG_HMETFR                                             0x01CC
 #define REG_HMEBOX_0                                   0x01D0
 #define MAX_MSS_DENSITY_1T                     0x0A
 
 /*  */
-/*        8192C Cmd9346CR bits                                 (Offset 0xA, 16bit) */
-/*  */
-#define CmdEEPROM_En                           BIT5     /*  EEPROM enable when set 1 */
-#define CmdEERPOMSEL                           BIT4    /*  System EEPROM select, 0: boot from E-FUSE, 1: The EEPROM used is 9346 */
-#define Cmd9346CR_9356SEL                      BIT4
-
-/*  */
-/*        8192C GPIO MUX Configuration Register (offset 0x40, 4 byte) */
-/*  */
-#define GPIOSEL_GPIO                           0
-#define GPIOSEL_ENBT                           BIT5
-
-/*  */
-/*        8192C GPIO PIN Control Register (offset 0x44, 4 byte) */
-/*  */
-#define GPIO_IN                                        REG_GPIO_PIN_CTRL               /*  GPIO pins input value */
-#define GPIO_OUT                               (REG_GPIO_PIN_CTRL+1)   /*  GPIO pins output value */
-#define GPIO_IO_SEL                            (REG_GPIO_PIN_CTRL+2)   /*  GPIO pins output enable when a bit is set to "1"; otherwise, input is configured. */
-#define GPIO_MOD                               (REG_GPIO_PIN_CTRL+3)
-
-/*  */
-/*        8811A GPIO PIN Control Register (offset 0x60, 4 byte) */
-/*  */
-#define GPIO_IN_8811A                  REG_GPIO_PIN_CTRL_2             /*  GPIO pins input value */
-#define GPIO_OUT_8811A                 (REG_GPIO_PIN_CTRL_2+1) /*  GPIO pins output value */
-#define GPIO_IO_SEL_8811A              (REG_GPIO_PIN_CTRL_2+2) /*  GPIO pins output enable when a bit is set to "1"; otherwise, input is configured. */
-#define GPIO_MOD_8811A                 (REG_GPIO_PIN_CTRL_2+3)
-
-/*  */
-/*        8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */
-/*  */
-#define HSIMR_GPIO12_0_INT_EN                  BIT0
-#define HSIMR_SPS_OCP_INT_EN                   BIT5
-#define HSIMR_RON_INT_EN                               BIT6
-#define HSIMR_PDN_INT_EN                               BIT7
-#define HSIMR_GPIO9_INT_EN                             BIT25
-
-/*  */
 /*        8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) */
 /*  */
 #define HSISR_GPIO12_0_INT                             BIT0
 #define HSISR_GPIO9_INT                                        BIT25
 
 /*  */
-/*        8192C (MSR) Media Status Register    (Offset 0x4C, 8 bits) */
-/*  */
-/*
-Network Type
-00: No link
-01: Link in ad hoc network
-10: Link in infrastructure network
-11: AP mode
-Default: 00b.
-*/
-#define MSR_NOLINK                             0x00
-#define MSR_ADHOC                              0x01
-#define MSR_INFRA                              0x02
-#define MSR_AP                                 0x03
-
-/*  */
 /*        USB INTR CONTENT */
 /*  */
 #define USB_C2H_CMDID_OFFSET                                   0
@@ -787,206 +726,6 @@ Default: 00b.
 #define IMR_WLANOFF                    BIT0
 
 /*  */
-/*  8723E series PCIE Host IMR/ISR bit */
-/*  */
-/*  IMR DW0 Bit 0-31 */
-#define PHIMR_TIMEOUT2                         BIT31
-#define PHIMR_TIMEOUT1                         BIT30
-#define PHIMR_PSTIMEOUT                        BIT29
-#define PHIMR_GTINT4                           BIT28
-#define PHIMR_GTINT3                           BIT27
-#define PHIMR_TXBCNERR                         BIT26
-#define PHIMR_TXBCNOK                          BIT25
-#define PHIMR_TSF_BIT32_TOGGLE BIT24
-#define PHIMR_BCNDMAINT3                       BIT23
-#define PHIMR_BCNDMAINT2                       BIT22
-#define PHIMR_BCNDMAINT1                       BIT21
-#define PHIMR_BCNDMAINT0                       BIT20
-#define PHIMR_BCNDOK3                          BIT19
-#define PHIMR_BCNDOK2                          BIT18
-#define PHIMR_BCNDOK1                          BIT17
-#define PHIMR_BCNDOK0                          BIT16
-#define PHIMR_HSISR_IND_ON                     BIT15
-#define PHIMR_BCNDMAINT_E                      BIT14
-#define PHIMR_ATIMEND_E                        BIT13
-#define PHIMR_ATIM_CTW_END             BIT12
-#define PHIMR_HISRE_IND                        BIT11   /*  RO. HISRE Indicator (HISRE & HIMRE is true, this bit is set to 1) */
-#define PHIMR_C2HCMD                           BIT10
-#define PHIMR_CPWM2                            BIT9
-#define PHIMR_CPWM                                     BIT8
-#define PHIMR_HIGHDOK                          BIT7            /*  High Queue DMA OK Interrupt */
-#define PHIMR_MGNTDOK                          BIT6            /*  Management Queue DMA OK Interrupt */
-#define PHIMR_BKDOK                                    BIT5            /*  AC_BK DMA OK Interrupt */
-#define PHIMR_BEDOK                                    BIT4            /*  AC_BE DMA OK Interrupt */
-#define PHIMR_VIDOK                                    BIT3            /*  AC_VI DMA OK Interrupt */
-#define PHIMR_VODOK                            BIT2            /*  AC_VO DMA Interrupt */
-#define PHIMR_RDU                                      BIT1            /*  Receive Descriptor Unavailable */
-#define PHIMR_ROK                                      BIT0            /*  Receive DMA OK Interrupt */
-
-/*  PCIE Host Interrupt Status Extension bit */
-#define PHIMR_BCNDMAINT7                       BIT23
-#define PHIMR_BCNDMAINT6                       BIT22
-#define PHIMR_BCNDMAINT5                       BIT21
-#define PHIMR_BCNDMAINT4                       BIT20
-#define PHIMR_BCNDOK7                          BIT19
-#define PHIMR_BCNDOK6                          BIT18
-#define PHIMR_BCNDOK5                          BIT17
-#define PHIMR_BCNDOK4                          BIT16
-/*  bit12 15: RSVD */
-#define PHIMR_TXERR                                    BIT11
-#define PHIMR_RXERR                                    BIT10
-#define PHIMR_TXFOVW                           BIT9
-#define PHIMR_RXFOVW                           BIT8
-/*  bit2-7: RSVD */
-#define PHIMR_OCPINT                           BIT1
-/*  bit0: RSVD */
-
-#define UHIMR_TIMEOUT2                         BIT31
-#define UHIMR_TIMEOUT1                         BIT30
-#define UHIMR_PSTIMEOUT                        BIT29
-#define UHIMR_GTINT4                           BIT28
-#define UHIMR_GTINT3                           BIT27
-#define UHIMR_TXBCNERR                         BIT26
-#define UHIMR_TXBCNOK                          BIT25
-#define UHIMR_TSF_BIT32_TOGGLE BIT24
-#define UHIMR_BCNDMAINT3                       BIT23
-#define UHIMR_BCNDMAINT2                       BIT22
-#define UHIMR_BCNDMAINT1                       BIT21
-#define UHIMR_BCNDMAINT0                       BIT20
-#define UHIMR_BCNDOK3                          BIT19
-#define UHIMR_BCNDOK2                          BIT18
-#define UHIMR_BCNDOK1                          BIT17
-#define UHIMR_BCNDOK0                          BIT16
-#define UHIMR_HSISR_IND                        BIT15
-#define UHIMR_BCNDMAINT_E                      BIT14
-/* RSVD        BIT13 */
-#define UHIMR_CTW_END                          BIT12
-/* RSVD        BIT11 */
-#define UHIMR_C2HCMD                           BIT10
-#define UHIMR_CPWM2                            BIT9
-#define UHIMR_CPWM                                     BIT8
-#define UHIMR_HIGHDOK                          BIT7            /*  High Queue DMA OK Interrupt */
-#define UHIMR_MGNTDOK                          BIT6            /*  Management Queue DMA OK Interrupt */
-#define UHIMR_BKDOK                            BIT5            /*  AC_BK DMA OK Interrupt */
-#define UHIMR_BEDOK                            BIT4            /*  AC_BE DMA OK Interrupt */
-#define UHIMR_VIDOK                                    BIT3            /*  AC_VI DMA OK Interrupt */
-#define UHIMR_VODOK                            BIT2            /*  AC_VO DMA Interrupt */
-#define UHIMR_RDU                                      BIT1            /*  Receive Descriptor Unavailable */
-#define UHIMR_ROK                                      BIT0            /*  Receive DMA OK Interrupt */
-
-/*  USB Host Interrupt Status Extension bit */
-#define UHIMR_BCNDMAINT7                       BIT23
-#define UHIMR_BCNDMAINT6                       BIT22
-#define UHIMR_BCNDMAINT5                       BIT21
-#define UHIMR_BCNDMAINT4                       BIT20
-#define UHIMR_BCNDOK7                          BIT19
-#define UHIMR_BCNDOK6                          BIT18
-#define UHIMR_BCNDOK5                          BIT17
-#define UHIMR_BCNDOK4                          BIT16
-/*  bit14-15: RSVD */
-#define UHIMR_ATIMEND_E                        BIT13
-#define UHIMR_ATIMEND                          BIT12
-#define UHIMR_TXERR                                    BIT11
-#define UHIMR_RXERR                                    BIT10
-#define UHIMR_TXFOVW                           BIT9
-#define UHIMR_RXFOVW                           BIT8
-/*  bit2-7: RSVD */
-#define UHIMR_OCPINT                           BIT1
-/*  bit0: RSVD */
-
-
-#define HAL_NIC_UNPLUG_ISR                     0xFFFFFFFF      /*  The value when the NIC is unplugged for PCI. */
-#define HAL_NIC_UNPLUG_PCI_ISR         0xEAEAEAEA      /*  The value when the NIC is unplugged for PCI in PCI interrupt (page 3). */
-
-/*  */
-/*        8188 IMR/ISR bits */
-/*  */
-#define IMR_DISABLED_88E                       0x0
-/*  IMR DW0(0x0060-0063) Bit 0-31 */
-#define IMR_TXCCK_88E                          BIT30           /*  TXRPT interrupt when CCX bit of the packet is set */
-#define IMR_PSTIMEOUT_88E                      BIT29           /*  Power Save Time Out Interrupt */
-#define IMR_GTINT4_88E                         BIT28           /*  When GTIMER4 expires, this bit is set to 1 */
-#define IMR_GTINT3_88E                         BIT27           /*  When GTIMER3 expires, this bit is set to 1 */
-#define IMR_TBDER_88E                          BIT26           /*  Transmit Beacon0 Error */
-#define IMR_TBDOK_88E                          BIT25           /*  Transmit Beacon0 OK */
-#define IMR_TSF_BIT32_TOGGLE_88E       BIT24           /*  TSF Timer BIT32 toggle indication interrupt */
-#define IMR_BCNDMAINT0_88E             BIT20           /*  Beacon DMA Interrupt 0 */
-#define IMR_BCNDERR0_88E                       BIT16           /*  Beacon Queue DMA Error 0 */
-#define IMR_HSISR_IND_ON_INT_88E       BIT15           /*  HSISR Indicator (HSIMR & HSISR is true, this bit is set to 1) */
-#define IMR_BCNDMAINT_E_88E            BIT14           /*  Beacon DMA Interrupt Extension for Win7 */
-#define IMR_ATIMEND_88E                        BIT12           /*  CTWidnow End or ATIM Window End */
-#define IMR_HISR1_IND_INT_88E          BIT11           /*  HISR1 Indicator (HISR1 & HIMR1 is true, this bit is set to 1) */
-#define IMR_C2HCMD_88E                         BIT10           /*  CPU to Host Command INT Status, Write 1 clear */
-#define IMR_CPWM2_88E                          BIT9                    /*  CPU power Mode exchange INT Status, Write 1 clear */
-#define IMR_CPWM_88E                           BIT8                    /*  CPU power Mode exchange INT Status, Write 1 clear */
-#define IMR_HIGHDOK_88E                        BIT7                    /*  High Queue DMA OK */
-#define IMR_MGNTDOK_88E                        BIT6                    /*  Management Queue DMA OK */
-#define IMR_BKDOK_88E                          BIT5                    /*  AC_BK DMA OK */
-#define IMR_BEDOK_88E                          BIT4                    /*  AC_BE DMA OK */
-#define IMR_VIDOK_88E                          BIT3                    /*  AC_VI DMA OK */
-#define IMR_VODOK_88E                          BIT2                    /*  AC_VO DMA OK */
-#define IMR_RDU_88E                                    BIT1                    /*  Rx Descriptor Unavailable */
-#define IMR_ROK_88E                                    BIT0                    /*  Receive DMA OK */
-
-/*  IMR DW1(0x00B4-00B7) Bit 0-31 */
-#define IMR_BCNDMAINT7_88E             BIT27           /*  Beacon DMA Interrupt 7 */
-#define IMR_BCNDMAINT6_88E             BIT26           /*  Beacon DMA Interrupt 6 */
-#define IMR_BCNDMAINT5_88E             BIT25           /*  Beacon DMA Interrupt 5 */
-#define IMR_BCNDMAINT4_88E             BIT24           /*  Beacon DMA Interrupt 4 */
-#define IMR_BCNDMAINT3_88E             BIT23           /*  Beacon DMA Interrupt 3 */
-#define IMR_BCNDMAINT2_88E             BIT22           /*  Beacon DMA Interrupt 2 */
-#define IMR_BCNDMAINT1_88E             BIT21           /*  Beacon DMA Interrupt 1 */
-#define IMR_BCNDOK7_88E                        BIT20           /*  Beacon Queue DMA OK Interrupt 7 */
-#define IMR_BCNDOK6_88E                        BIT19           /*  Beacon Queue DMA OK Interrupt 6 */
-#define IMR_BCNDOK5_88E                        BIT18           /*  Beacon Queue DMA OK Interrupt 5 */
-#define IMR_BCNDOK4_88E                        BIT17           /*  Beacon Queue DMA OK Interrupt 4 */
-#define IMR_BCNDOK3_88E                        BIT16           /*  Beacon Queue DMA OK Interrupt 3 */
-#define IMR_BCNDOK2_88E                        BIT15           /*  Beacon Queue DMA OK Interrupt 2 */
-#define IMR_BCNDOK1_88E                        BIT14           /*  Beacon Queue DMA OK Interrupt 1 */
-#define IMR_ATIMEND_E_88E                      BIT13           /*  ATIM Window End Extension for Win7 */
-#define IMR_TXERR_88E                          BIT11           /*  Tx Error Flag Interrupt Status, write 1 clear. */
-#define IMR_RXERR_88E                          BIT10           /*  Rx Error Flag INT Status, Write 1 clear */
-#define IMR_TXFOVW_88E                         BIT9                    /*  Transmit FIFO Overflow */
-#define IMR_RXFOVW_88E                         BIT8                    /*  Receive FIFO Overflow */
-
-/*===================================================================
-=====================================================================
-Here the register defines are for 92C. When the define is as same with 92C,
-we will use the 92C's define for the consistency
-So the following defines for 92C is not entire!!!!!!
-=====================================================================
-=====================================================================*/
-/*
-Based on Datasheet V33---090401
-Register Summary
-Current IOREG MAP
-0x0000h ~ 0x00FFh   System Configuration (256 Bytes)
-0x0100h ~ 0x01FFh   MACTOP General Configuration (256 Bytes)
-0x0200h ~ 0x027Fh   TXDMA Configuration (128 Bytes)
-0x0280h ~ 0x02FFh   RXDMA Configuration (128 Bytes)
-0x0300h ~ 0x03FFh   PCIE EMAC Reserved Region (256 Bytes)
-0x0400h ~ 0x04FFh   Protocol Configuration (256 Bytes)
-0x0500h ~ 0x05FFh   EDCA Configuration (256 Bytes)
-0x0600h ~ 0x07FFh   WMAC Configuration (512 Bytes)
-0x2000h ~ 0x3FFFh   8051 FW Download Region (8196 Bytes)
-*/
-       /*  */
-       /*               8192C (TXPAUSE) transmission pause     (Offset 0x522, 8 bits) */
-       /*  */
-/*  Note: */
-/*     The  bits of stopping AC(VO/VI/BE/BK) queue in datasheet RTL8192S/RTL8192C are wrong, */
-/*     the correct arrangement is VO - Bit0, VI - Bit1, BE - Bit2, and BK - Bit3. */
-/*     8723 and 88E may be not correct either in the earlier version. Confirmed with DD Tim. */
-/*  By Bruce, 2011-09-22. */
-#define StopBecon              BIT6
-#define StopHigh                       BIT5
-#define StopMgt                        BIT4
-#define StopBK                 BIT3
-#define StopBE                 BIT2
-#define StopVI                 BIT1
-#define StopVO                 BIT0
-
-/*  */
 /*        8192C (RCR) Receive Configuration Register   (Offset 0x608, 32 bits) */
 /*  */
 #define RCR_APPFCS                             BIT31   /*  WMAC append FCS after pauload */
@@ -1557,10 +1296,6 @@ Current IOREG MAP
 #define SDIO_HIMR_ATIMEND_E_MSK                BIT26
 #define SDIO_HIMR_CTWEND_MSK                   BIT27
 
-/* RTL8188E SDIO Specific */
-#define SDIO_HIMR_MCU_ERR_MSK                  BIT28
-#define SDIO_HIMR_TSF_BIT32_TOGGLE_MSK         BIT29
-
 /*  SDIO Host Interrupt Service Routine */
 #define SDIO_HISR_RX_REQUEST                   BIT0
 #define SDIO_HISR_AVAL                                 BIT1
@@ -1583,10 +1318,6 @@ Current IOREG MAP
 #define SDIO_HISR_ATIMEND_E                    BIT26
 #define SDIO_HISR_CTWEND                               BIT27
 
-/* RTL8188E SDIO Specific */
-#define SDIO_HISR_MCU_ERR                              BIT28
-#define SDIO_HISR_TSF_BIT32_TOGGLE     BIT29
-
 #define MASK_SDIO_HISR_CLEAR           (SDIO_HISR_TXERR |\
                                                                        SDIO_HISR_RXERR |\
                                                                        SDIO_HISR_TXFOVW |\
@@ -1651,39 +1382,13 @@ Current IOREG MAP
 #define GPS_HWPDN_SL                   BIT21   /*  GPS HW PDn polarity control */
 #define GPS_FUNC_EN                    BIT22   /*  GPS function enable */
 
-/* 3 REG_LIFECTRL_CTRL */
-#define HAL92C_EN_PKT_LIFE_TIME_BK             BIT3
-#define HAL92C_EN_PKT_LIFE_TIME_BE             BIT2
-#define HAL92C_EN_PKT_LIFE_TIME_VI             BIT1
-#define HAL92C_EN_PKT_LIFE_TIME_VO             BIT0
-
-#define HAL92C_MSDU_LIFE_TIME_UNIT             128     /*  in us, said by Tim. */
-
-/* 2 8192D PartNo. */
-#define PARTNO_92D_NIC                                                 (BIT7|BIT6)
-#define PARTNO_92D_NIC_REMARK                          (BIT5|BIT4)
-#define PARTNO_SINGLE_BAND_VS                          BIT3
-#define PARTNO_SINGLE_BAND_VS_REMARK           BIT1
-#define PARTNO_CONCURRENT_BAND_VC                      (BIT3|BIT2)
-#define PARTNO_CONCURRENT_BAND_VC_REMARK       (BIT1|BIT0)
-
 /*  */
 /*  General definitions */
 /*  */
 
-#define LAST_ENTRY_OF_TX_PKT_BUFFER_8188E              176
-#define LAST_ENTRY_OF_TX_PKT_BUFFER_8812                       255
 #define LAST_ENTRY_OF_TX_PKT_BUFFER_8723B              255
-#define LAST_ENTRY_OF_TX_PKT_BUFFER_8192C              255
-#define LAST_ENTRY_OF_TX_PKT_BUFFER_DUAL_MAC   127
 
 #define POLLING_LLT_THRESHOLD                          20
 #define POLLING_READY_TIMEOUT_COUNT            1000
 
-
-/*  GPIO BIT */
-#define        HAL_8192C_HW_GPIO_WPS_BIT       BIT2
-#define        HAL_8192EU_HW_GPIO_WPS_BIT      BIT7
-#define        HAL_8188E_HW_GPIO_WPS_BIT       BIT7
-
 #endif /* __HAL_COMMON_H__ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_ioctl.h b/drivers/staging/rtl8723bs/include/rtw_ioctl.h
deleted file mode 100644 (file)
index 7179591..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#ifndef _RTW_IOCTL_H_
-#define _RTW_IOCTL_H_
-
-/*     00 - Success */
-/*     11 - Error */
-#define STATUS_SUCCESS                         (0x00000000L)
-#define STATUS_PENDING                         (0x00000103L)
-
-#define STATUS_UNSUCCESSFUL                    (0xC0000001L)
-#define STATUS_INSUFFICIENT_RESOURCES          (0xC000009AL)
-#define STATUS_NOT_SUPPORTED                   (0xC00000BBL)
-
-#define NDIS_STATUS_SUCCESS                    ((uint)STATUS_SUCCESS)
-#define NDIS_STATUS_PENDING                    ((uint)STATUS_PENDING)
-#define NDIS_STATUS_NOT_RECOGNIZED             ((uint)0x00010001L)
-#define NDIS_STATUS_NOT_COPIED                 ((uint)0x00010002L)
-#define NDIS_STATUS_NOT_ACCEPTED               ((uint)0x00010003L)
-#define NDIS_STATUS_CALL_ACTIVE                        ((uint)0x00010007L)
-
-#define NDIS_STATUS_FAILURE                    ((uint)STATUS_UNSUCCESSFUL)
-#define NDIS_STATUS_RESOURCES                  ((uint)STATUS_INSUFFICIENT_RESOURCES)
-#define NDIS_STATUS_CLOSING                    ((uint)0xC0010002L)
-#define NDIS_STATUS_BAD_VERSION                        ((uint)0xC0010004L)
-#define NDIS_STATUS_BAD_CHARACTERISTICS                ((uint)0xC0010005L)
-#define NDIS_STATUS_ADAPTER_NOT_FOUND          ((uint)0xC0010006L)
-#define NDIS_STATUS_OPEN_FAILED                        ((uint)0xC0010007L)
-#define NDIS_STATUS_DEVICE_FAILED              ((uint)0xC0010008L)
-#define NDIS_STATUS_MULTICAST_FULL             ((uint)0xC0010009L)
-#define NDIS_STATUS_MULTICAST_EXISTS           ((uint)0xC001000AL)
-#define NDIS_STATUS_MULTICAST_NOT_FOUND                ((uint)0xC001000BL)
-#define NDIS_STATUS_REQUEST_ABORTED            ((uint)0xC001000CL)
-#define NDIS_STATUS_RESET_IN_PROGRESS          ((uint)0xC001000DL)
-#define NDIS_STATUS_CLOSING_INDICATING         ((uint)0xC001000EL)
-#define NDIS_STATUS_NOT_SUPPORTED              ((uint)STATUS_NOT_SUPPORTED)
-#define NDIS_STATUS_INVALID_PACKET             ((uint)0xC001000FL)
-#define NDIS_STATUS_OPEN_LIST_FULL             ((uint)0xC0010010L)
-#define NDIS_STATUS_ADAPTER_NOT_READY          ((uint)0xC0010011L)
-#define NDIS_STATUS_ADAPTER_NOT_OPEN           ((uint)0xC0010012L)
-#define NDIS_STATUS_NOT_INDICATING             ((uint)0xC0010013L)
-#define NDIS_STATUS_INVALID_LENGTH             ((uint)0xC0010014L)
-#define NDIS_STATUS_INVALID_DATA               ((uint)0xC0010015L)
-#define NDIS_STATUS_BUFFER_TOO_SHORT           ((uint)0xC0010016L)
-#define NDIS_STATUS_INVALID_OID                        ((uint)0xC0010017L)
-#define NDIS_STATUS_ADAPTER_REMOVED            ((uint)0xC0010018L)
-#define NDIS_STATUS_UNSUPPORTED_MEDIA          ((uint)0xC0010019L)
-#define NDIS_STATUS_GROUP_ADDRESS_IN_USE       ((uint)0xC001001AL)
-#define NDIS_STATUS_FILE_NOT_FOUND             ((uint)0xC001001BL)
-#define NDIS_STATUS_ERROR_READING_FILE         ((uint)0xC001001CL)
-#define NDIS_STATUS_ALREADY_MAPPED             ((uint)0xC001001DL)
-#define NDIS_STATUS_RESOURCE_CONFLICT          ((uint)0xC001001EL)
-#define NDIS_STATUS_NO_CABLE                   ((uint)0xC001001FL)
-
-#define NDIS_STATUS_INVALID_SAP                        ((uint)0xC0010020L)
-#define NDIS_STATUS_SAP_IN_USE                 ((uint)0xC0010021L)
-#define NDIS_STATUS_INVALID_ADDRESS            ((uint)0xC0010022L)
-#define NDIS_STATUS_VC_NOT_ACTIVATED           ((uint)0xC0010023L)
-#define NDIS_STATUS_DEST_OUT_OF_ORDER          ((uint)0xC0010024L)  /*  cause 27 */
-#define NDIS_STATUS_VC_NOT_AVAILABLE           ((uint)0xC0010025L)  /*  cause 35, 45 */
-#define NDIS_STATUS_CELLRATE_NOT_AVAILABLE     ((uint)0xC0010026L)  /*  cause 37 */
-#define NDIS_STATUS_INCOMPATABLE_QOS           ((uint)0xC0010027L)  /*  cause 49 */
-#define NDIS_STATUS_AAL_PARAMS_UNSUPPORTED     ((uint)0xC0010028L)  /*  cause 93 */
-#define NDIS_STATUS_NO_ROUTE_TO_DESTINATION    ((uint)0xC0010029L)  /*  cause 3 */
-
-extern struct iw_handler_def  rtw_handlers_def;
-
-#endif /*  #ifndef __INC_CEINFO_ */
index 757efeb..380d8c9 100644 (file)
@@ -389,7 +389,7 @@ static int rtw_ndev_notifier_call(struct notifier_block *nb, unsigned long state
        if (dev->netdev_ops->ndo_do_ioctl != rtw_ioctl)
                return NOTIFY_DONE;
 
-       netdev_info(dev, FUNC_NDEV_FMT " state:%lu\n", FUNC_NDEV_ARG(dev),
+       netdev_dbg(dev, FUNC_NDEV_FMT " state:%lu\n", FUNC_NDEV_ARG(dev),
                    state);
 
        return NOTIFY_DONE;
index 805dc18..d5ad49d 100644 (file)
@@ -55,9 +55,9 @@ unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
                *offset += cnt;
 
        /*
-        * Using scatter-gather.  We have to go through the list one entry
-        * at a time.  Each s-g entry contains some number of pages, and
-        * each page has to be kmap()'ed separately.
+        * Using scatter-gather. We have to go through the list one entry
+        * at a time. Each s-g entry contains some number of pages which
+        * have to be copied one at a time.
         */
        } else {
                struct scatterlist *sg =
@@ -92,13 +92,11 @@ unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
                        while (sglen > 0) {
                                unsigned int plen = min(sglen, (unsigned int)
                                                PAGE_SIZE - poff);
-                               unsigned char *ptr = kmap(page);
 
                                if (dir == TO_XFER_BUF)
-                                       memcpy(ptr + poff, buffer + cnt, plen);
+                                       memcpy_to_page(page, poff, buffer + cnt, plen);
                                else
-                                       memcpy(buffer + cnt, ptr + poff, plen);
-                               kunmap(page);
+                                       memcpy_from_page(buffer + cnt, page, poff, plen);
 
                                /* Start at the beginning of the next page */
                                poff = 0;
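
Editor's note (illustration only): the comment above describes walking each scatter-gather entry page by page, and the patch swaps the open-coded kmap()/memcpy()/kunmap() sequence for the kernel's memcpy_to_page()/memcpy_from_page() helpers, which bundle the map, copy and unmap steps. The per-page chunking itself is unchanged; a plain-C model of it follows, where the buffer, the tiny page size and the copy stand-in are all local to this sketch:

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 16	/* tiny "page" so the chunking is easy to trace */

/* stand-in for memcpy_to_page(): copy plen bytes into one page at offset poff */
static void copy_to_page(char *page, size_t poff, const char *src, size_t plen)
{
	memcpy(page + poff, src, plen);
}

int main(void)
{
	const char buffer[] = "scatter-gather page copy";
	char pages[2][PAGE_SZ + 1] = { { 0 } };
	size_t cnt = 0, poff = 3, sglen = sizeof(buffer) - 1;
	unsigned int i = 0;

	while (sglen > 0) {
		size_t plen = sglen < PAGE_SZ - poff ? sglen : PAGE_SZ - poff;

		copy_to_page(pages[i++], poff, buffer + cnt, plen);
		poff = 0;	/* start at the beginning of the next page */
		cnt += plen;
		sglen -= plen;
	}
	printf("%s|%s\n", pages[0] + 3, pages[1]);
	/* prints: scatter-gathe|r page copy */
	return 0;
}
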
index a7c6eb0..55cb00e 100644 (file)
@@ -81,6 +81,7 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
        sm750_dev->pvMem =
                ioremap_wc(sm750_dev->vidmem_start, sm750_dev->vidmem_size);
        if (!sm750_dev->pvMem) {
+               iounmap(sm750_dev->pvReg);
                pr_err("Map video memory failed\n");
                ret = -EFAULT;
                goto exit;
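
Editor's note (illustration only): the one-line fix above releases the register mapping when the later video-memory ioremap_wc() fails, following the usual rule of unwinding earlier setup steps in reverse order on failure. A generic standalone model of that pattern; the resource names and helpers below are invented for the sketch:

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	void *regs = malloc(64);	/* stand-in for the first mapping */
	void *vidmem;

	if (!regs)
		return 1;

	/* second mapping; fails unless the program is given an argument */
	vidmem = (argc > 1) ? malloc(1024) : NULL;
	if (!vidmem) {
		free(regs);	/* the step the patch adds: undo the first mapping */
		fprintf(stderr, "video memory mapping failed\n");
		return 1;
	}

	free(vidmem);
	free(regs);
	return 0;
}
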
diff --git a/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset b/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
deleted file mode 100644 (file)
index c2359de..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-This file describes sysfs entries beneath /devices/platform/visorchipset.
-
-What:          install/error
-Date:          7/18/2014
-KernelVersion:         TBD
-Contact:       sparmaintainer@unisys.com
-Description:   used to send the ID of a string that should be displayed on
-               s-Par's automatic installation progress screen when an error
-               is encountered during installation. This field has no effect
-               if not in installation mode.
-Users:         sparmaintainer@unisys.com
-
-What:          install/remainingsteps
-Date:          7/18/2014
-KernelVersion:         TBD
-Contact:       sparmaintainer@unisys.com
-Description:   used to set the value of the progress bar on the s-Par automatic
-               installation progress screen. This field has no effect if not in
-               installation mode.
-Users:         sparmaintainer@unisys.com
-
-What:          install/textid
-Date:          7/18/2014
-KernelVersion:         TBD
-Contact:       sparmaintainer@unisys.com
-Description:   used to send the ID of a string that should be displayed on
-               s-Par's automatic installation progress screen. Setting this
-               field when not in installation mode (boottotool was set on
-               the previous guest boot) has no effect.
-Users:         sparmaintainer@unisys.com
-
-What:          install/boottotool
-Date:          7/18/2014
-KernelVersion:         TBD
-Contact:       sparmaintainer@unisys.com
-Description:   The boottotool flag controls s-Par behavior on the next boot of
-               this guest. Setting the flag will cause the guest to boot from
-               the utility and installation image, which will use the value in
-               the toolaction field to determine what operation is being
-               requested.
-Users:         sparmaintainer@unisys.com
-
-What:          install/toolaction
-Date:          7/18/2014
-KernelVersion:         TBD
-Contact:       sparmaintainer@unisys.com
-Description:   This field is used to tell s-Par which type of recovery tool
-               action to perform on the next guest boot-up. The meaning of the
-               value is dependent on the type of installation software used to
-               commission the guest.
-Users:         sparmaintainer@unisys.com
-
-What:          parahotplug/deviceenabled
-Date:          7/18/2014
-KernelVersion:         TBD
-Contact:       sparmaintainer@unisys.com
-Description:   This entry is used by a Unisys support script installed on the
-               guest, and triggered by a udev event. The support script is
-               responsible for enabling and disabling SR-IOV devices when the
-               PF device is being recovered in another guest.
-
-               Some SR-IOV devices have problems when the PF is reset without
-               first disabling all VFs attached to that PF. s-Par handles this
-               situation by sending a message to guests using these VFs, and
-               the script will disable the device. When the PF is recovered,
-               another message is sent to the guests to re-enable the VFs.
-
-               The parahotplug/deviceenabled interface is used to acknowledge
-               the recovery message.
-Users:         sparmaintainer@unisys.com
-
-What:          parahotplug/devicedisabled
-Date:          7/18/2014
-KernelVersion:         TBD
-Contact:       sparmaintainer@unisys.com
-Description:   This entry is used by a Unisys support script installed on the
-               guest, and triggered by a udev event. The support script is
-               responsible for enabling and disabling SR-IOV devices when the
-               PF device is being recovered in another guest.
-
-               Some SR-IOV devices have problems when the PF is reset without
-               first disabling all VFs attached to that PF. s-Par handles this
-               situation by sending a message to guests using these VFs, and
-               the script will disable the device. When the PF is recovered,
-               another message is sent to the guests to re-enable the VFs.
-
-               The parahotplug/devicedisaabled interface is used to acknowledge
-               the initial recovery message.
-Users:         sparmaintainer@unisys.com
diff --git a/drivers/staging/unisys/Documentation/overview.txt b/drivers/staging/unisys/Documentation/overview.txt
deleted file mode 100644 (file)
index cf29f88..0000000
+++ /dev/null
@@ -1,337 +0,0 @@
-1. Overview
------------
-
-This document describes the driver set for Unisys Secure Partitioning
-(s-Par(R)).
-
-s-Par is firmware that provides hardware partitioning capabilities for
-splitting large-scale Intel x86 servers into multiple isolated
-partitions. s-Par provides a set of para-virtualized device drivers to
-allow guest partitions on the same server to share devices that would
-normally be unsharable, specifically:
-
-* visornic - network interface
-* visorhba - scsi disk adapter
-* visorinput - keyboard and mouse
-
-These drivers conform to the standard Linux bus/device model described
-within Documentation/driver-api/driver-model/, and utilize a driver named
-visorbus to present the virtual busses involved. Drivers in the 'visor*'
-driver set are commonly referred to as "guest drivers" or "client drivers".
-All drivers except visorbus expose a device of a specific usable class to the
-Linux guest environment (e.g., block, network, or input), and are collectively
-referred to as "function drivers".
-
-The back-end for each device is owned and managed by a small,
-single-purpose service partition in the s-Par firmware, which communicates
-with each guest partition sharing that device through an area of shared memory
-called a "channel". In s-Par nomenclature, the back-end is often referred to
-as the "service partition", "IO partition" (for virtual network and scsi disk
-devices), or "console partition" (for virtual keyboard and mouse devices).
-
-Each virtual device requires exactly 1 dedicated channel, which the guest
-driver and back-end use to communicate.  The hypervisor need not intervene
-(other than normal interrupt handling) in the interactions that occur across
-this channel.
-
-NOT covered in this document:
-
-* s-Par also supports sharing physical PCI adapters via SR-IOV, but
-  because this requires no specific support in the guest partitions, it will
-  not be discussed in this document.  Shared SR-IOV devices should be used
-  wherever possible for highest performance.
-
-* Because the s-Par back-end provides a standard EFI framebuffer to each
-  guest, the already-existing efifb Linux driver is used to provide guest
-  video access. Thus, the only s-Par-unique support that is necessary to
-  provide a guest graphics console are for keyboard and mouse (via visorinput).
-
-
-2. Driver Descriptions
-----------------------
-
-2.1. visorbus
--------------
-
-2.1.1. Overview
----------------
-
-The visorbus driver handles the virtual busses on which all of the virtual
-devices reside. It provides a registration function named
-visorbus_register_visor_driver() that is called by each of the function
-drivers at initialization time, which the function driver uses to tell
-visorbus about the device classes (via specifying a list of device type
-GUIDs) it wants to handle. For use by function drivers, visorbus provides
-implementation for struct visor_driver and struct visor_device, as well
-as utility functions for communicating with the back-end.
-
-visorbus is associated with ACPI id "PNP0A07" in modules.alias, so if built
-as a module it will typically be loaded automatically via standard udev or
-systemd (God help us) configurations.
-
-visorbus can similarly force auto-loading of function drivers for virtual
-devices it discovers, as it includes a MODALIAS environment variable of this
-form in the hotplug uevent environment when each virtual device is
-discovered:
-
-    visorbus:<device type GUID>
-
-visorbus notifies each function driver when a device of its registered class
-arrives and departs, by calling the function driver's probe() and remove()
-methods.
-
-The actual struct device objects that correspond to each virtual bus and
-each virtual device are created and owned by visorbus.  These device objects
-are created in response to messages from the s-Par back-end received on a
-special control channel called the "controlvm channel" (each guest partition
-has access to exactly 1 controlvm channel), and have a lifetime that is
-independent of the function drivers that control them.
-
-2.1.2. "struct visor device" Function Driver Interfaces
--------------------------------------------------------
-
-The interface between visorbus and its function drivers is defined in
-visorbus.h, and described below.
-
-When a visor function driver loads, it calls visorbus_register_visor_driver()
-to register itself with visorbus. The significant information passed in this
-exchange is as follows:
-
-* the GUID(s) of the channel type(s) that are handled by this driver, as
-  well as a "friendly name" identifying each (this will be published under
-  /sys/devices/visorbus<x>/dev<y>)
-
-* the addresses of callback functions to be called whenever a virtual
-  device/channel with the appropriate channel-type GUID(s) appears or
-  disappears
-
-* the address of a "channel_interrupt" function, which will be automatically
-  called at specific intervals to enable the driver to poll the device
-  channel for activity
-
-The following functions implemented within each function driver will be
-called automatically by the visorbus driver at appropriate times:
-
-* The probe() function notifies about the creation of each new virtual
-  device/channel instance.
-
-* The remove() function notifies about the destruction of a virtual
-  device/channel instance.
-
-* The channel_interrupt() function is called at frequent intervals to
-  give the function driver an opportunity to poll the virtual device channel
-  for requests.  Information is passed to this function to enable the
-  function driver to use the visorchannel_signalinsert() and
-  visorchannel_signalremove() functions to respond to and initiate activity
-  over the channel.  (Note that since it is the visorbus driver that
-  determines when this is called, it is very easy to switch to
-  interrupt-driven mechanisms when available for particular virtual device
-  types.)
-
-* The pause() function is called should it ever be necessary to direct the
-  function driver to temporarily stop accessing the device channel.  An
-  example of when this is needed is when the service partition implementing
-  the back-end of the virtual device needs to be recovered.  After a
-  successful return of pause(), the function driver must not access the
-  device channel until a subsequent resume() occurs.
-
-* The resume() function is the "book-end" to pause(), and is described above.
-
-2.1.3. sysfs Advertised Information
------------------------------------
-
-Because visorbus is a standard Linux bus driver in the model described in
-Documentation/driver-api/driver-model/, the hierarchy of s-Par virtual devices is
-published in the sysfs tree beneath /bus/visorbus/, e.g.,
-/sys/bus/visorbus/devices/ might look like:
-
-    vbus1:dev1 -> ../../../devices/visorbus1/vbus1:dev1
-    vbus1:dev2 -> ../../../devices/visorbus1/vbus1:dev2
-    vbus1:dev3 -> ../../../devices/visorbus1/vbus1:dev3
-    vbus2:dev0 -> ../../../devices/visorbus2/vbus2:dev0
-    vbus2:dev1 -> ../../../devices/visorbus2/vbus2:dev1
-    vbus2:dev2 -> ../../../devices/visorbus2/vbus2:dev2
-    visorbus1 -> ../../../devices/visorbus1
-    visorbus2 -> ../../../devices/visorbus2
-
-visor_device notes:
-
-* Each visorbus<n> entry denotes the existence of a struct visor_device
-  denoting virtual bus #<n>.  A unique s-Par channel exists for each such
-  virtual bus.
-
-* Virtual bus numbers uniquely identify s-Par back-end service partitions.
-  In this example, bus 1 corresponds to the s-Par console partition
-  (controls keyboard, video, and mouse), whereas bus 2 corresponds to the
-  s-Par IO partition (controls network and disk).
-
-* Each vbus<x>:dev<y> entry denotes the existence of a struct visor_device
-  denoting virtual device #<y> outboard of virtual bus #<x>.  A unique s-Par
-  channel exists for each such virtual device.
-
-* If a function driver has loaded and claimed a particular device, the
-  bus/visorbus/devices/vbus<x>:dev<y>/driver symlink will indicate that
-  function driver.
-
-Every active visorbus device will have a sysfs subtree under:
-
-    /sys/devices/visorbus<x>/vbus<x>:dev<y>/
-
-The following files exist under /sys/devices/visorbus<x>/vbus<x>:dev<y>:
-
-    subsystem                 link to sysfs tree that describes the
-                              visorbus bus type; e.g.:
-                                  ../../../bus/visorbus
-
-    driver                    link to sysfs tree that describes the
-                              function driver controlling this device;
-                              e.g.:
-                                  ../../../bus/visorbus/drivers/visorhba
-                              Note that this "driver" link will not exist
-                              if the appropriate function driver has not
-                              been loaded yet.
-
-    channel                   properties of the device channel (all in
-                              ascii text format)
-
-        clientpartition       handle identifying the guest (client) side
-                              of this channel, e.g. 0x10000000.
-
-        nbytes                total size of this channel in bytes
-
-        physaddr              the guest physical address for the base of
-                              the channel
-
-        typeguid              a GUID identifying the channel type, in
-                              xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx notation
-
-        typename              a "friendly name" for this channel type, e.g.,
-                              "keyboard".  Note that this name is provided by
-                              a particular function driver, so "typename"
-                              will return an empty string until AFTER the
-                              appropriate function driver controlling this
-                              channel type is loaded
-
-        zoneguid              a GUID identifying the channel zone, in
-                              xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx notation
-
-
-2.2. visorhba
--------------
-
-The visorhba driver registers with visorbus as the function driver to
-handle virtual scsi disk devices, specified using the
-VISOR_VHBA_CHANNEL_GUID type in the visorbus_register_visor_driver()
-call. visorhba uses scsi_add_host() to expose a Linux block device
-(e.g., /sys/block/) in the guest environment for each s-Par virtual device.
-
-visorhba provides access to a shared SCSI host bus adapter and one or more
-disk devices, by proxying SCSI commands between the guest and the service
-partition that owns the shared SCSI adapter, using a channel between the
-guest and the service partition. The disks that appear on the shared bus
-are defined by the s-Par configuration and enforced by the service partition,
-while the guest driver handles sending commands and processing responses. Each
-disk is shared as a whole to a guest. Sharing the bus adapter in this way
-provides resiliency; should the device encounter an error, only the service
-partition is rebooted, and the device is reinitialized. This allows
-guests to continue running and to recover from the error.
-
-When compiled as a module, visorhba can be autoloaded by visorbus in
-standard udev/systemd environments, as it includes the modules.alias
-definition:
-
-    "visorbus:"+VISOR_VHBA_CHANNEL_GUID_STR
-
-i.e.:
-
-    alias visorbus:414815ed-c58c-11da-95a9-00e08161165f visorhba
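-
-Putting these pieces together, a function driver describes the channel
-type(s) it handles and registers itself with visorbus.  The following is a
-minimal, illustrative sketch only: the channel-type table, MODULE_DEVICE_TABLE
-and MODULE_ALIAS lines mirror the visorhba sources removed later in this
-patch, while the probe/remove/pause/resume callbacks and the exact layout of
-struct visor_driver are simplified approximations of the visorbus API:
-
-    #include <linux/visorbus.h>
-
-    /* Channel type(s) this function driver claims (as in visorhba). */
-    static struct visor_channeltype_descriptor visorhba_channel_types[] = {
-            { VISOR_VHBA_CHANNEL_GUID, "sparvhba",
-              sizeof(struct channel_header), VISOR_VHBA_CHANNEL_VERSIONID },
-            {}
-    };
-    MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
-    MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
-
-    /* Sketch of the driver object handed to visorbus_register_visor_driver();
-     * the callback names here are placeholders, not the real driver's.
-     */
-    static struct visor_driver visorhba_driver = {
-            .name = "visorhba",
-            .owner = THIS_MODULE,
-            .channel_types = visorhba_channel_types,
-            .probe = visorhba_probe,    /* create the Scsi_Host for the channel */
-            .remove = visorhba_remove,
-            .pause = visorhba_pause,
-            .resume = visorhba_resume,
-    };
-
-    /* Typically called from the module's init function: */
-    err = visorbus_register_visor_driver(&visorhba_driver);
-
-visornic and visorinput follow the same pattern with their own channel GUIDs.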
-
-
-2.3. visornic
--------------
-
-The visornic driver registers with visorbus as the function driver to
-handle virtual network devices, specified using the
-VISOR_VNIC_CHANNEL_GUID type in the visorbus_register_visor_driver()
-call. visornic uses register_netdev() to expose a Linux device of class net
-(e.g., /sys/class/net/) in the guest environment for each s-Par virtual
-device.
-
-visornic provides a paravirtualized network interface to a
-guest by proxying buffer information between the guest and the service
-partition that owns the shared network interface, using a channel
-between the guest and the service partition. The connectivity of this
-interface with the shared interface and possibly other guest
-partitions is defined by the s-Par configuration and enforced by the
-service partition; the guest driver handles communication and link
-status.
-
-When compiled as a module, visornic can be autoloaded by visorbus in
-standard udev/systemd environments, as it includes the modules.alias
-definition:
-
-    "visorbus:"+VISOR_VNIC_CHANNEL_GUID_STR
-
-i.e.:
-
-    alias visorbus:8cd5994d-c58e-11da-95a9-00e08161165f visornic
-
-
-2.4. visorinput
----------------
-
-The visorinput driver registers with visorbus as the function driver to
-handle human input devices, specified using the
-VISOR_KEYBOARD_CHANNEL_GUID and VISOR_MOUSE_CHANNEL_GUID
-types in the visorbus_register_visor_driver() call. visorinput uses
-input_register_device() to expose devices of class input
-(e.g., /sys/class/input/) for virtual keyboard and virtual mouse devices.
-An s-Par virtual keyboard device maps 1-to-1 with a Linux input device
-named "visor Keyboard", while an s-Par virtual mouse device has two Linux input
-devices created for it: one named "visor Wheel" and one named "visor Mouse".
-
-By registering as input class devices, modern versions of X will
-automatically find and properly use s-Par virtual keyboard and mouse devices.
-As the s-Par back-end reports keyboard and mouse activity via events on the
-virtual device channel, the visorinput driver delivers the activity to the
-Linux environment by calling input_report_key() and input_report_abs().
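-
-As a rough illustration of that flow, the standard Linux input API calls look
-like the sketch below.  This is not taken from the visorinput sources; the
-device setup, capability bits, and key code are illustrative only:
-
-    #include <linux/input.h>
-
-    struct input_dev *visor_kbd = input_allocate_device();
-
-    visor_kbd->name = "visor Keyboard";
-    __set_bit(EV_KEY, visor_kbd->evbit);      /* device generates key events */
-    __set_bit(KEY_A, visor_kbd->keybit);      /* illustrative key capability */
-
-    if (input_register_device(visor_kbd))
-            input_free_device(visor_kbd);     /* registration failed */
-
-    /* Later, when the back-end reports a key press on the channel: */
-    input_report_key(visor_kbd, KEY_A, 1);    /* 1 = pressed, 0 = released */
-    input_sync(visor_kbd);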
-
-You can interact with the guest console using the usyscon Partition Desktop
-(a.k.a., "pd") application, provided as part of s-Par.  After installing the
-usyscon Partition Desktop into a Linux environment via the
-usyscon_partitiondesktop-*.rpm, or into a Windows environment via
-PartitionDesktop.msi, you will be able to launch a console for your guest
-Linux environment by clicking the console icon in the s-Par web UI.
-
-When compiled as a module, visorinput can be autoloaded by visorbus in
-standard udev/systemd environments, as it includes the modules.alias
-definition:
-
-    "visorbus:"+VISOR_MOUSE_CHANNEL_GUID_STR
-    "visorbus:"+VISOR_KEYBOARD_CHANNEL_GUID_STR
-
-i.e.:
-
-    alias visorbus:c73416d0-b0b8-44af-b304-9d2ae99f1b3d visorinput
-    alias visorbus:addf07d4-94a9-46e2-81c3-61abcdbdbd87 visorinput
-
-
-3. Minimum Required Driver Set
-------------------------------
-
-visorbus is required for every Linux guest running under s-Par.
-
-visorhba is typically required for a Linux guest running under s-Par, as it
-is required if your guest boot disk is a virtual device provided by the s-Par
-back-end, which is the default configuration.  However, for advanced
-configurations where the Linux guest boots via an SR-IOV-provided HBA or
-SAN disk for example, visorhba is not technically required.
-
-visornic is typically required for a Linux guest running under s-Par, as it
-is required if your guest network interface is a virtual device provided by
-the s-Par back-end, which is the default configuration.  However, for
-configurations where the Linux guest is provided with an SR-IOV NIC
-for example, visornic is not technically required.
-
-visorinput is only required for a Linux guest running under s-Par if you
-require graphics-mode access to your guest console.
diff --git a/drivers/staging/unisys/Kconfig b/drivers/staging/unisys/Kconfig
deleted file mode 100644 (file)
index 43fe1ce..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Unisys SPAR driver configuration
-#
-menuconfig UNISYSSPAR
-       bool "Unisys SPAR driver support"
-       help
-         Support for the Unisys SPAR drivers
-
-if UNISYSSPAR
-
-source "drivers/staging/unisys/visornic/Kconfig"
-source "drivers/staging/unisys/visorinput/Kconfig"
-source "drivers/staging/unisys/visorhba/Kconfig"
-
-endif # UNISYSSPAR
diff --git a/drivers/staging/unisys/MAINTAINERS b/drivers/staging/unisys/MAINTAINERS
deleted file mode 100644 (file)
index aaddc61..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-Unisys s-Par drivers
-M:     David Kershner <sparmaintainer@unisys.com>
-S:     Maintained
-F:     drivers/staging/unisys/Documentation/overview.txt
-F:     drivers/staging/unisys/
diff --git a/drivers/staging/unisys/Makefile b/drivers/staging/unisys/Makefile
deleted file mode 100644 (file)
index c0f76cc..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Unisys SPAR drivers
-#
-obj-$(CONFIG_UNISYS_VISORNIC)          += visornic/
-obj-$(CONFIG_UNISYS_VISORINPUT)                += visorinput/
-obj-$(CONFIG_UNISYS_VISORHBA)          += visorhba/
diff --git a/drivers/staging/unisys/TODO b/drivers/staging/unisys/TODO
deleted file mode 100644 (file)
index d863f26..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-TODO:
-       - enhance visornic to use channel_interrupt() hook instead of a
-         kernel thread
-       - enhance visorhba to use channel_interrupt() hook instead of a
-         kernel thread
-       - teach visorbus to handle virtual interrupts triggered by s-Par
-         back-end, and call function driver's channel_interrupt() function
-         when they occur
-       - enhance debugfs interfaces (e.g., per device, etc.)
-       - upgrade/remove deprecated workqueue operations
-       - move individual drivers into proper driver subsystems
-
-Patches to:
-       Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-       Ken Cox <jkc@redhat.com>
-       Unisys s-Par maintainer mailing list <sparmaintainer@unisys.com>
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
deleted file mode 100644 (file)
index 9ef812c..0000000
+++ /dev/null
@@ -1,571 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (C) 2010 - 2016 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#ifndef __IOCHANNEL_H__
-#define __IOCHANNEL_H__
-
-/*
- * Everything needed for IOPart-GuestPart communication is defined in
- * this file. Note: Everything is OS-independent because this file is
- * used by Windows, Linux and possibly EFI drivers.
- *
- * Communication flow between the IOPart and GuestPart uses the channel header's
- * channel state. The following states are currently being used:
- *       UNINIT(All Zeroes), CHANNEL_ATTACHING, CHANNEL_ATTACHED, CHANNEL_OPENED
- *
- * Additional states will be used later. No locking is needed to switch between
- * states due to the following rules:
- *
- *      1.  IOPart is the only partition allowed to change from UNINIT
- *      2.  IOPart is the only partition allowed to change from
- *             CHANNEL_ATTACHING
- *      3.  GuestPart is the only partition allowed to change from
- *             CHANNEL_ATTACHED
- *
- * The state changes are the following: IOPart sees the channel is in UNINIT,
- *        UNINIT -> CHANNEL_ATTACHING (performed only by IOPart)
- *        CHANNEL_ATTACHING -> CHANNEL_ATTACHED (performed only by IOPart)
- *        CHANNEL_ATTACHED -> CHANNEL_OPENED (performed only by GuestPart)
- */
-
-#include <linux/uuid.h>
-#include <linux/skbuff.h>
-#include <linux/visorbus.h>
-
-/*
- * Must increment these whenever you insert or delete fields within this channel
- * struct. Also increment whenever you change the meaning of fields within this
- * channel struct so as to break pre-existing software. Note that you can
- * usually add fields to the END of the channel struct without needing to
- * increment this.
- */
-#define VISOR_VHBA_CHANNEL_VERSIONID 2
-#define VISOR_VNIC_CHANNEL_VERSIONID 2
-
-/*
- * Everything necessary to handle SCSI & NIC traffic between Guest Partition and
- * IO Partition is defined below.
- */
-
-/*
- * Define the two queues per data channel between iopart and ioguestparts.
- *     IOCHAN_TO_IOPART -- used by guest to 'insert' signals to iopart.
- *     IOCHAN_FROM_IOPART -- used by guest to 'remove' signals from IO part.
- */
-#define IOCHAN_TO_IOPART 0
-#define IOCHAN_FROM_IOPART 1
-
-/* Size of cdb - i.e., SCSI cmnd */
-#define MAX_CMND_SIZE 16
-
-/* Unisys-specific DMA direction values */
-enum uis_dma_data_direction {
-       UIS_DMA_BIDIRECTIONAL = 0,
-       UIS_DMA_TO_DEVICE = 1,
-       UIS_DMA_FROM_DEVICE = 2,
-       UIS_DMA_NONE = 3
-};
-
-#define MAX_SENSE_SIZE 64
-#define MAX_PHYS_INFO 64
-
-/*
- * enum net_types - Various types of network packets that can be sent in cmdrsp.
- * @NET_RCV_POST:      Submit buffer to hold receiving incoming packet.
- * @NET_RCV:           visornic -> uisnic. Incoming packet received.
- * @NET_XMIT:          uisnic -> visornic. For outgoing packet.
- * @NET_XMIT_DONE:     visornic -> uisnic. Outgoing packet xmitted.
- * @NET_RCV_ENBDIS:    uisnic -> visornic. Enable/Disable packet reception.
- * @NET_RCV_ENBDIS_ACK:        visornic -> uisnic. Acknowledge enable/disable packet.
- * @NET_RCV_PROMISC:   uisnic -> visornic. Enable/Disable promiscuous mode.
- * @NET_CONNECT_STATUS:        visornic -> uisnic. Indicate the loss or restoration of
- *                     a network connection.
- * @NET_MACADDR:       uisnic -> visornic. Indicates the client has requested
- *                     to update its MAC address.
- * @NET_MACADDR_ACK:   MAC address acknowledge.
- */
-enum net_types {
-       NET_RCV_POST = 0,
-       NET_RCV,
-       NET_XMIT,
-       NET_XMIT_DONE,
-       NET_RCV_ENBDIS,
-       NET_RCV_ENBDIS_ACK,
-       /* Reception */
-       NET_RCV_PROMISC,
-       NET_CONNECT_STATUS,
-       NET_MACADDR,
-       NET_MACADDR_ACK,
-};
-
-/* Minimum eth data size */
-#define ETH_MIN_DATA_SIZE 46
-#define ETH_MIN_PACKET_SIZE (ETH_HLEN + ETH_MIN_DATA_SIZE)
-
-/* Maximum data size */
-#define VISOR_ETH_MAX_MTU 16384
-
-#ifndef MAX_MACADDR_LEN
-/* Number of bytes in MAC address */
-#define MAX_MACADDR_LEN 6
-#endif
-
-/* Various types of scsi task mgmt commands. */
-enum task_mgmt_types {
-       TASK_MGMT_ABORT_TASK = 1,
-       TASK_MGMT_BUS_RESET,
-       TASK_MGMT_LUN_RESET,
-       TASK_MGMT_TARGET_RESET,
-};
-
-/* Various types of vdisk mgmt commands. */
-enum vdisk_mgmt_types {
-       VDISK_MGMT_ACQUIRE = 1,
-       VDISK_MGMT_RELEASE,
-};
-
-struct phys_info {
-       u64 pi_pfn;
-       u16 pi_off;
-       u16 pi_len;
-} __packed;
-
-#define MIN_NUMSIGNALS 64
-
-/* Structs with pragma pack. */
-
-struct guest_phys_info {
-       u64 address;
-       u64 length;
-} __packed;
-
-/*
- * struct uisscsi_dest
- * @channel: Bus number.
- * @id:      Target number.
- * @lun:     Logical unit number.
- */
-struct uisscsi_dest {
-       u32 channel;
-       u32 id;
-       u32 lun;
-} __packed;
-
-struct vhba_wwnn {
-       u32 wwnn1;
-       u32 wwnn2;
-} __packed;
-
-/*
- * struct vhba_config_max
- * @max_channel: Maximum channel for devices attached to this bus.
- * @max_id:     Maximum SCSI ID for devices attached to bus.
- * @max_lun:    Maximum SCSI LUN for devices attached to bus.
- * @cmd_per_lun: Maximum number of outstanding commands per LUN.
- * @max_io_size: Maximum io size for devices attached to this bus. Max io size
- *              is often determined by the resources of the HBA,
- *              e.g., max scatter-gather list length * page size / sector size.
- *
- * WARNING: Values stored in this structure must contain maximum counts (not
- * maximum values).
- *
- * 20 bytes
- */
-struct vhba_config_max {
-       u32 max_channel;
-       u32 max_id;
-       u32 max_lun;
-       u32 cmd_per_lun;
-       u32 max_io_size;
-} __packed;
-
-/*
- * struct uiscmdrsp_scsi
- *
- * @handle:            The handle to the cmd that was received. Send it back as
- *                     is in the rsp packet.
- * @cmnd:              The cdb for the command.
- * @bufflen:           Length of data to be transferred out or in.
- * @guest_phys_entries:        Number of entries in scatter-gather list.
- * @struct gpi_list:   Physical address information for each fragment.
- * @data_dir:          Direction of the data, if any.
- * @struct vdest:      Identifies the virtual hba, id, channel, lun to which
- *                     cmd was sent.
- * @linuxstat:         Original Linux status used by Linux vdisk.
- * @scsistat:          The scsi status.
- * @addlstat:          Non-scsi status.
- * @sensebuf:          Sense info in case cmd failed. sensebuf holds the
- *                     sense_data struct. See sense_data struct for more
- *                     details.
- * @*vdisk:            Pointer to the vdisk to clean up when IO completes.
- * @no_disk_result:    Used to return no disk inquiry result when
- *                     no_disk_result is set to 1
- *                     scsi.scsistat is SAM_STAT_GOOD
- *                     scsi.addlstat is 0
- *                     scsi.linuxstat is SAM_STAT_GOOD
- *                     That is, there is NO error.
- */
-struct uiscmdrsp_scsi {
-       u64 handle;
-       u8 cmnd[MAX_CMND_SIZE];
-       u32 bufflen;
-       u16 guest_phys_entries;
-       struct guest_phys_info gpi_list[MAX_PHYS_INFO];
-       u32 data_dir;
-       struct uisscsi_dest vdest;
-       /* Needed to queue the rsp back to cmd originator. */
-       int linuxstat;
-       u8 scsistat;
-       u8 addlstat;
-#define ADDL_SEL_TIMEOUT 4
-       /* The following fields are need to determine the result of command. */
-       u8 sensebuf[MAX_SENSE_SIZE];
-       void *vdisk;
-       int no_disk_result;
-} __packed;
-
-/*
- * Defines to support sending correct inquiry result when no disk is
- * configured.
- *
- * From SCSI SPC2 -
- *
- * If the target is not capable of supporting a device on this logical unit, the
- * device server shall set this field to 7Fh (PERIPHERAL QUALIFIER set to 011b
- * and PERIPHERAL DEVICE TYPE set to 1Fh).
- *
- * The device server is capable of supporting the specified peripheral device
- * type on this logical unit. However, the physical device is not currently
- * connected to this logical unit.
- */
-
-/*
- * Peripheral qualifier of 0x3
- * Peripheral type of 0x1f
- * Specifies no device but target present
- */
-#define DEV_NOT_CAPABLE 0x7f
-/*
- * Peripheral qualifier of 0x1
- * Peripheral type of 0 - disk
- * Specifies device capable, but not present
- */
-#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20
-/* HiSup = 1; shows support for report luns must be returned for lun 0. */
-#define DEV_HISUPPORT 0x10
-
-/*
- * NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length
- * in buf[4] some Linux code accesses bytes beyond 5 to retrieve vendor, product
- * and revision. Yikes! So let us always send back 36 bytes, the minimum for
- * inquiry result.
- */
-#define NO_DISK_INQUIRY_RESULT_LEN 36
-/* 5 bytes minimum for inquiry result */
-#define MIN_INQUIRY_RESULT_LEN 5
-
-/* SCSI device version for no disk inquiry result */
-/* indicates SCSI SPC2 (SPC3 is 5) */
-#define SCSI_SPC2_VER 4
-
-/* Struct and Defines to support sense information. */
-
-/*
- * The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
- * initialized in exactly the manner that is recommended in Windows (hence the
- * odd values).
- * When set, these fields will have the following values:
- * ErrorCode = 0x70            indicates current error
- * Valid = 1                   indicates sense info is valid
- * SenseKey                    contains sense key as defined by SCSI specs.
- * AdditionalSenseCode         contains additional sense code as defined by SCSI specs.
- * AdditionalSenseCodeQualifier        contains qualifier to sense code as defined by
- *                             scsi docs.
- * AdditionalSenseLength       will be sizeof(sense_data)-8=10.
- */
-struct sense_data {
-       u8 errorcode:7;
-       u8 valid:1;
-       u8 segment_number;
-       u8 sense_key:4;
-       u8 reserved:1;
-       u8 incorrect_length:1;
-       u8 end_of_media:1;
-       u8 file_mark:1;
-       u8 information[4];
-       u8 additional_sense_length;
-       u8 command_specific_information[4];
-       u8 additional_sense_code;
-       u8 additional_sense_code_qualifier;
-       u8 fru_code;
-       u8 sense_key_specific[3];
-} __packed;
-
-/*
- * struct net_pkt_xmt
- * @len:                   Full length of data in the packet.
- * @num_frags:             Number of fragments in frags containing data.
- * @struct phys_info frags: Physical page information.
- * @ethhdr:                The ethernet header.
- * @struct lincsum:        These are needed for csum at uisnic end.
- *      @valid:            1 = struct is valid - else ignore.
- *      @hrawoffv:  1 = hrawoff is valid.
- *      @nhrawoffv: 1 = nhrawoff is valid.
- *      @protocol:  Specifies packet protocol.
- *      @csum:     Value used to set skb->csum at IOPart.
- *      @hrawoff:   Value used to set skb->h.raw at IOPart. hrawoff points to
- *                 the start of the TRANSPORT LAYER HEADER.
- *      @nhrawoff:  Value used to set skb->nh.raw at IOPart. nhrawoff points to
- *                 the start of the NETWORK LAYER HEADER.
- *
- * NOTE:
- * The full packet is described in frags but the ethernet header is separately
- * kept in ethhdr so that uisnic doesn't have to "MAP" the guest memory to get to
- * the header. uisnic needs ethhdr to determine how to route the packet.
- */
-struct net_pkt_xmt {
-       int len;
-       int num_frags;
-       struct phys_info frags[MAX_PHYS_INFO];
-       char ethhdr[ETH_HLEN];
-       struct {
-               u8 valid;
-               u8 hrawoffv;
-               u8 nhrawoffv;
-               __be16 protocol;
-               __wsum csum;
-               u32 hrawoff;
-               u32 nhrawoff;
-       } lincsum;
-} __packed;
-
-struct net_pkt_xmtdone {
-       /* Result of NET_XMIT */
-       u32 xmt_done_result;
-} __packed;
-
-/*
- * RCVPOST_BUF_SIZE must be at most page_size (4096) - cache_line_size (64),
- * because dev_alloc_skb(), which is used to generate RCV_POST skbs in
- * visornic, requires that there is "overhead" in the buffer, and pads 16 bytes.
- * Use 1 full cache line size for "overhead" so that transfers are optimized.
- * IOVM requires that a buffer be represented by 1 phys_info structure
- * which can only cover page_size.
- */
-#define RCVPOST_BUF_SIZE 4032
-#define MAX_NET_RCV_CHAIN \
-       ((VISOR_ETH_MAX_MTU + ETH_HLEN + RCVPOST_BUF_SIZE - 1) \
-        / RCVPOST_BUF_SIZE)
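-
-/*
- * Worked example using the values defined above: VISOR_ETH_MAX_MTU (16384)
- * + ETH_HLEN (14) = 16398 bytes of worst-case frame data, and
- * DIV_ROUND_UP(16398, RCVPOST_BUF_SIZE = 4032) = 5, so at most 5 chained
- * receive buffers are needed to describe one incoming frame.
- */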
-
-/* rcv buf size must be large enough to include ethernet data len + ethernet
- * header len - we are choosing 2K because it is guaranteed to be describable.
- */
-struct net_pkt_rcvpost {
-       /* Physical page information for the single fragment 2K rcv buf */
-       struct phys_info frag;
-       /*
-        * Ensures that receive posts are returned to the adapter which we sent
-        * them from originally.
-        */
-       u64 unique_num;
-
-} __packed;
-
-/*
- * struct net_pkt_rcv
- * @rcv_done_len:      Length of the received data.
- * @numrcvbufs:                Number of receive buffers holding the incoming data. Guest
- *                     side MUST chain these together.
- * @*rcvbuf:           List of chained rcvbufs. Each entry is a receive buffer
- *                     provided by NET_RCV_POST. NOTE: First rcvbuf in the
- *                     chain will also be provided in net.buf.
- * @unique_num:
- * @rcvs_dropped_delta:
- *
- * The number of rcvbuf that can be chained is based on max mtu and size of each
- * rcvbuf.
- */
-struct net_pkt_rcv {
-       u32 rcv_done_len;
-       u8 numrcvbufs;
-       void *rcvbuf[MAX_NET_RCV_CHAIN];
-       u64 unique_num;
-       u32 rcvs_dropped_delta;
-} __packed;
-
-struct net_pkt_enbdis {
-       void *context;
-       /* 1 = enable, 0 = disable */
-       u16 enable;
-} __packed;
-
-struct net_pkt_macaddr {
-       void *context;
-       /* 6 bytes */
-       u8 macaddr[MAX_MACADDR_LEN];
-} __packed;
-
-/*
- * struct uiscmdrsp_net - cmd rsp packet used for VNIC network traffic.
- * @enum type:
- * @*buf:
- * @union:
- *     @struct xmt:     Used for NET_XMIT.
- *     @struct xmtdone: Used for NET_XMIT_DONE.
- *     @struct rcvpost: Used for NET_RCV_POST.
- *     @struct rcv:     Used for NET_RCV.
- *     @struct enbdis:  Used for NET_RCV_ENBDIS, NET_RCV_ENBDIS_ACK,
- *                      NET_RCV_PROMSIC, and NET_CONNECT_STATUS.
- *     @struct macaddr:
- */
-struct uiscmdrsp_net {
-       enum net_types type;
-       void *buf;
-       union {
-               struct net_pkt_xmt xmt;
-               struct net_pkt_xmtdone xmtdone;
-               struct net_pkt_rcvpost rcvpost;
-               struct net_pkt_rcv rcv;
-               struct net_pkt_enbdis enbdis;
-               struct net_pkt_macaddr macaddr;
-       };
-} __packed;
-
-/*
- * struct uiscmdrsp_scsitaskmgmt
- * @enum tasktype:      The type of task.
- * @struct vdest:       The vdisk for which this task mgmt is generated.
- * @handle:             This is a handle that the guest has saved off for its
- *                      own use. The handle value is preserved by iopart and
- *                      returned as is in the task mgmt rsp.
- * @notify_handle:      For Linux guests, this is a pointer to wait_queue_head
- *                      that a thread is waiting on to see if the taskmgmt
- *                      command has completed. When the rsp is received by
- *                      guest, the thread receiving the response uses this to
- *                      notify the thread waiting for taskmgmt command
- *                      completion. Its value is preserved by iopart and
- *                      returned as is in the task mgmt rsp.
- * @notifyresult_handle: This is a handle to the location in the guest where
- *                      the result of the taskmgmt command (result field) is
- *                      saved to when the response is handled. Its value is
- *                      preserved by iopart and returned as is in the task mgmt
- *                      rsp.
- * @result:             Result of taskmgmt command - set by IOPart.
- */
-struct uiscmdrsp_scsitaskmgmt {
-       enum task_mgmt_types tasktype;
-       struct uisscsi_dest vdest;
-       u64 handle;
-       u64 notify_handle;
-       u64 notifyresult_handle;
-       char result;
-
-#define TASK_MGMT_FAILED 0
-} __packed;
-
-/*
- * struct uiscmdrsp_disknotify - Used by uissd to send disk add/remove
- *                              notifications to Guest.
- * @add:     0-remove, 1-add.
- * @*v_hba:  Channel info to route msg.
- * @channel: SCSI Path of Disk to be added or removed.
- * @id:             SCSI Path of Disk to be added or removed.
- * @lun:     SCSI Path of Disk to be added or removed.
- *
- * Note that the vHba pointer is not used by the Client/Guest side.
- */
-struct uiscmdrsp_disknotify {
-       u8 add;
-       void *v_hba;
-       u32 channel, id, lun;
-} __packed;
-
-/* Keeping cmd and rsp info in one structure for now; cmd rsp packet for SCSI. */
-struct uiscmdrsp {
-       char cmdtype;
-       /* Describes what type of information is in the struct */
-#define CMD_SCSI_TYPE        1
-#define CMD_NET_TYPE         2
-#define CMD_SCSITASKMGMT_TYPE 3
-#define CMD_NOTIFYGUEST_TYPE  4
-       union {
-               struct uiscmdrsp_scsi scsi;
-               struct uiscmdrsp_net net;
-               struct uiscmdrsp_scsitaskmgmt scsitaskmgmt;
-               struct uiscmdrsp_disknotify disknotify;
-       };
-       /* Send the response when the cmd is done (scsi and scsitaskmgmt). */
-       void *private_data;
-       /* General Purpose Queue Link */
-       struct uiscmdrsp *next;
-       /* Pointer to the next active command */
-       struct uiscmdrsp *activeQ_next;
-       /* Pointer to the previous active command */
-       struct uiscmdrsp *activeQ_prev;
-} __packed;
-
-/* total = 28 bytes */
-struct iochannel_vhba {
-       /* 8 bytes */
-       struct vhba_wwnn wwnn;
-       /* 20 bytes */
-       struct vhba_config_max max;
-} __packed;
-
-struct iochannel_vnic {
-       /* 6 bytes */
-       u8 macaddr[6];
-       /* 4 bytes */
-       u32 num_rcv_bufs;
-       /* 4 bytes */
-       u32 mtu;
-       /* 16 bytes */
-       guid_t zone_guid;
-} __packed;
-
-/*
- * This is just the header of the IO channel. It is assumed that directly after
- * this header there is a large region of memory which contains the command and
- * response queues as specified in cmd_q and rsp_q SIGNAL_QUEUE_HEADERS.
- */
-struct visor_io_channel {
-       struct channel_header channel_header;
-       struct signal_queue_header cmd_q;
-       struct signal_queue_header rsp_q;
-       union {
-               struct iochannel_vhba vhba;
-               struct iochannel_vnic vnic;
-       } __packed;
-
-#define MAX_CLIENTSTRING_LEN 1024
-       /* client_string is NULL terminated so holds max-1 bytes */
-       u8 client_string[MAX_CLIENTSTRING_LEN];
-} __packed;
-
-/* INLINE functions for initializing and accessing I/O data channels. */
-#define SIZEOF_CMDRSP (64 * DIV_ROUND_UP(sizeof(struct uiscmdrsp), 64))
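-/* SIZEOF_CMDRSP rounds sizeof(struct uiscmdrsp) up to the next multiple of
- * 64 bytes, so command/response slots sized with it occupy a whole number of
- * 64-byte cache lines.
- */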
-
-/* Use 4K page sizes when passing page info between Guest and IOPartition. */
-#define PI_PAGE_SIZE 0x1000
-#define PI_PAGE_MASK 0x0FFF
-
-/* __IOCHANNEL_H__ */
-#endif
diff --git a/drivers/staging/unisys/visorhba/Kconfig b/drivers/staging/unisys/visorhba/Kconfig
deleted file mode 100644 (file)
index ed59ac1..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Unisys visorhba configuration
-#
-
-config UNISYS_VISORHBA
-       tristate "Unisys visorhba driver"
-       depends on UNISYSSPAR && UNISYS_VISORBUS && SCSI
-       help
-               The Unisys visorhba driver provides support for s-Par HBA
-               devices exposed on the s-Par visorbus. When a message is sent
-               to visorbus to create an HBA device, the probe function of
-               visorhba is called to create the scsi device.
-               If you say Y here, you will enable the Unisys visorhba driver.
-
diff --git a/drivers/staging/unisys/visorhba/Makefile b/drivers/staging/unisys/visorhba/Makefile
deleted file mode 100644 (file)
index b613a7d..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Unisys channel
-#
-
-obj-$(CONFIG_UNISYS_VISORHBA)  += visorhba.o
-
-visorhba-y := visorhba_main.o
-
-ccflags-y += -I $(srctree)/$(src)/../include
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
deleted file mode 100644 (file)
index 48aa18f..0000000
+++ /dev/null
@@ -1,1142 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (c) 2012 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#include <linux/debugfs.h>
-#include <linux/kthread.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/visorbus.h>
-#include <linux/xarray.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-
-#include "iochannel.h"
-
-/* The Send and Receive Buffers of the IO Queue may both be full */
-
-#define IOS_ERROR_THRESHOLD  1000
-#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
-#define VISORHBA_ERROR_COUNT 30
-
-static struct dentry *visorhba_debugfs_dir;
-
-/* GUIDS for HBA channel type supported by this driver */
-static struct visor_channeltype_descriptor visorhba_channel_types[] = {
-       /* Note that the only channel type we expect to be reported by the
-        * bus driver is the VISOR_VHBA channel.
-        */
-       { VISOR_VHBA_CHANNEL_GUID, "sparvhba", sizeof(struct channel_header),
-         VISOR_VHBA_CHANNEL_VERSIONID },
-       {}
-};
-
-MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
-MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
-
-struct visordisk_info {
-       struct scsi_device *sdev;
-       u32 valid;
-       atomic_t ios_threshold;
-       atomic_t error_count;
-       struct visordisk_info *next;
-};
-
-struct scsipending {
-       struct uiscmdrsp cmdrsp;
-       /* The Data being tracked */
-       void *sent;
-       /* Type of pointer that is being stored */
-       char cmdtype;
-};
-
-/* Each scsi_host has a host_data area that contains this struct. */
-struct visorhba_devdata {
-       struct Scsi_Host *scsihost;
-       struct visor_device *dev;
-       struct list_head dev_info_list;
-       /* Tracks the requests that have been forwarded to
-        * the IOVM and haven't returned yet
-        */
-       struct scsipending pending[MAX_PENDING_REQUESTS];
-       /* Start search for next pending free slot here */
-       unsigned int nextinsert;
-       /* lock to protect data in devdata */
-       spinlock_t privlock;
-       bool serverdown;
-       bool serverchangingstate;
-       unsigned long long acquire_failed_cnt;
-       unsigned long long interrupts_rcvd;
-       unsigned long long interrupts_notme;
-       unsigned long long interrupts_disabled;
-       u64 __iomem *flags_addr;
-       struct visordisk_info head;
-       unsigned int max_buff_len;
-       int devnum;
-       struct uiscmdrsp *cmdrsp;
-       /*
-        * allows us to pass int handles back-and-forth between us and
-        * iovm, instead of raw pointers
-        */
-       struct xarray xa;
-       struct dentry *debugfs_dir;
-       struct dentry *debugfs_info;
-};
-
-struct visorhba_devices_open {
-       struct visorhba_devdata *devdata;
-};
-
-/*
- * add_scsipending_entry - Save off io command that is pending in
- *                        Service Partition
- * @devdata: Pointer to devdata
- * @cmdtype: Specifies the type of command pending
- * @new:     The command to be saved
- *
- * Saves off the io command that is being handled by the Service
- * Partition so that it can be handled when it completes. If new is
- * NULL it is assumed the entry refers only to the cmdrsp.
- *
- * Return: Insert_location where entry was added on success,
- *        -EBUSY if it can't
- */
-static int add_scsipending_entry(struct visorhba_devdata *devdata,
-                                char cmdtype, void *new)
-{
-       unsigned long flags;
-       struct scsipending *entry;
-       int insert_location;
-
-       spin_lock_irqsave(&devdata->privlock, flags);
-       insert_location = devdata->nextinsert;
-       while (devdata->pending[insert_location].sent) {
-               insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
-               if (insert_location == (int)devdata->nextinsert) {
-                       spin_unlock_irqrestore(&devdata->privlock, flags);
-                       return -EBUSY;
-               }
-       }
-
-       entry = &devdata->pending[insert_location];
-       memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
-       entry->cmdtype = cmdtype;
-       if (new)
-               entry->sent = new;
-       /* wants to send cmdrsp */
-       else
-               entry->sent = &entry->cmdrsp;
-       devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
-       spin_unlock_irqrestore(&devdata->privlock, flags);
-
-       return insert_location;
-}
-
-/*
- * del_scsipending_ent - Removes an entry from the pending array
- * @devdata: Device holding the pending array
- * @del:     Entry to remove
- *
- * Removes the entry pointed at by del and returns it.
- *
- * Return: The scsipending entry pointed to on success, NULL on failure
- */
-static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del)
-{
-       unsigned long flags;
-       void *sent;
-
-       if (del >= MAX_PENDING_REQUESTS)
-               return NULL;
-
-       spin_lock_irqsave(&devdata->privlock, flags);
-       sent = devdata->pending[del].sent;
-       devdata->pending[del].cmdtype = 0;
-       devdata->pending[del].sent = NULL;
-       spin_unlock_irqrestore(&devdata->privlock, flags);
-
-       return sent;
-}
-
-/*
- * get_scsipending_cmdrsp - Return the cmdrsp stored in a pending entry
- * @ddata: Device holding the pending array
- * @ent:   Entry that stores the cmdrsp
- *
- * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
- * if the "sent" field is not NULL.
- *
- * Return: A pointer to the cmdrsp, NULL on failure
- */
-static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
-                                               int ent)
-{
-       if (ddata->pending[ent].sent)
-               return &ddata->pending[ent].cmdrsp;
-
-       return NULL;
-}
-
-/*
- * setup_scsitaskmgmt_handles - Stash the necessary handles so that the
- *                             completion processing logic for a taskmgmt
- *                             cmd will be able to find who to wake up
- *                             and where to stash the result
- * @xa:       The data object maintaining the pointer<-->int mappings
- * @cmdrsp:   Response from the IOVM
- * @event:    The event handle to associate with an id
- * @result:   The location to place the result of the event handle into
- */
-static int setup_scsitaskmgmt_handles(struct xarray *xa, struct uiscmdrsp *cmdrsp,
-                                     wait_queue_head_t *event, int *result)
-{
-       int ret;
-       u32 id;
-
-       /* specify the event that has to be triggered when this cmd is complete */
-       ret = xa_alloc_irq(xa, &id, event, xa_limit_32b, GFP_KERNEL);
-       if (ret)
-               return ret;
-       cmdrsp->scsitaskmgmt.notify_handle = id;
-       ret = xa_alloc_irq(xa, &id, result, xa_limit_32b, GFP_KERNEL);
-       if (ret) {
-               xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notify_handle);
-               return ret;
-       }
-       cmdrsp->scsitaskmgmt.notifyresult_handle = id;
-
-       return 0;
-}
-
-/*
- * cleanup_scsitaskmgmt_handles - Forget handles created by
- *                               setup_scsitaskmgmt_handles()
- * @xa: The data object maintaining the pointer<-->int mappings
- * @cmdrsp:   Response from the IOVM
- */
-static void cleanup_scsitaskmgmt_handles(struct xarray *xa,
-                                        struct uiscmdrsp *cmdrsp)
-{
-       xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notify_handle);
-       xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notifyresult_handle);
-}
-
-/*
- * forward_taskmgmt_command - Send taskmgmt command to the Service
- *                           Partition
- * @tasktype: Type of taskmgmt command
- * @scsidev:  Scsidev that issued command
- *
- * Create a cmdrsp packet and send it to the Service Partition
- * that will service this request.
- *
- * Return: Int representing whether command was queued successfully or not
- */
-static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
-                                   struct scsi_device *scsidev)
-{
-       struct uiscmdrsp *cmdrsp;
-       struct visorhba_devdata *devdata =
-               (struct visorhba_devdata *)scsidev->host->hostdata;
-       int notifyresult = 0xffff;
-       wait_queue_head_t notifyevent;
-       int scsicmd_id;
-       int ret;
-
-       if (devdata->serverdown || devdata->serverchangingstate)
-               return FAILED;
-
-       scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
-                                          NULL);
-       if (scsicmd_id < 0)
-               return FAILED;
-
-       cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);
-
-       init_waitqueue_head(&notifyevent);
-
-       /* issue TASK_MGMT_ABORT_TASK */
-       cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
-
-       ret = setup_scsitaskmgmt_handles(&devdata->xa, cmdrsp,
-                                        &notifyevent, &notifyresult);
-       if (ret) {
-               dev_dbg(&scsidev->sdev_gendev,
-                       "visorhba: setup_scsitaskmgmt_handles returned %d\n", ret);
-               return FAILED;
-       }
-
-       /* save destination */
-       cmdrsp->scsitaskmgmt.tasktype = tasktype;
-       cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
-       cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
-       cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
-       cmdrsp->scsitaskmgmt.handle = scsicmd_id;
-
-       dev_dbg(&scsidev->sdev_gendev,
-               "visorhba: initiating type=%d taskmgmt command\n", tasktype);
-       if (visorchannel_signalinsert(devdata->dev->visorchannel,
-                                     IOCHAN_TO_IOPART,
-                                     cmdrsp))
-               goto err_del_scsipending_ent;
-
-       /* It can take the Service Partition up to 35 seconds to complete
-        * an IO in some cases, so wait 45 seconds and error out
-        */
-       if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
-                               msecs_to_jiffies(45000)))
-               goto err_del_scsipending_ent;
-
-       dev_dbg(&scsidev->sdev_gendev,
-               "visorhba: taskmgmt type=%d success; result=0x%x\n",
-                tasktype, notifyresult);
-       cleanup_scsitaskmgmt_handles(&devdata->xa, cmdrsp);
-       return SUCCESS;
-
-err_del_scsipending_ent:
-       dev_dbg(&scsidev->sdev_gendev,
-               "visorhba: taskmgmt type=%d not executed\n", tasktype);
-       del_scsipending_ent(devdata, scsicmd_id);
-       cleanup_scsitaskmgmt_handles(&devdata->xa, cmdrsp);
-       return FAILED;
-}
-
-/*
- * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
- * @scsicmd: The scsicmd that needs to be aborted
- *
- * Return: SUCCESS if inserted, FAILED otherwise
- */
-static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
-{
-       /* issue TASK_MGMT_ABORT_TASK */
-       struct scsi_device *scsidev;
-       struct visordisk_info *vdisk;
-       int rtn;
-
-       scsidev = scsicmd->device;
-       vdisk = scsidev->hostdata;
-       if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
-               atomic_inc(&vdisk->error_count);
-       else
-               atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
-       rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
-       if (rtn == SUCCESS) {
-               scsicmd->result = DID_ABORT << 16;
-               scsi_done(scsicmd);
-       }
-       return rtn;
-}
-
-/*
- * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
- * @scsicmd: The scsicmd that needs to be aborted
- *
- * Return: SUCCESS if inserted, FAILED otherwise
- */
-static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
-{
-       /* issue TASK_MGMT_LUN_RESET */
-       struct scsi_device *scsidev;
-       struct visordisk_info *vdisk;
-       int rtn;
-
-       scsidev = scsicmd->device;
-       vdisk = scsidev->hostdata;
-       if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
-               atomic_inc(&vdisk->error_count);
-       else
-               atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
-       rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
-       if (rtn == SUCCESS) {
-               scsicmd->result = DID_RESET << 16;
-               scsi_done(scsicmd);
-       }
-       return rtn;
-}
-
-/*
- * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
- *                             target on the bus
- * @scsicmd: The scsicmd that needs to be aborted
- *
- * Return: SUCCESS if inserted, FAILED otherwise
- */
-static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
-{
-       struct scsi_device *scsidev;
-       struct visordisk_info *vdisk;
-       int rtn;
-
-       scsidev = scsicmd->device;
-       shost_for_each_device(scsidev, scsidev->host) {
-               vdisk = scsidev->hostdata;
-               if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
-                       atomic_inc(&vdisk->error_count);
-               else
-                       atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
-       }
-       rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
-       if (rtn == SUCCESS) {
-               scsicmd->result = DID_RESET << 16;
-               scsi_done(scsicmd);
-       }
-       return rtn;
-}
-
-/*
- * visorhba_host_reset_handler - Not supported
- * @scsicmd: The scsicmd that needs to be aborted
- *
- * Return: Not supported, return SUCCESS
- */
-static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
-{
-       /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
-       return SUCCESS;
-}
-
-/*
- * visorhba_get_info - Get information about SCSI device
- * @shp: Scsi host that is requesting information
- *
- * Return: String with visorhba information
- */
-static const char *visorhba_get_info(struct Scsi_Host *shp)
-{
-       /* Return version string */
-       return "visorhba";
-}
-
-/*
- * dma_data_dir_linux_to_spar - convert dma_data_direction value to
- *                             Unisys-specific equivalent
- * @d: dma direction value to convert
- *
- * Returns the Unisys-specific dma direction value corresponding to @d
- */
-static u32 dma_data_dir_linux_to_spar(enum dma_data_direction d)
-{
-       switch (d) {
-       case DMA_BIDIRECTIONAL:
-               return UIS_DMA_BIDIRECTIONAL;
-       case DMA_TO_DEVICE:
-               return UIS_DMA_TO_DEVICE;
-       case DMA_FROM_DEVICE:
-               return UIS_DMA_FROM_DEVICE;
-       case DMA_NONE:
-               return UIS_DMA_NONE;
-       default:
-               return UIS_DMA_NONE;
-       }
-}
-
-/*
- * visorhba_queue_command_lck - Queues command to the Service Partition
- * @scsicmd:           Command to be queued
- * @visorhba_cmnd_done: Done command to call when scsicmd is returned
- *
- * Queues the scsicmd to the Service Partition after converting it to a
- * uiscmdrsp structure.
- *
- * Return: 0 if successfully queued to the Service Partition, otherwise
- *        error code
- */
-static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd)
-{
-       void (*visorhba_cmnd_done)(struct scsi_cmnd *) = scsi_done;
-       struct uiscmdrsp *cmdrsp;
-       struct scsi_device *scsidev = scsicmd->device;
-       int insert_location;
-       unsigned char *cdb = scsicmd->cmnd;
-       struct Scsi_Host *scsihost = scsidev->host;
-       unsigned int i;
-       struct visorhba_devdata *devdata =
-               (struct visorhba_devdata *)scsihost->hostdata;
-       struct scatterlist *sg = NULL;
-       struct scatterlist *sglist = NULL;
-
-       if (devdata->serverdown || devdata->serverchangingstate)
-               return SCSI_MLQUEUE_DEVICE_BUSY;
-
-       insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
-                                               (void *)scsicmd);
-       if (insert_location < 0)
-               return SCSI_MLQUEUE_DEVICE_BUSY;
-
-       cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
-       cmdrsp->cmdtype = CMD_SCSI_TYPE;
-       /* save the pending insertion location. Deletion from pending
-        * will return the scsicmd pointer for completion
-        */
-       cmdrsp->scsi.handle = insert_location;
-
-       WARN_ON_ONCE(visorhba_cmnd_done != scsi_done);
-       /* save destination */
-       cmdrsp->scsi.vdest.channel = scsidev->channel;
-       cmdrsp->scsi.vdest.id = scsidev->id;
-       cmdrsp->scsi.vdest.lun = scsidev->lun;
-       /* save datadir */
-       cmdrsp->scsi.data_dir =
-               dma_data_dir_linux_to_spar(scsicmd->sc_data_direction);
-       memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
-       cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
-
-       /* keep track of the max buffer length so far. */
-       if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
-               devdata->max_buff_len = cmdrsp->scsi.bufflen;
-
-       if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
-               goto err_del_scsipending_ent;
-
-       /* convert buffer to phys information  */
-       /* buffer is scatterlist - copy it out */
-       sglist = scsi_sglist(scsicmd);
-
-       for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
-               cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
-               cmdrsp->scsi.gpi_list[i].length = sg->length;
-       }
-       cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
-
-       if (visorchannel_signalinsert(devdata->dev->visorchannel,
-                                     IOCHAN_TO_IOPART,
-                                     cmdrsp))
-               /* queue must be full and we aren't going to wait */
-               goto err_del_scsipending_ent;
-
-       return 0;
-
-err_del_scsipending_ent:
-       del_scsipending_ent(devdata, insert_location);
-       return SCSI_MLQUEUE_DEVICE_BUSY;
-}
-
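-/*
- * When the SCSI midlayer provides DEF_SCSI_QCMD, use it to generate the
- * visorhba_queue_command() wrapper around the _lck variant above; otherwise
- * the _lck function is used directly as the queuecommand entry point.
- */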
-#ifdef DEF_SCSI_QCMD
-static DEF_SCSI_QCMD(visorhba_queue_command)
-#else
-#define visorhba_queue_command visorhba_queue_command_lck
-#endif
-
-/*
- * visorhba_slave_alloc - Called when new disk is discovered
- * @scsidev: New disk
- *
- * Create a new visordisk_info structure and add it to our
- * list of vdisks.
- *
- * Return: 0 on success, -ENOMEM on failure.
- */
-static int visorhba_slave_alloc(struct scsi_device *scsidev)
-{
-       /* this is called by the midlayer before scanning for new devices --
-        * LLD can alloc any struct & do init if needed.
-        */
-       struct visordisk_info *vdisk;
-       struct visorhba_devdata *devdata;
-       struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
-
-       /* already allocated return success */
-       if (scsidev->hostdata)
-               return 0;
-
-       /* even though we errored, treat as success */
-       devdata = (struct visorhba_devdata *)scsihost->hostdata;
-       if (!devdata)
-               return 0;
-
-       vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC);
-       if (!vdisk)
-               return -ENOMEM;
-
-       vdisk->sdev = scsidev;
-       scsidev->hostdata = vdisk;
-       return 0;
-}
-
-/*
- * visorhba_slave_destroy - Disk is going away, clean up resources.
- * @scsidev: Scsi device to destroy
- */
-static void visorhba_slave_destroy(struct scsi_device *scsidev)
-{
-       /* midlevel calls this after device has been quiesced and
-        * before it is to be deleted.
-        */
-       struct visordisk_info *vdisk;
-
-       vdisk = scsidev->hostdata;
-       scsidev->hostdata = NULL;
-       kfree(vdisk);
-}
-
-static struct scsi_host_template visorhba_driver_template = {
-       .name = "Unisys Visor HBA",
-       .info = visorhba_get_info,
-       .queuecommand = visorhba_queue_command,
-       .eh_abort_handler = visorhba_abort_handler,
-       .eh_device_reset_handler = visorhba_device_reset_handler,
-       .eh_bus_reset_handler = visorhba_bus_reset_handler,
-       .eh_host_reset_handler = visorhba_host_reset_handler,
-#define visorhba_MAX_CMNDS 128
-       .can_queue = visorhba_MAX_CMNDS,
-       .sg_tablesize = 64,
-       .this_id = -1,
-       .slave_alloc = visorhba_slave_alloc,
-       .slave_destroy = visorhba_slave_destroy,
-};
-
-/*
- * info_debugfs_show - Debugfs interface to dump visorhba states
- * @seq: The sequence file to write information to
- * @v:   Unused, but needed for use with seq file single_open invocation
- *
- * Presents a file in the debugfs tree named: /visorhba/vbus<x>:dev<y>/info.
- *
- * Return: SUCCESS
- */
-static int info_debugfs_show(struct seq_file *seq, void *v)
-{
-       struct visorhba_devdata *devdata = seq->private;
-
-       seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
-       seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
-       seq_printf(seq, "interrupts_disabled = %llu\n",
-                  devdata->interrupts_disabled);
-       seq_printf(seq, "interrupts_notme = %llu\n",
-                  devdata->interrupts_notme);
-       seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
-       if (devdata->flags_addr) {
-               u64 phys_flags_addr =
-                       virt_to_phys((__force  void *)devdata->flags_addr);
-               seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
-                          phys_flags_addr);
-               seq_printf(seq, "FeatureFlags = %llu\n",
-                          (u64)readq(devdata->flags_addr));
-       }
-       seq_printf(seq, "acquire_failed_cnt = %llu\n",
-                  devdata->acquire_failed_cnt);
-
-       return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(info_debugfs);
-
-/*
- * complete_taskmgmt_command - Complete task management
- * @xa:       The data object maintaining the pointer<-->int mappings
- * @cmdrsp:   Response from the IOVM
- * @result:   The result of the task management command
- *
- * Service Partition returned the result of the task management
- * command. Wake up anyone waiting for it.
- */
-static void complete_taskmgmt_command(struct xarray *xa,
-                                     struct uiscmdrsp *cmdrsp, int result)
-{
-       wait_queue_head_t *wq =
-               xa_load(xa, cmdrsp->scsitaskmgmt.notify_handle);
-       int *scsi_result_ptr =
-               xa_load(xa, cmdrsp->scsitaskmgmt.notifyresult_handle);
-       if (unlikely(!(wq && scsi_result_ptr))) {
-               pr_err("visorhba: no completion context; cmd will time out\n");
-               return;
-       }
-
-       /* copy the result of the taskmgmt and
-        * wake up the error handler that is waiting for this
-        */
-       pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
-       *scsi_result_ptr = result;
-       wake_up_all(wq);
-}
-
-/*
- * visorhba_serverdown_complete - Called when we are done cleaning up
- *                               from serverdown
- * @devdata: Visorhba instance on which to complete serverdown
- *
- * Called when we are done cleaning up from serverdown, stop processing
- * queue, fail pending IOs.
- */
-static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
-{
-       int i;
-       struct scsipending *pendingdel = NULL;
-       struct scsi_cmnd *scsicmd = NULL;
-       struct uiscmdrsp *cmdrsp;
-       unsigned long flags;
-
-       /* Stop using the IOVM response queue (queue should be drained
-        * by the end)
-        */
-       visorbus_disable_channel_interrupts(devdata->dev);
-
-       /* Fail commands that weren't completed */
-       spin_lock_irqsave(&devdata->privlock, flags);
-       for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
-               pendingdel = &devdata->pending[i];
-               switch (pendingdel->cmdtype) {
-               case CMD_SCSI_TYPE:
-                       scsicmd = pendingdel->sent;
-                       scsicmd->result = DID_RESET << 16;
-                       scsi_done(scsicmd);
-                       break;
-               case CMD_SCSITASKMGMT_TYPE:
-                       cmdrsp = pendingdel->sent;
-                       complete_taskmgmt_command(&devdata->xa, cmdrsp,
-                                                 TASK_MGMT_FAILED);
-                       break;
-               default:
-                       break;
-               }
-               pendingdel->cmdtype = 0;
-               pendingdel->sent = NULL;
-       }
-       spin_unlock_irqrestore(&devdata->privlock, flags);
-
-       devdata->serverdown = true;
-       devdata->serverchangingstate = false;
-}
-
-/*
- * visorhba_serverdown - Got notified that the IOVM is down
- * @devdata: Visorhba that is being serviced by downed IOVM
- *
- * Something happened to the IOVM, return immediately and
- * schedule cleanup work.
- *
- * Return: 0 on success, -EINVAL on failure
- */
-static int visorhba_serverdown(struct visorhba_devdata *devdata)
-{
-       if (!devdata->serverdown && !devdata->serverchangingstate) {
-               devdata->serverchangingstate = true;
-               visorhba_serverdown_complete(devdata);
-       } else if (devdata->serverchangingstate) {
-               return -EINVAL;
-       }
-       return 0;
-}
-
-/*
- * do_scsi_linuxstat - Scsi command returned linuxstat
- * @cmdrsp:  Response from IOVM
- * @scsicmd: Command issued
- *
- * Don't log errors for disk-not-present inquiries.
- */
-static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
-                             struct scsi_cmnd *scsicmd)
-{
-       struct visordisk_info *vdisk;
-       struct scsi_device *scsidev;
-
-       scsidev = scsicmd->device;
-       memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
-
-       /* Do not log errors for disk-not-present inquiries */
-       if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
-           (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
-           cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT)
-               return;
-       /* Okay see what our error_count is here.... */
-       vdisk = scsidev->hostdata;
-       if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
-               atomic_inc(&vdisk->error_count);
-               atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
-       }
-}
-
-static int set_no_disk_inquiry_result(unsigned char *buf, size_t len,
-                                     bool is_lun0)
-{
-       if (len < NO_DISK_INQUIRY_RESULT_LEN)
-               return -EINVAL;
-       memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
-       buf[2] = SCSI_SPC2_VER;
-       if (is_lun0) {
-               buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
-               buf[3] = DEV_HISUPPORT;
-       } else {
-               buf[0] = DEV_NOT_CAPABLE;
-       }
-       buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
-       strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
-       return 0;
-}
-
-/*
- * do_scsi_nolinuxstat - Scsi command didn't have linuxstat
- * @cmdrsp:  Response from IOVM
- * @scsicmd: Command issued
- *
- * Handle response when no linuxstat was returned.
- */
-static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
-                               struct scsi_cmnd *scsicmd)
-{
-       struct scsi_device *scsidev;
-       unsigned char *buf;
-       struct scatterlist *sg;
-       unsigned int i;
-       char *this_page;
-       char *this_page_orig;
-       int bufind = 0;
-       struct visordisk_info *vdisk;
-
-       scsidev = scsicmd->device;
-       if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
-           cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN) {
-               if (cmdrsp->scsi.no_disk_result == 0)
-                       return;
-
-               buf = kzalloc(36, GFP_KERNEL);
-               if (!buf)
-                       return;
-
-               /* Linux scsi code wants a device at Lun 0
-                * to issue report luns, but we don't want
-                * a disk there so we'll present a processor
-                * there.
-                */
-               set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
-                                          scsidev->lun == 0);
-
-               if (scsi_sg_count(scsicmd) == 0) {
-                       memcpy(scsi_sglist(scsicmd), buf,
-                              cmdrsp->scsi.bufflen);
-                       kfree(buf);
-                       return;
-               }
-
-               scsi_for_each_sg(scsicmd, sg, scsi_sg_count(scsicmd), i) {
-                       this_page_orig = kmap_atomic(sg_page(sg));
-                       this_page = (void *)((unsigned long)this_page_orig |
-                                            sg->offset);
-                       memcpy(this_page, buf + bufind, sg->length);
-                       kunmap_atomic(this_page_orig);
-               }
-               kfree(buf);
-       } else {
-               vdisk = scsidev->hostdata;
-               if (atomic_read(&vdisk->ios_threshold) > 0) {
-                       atomic_dec(&vdisk->ios_threshold);
-                       if (atomic_read(&vdisk->ios_threshold) == 0)
-                               atomic_set(&vdisk->error_count, 0);
-               }
-       }
-}
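
For context, set_no_disk_inquiry_result() and the LUN-0 special case above lean on the standard SPC INQUIRY layout, where byte 0 packs a peripheral qualifier into bits 7..5 and a device type into bits 4..0. Below is a minimal, self-contained sketch of that encoding; the helper name and the example values are illustrative only and are not taken from this driver's macros.

#include <stdint.h>

/* SPC INQUIRY data, byte 0: peripheral qualifier (bits 7..5) | device type (bits 4..0). */
static uint8_t inquiry_peripheral_byte(uint8_t qualifier, uint8_t dev_type)
{
        return (uint8_t)(((qualifier & 0x7) << 5) | (dev_type & 0x1f));
}

/* Illustrative results:
 *   inquiry_peripheral_byte(0x1, 0x00) == 0x20   "capable, not connected", disk type
 *   inquiry_peripheral_byte(0x3, 0x1f) == 0x7f   "not capable", no device type
 */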
-
-/*
- * complete_scsi_command - Complete a scsi command
- * @uiscmdrsp: Response from Service Partition
- * @scsicmd:   The scsi command
- *
- * Response was returned by the Service Partition. Finish it and send
- * completion to the scsi midlayer.
- */
-static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
-                                 struct scsi_cmnd *scsicmd)
-{
-       /* take what we need out of cmdrsp and complete the scsicmd */
-       scsicmd->result = cmdrsp->scsi.linuxstat;
-       if (cmdrsp->scsi.linuxstat)
-               do_scsi_linuxstat(cmdrsp, scsicmd);
-       else
-               do_scsi_nolinuxstat(cmdrsp, scsicmd);
-
-       scsi_done(scsicmd);
-}
-
-/*
- * drain_queue - Pull responses out of iochannel
- * @cmdrsp:  Response from the IOSP
- * @devdata: Device that owns this iochannel
- *
- * Pulls responses out of the iochannel and processes them.
- */
-static void drain_queue(struct uiscmdrsp *cmdrsp,
-                       struct visorhba_devdata *devdata)
-{
-       struct scsi_cmnd *scsicmd;
-
-       while (1) {
-               /* queue empty */
-               if (visorchannel_signalremove(devdata->dev->visorchannel,
-                                             IOCHAN_FROM_IOPART,
-                                             cmdrsp))
-                       break;
-               if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
-                       /* scsicmd location is returned by the
-                        * deletion
-                        */
-                       scsicmd = del_scsipending_ent(devdata,
-                                                     cmdrsp->scsi.handle);
-                       if (!scsicmd)
-                               break;
-                       /* complete the orig cmd */
-                       complete_scsi_command(cmdrsp, scsicmd);
-               } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
-                       if (!del_scsipending_ent(devdata,
-                                                cmdrsp->scsitaskmgmt.handle))
-                               break;
-                       complete_taskmgmt_command(&devdata->xa, cmdrsp,
-                                                 cmdrsp->scsitaskmgmt.result);
-               } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
-                       dev_err_once(&devdata->dev->device,
-                                    "ignoring unsupported NOTIFYGUEST\n");
-               /* cmdrsp is now available for re-use */
-       }
-}
-
-/*
- * This is used only when this driver is active as an hba driver in the
- * client guest partition.  It is called periodically so we can obtain
- * and process command responses from the IO Service Partition.
- */
-static void visorhba_channel_interrupt(struct visor_device *dev)
-{
-       struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
-
-       if (!devdata)
-               return;
-
-       drain_queue(devdata->cmdrsp, devdata);
-}
-
-/*
- * visorhba_pause - Function to handle visorbus pause messages
- * @dev:          Device that is pausing
- * @complete_func: Function to call when finished
- *
- * Something has happened to the IO Service Partition that is
- * handling this device. Quiet this device and reset commands
- * so that the Service Partition can be corrected.
- *
- * Return: 0
- */
-static int visorhba_pause(struct visor_device *dev,
-                         visorbus_state_complete_func complete_func)
-{
-       struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
-
-       visorhba_serverdown(devdata);
-       complete_func(dev, 0);
-       return 0;
-}
-
-/*
- * visorhba_resume - Function called when the IO Service Partition is back
- * @dev:          Device that is resuming
- * @complete_func: Function to call when finished
- *
- * Yay! The IO Service Partition is back and the channel has been wiped,
- * so let's re-establish the connection and start processing responses.
- *
- * Return: 0 on success, -EINVAL on failure
- */
-static int visorhba_resume(struct visor_device *dev,
-                          visorbus_state_complete_func complete_func)
-{
-       struct visorhba_devdata *devdata;
-
-       devdata = dev_get_drvdata(&dev->device);
-       if (!devdata)
-               return -EINVAL;
-
-       if (devdata->serverdown && !devdata->serverchangingstate)
-               devdata->serverchangingstate = true;
-
-       visorbus_enable_channel_interrupts(dev);
-       devdata->serverdown = false;
-       devdata->serverchangingstate = false;
-
-       return 0;
-}
-
-/*
- * visorhba_probe - Device has been discovered; do acquire
- * @dev: visor_device that was discovered
- *
- * A new HBA was discovered; perform its initial setup.
- *
- * Return: 0 on success, otherwise error code
- */
-static int visorhba_probe(struct visor_device *dev)
-{
-       struct Scsi_Host *scsihost;
-       struct vhba_config_max max;
-       struct visorhba_devdata *devdata = NULL;
-       int err, channel_offset;
-       u64 features;
-
-       scsihost = scsi_host_alloc(&visorhba_driver_template,
-                                  sizeof(*devdata));
-       if (!scsihost)
-               return -ENODEV;
-
-       channel_offset = offsetof(struct visor_io_channel, vhba.max);
-       err = visorbus_read_channel(dev, channel_offset, &max,
-                                   sizeof(struct vhba_config_max));
-       if (err < 0)
-               goto err_scsi_host_put;
-
-       scsihost->max_id = (unsigned int)max.max_id;
-       scsihost->max_lun = (unsigned int)max.max_lun;
-       scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
-       scsihost->max_sectors =
-           (unsigned short)(max.max_io_size >> 9);
-       scsihost->sg_tablesize =
-           (unsigned short)(max.max_io_size / PAGE_SIZE);
-       if (scsihost->sg_tablesize > MAX_PHYS_INFO)
-               scsihost->sg_tablesize = MAX_PHYS_INFO;
-       err = scsi_add_host(scsihost, &dev->device);
-       if (err < 0)
-               goto err_scsi_host_put;
-
-       devdata = (struct visorhba_devdata *)scsihost->hostdata;
-       devdata->dev = dev;
-       dev_set_drvdata(&dev->device, devdata);
-
-       devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
-                                                 visorhba_debugfs_dir);
-       if (!devdata->debugfs_dir) {
-               err = -ENOMEM;
-               goto err_scsi_remove_host;
-       }
-       devdata->debugfs_info =
-               debugfs_create_file("info", 0440,
-                                   devdata->debugfs_dir, devdata,
-                                   &info_debugfs_fops);
-       if (!devdata->debugfs_info) {
-               err = -ENOMEM;
-               goto err_debugfs_dir;
-       }
-
-       spin_lock_init(&devdata->privlock);
-       devdata->serverdown = false;
-       devdata->serverchangingstate = false;
-       devdata->scsihost = scsihost;
-
-       channel_offset = offsetof(struct visor_io_channel,
-                                 channel_header.features);
-       err = visorbus_read_channel(dev, channel_offset, &features, 8);
-       if (err)
-               goto err_debugfs_info;
-       features |= VISOR_CHANNEL_IS_POLLING;
-       err = visorbus_write_channel(dev, channel_offset, &features, 8);
-       if (err)
-               goto err_debugfs_info;
-
-       xa_init(&devdata->xa);
-
-       devdata->cmdrsp = kmalloc(sizeof(*devdata->cmdrsp), GFP_ATOMIC);
-       visorbus_enable_channel_interrupts(dev);
-
-       scsi_scan_host(scsihost);
-
-       return 0;
-
-err_debugfs_info:
-       debugfs_remove(devdata->debugfs_info);
-
-err_debugfs_dir:
-       debugfs_remove_recursive(devdata->debugfs_dir);
-
-err_scsi_remove_host:
-       scsi_remove_host(scsihost);
-
-err_scsi_host_put:
-       scsi_host_put(scsihost);
-       return err;
-}
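
The shift and divide in visorhba_probe() simply convert the channel's max_io_size (in bytes) into the SCSI host limits: 512-byte sectors for max_sectors and page-sized scatter-gather entries for sg_tablesize. A small standalone sketch of the same arithmetic, using an assumed 1 MiB max_io_size and 4 KiB pages:

#include <stdio.h>

int main(void)
{
        unsigned long max_io_size = 1024 * 1024; /* assumed example value, 1 MiB */
        unsigned long page_size = 4096;          /* assumed example value, 4 KiB */

        /* bytes -> 512-byte sectors, i.e. max_io_size >> 9 */
        printf("max_sectors  = %lu\n", max_io_size >> 9);        /* 2048 */
        /* bytes -> page-sized scatter-gather entries */
        printf("sg_tablesize = %lu\n", max_io_size / page_size); /* 256 */
        return 0;
}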
-
-/*
- * visorhba_remove - Remove a visorhba device
- * @dev: Device to remove
- *
- * Removes the visorhba device.
- */
-static void visorhba_remove(struct visor_device *dev)
-{
-       struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
-       struct Scsi_Host *scsihost = NULL;
-
-       if (!devdata)
-               return;
-
-       scsihost = devdata->scsihost;
-       kfree(devdata->cmdrsp);
-       visorbus_disable_channel_interrupts(dev);
-       scsi_remove_host(scsihost);
-       scsi_host_put(scsihost);
-
-       dev_set_drvdata(&dev->device, NULL);
-       debugfs_remove(devdata->debugfs_info);
-       debugfs_remove_recursive(devdata->debugfs_dir);
-}
-
-/* This is used to tell the visorbus driver which types of visor devices
- * we support, and what functions to call when a visor device that we support
- * is attached or removed.
- */
-static struct visor_driver visorhba_driver = {
-       .name = "visorhba",
-       .owner = THIS_MODULE,
-       .channel_types = visorhba_channel_types,
-       .probe = visorhba_probe,
-       .remove = visorhba_remove,
-       .pause = visorhba_pause,
-       .resume = visorhba_resume,
-       .channel_interrupt = visorhba_channel_interrupt,
-};
-
-/*
- * visorhba_init - Driver init routine
- *
- * Initialize the visorhba driver and register it with visorbus
- * to handle s-Par virtual host bus adapters.
- *
- * Return: 0 on success, error code otherwise
- */
-static int visorhba_init(void)
-{
-       int rc;
-
-       visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
-       if (!visorhba_debugfs_dir)
-               return -ENOMEM;
-
-       rc = visorbus_register_visor_driver(&visorhba_driver);
-       if (rc)
-               goto cleanup_debugfs;
-
-       return 0;
-
-cleanup_debugfs:
-       debugfs_remove_recursive(visorhba_debugfs_dir);
-
-       return rc;
-}
-
-/*
- * visorhba_exit - Driver exit routine
- *
- * Unregister driver from the bus and free up memory.
- */
-static void visorhba_exit(void)
-{
-       visorbus_unregister_visor_driver(&visorhba_driver);
-       debugfs_remove_recursive(visorhba_debugfs_dir);
-}
-
-module_init(visorhba_init);
-module_exit(visorhba_exit);
-
-MODULE_AUTHOR("Unisys");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");
diff --git a/drivers/staging/unisys/visorinput/Kconfig b/drivers/staging/unisys/visorinput/Kconfig
deleted file mode 100644 (file)
index 5f03639..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Unisys visorinput configuration
-#
-
-config UNISYS_VISORINPUT
-       tristate "Unisys visorinput driver"
-       depends on UNISYSSPAR && UNISYS_VISORBUS && INPUT
-       help
-               The Unisys s-Par visorinput driver provides a virtualized system
-               console (keyboard and mouse) that is accessible through the
-               s-Par firmware's user interface. s-Par provides video using the EFI
-               GOP protocol, so if this driver is not present, the Linux guest should
-               still boot with visible output in the partition desktop, but keyboard
-               and mouse interaction will not be available.
-
diff --git a/drivers/staging/unisys/visorinput/Makefile b/drivers/staging/unisys/visorinput/Makefile
deleted file mode 100644 (file)
index 68ced7c..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Unisys visorinput
-#
-
-obj-$(CONFIG_UNISYS_VISORINPUT)        += visorinput.o
-
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
deleted file mode 100644 (file)
index dffa71a..0000000
+++ /dev/null
@@ -1,788 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2011 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-/*
- * This driver lives in a generic guest Linux partition, and registers to
- * receive keyboard and mouse channels from the visorbus driver.  It reads
- * inputs from such channels, and delivers them to the Linux OS in the
- * standard way Linux expects for input drivers.
- */
-
-#include <linux/fb.h>
-#include <linux/input.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/uuid.h>
-#include <linux/visorbus.h>
-
-/* These defines identify mouse and keyboard activity which is specified by the
- * firmware to the host using the cmsimpleinput protocol.  @ingroup coretypes
- */
-/* only motion; arg1=x, arg2=y */
-#define INPUTACTION_XY_MOTION 1
-
-/* arg1: 1=left,2=center,3=right */
-#define INPUTACTION_MOUSE_BUTTON_DOWN 2
-#define INPUTACTION_MOUSE_BUTTON_UP 3
-#define INPUTACTION_MOUSE_BUTTON_CLICK 4
-#define INPUTACTION_MOUSE_BUTTON_DCLICK 5
-
-/* arg1: wheel rotation away from/toward user */
-#define INPUTACTION_WHEEL_ROTATE_AWAY 6
-#define INPUTACTION_WHEEL_ROTATE_TOWARD 7
-
-/* arg1: scancode, as follows: If arg1 <= 0xff, it's a 1-byte scancode and arg1
- *      is that scancode. If arg1 > 0xff, it's a 2-byte scancode, with the 1st
- *      byte in the low 8 bits, and the 2nd byte in the high 8 bits.
- *      E.g., the right ALT key would appear as x'38e0'.
- */
-#define INPUTACTION_KEY_DOWN 64
-#define INPUTACTION_KEY_UP 65
-#define INPUTACTION_KEY_DOWN_UP 67
-
-/* arg1: scancode (in same format as inputaction_keyDown); MUST refer to one of
- *      the locking keys, like capslock, numlock, or scrolllock.
- * arg2: 1 iff locking key should be in the LOCKED position (e.g., light is ON)
- */
-#define INPUTACTION_SET_LOCKING_KEY_STATE 66
-
-/* Keyboard channel {c73416d0-b0b8-44af-b304-9d2ae99f1b3d} */
-#define VISOR_KEYBOARD_CHANNEL_GUID \
-       GUID_INIT(0xc73416d0, 0xb0b8, 0x44af, \
-                 0xb3, 0x4, 0x9d, 0x2a, 0xe9, 0x9f, 0x1b, 0x3d)
-#define VISOR_KEYBOARD_CHANNEL_GUID_STR "c73416d0-b0b8-44af-b304-9d2ae99f1b3d"
-
-/* Mouse channel {addf07d4-94a9-46e2-81c3-61abcdbdbd87} */
-#define VISOR_MOUSE_CHANNEL_GUID \
-       GUID_INIT(0xaddf07d4, 0x94a9, 0x46e2, \
-                 0x81, 0xc3, 0x61, 0xab, 0xcd, 0xbd, 0xbd, 0x87)
-#define VISOR_MOUSE_CHANNEL_GUID_STR "addf07d4-94a9-46e2-81c3-61abcdbdbd87"
-
-#define PIXELS_ACROSS_DEFAULT 1024
-#define PIXELS_DOWN_DEFAULT   768
-#define KEYCODE_TABLE_BYTES   256
-
-struct visor_inputactivity {
-       u16 action;
-       u16 arg1;
-       u16 arg2;
-       u16 arg3;
-} __packed;
-
-struct visor_inputreport {
-       u64 seq_no;
-       struct visor_inputactivity activity;
-} __packed;
-
-/* header of keyboard/mouse channels */
-struct visor_input_channel_data {
-       u32 n_input_reports;
-       union {
-               struct {
-                       u16 x_res;
-                       u16 y_res;
-               } mouse;
-               struct {
-                       u32 flags;
-               } keyboard;
-       };
-} __packed;
-
-enum visorinput_dev_type {
-       visorinput_keyboard,
-       visorinput_mouse,
-};
-
-/*
- * This is the private data that we store for each device. A pointer to this
- * struct is maintained via dev_get_drvdata() / dev_set_drvdata() for each
- * struct device.
- */
-struct visorinput_devdata {
-       struct visor_device *dev;
-       /* lock for dev */
-       struct mutex lock_visor_dev;
-       struct input_dev *visorinput_dev;
-       bool paused;
-       bool interrupts_enabled;
-       /* size of following array */
-       unsigned int keycode_table_bytes;
-       /* for keyboard devices: visorkbd_keycode[] + visorkbd_ext_keycode[] */
-       unsigned char keycode_table[];
-};
-
-static const guid_t visor_keyboard_channel_guid = VISOR_KEYBOARD_CHANNEL_GUID;
-static const guid_t visor_mouse_channel_guid = VISOR_MOUSE_CHANNEL_GUID;
-
-/*
- * Borrowed from drivers/input/keyboard/atakbd.c
- * This maps 1-byte scancodes to keycodes.
- */
-static const unsigned char visorkbd_keycode[KEYCODE_TABLE_BYTES] = {
-       /* American layout */
-       [0] = KEY_GRAVE,
-       [1] = KEY_ESC,
-       [2] = KEY_1,
-       [3] = KEY_2,
-       [4] = KEY_3,
-       [5] = KEY_4,
-       [6] = KEY_5,
-       [7] = KEY_6,
-       [8] = KEY_7,
-       [9] = KEY_8,
-       [10] = KEY_9,
-       [11] = KEY_0,
-       [12] = KEY_MINUS,
-       [13] = KEY_EQUAL,
-       [14] = KEY_BACKSPACE,
-       [15] = KEY_TAB,
-       [16] = KEY_Q,
-       [17] = KEY_W,
-       [18] = KEY_E,
-       [19] = KEY_R,
-       [20] = KEY_T,
-       [21] = KEY_Y,
-       [22] = KEY_U,
-       [23] = KEY_I,
-       [24] = KEY_O,
-       [25] = KEY_P,
-       [26] = KEY_LEFTBRACE,
-       [27] = KEY_RIGHTBRACE,
-       [28] = KEY_ENTER,
-       [29] = KEY_LEFTCTRL,
-       [30] = KEY_A,
-       [31] = KEY_S,
-       [32] = KEY_D,
-       [33] = KEY_F,
-       [34] = KEY_G,
-       [35] = KEY_H,
-       [36] = KEY_J,
-       [37] = KEY_K,
-       [38] = KEY_L,
-       [39] = KEY_SEMICOLON,
-       [40] = KEY_APOSTROPHE,
-       [41] = KEY_GRAVE,
-       [42] = KEY_LEFTSHIFT,
-       [43] = KEY_BACKSLASH,
-       [44] = KEY_Z,
-       [45] = KEY_X,
-       [46] = KEY_C,
-       [47] = KEY_V,
-       [48] = KEY_B,
-       [49] = KEY_N,
-       [50] = KEY_M,
-       [51] = KEY_COMMA,
-       [52] = KEY_DOT,
-       [53] = KEY_SLASH,
-       [54] = KEY_RIGHTSHIFT,
-       [55] = KEY_KPASTERISK,
-       [56] = KEY_LEFTALT,
-       [57] = KEY_SPACE,
-       [58] = KEY_CAPSLOCK,
-       [59] = KEY_F1,
-       [60] = KEY_F2,
-       [61] = KEY_F3,
-       [62] = KEY_F4,
-       [63] = KEY_F5,
-       [64] = KEY_F6,
-       [65] = KEY_F7,
-       [66] = KEY_F8,
-       [67] = KEY_F9,
-       [68] = KEY_F10,
-       [69] = KEY_NUMLOCK,
-       [70] = KEY_SCROLLLOCK,
-       [71] = KEY_KP7,
-       [72] = KEY_KP8,
-       [73] = KEY_KP9,
-       [74] = KEY_KPMINUS,
-       [75] = KEY_KP4,
-       [76] = KEY_KP5,
-       [77] = KEY_KP6,
-       [78] = KEY_KPPLUS,
-       [79] = KEY_KP1,
-       [80] = KEY_KP2,
-       [81] = KEY_KP3,
-       [82] = KEY_KP0,
-       [83] = KEY_KPDOT,
-       /* enables UK backslash+pipe key and FR lessthan+greaterthan key */
-       [86] = KEY_102ND,
-       [87] = KEY_F11,
-       [88] = KEY_F12,
-       [90] = KEY_KPLEFTPAREN,
-       [91] = KEY_KPRIGHTPAREN,
-       [92] = KEY_KPASTERISK,
-       [93] = KEY_KPASTERISK,
-       [94] = KEY_KPPLUS,
-       [95] = KEY_HELP,
-       [96] = KEY_KPENTER,
-       [97] = KEY_RIGHTCTRL,
-       [98] = KEY_KPSLASH,
-       [99] = KEY_KPLEFTPAREN,
-       [100] = KEY_KPRIGHTPAREN,
-       [101] = KEY_KPSLASH,
-       [102] = KEY_HOME,
-       [103] = KEY_UP,
-       [104] = KEY_PAGEUP,
-       [105] = KEY_LEFT,
-       [106] = KEY_RIGHT,
-       [107] = KEY_END,
-       [108] = KEY_DOWN,
-       [109] = KEY_PAGEDOWN,
-       [110] = KEY_INSERT,
-       [111] = KEY_DELETE,
-       [112] = KEY_MACRO,
-       [113] = KEY_MUTE
-};
-
-/*
- * This maps the <xx> in extended scancodes of the form "0xE0 <xx>" into
- * keycodes.
- */
-static const unsigned char visorkbd_ext_keycode[KEYCODE_TABLE_BYTES] = {
-       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,             /* 0x00 */
-       0, 0, 0, 0, 0, 0, 0, 0,                                     /* 0x10 */
-       0, 0, 0, 0, KEY_KPENTER, KEY_RIGHTCTRL, 0, 0,               /* 0x18 */
-       0, 0, 0, 0, 0, 0, 0, 0,                                     /* 0x20 */
-       KEY_RIGHTALT, 0, 0, 0, 0, 0, 0, 0,                          /* 0x28 */
-       0, 0, 0, 0, 0, 0, 0, 0,                                     /* 0x30 */
-       KEY_RIGHTALT /* AltGr */, 0, 0, 0, 0, 0, 0, 0,              /* 0x38 */
-       0, 0, 0, 0, 0, 0, 0, KEY_HOME,                              /* 0x40 */
-       KEY_UP, KEY_PAGEUP, 0, KEY_LEFT, 0, KEY_RIGHT, 0, KEY_END,  /* 0x48 */
-       KEY_DOWN, KEY_PAGEDOWN, KEY_INSERT, KEY_DELETE, 0, 0, 0, 0, /* 0x50 */
-       0, 0, 0, 0, 0, 0, 0, 0,                                     /* 0x58 */
-       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,             /* 0x60 */
-       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,             /* 0x70 */
-};
-
-static int visorinput_open(struct input_dev *visorinput_dev)
-{
-       struct visorinput_devdata *devdata = input_get_drvdata(visorinput_dev);
-
-       if (!devdata) {
-               dev_err(&visorinput_dev->dev,
-                       "%s input_get_drvdata(%p) returned NULL\n",
-                       __func__, visorinput_dev);
-               return -EINVAL;
-       }
-       dev_dbg(&visorinput_dev->dev, "%s opened\n", __func__);
-
-       /*
-        * If we're not paused, really enable interrupts. Regardless of whether
-        * we are paused, set a flag indicating interrupts should be enabled so
-        * when we resume, interrupts will really be enabled.
-        */
-       mutex_lock(&devdata->lock_visor_dev);
-       devdata->interrupts_enabled = true;
-       if (devdata->paused)
-               goto out_unlock;
-       visorbus_enable_channel_interrupts(devdata->dev);
-
-out_unlock:
-       mutex_unlock(&devdata->lock_visor_dev);
-       return 0;
-}
-
-static void visorinput_close(struct input_dev *visorinput_dev)
-{
-       struct visorinput_devdata *devdata = input_get_drvdata(visorinput_dev);
-
-       if (!devdata) {
-               dev_err(&visorinput_dev->dev,
-                       "%s input_get_drvdata(%p) returned NULL\n",
-                       __func__, visorinput_dev);
-               return;
-       }
-       dev_dbg(&visorinput_dev->dev, "%s closed\n", __func__);
-
-       /*
-        * If we're not paused, really disable interrupts. Regardless of
-        * whether we are paused, set a flag indicating interrupts should be
-        * disabled so when we resume we will not re-enable them.
-        */
-       mutex_lock(&devdata->lock_visor_dev);
-       devdata->interrupts_enabled = false;
-       if (devdata->paused)
-               goto out_unlock;
-       visorbus_disable_channel_interrupts(devdata->dev);
-
-out_unlock:
-       mutex_unlock(&devdata->lock_visor_dev);
-}
-
-/*
- * setup_client_keyboard() initializes and returns a Linux input node that we
- * can use to deliver keyboard inputs to Linux.  We of course do this when we
- * see keyboard inputs coming in on a keyboard channel.
- */
-static struct input_dev *setup_client_keyboard(void *devdata,
-                                              unsigned char *keycode_table)
-
-{
-       int i;
-       struct input_dev *visorinput_dev = input_allocate_device();
-
-       if (!visorinput_dev)
-               return NULL;
-
-       visorinput_dev->name = "visor Keyboard";
-       visorinput_dev->phys = "visorkbd:input0";
-       visorinput_dev->id.bustype = BUS_VIRTUAL;
-       visorinput_dev->id.vendor = 0x0001;
-       visorinput_dev->id.product = 0x0001;
-       visorinput_dev->id.version = 0x0100;
-
-       visorinput_dev->evbit[0] = BIT_MASK(EV_KEY) |
-                                  BIT_MASK(EV_REP) |
-                                  BIT_MASK(EV_LED);
-       visorinput_dev->ledbit[0] = BIT_MASK(LED_CAPSL) |
-                                   BIT_MASK(LED_SCROLLL) |
-                                   BIT_MASK(LED_NUML);
-       visorinput_dev->keycode = keycode_table;
-       /* sizeof(unsigned char) */
-       visorinput_dev->keycodesize = 1;
-       visorinput_dev->keycodemax = KEYCODE_TABLE_BYTES;
-
-       for (i = 1; i < visorinput_dev->keycodemax; i++)
-               set_bit(keycode_table[i], visorinput_dev->keybit);
-       for (i = 1; i < visorinput_dev->keycodemax; i++)
-               set_bit(keycode_table[i + KEYCODE_TABLE_BYTES],
-                       visorinput_dev->keybit);
-
-       visorinput_dev->open = visorinput_open;
-       visorinput_dev->close = visorinput_close;
-       /* pre input_register! */
-       input_set_drvdata(visorinput_dev, devdata);
-
-       return visorinput_dev;
-}
-
-static struct input_dev *setup_client_mouse(void *devdata, unsigned int xres,
-                                           unsigned int yres)
-{
-       struct input_dev *visorinput_dev = input_allocate_device();
-
-       if (!visorinput_dev)
-               return NULL;
-
-       visorinput_dev->name = "visor Mouse";
-       visorinput_dev->phys = "visormou:input0";
-       visorinput_dev->id.bustype = BUS_VIRTUAL;
-       visorinput_dev->id.vendor = 0x0001;
-       visorinput_dev->id.product = 0x0002;
-       visorinput_dev->id.version = 0x0100;
-
-       visorinput_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-       set_bit(BTN_LEFT, visorinput_dev->keybit);
-       set_bit(BTN_RIGHT, visorinput_dev->keybit);
-       set_bit(BTN_MIDDLE, visorinput_dev->keybit);
-
-       if (xres == 0)
-               xres = PIXELS_ACROSS_DEFAULT;
-       if (yres == 0)
-               yres = PIXELS_DOWN_DEFAULT;
-       input_set_abs_params(visorinput_dev, ABS_X, 0, xres, 0, 0);
-       input_set_abs_params(visorinput_dev, ABS_Y, 0, yres, 0, 0);
-
-       visorinput_dev->open = visorinput_open;
-       visorinput_dev->close = visorinput_close;
-       /* pre input_register! */
-       input_set_drvdata(visorinput_dev, devdata);
-       input_set_capability(visorinput_dev, EV_REL, REL_WHEEL);
-
-       return visorinput_dev;
-}
-
-static struct visorinput_devdata *devdata_create(struct visor_device *dev,
-                                                enum visorinput_dev_type dtype)
-{
-       struct visorinput_devdata *devdata = NULL;
-       unsigned int extra_bytes = 0;
-       unsigned int size, xres, yres, err;
-       struct visor_input_channel_data data;
-
-       if (dtype == visorinput_keyboard)
-               /* allocate room for devdata->keycode_table, filled in below */
-               extra_bytes = KEYCODE_TABLE_BYTES * 2;
-       devdata = kzalloc(struct_size(devdata, keycode_table, extra_bytes),
-                         GFP_KERNEL);
-       if (!devdata)
-               return NULL;
-       mutex_init(&devdata->lock_visor_dev);
-       mutex_lock(&devdata->lock_visor_dev);
-       devdata->dev = dev;
-
-       /*
-        * visorinput_open() can be called as soon as input_register_device()
-        * happens, and that will enable channel interrupts.  Setting paused
-        * prevents us from getting into visorinput_channel_interrupt() prior
-        * to the device structure being totally initialized.
-        */
-       devdata->paused = true;
-
-       /*
-        * This is an input device in a client guest partition, so we need to
-        * create whatever input nodes are necessary to deliver our inputs to
-        * the guest OS.
-        */
-       switch (dtype) {
-       case visorinput_keyboard:
-               devdata->keycode_table_bytes = extra_bytes;
-               memcpy(devdata->keycode_table, visorkbd_keycode,
-                      KEYCODE_TABLE_BYTES);
-               memcpy(devdata->keycode_table + KEYCODE_TABLE_BYTES,
-                      visorkbd_ext_keycode, KEYCODE_TABLE_BYTES);
-               devdata->visorinput_dev = setup_client_keyboard
-                       (devdata, devdata->keycode_table);
-               if (!devdata->visorinput_dev)
-                       goto cleanups_register;
-               break;
-       case visorinput_mouse:
-               size = sizeof(struct visor_input_channel_data);
-               err = visorbus_read_channel(dev, sizeof(struct channel_header),
-                                           &data, size);
-               if (err)
-                       goto cleanups_register;
-               xres = data.mouse.x_res;
-               yres = data.mouse.y_res;
-               devdata->visorinput_dev = setup_client_mouse(devdata, xres,
-                                                            yres);
-               if (!devdata->visorinput_dev)
-                       goto cleanups_register;
-               break;
-       default:
-               /* No other input devices supported */
-               break;
-       }
-
-       dev_set_drvdata(&dev->device, devdata);
-       mutex_unlock(&devdata->lock_visor_dev);
-
-       /*
-        * Device struct is completely set up now, with the exception of
-        * visorinput_dev being registered. We need to unlock before we
-        * register the device, because this can cause an on-stack call of
-        * visorinput_open(), which would deadlock if we had the lock.
-        */
-       if (input_register_device(devdata->visorinput_dev)) {
-               input_free_device(devdata->visorinput_dev);
-               goto err_kfree_devdata;
-       }
-
-       mutex_lock(&devdata->lock_visor_dev);
-       /*
-        * Establish calls to visorinput_channel_interrupt() if that is the
-        * desired state that we've kept track of in interrupts_enabled while
-        * the device was being created.
-        */
-       devdata->paused = false;
-       if (devdata->interrupts_enabled)
-               visorbus_enable_channel_interrupts(dev);
-       mutex_unlock(&devdata->lock_visor_dev);
-
-       return devdata;
-
-cleanups_register:
-       mutex_unlock(&devdata->lock_visor_dev);
-err_kfree_devdata:
-       kfree(devdata);
-       return NULL;
-}
-
-static int visorinput_probe(struct visor_device *dev)
-{
-       const guid_t *guid;
-       enum visorinput_dev_type dtype;
-
-       guid = visorchannel_get_guid(dev->visorchannel);
-       if (guid_equal(guid, &visor_mouse_channel_guid))
-               dtype = visorinput_mouse;
-       else if (guid_equal(guid, &visor_keyboard_channel_guid))
-               dtype = visorinput_keyboard;
-       else
-               return -ENODEV;
-       visorbus_disable_channel_interrupts(dev);
-       if (!devdata_create(dev, dtype))
-               return -ENOMEM;
-       return 0;
-}
-
-static void unregister_client_input(struct input_dev *visorinput_dev)
-{
-       if (visorinput_dev)
-               input_unregister_device(visorinput_dev);
-}
-
-static void visorinput_remove(struct visor_device *dev)
-{
-       struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
-
-       if (!devdata)
-               return;
-
-       mutex_lock(&devdata->lock_visor_dev);
-       visorbus_disable_channel_interrupts(dev);
-
-       /*
-        * due to above, at this time no thread of execution will be in
-        * visorinput_channel_interrupt()
-        */
-
-       dev_set_drvdata(&dev->device, NULL);
-       mutex_unlock(&devdata->lock_visor_dev);
-
-       unregister_client_input(devdata->visorinput_dev);
-       kfree(devdata);
-}
-
-/*
- * Make it so the current locking state of the locking key indicated by
- * <keycode> is as indicated by <desired_state> (1=locked, 0=unlocked).
- */
-static void handle_locking_key(struct input_dev *visorinput_dev, int keycode,
-                              int desired_state)
-{
-       int led;
-
-       switch (keycode) {
-       case KEY_CAPSLOCK:
-               led = LED_CAPSL;
-               break;
-       case KEY_SCROLLLOCK:
-               led = LED_SCROLLL;
-               break;
-       case KEY_NUMLOCK:
-               led = LED_NUML;
-               break;
-       default:
-               return;
-       }
-       if (test_bit(led, visorinput_dev->led) != desired_state) {
-               input_report_key(visorinput_dev, keycode, 1);
-               input_sync(visorinput_dev);
-               input_report_key(visorinput_dev, keycode, 0);
-               input_sync(visorinput_dev);
-               __change_bit(led, visorinput_dev->led);
-       }
-}
-
-/*
- * <scancode> is either a 1-byte scancode, or an extended 16-bit scancode with
- * 0xE0 in the low byte and the extended scancode value in the next higher byte.
- */
-static int scancode_to_keycode(int scancode)
-{
-       if (scancode > 0xff)
-               return visorkbd_ext_keycode[(scancode >> 8) & 0xff];
-
-       return visorkbd_keycode[scancode];
-}
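
Tying this back to the scancode format described near the top of the file: an extended sequence 0xE0 0x38 (right ALT) arrives as arg1 == 0x38e0, so the shift above selects slot 0x38 of visorkbd_ext_keycode[], while a plain 1-byte scancode indexes visorkbd_keycode[] directly. A tiny self-contained illustration of the same index arithmetic (keycode tables elided):

#include <assert.h>

/* Same index selection as scancode_to_keycode(); the table lookups are elided. */
static int ext_index(int scancode)
{
        return (scancode > 0xff) ? ((scancode >> 8) & 0xff) : -1;
}

int main(void)
{
        assert(ext_index(0x38e0) == 0x38); /* extended: 0xE0-prefixed 0x38 (right ALT) */
        assert(ext_index(0x1d) == -1);     /* plain 1-byte scancode, no extension */
        return 0;
}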
-
-static int calc_button(int x)
-{
-       switch (x) {
-       case 1:
-               return BTN_LEFT;
-       case 2:
-               return BTN_MIDDLE;
-       case 3:
-               return BTN_RIGHT;
-       default:
-               return -EINVAL;
-       }
-}
-
-/*
- * This is used only when this driver is active as an input driver in the
- * client guest partition.  It is called periodically so we can obtain inputs
- * from the channel, and deliver them to the guest OS.
- */
-static void visorinput_channel_interrupt(struct visor_device *dev)
-{
-       struct visor_inputreport r;
-       int scancode, keycode;
-       struct input_dev *visorinput_dev;
-       int xmotion, ymotion, button;
-       int i;
-       struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
-
-       if (!devdata)
-               return;
-
-       visorinput_dev = devdata->visorinput_dev;
-
-       while (!visorchannel_signalremove(dev->visorchannel, 0, &r)) {
-               scancode = r.activity.arg1;
-               keycode = scancode_to_keycode(scancode);
-               switch (r.activity.action) {
-               case INPUTACTION_KEY_DOWN:
-                       input_report_key(visorinput_dev, keycode, 1);
-                       input_sync(visorinput_dev);
-                       break;
-               case INPUTACTION_KEY_UP:
-                       input_report_key(visorinput_dev, keycode, 0);
-                       input_sync(visorinput_dev);
-                       break;
-               case INPUTACTION_KEY_DOWN_UP:
-                       input_report_key(visorinput_dev, keycode, 1);
-                       input_sync(visorinput_dev);
-                       input_report_key(visorinput_dev, keycode, 0);
-                       input_sync(visorinput_dev);
-                       break;
-               case INPUTACTION_SET_LOCKING_KEY_STATE:
-                       handle_locking_key(visorinput_dev, keycode,
-                                          r.activity.arg2);
-                       break;
-               case INPUTACTION_XY_MOTION:
-                       xmotion = r.activity.arg1;
-                       ymotion = r.activity.arg2;
-                       input_report_abs(visorinput_dev, ABS_X, xmotion);
-                       input_report_abs(visorinput_dev, ABS_Y, ymotion);
-                       input_sync(visorinput_dev);
-                       break;
-               case INPUTACTION_MOUSE_BUTTON_DOWN:
-                       button = calc_button(r.activity.arg1);
-                       if (button < 0)
-                               break;
-                       input_report_key(visorinput_dev, button, 1);
-                       input_sync(visorinput_dev);
-                       break;
-               case INPUTACTION_MOUSE_BUTTON_UP:
-                       button = calc_button(r.activity.arg1);
-                       if (button < 0)
-                               break;
-                       input_report_key(visorinput_dev, button, 0);
-                       input_sync(visorinput_dev);
-                       break;
-               case INPUTACTION_MOUSE_BUTTON_CLICK:
-                       button = calc_button(r.activity.arg1);
-                       if (button < 0)
-                               break;
-                       input_report_key(visorinput_dev, button, 1);
-                       input_sync(visorinput_dev);
-                       input_report_key(visorinput_dev, button, 0);
-                       input_sync(visorinput_dev);
-                       break;
-               case INPUTACTION_MOUSE_BUTTON_DCLICK:
-                       button = calc_button(r.activity.arg1);
-                       if (button < 0)
-                               break;
-                       for (i = 0; i < 2; i++) {
-                               input_report_key(visorinput_dev, button, 1);
-                               input_sync(visorinput_dev);
-                               input_report_key(visorinput_dev, button, 0);
-                               input_sync(visorinput_dev);
-                       }
-                       break;
-               case INPUTACTION_WHEEL_ROTATE_AWAY:
-                       input_report_rel(visorinput_dev, REL_WHEEL, 1);
-                       input_sync(visorinput_dev);
-                       break;
-               case INPUTACTION_WHEEL_ROTATE_TOWARD:
-                       input_report_rel(visorinput_dev, REL_WHEEL, -1);
-                       input_sync(visorinput_dev);
-                       break;
-               default:
-                       /* Unsupported input action */
-                       break;
-               }
-       }
-}
-
-static int visorinput_pause(struct visor_device *dev,
-                           visorbus_state_complete_func complete_func)
-{
-       int rc;
-       struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
-
-       if (!devdata) {
-               rc = -ENODEV;
-               goto out;
-       }
-
-       mutex_lock(&devdata->lock_visor_dev);
-       if (devdata->paused) {
-               rc = -EBUSY;
-               goto out_locked;
-       }
-       if (devdata->interrupts_enabled)
-               visorbus_disable_channel_interrupts(dev);
-
-       /*
-        * due to above, at this time no thread of execution will be in
-        * visorinput_channel_interrupt()
-        */
-       devdata->paused = true;
-       complete_func(dev, 0);
-       rc = 0;
-out_locked:
-       mutex_unlock(&devdata->lock_visor_dev);
-out:
-       return rc;
-}
-
-static int visorinput_resume(struct visor_device *dev,
-                            visorbus_state_complete_func complete_func)
-{
-       int rc;
-       struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device);
-
-       if (!devdata) {
-               rc = -ENODEV;
-               goto out;
-       }
-       mutex_lock(&devdata->lock_visor_dev);
-       if (!devdata->paused) {
-               rc = -EBUSY;
-               goto out_locked;
-       }
-       devdata->paused = false;
-       complete_func(dev, 0);
-
-       /*
-        * Re-establish calls to visorinput_channel_interrupt() if that is the
-        * desired state that we've kept track of in interrupts_enabled while
-        * the device was paused.
-        */
-       if (devdata->interrupts_enabled)
-               visorbus_enable_channel_interrupts(dev);
-
-       rc = 0;
-out_locked:
-       mutex_unlock(&devdata->lock_visor_dev);
-out:
-       return rc;
-}
-
-/* GUIDS for all channel types supported by this driver. */
-static struct visor_channeltype_descriptor visorinput_channel_types[] = {
-       { VISOR_KEYBOARD_CHANNEL_GUID, "keyboard",
-         sizeof(struct channel_header), 0 },
-       { VISOR_MOUSE_CHANNEL_GUID, "mouse", sizeof(struct channel_header), 0 },
-       {}
-};
-
-static struct visor_driver visorinput_driver = {
-       .name = "visorinput",
-       .owner = THIS_MODULE,
-       .channel_types = visorinput_channel_types,
-       .probe = visorinput_probe,
-       .remove = visorinput_remove,
-       .channel_interrupt = visorinput_channel_interrupt,
-       .pause = visorinput_pause,
-       .resume = visorinput_resume,
-};
-
-module_driver(visorinput_driver, visorbus_register_visor_driver,
-             visorbus_unregister_visor_driver);
-
-MODULE_DEVICE_TABLE(visorbus, visorinput_channel_types);
-
-MODULE_AUTHOR("Unisys");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("s-Par human input driver for virtual keyboard/mouse");
-
-MODULE_ALIAS("visorbus:" VISOR_MOUSE_CHANNEL_GUID_STR);
-MODULE_ALIAS("visorbus:" VISOR_KEYBOARD_CHANNEL_GUID_STR);
diff --git a/drivers/staging/unisys/visornic/Kconfig b/drivers/staging/unisys/visornic/Kconfig
deleted file mode 100644 (file)
index 3f8f557..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Unisys visornic configuration
-#
-
-config UNISYS_VISORNIC
-       tristate "Unisys visornic driver"
-       depends on UNISYSSPAR && UNISYS_VISORBUS && NET
-       help
-               The Unisys Visornic driver provides support for s-Par network
-               devices exposed on the s-Par visorbus. When a message is sent
-               to visorbus to create a network device, the probe function of
-               visornic is called to create the netdev device. Networking on
-               s-Par switches will not work if this driver is not selected.
-               If you say Y here, you will enable the Unisys visornic driver.
-
diff --git a/drivers/staging/unisys/visornic/Makefile b/drivers/staging/unisys/visornic/Makefile
deleted file mode 100644 (file)
index f298488..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Unisys visornic
-#
-
-obj-$(CONFIG_UNISYS_VISORNIC)  += visornic.o
-
-visornic-y := visornic_main.o
-
-ccflags-y += -I $(srctree)/$(src)/../include
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
deleted file mode 100644 (file)
index 6434324..0000000
+++ /dev/null
@@ -1,2148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-/* This driver lives in an s-Par partition and registers to receive ethernet
- * IO channels from the visorbus driver. It creates netdev devices, forwards
- * transmits to the IO channel, and accepts receives from the IO Partition
- * via the IO channel.
- */
-
-#include <linux/debugfs.h>
-#include <linux/etherdevice.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/kthread.h>
-#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
-#include <linux/visorbus.h>
-
-#include "iochannel.h"
-
-#define VISORNIC_INFINITE_RSP_WAIT 0
-
-/* MAX_BUF = 64 lines x 32 MAXVNIC x 80 characters
- *         = 163840 bytes
- */
-#define MAX_BUF 163840
-#define NAPI_WEIGHT 64
-
-/* GUIDS for director channel type supported by this driver.  */
-/* {8cd5994d-c58e-11da-95a9-00e08161165f} */
-#define VISOR_VNIC_CHANNEL_GUID \
-       GUID_INIT(0x8cd5994d, 0xc58e, 0x11da, \
-               0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-#define VISOR_VNIC_CHANNEL_GUID_STR \
-       "8cd5994d-c58e-11da-95a9-00e08161165f"
-
-static struct visor_channeltype_descriptor visornic_channel_types[] = {
-       /* Note that the only channel type we expect to be reported by the
-        * bus driver is the VISOR_VNIC channel.
-        */
-       { VISOR_VNIC_CHANNEL_GUID, "ultravnic", sizeof(struct channel_header),
-         VISOR_VNIC_CHANNEL_VERSIONID },
-       {}
-};
-MODULE_DEVICE_TABLE(visorbus, visornic_channel_types);
-/* FIXME XXX: This next line of code must be fixed and removed before
- * acceptance into the 'normal' part of the kernel.  It is only here as a place
- * holder to get module autoloading functionality working for visorbus.  Code
- * must be added to scripts/mod/file2alias.c, etc., to get this working
- * properly.
- */
-MODULE_ALIAS("visorbus:" VISOR_VNIC_CHANNEL_GUID_STR);
-
-struct chanstat {
-       unsigned long got_rcv;
-       unsigned long got_enbdisack;
-       unsigned long got_xmit_done;
-       unsigned long xmit_fail;
-       unsigned long sent_enbdis;
-       unsigned long sent_promisc;
-       unsigned long sent_post;
-       unsigned long sent_post_failed;
-       unsigned long sent_xmit;
-       unsigned long reject_count;
-       unsigned long extra_rcvbufs_sent;
-};
-
-/* struct visornic_devdata
- * @enabled:                        0 disabled 1 enabled to receive.
- * @enab_dis_acked:                 NET_RCV_ENABLE/DISABLE acked by IOPART.
- * @struct *dev:
- * @struct *netdev:
- * @struct net_stats:
- * @interrupt_rcvd:
- * @rsp_queue:
- * @struct **rcvbuf:
- * @incarnation_id:                 incarnation_id lets IOPART know about
- *                                  re-birth.
- * @old_flags:                      flags as they were prior to
- *                                  set_multicast_list.
- * @usage:                          count of users.
- * @num_rcv_bufs:                   number of rcv buffers the vnic will post.
- * @num_rcv_bufs_could_not_alloc:
- * @num_rcvbuf_in_iovm:
- * @alloc_failed_in_if_needed_cnt:
- * @alloc_failed_in_repost_rtn_cnt:
- * @max_outstanding_net_xmits:      absolute max number of outstanding xmits
- *                                  - should never hit this.
- * @upper_threshold_net_xmits:      high water mark for calling
- *                                  netif_stop_queue().
- * @lower_threshold_net_xmits:      low water mark for calling
- *                                  netif_wake_queue().
- * @struct xmitbufhead:             xmitbufhead - head of the xmit buffer list
- *                                  sent to the IOPART end.
- * @server_down_complete_func:
- * @struct timeout_reset:
- * @struct *cmdrsp_rcv:             cmdrsp_rcv is used for posting/unposting rcv
- *                                  buffers.
- * @struct *xmit_cmdrsp:            xmit_cmdrsp - issues NET_XMIT - only one
- *                                  active xmit at a time.
- * @server_down:                    IOPART is down.
- * @server_change_state:            Processing SERVER_CHANGESTATE msg.
- * @going_away:                     device is being torn down.
- * @struct *eth_debugfs_dir:
- * @interrupts_rcvd:
- * @interrupts_notme:
- * @interrupts_disabled:
- * @busy_cnt:
- * @priv_lock:                      spinlock to access devdata structures.
- * @flow_control_upper_hits:
- * @flow_control_lower_hits:
- * @n_rcv0:                         # rcvs of 0 buffers.
- * @n_rcv1:                         # rcvs of 1 buffers.
- * @n_rcv2:                         # rcvs of 2 buffers.
- * @n_rcvx:                         # rcvs of >2 buffers.
- * @found_repost_rcvbuf_cnt:        # repost_rcvbuf_cnt.
- * @repost_found_skb_cnt:           # of found the skb.
- * @n_repost_deficit:               # of lost rcv buffers.
- * @bad_rcv_buf:                    # of unknown rcv skb not freed.
- * @n_rcv_packets_not_accepted:     # bogus rcv packets.
- * @queuefullmsg_logged:
- * @struct chstat:
- * @struct napi:
- * @struct cmdrsp:
- */
-struct visornic_devdata {
-       unsigned short enabled;
-       unsigned short enab_dis_acked;
-
-       struct visor_device *dev;
-       struct net_device *netdev;
-       struct net_device_stats net_stats;
-       atomic_t interrupt_rcvd;
-       wait_queue_head_t rsp_queue;
-       struct sk_buff **rcvbuf;
-       u64 incarnation_id;
-       unsigned short old_flags;
-       atomic_t usage;
-
-       int num_rcv_bufs;
-       int num_rcv_bufs_could_not_alloc;
-       atomic_t num_rcvbuf_in_iovm;
-       unsigned long alloc_failed_in_if_needed_cnt;
-       unsigned long alloc_failed_in_repost_rtn_cnt;
-
-       unsigned long max_outstanding_net_xmits;
-       unsigned long upper_threshold_net_xmits;
-       unsigned long lower_threshold_net_xmits;
-       struct sk_buff_head xmitbufhead;
-
-       visorbus_state_complete_func server_down_complete_func;
-       struct work_struct timeout_reset;
-       struct uiscmdrsp *cmdrsp_rcv;
-       struct uiscmdrsp *xmit_cmdrsp;
-       bool server_down;
-       bool server_change_state;
-       bool going_away;
-       struct dentry *eth_debugfs_dir;
-       u64 interrupts_rcvd;
-       u64 interrupts_notme;
-       u64 interrupts_disabled;
-       u64 busy_cnt;
-       /* spinlock to access devdata structures. */
-       spinlock_t priv_lock;
-
-       /* flow control counter */
-       u64 flow_control_upper_hits;
-       u64 flow_control_lower_hits;
-
-       /* debug counters */
-       unsigned long n_rcv0;
-       unsigned long n_rcv1;
-       unsigned long n_rcv2;
-       unsigned long n_rcvx;
-       unsigned long found_repost_rcvbuf_cnt;
-       unsigned long repost_found_skb_cnt;
-       unsigned long n_repost_deficit;
-       unsigned long bad_rcv_buf;
-       unsigned long n_rcv_packets_not_accepted;
-
-       int queuefullmsg_logged;
-       struct chanstat chstat;
-       struct napi_struct napi;
-       struct uiscmdrsp cmdrsp[SIZEOF_CMDRSP];
-};
-
-/* Returns next non-zero index on success or 0 on failure (i.e. out of room). */
-static u16 add_physinfo_entries(u64 inp_pfn, u16 inp_off, u16 inp_len,
-                               u16 index, u16 max_pi_arr_entries,
-                               struct phys_info pi_arr[])
-{
-       u16 i, len, firstlen;
-
-       firstlen = PI_PAGE_SIZE - inp_off;
-       if (inp_len <= firstlen) {
-               /* The input entry spans only one page - add as is. */
-               if (index >= max_pi_arr_entries)
-                       return 0;
-               pi_arr[index].pi_pfn = inp_pfn;
-               pi_arr[index].pi_off = (u16)inp_off;
-               pi_arr[index].pi_len = (u16)inp_len;
-               return index + 1;
-       }
-
-       /* This entry spans multiple pages. */
-       for (len = inp_len, i = 0; len;
-               len -= pi_arr[index + i].pi_len, i++) {
-               if (index + i >= max_pi_arr_entries)
-                       return 0;
-               pi_arr[index + i].pi_pfn = inp_pfn + i;
-               if (i == 0) {
-                       pi_arr[index].pi_off = inp_off;
-                       pi_arr[index].pi_len = firstlen;
-               } else {
-                       pi_arr[index + i].pi_off = 0;
-                       pi_arr[index + i].pi_len = min_t(u16, len,
-                                                        PI_PAGE_SIZE);
-               }
-       }
-       return index + i;
-}
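
To make the splitting above concrete, here is what the loop produces for one fragment that straddles page boundaries, assuming PI_PAGE_SIZE is 4096 (the input values are illustrative):

    input:   pfn = 100, offset = 3000, length = 6000
    firstlen = 4096 - 3000 = 1096
    entry 0: pfn 100, off 3000, len 1096
    entry 1: pfn 101, off    0, len 4096
    entry 2: pfn 102, off    0, len  808    (1096 + 4096 + 808 = 6000)
    return value: index + 3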
-
-/* visor_copy_fragsinfo_from_skb - copy fragment list in the SKB to a phys_info
- *                                array that the IOPART understands
- * @skb:         Skbuff that we are pulling the frags from.
- * @firstfraglen: Length of first fragment in skb.
- * @frags_max:   Max len of frags array.
- * @frags:       Frags array filled in on output.
- *
- * Return: Positive integer indicating number of entries filled in frags on
- *         success, negative integer on error.
- */
-static int visor_copy_fragsinfo_from_skb(struct sk_buff *skb,
-                                        unsigned int firstfraglen,
-                                        unsigned int frags_max,
-                                        struct phys_info frags[])
-{
-       unsigned int count = 0, frag, size, offset = 0, numfrags;
-       unsigned int total_count;
-
-       numfrags = skb_shinfo(skb)->nr_frags;
-
-       /* Compute the number of fragments this skb has, and if it's more than
-        * the frag array can hold, linearize the skb.
-        */
-       total_count = numfrags + (firstfraglen / PI_PAGE_SIZE);
-       if (firstfraglen % PI_PAGE_SIZE)
-               total_count++;
-
-       if (total_count > frags_max) {
-               if (skb_linearize(skb))
-                       return -EINVAL;
-               numfrags = skb_shinfo(skb)->nr_frags;
-               firstfraglen = 0;
-       }
-
-       while (firstfraglen) {
-               if (count == frags_max)
-                       return -EINVAL;
-
-               frags[count].pi_pfn =
-                       page_to_pfn(virt_to_page(skb->data + offset));
-               frags[count].pi_off =
-                       (unsigned long)(skb->data + offset) & PI_PAGE_MASK;
-               size = min_t(unsigned int, firstfraglen,
-                            PI_PAGE_SIZE - frags[count].pi_off);
-
-               /* can take smallest of firstfraglen (what's left) OR
-                * bytes left in the page
-                */
-               frags[count].pi_len = size;
-               firstfraglen -= size;
-               offset += size;
-               count++;
-       }
-       if (numfrags) {
-               if ((count + numfrags) > frags_max)
-                       return -EINVAL;
-
-               for (frag = 0; frag < numfrags; frag++) {
-                       count = add_physinfo_entries(page_to_pfn(
-                                 skb_frag_page(&skb_shinfo(skb)->frags[frag])),
-                                 skb_frag_off(&skb_shinfo(skb)->frags[frag]),
-                                 skb_frag_size(&skb_shinfo(skb)->frags[frag]),
-                                 count, frags_max, frags);
-                       /* add_physinfo_entries only returns
-                        * zero if the frags array is out of room.
-                        * That should never happen because we
-                        * fail above if count + numfrags > frags_max.
-                        */
-                       if (!count)
-                               return -EINVAL;
-               }
-       }
-       if (skb_shinfo(skb)->frag_list) {
-               struct sk_buff *skbinlist;
-               int c;
-
-               for (skbinlist = skb_shinfo(skb)->frag_list; skbinlist;
-                    skbinlist = skbinlist->next) {
-                       c = visor_copy_fragsinfo_from_skb(skbinlist,
-                                                         skbinlist->len -
-                                                         skbinlist->data_len,
-                                                         frags_max - count,
-                                                         &frags[count]);
-                       if (c < 0)
-                               return c;
-                       count += c;
-               }
-       }
-       return count;
-}
-
-static ssize_t enable_ints_write(struct file *file,
-                                const char __user *buffer,
-                                size_t count, loff_t *ppos)
-{
-       /* Don't want to break the ABI here by having a debugfs
-        * file that no longer exists or is no longer writable, so
-        * let's just make this a vestigial function.
-        */
-       return count;
-}
-
-static const struct file_operations debugfs_enable_ints_fops = {
-       .write = enable_ints_write,
-};
-
-/* visornic_serverdown_complete - pause device following IOPART going down
- * @devdata: Device managed by IOPART.
- *
- * The IO partition has gone down, and we need to do some cleanup for when it
- * comes back. Treat the IO partition as the link being down.
- */
-static void visornic_serverdown_complete(struct visornic_devdata *devdata)
-{
-       struct net_device *netdev = devdata->netdev;
-
-       /* Stop polling for interrupts */
-       visorbus_disable_channel_interrupts(devdata->dev);
-
-       rtnl_lock();
-       dev_close(netdev);
-       rtnl_unlock();
-
-       atomic_set(&devdata->num_rcvbuf_in_iovm, 0);
-       devdata->chstat.sent_xmit = 0;
-       devdata->chstat.got_xmit_done = 0;
-
-       if (devdata->server_down_complete_func)
-               (*devdata->server_down_complete_func)(devdata->dev, 0);
-
-       devdata->server_down = true;
-       devdata->server_change_state = false;
-       devdata->server_down_complete_func = NULL;
-}
-
-/* visornic_serverdown - Command has notified us that IOPART is down
- * @devdata:      Device managed by IOPART.
- * @complete_func: Function to call when finished.
- *
- * Schedule the work needed to handle the server down request. Make sure we
- * haven't already handled the server change state event.
- *
- * Return: 0 if we scheduled the work, negative integer on error.
- */
-static int visornic_serverdown(struct visornic_devdata *devdata,
-                              visorbus_state_complete_func complete_func)
-{
-       unsigned long flags;
-       int err;
-
-       spin_lock_irqsave(&devdata->priv_lock, flags);
-       if (devdata->server_change_state) {
-               dev_dbg(&devdata->dev->device, "%s changing state\n",
-                       __func__);
-               err = -EINVAL;
-               goto err_unlock;
-       }
-       if (devdata->server_down) {
-               dev_dbg(&devdata->dev->device, "%s already down\n",
-                       __func__);
-               err = -EINVAL;
-               goto err_unlock;
-       }
-       if (devdata->going_away) {
-               dev_dbg(&devdata->dev->device,
-                       "%s aborting because device removal pending\n",
-                       __func__);
-               err = -ENODEV;
-               goto err_unlock;
-       }
-       devdata->server_change_state = true;
-       devdata->server_down_complete_func = complete_func;
-       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
-       visornic_serverdown_complete(devdata);
-       return 0;
-
-err_unlock:
-       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-       return err;
-}
-
-/* alloc_rcv_buf - alloc rcv buffer to be given to the IO Partition
- * @netdev: Network adapter the rcv bufs are attached to.
- *
- * Create an sk_buff (rcv_buf) that will be passed to the IO Partition
- * so that it can write rcv data into our memory space.
- *
- * Return: Pointer to sk_buff.
- */
-static struct sk_buff *alloc_rcv_buf(struct net_device *netdev)
-{
-       struct sk_buff *skb;
-
-       /* NOTE: the first fragment in each rcv buffer is pointed to by
-        * rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE
-        * in length, so the first frag is large enough to hold 1514.
-        */
-       skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
-       if (!skb)
-               return NULL;
-       skb->dev = netdev;
-       /* current value of mtu doesn't come into play here; large
-        * packets will just end up using multiple rcv buffers, all of
-        * the same size.
-        */
-       skb->len = RCVPOST_BUF_SIZE;
-       /* alloc_skb already zeroes this; set it explicitly for clarity. */
-       skb->data_len = 0;
-       return skb;
-}
-
-/* post_skb - post a skb to the IO Partition
- * @cmdrsp:  Cmdrsp packet to be sent to the IO Partition.
- * @devdata: visornic_devdata to post the skb to.
- * @skb:     Skb to give to the IO partition.
- *
- * Return: 0 on success, negative integer on error.
- */
-static int post_skb(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
-                   struct sk_buff *skb)
-{
-       int err;
-
-       cmdrsp->net.buf = skb;
-       cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data));
-       cmdrsp->net.rcvpost.frag.pi_off =
-               (unsigned long)skb->data & PI_PAGE_MASK;
-       cmdrsp->net.rcvpost.frag.pi_len = skb->len;
-       cmdrsp->net.rcvpost.unique_num = devdata->incarnation_id;
-
-       if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) > PI_PAGE_SIZE)
-               return -EINVAL;
-
-       cmdrsp->net.type = NET_RCV_POST;
-       cmdrsp->cmdtype = CMD_NET_TYPE;
-       err = visorchannel_signalinsert(devdata->dev->visorchannel,
-                                       IOCHAN_TO_IOPART,
-                                       cmdrsp);
-       if (err) {
-               devdata->chstat.sent_post_failed++;
-               return err;
-       }
-
-       atomic_inc(&devdata->num_rcvbuf_in_iovm);
-       devdata->chstat.sent_post++;
-       return 0;
-}
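
post_skb() hands the IO Partition a single pfn + offset + length triple per
receive buffer, so it rejects any buffer whose data would cross a page
boundary. Below is a minimal, self-contained sketch of that boundary check
(not part of the driver); the PI_PAGE_SIZE and PI_PAGE_MASK values used here
are assumptions for illustration, not quoted from the driver headers.

#include <stdbool.h>
#include <stdio.h>

#define PI_PAGE_SIZE 0x1000UL /* assumed: one 4 KiB page */
#define PI_PAGE_MASK 0x0fffUL /* assumed: offset-within-page bits */

static bool buf_fits_in_one_page(unsigned long addr, unsigned long len)
{
        unsigned long off = addr & PI_PAGE_MASK;

        /* mirrors post_skb(): (pi_off + skb->len) > PI_PAGE_SIZE is rejected */
        return off + len <= PI_PAGE_SIZE;
}

int main(void)
{
        printf("%d\n", buf_fits_in_one_page(0x1000, 2048)); /* 1: page aligned */
        printf("%d\n", buf_fits_in_one_page(0x1f00, 2048)); /* 0: crosses a page */
        return 0;
}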
-
-/* send_enbdis - Send NET_RCV_ENBDIS to IO Partition
- * @netdev:  Netdevice we are enabling/disabling, used as context return value.
- * @state:   Enable = 1/disable = 0.
- * @devdata: Visornic device we are enabling/disabling.
- *
- * Send the enable/disable message to the IO Partition.
- *
- * Return: 0 on success, negative integer on error.
- */
-static int send_enbdis(struct net_device *netdev, int state,
-                      struct visornic_devdata *devdata)
-{
-       int err;
-
-       devdata->cmdrsp_rcv->net.enbdis.enable = state;
-       devdata->cmdrsp_rcv->net.enbdis.context = netdev;
-       devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
-       devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
-       err = visorchannel_signalinsert(devdata->dev->visorchannel,
-                                       IOCHAN_TO_IOPART,
-                                       devdata->cmdrsp_rcv);
-       if (err)
-               return err;
-       devdata->chstat.sent_enbdis++;
-       return 0;
-}
-
-/* visornic_disable_with_timeout - disable network adapter
- * @netdev:  netdevice to disable.
- * @timeout: Timeout to wait for disable.
- *
- * Disable the network adapter and inform the IO Partition that we are disabled.
- * Reclaim memory from rcv bufs.
- *
- * Return: 0 on success, negative integer if the IO Partition fails to respond.
- */
-static int visornic_disable_with_timeout(struct net_device *netdev,
-                                        const int timeout)
-{
-       struct visornic_devdata *devdata = netdev_priv(netdev);
-       int i;
-       unsigned long flags;
-       int wait = 0;
-       int err;
-
-       /* send a msg telling the other end we are stopping incoming pkts */
-       spin_lock_irqsave(&devdata->priv_lock, flags);
-       devdata->enabled = 0;
-       /* must wait for ack */
-       devdata->enab_dis_acked = 0;
-       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
-       /* send disable and wait for ack -- don't hold lock when sending
-        * disable because if the queue is full, insert might sleep.
-        * If an error occurs, don't wait for the timeout.
-        */
-       err = send_enbdis(netdev, 0, devdata);
-       if (err)
-               return err;
-
-       /* wait for ack to arrive before we try to free rcv buffers
-        * NOTE: the other end automatically unposts the rcv buffers
-        * when it gets a disable.
-        */
-       spin_lock_irqsave(&devdata->priv_lock, flags);
-       while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
-              (wait < timeout)) {
-               if (devdata->enab_dis_acked)
-                       break;
-               if (devdata->server_down || devdata->server_change_state) {
-                       dev_dbg(&netdev->dev, "%s server went away\n",
-                               __func__);
-                       break;
-               }
-               set_current_state(TASK_INTERRUPTIBLE);
-               spin_unlock_irqrestore(&devdata->priv_lock, flags);
-               wait += schedule_timeout(msecs_to_jiffies(10));
-               spin_lock_irqsave(&devdata->priv_lock, flags);
-       }
-
-       /* Wait for usage to go to 1 (no other users) before freeing
-        * rcv buffers
-        */
-       if (atomic_read(&devdata->usage) > 1) {
-               while (1) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-                       schedule_timeout(msecs_to_jiffies(10));
-                       spin_lock_irqsave(&devdata->priv_lock, flags);
-                       if (atomic_read(&devdata->usage) == 1)
-                               break;
-               }
-       }
-       /* we've set enabled to 0, so we can give up the lock. */
-       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
-       /* stop the transmit queue so nothing more can be transmitted */
-       netif_stop_queue(netdev);
-
-       napi_disable(&devdata->napi);
-
-       skb_queue_purge(&devdata->xmitbufhead);
-
-       /* Free rcv buffers - other end has automatically unposted them on
-        * disable
-        */
-       for (i = 0; i < devdata->num_rcv_bufs; i++) {
-               if (devdata->rcvbuf[i]) {
-                       kfree_skb(devdata->rcvbuf[i]);
-                       devdata->rcvbuf[i] = NULL;
-               }
-       }
-
-       return 0;
-}
-
-/* init_rcv_bufs - initialize receive buffs and send them to the IO Partition
- * @netdev:  struct netdevice.
- * @devdata: visornic_devdata.
- *
- * Allocate rcv buffers and post them to the IO Partition.
- *
- * Return: 0 on success, negative integer on failure.
- */
-static int init_rcv_bufs(struct net_device *netdev,
-                        struct visornic_devdata *devdata)
-{
-       int i, j, count, err;
-
-       /* allocate a fixed number of receive buffers to post to uisnic;
-        * post the receive buffers after we've allocated the required amount
-        */
-       for (i = 0; i < devdata->num_rcv_bufs; i++) {
-               devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
-               /* if we failed to allocate one let us stop */
-               if (!devdata->rcvbuf[i])
-                       break;
-       }
-       /* couldn't even allocate one -- bail out */
-       if (i == 0)
-               return -ENOMEM;
-       count = i;
-
-       /* Ensure we can alloc 2/3rd of the requested number of buffers.
-        * 2/3 is an arbitrary choice; used also in ndis init.c
-        */
-       if (count < ((2 * devdata->num_rcv_bufs) / 3)) {
-               /* free receive buffers we did alloc and then bail out */
-               for (i = 0; i < count; i++) {
-                       kfree_skb(devdata->rcvbuf[i]);
-                       devdata->rcvbuf[i] = NULL;
-               }
-               return -ENOMEM;
-       }
-
-       /* post receive buffers to receive incoming input - without holding
-        * lock - we've not enabled nor started the queue so there shouldn't
-        * be any rcv or xmit activity
-        */
-       for (i = 0; i < count; i++) {
-               err = post_skb(devdata->cmdrsp_rcv, devdata,
-                              devdata->rcvbuf[i]);
-               if (!err)
-                       continue;
-
-               /* Error handling -
-                * If we posted at least one skb, we should return success,
-                * but need to free the resources that we have not successfully
-                * posted.
-                */
-               for (j = i; j < count; j++) {
-                       kfree_skb(devdata->rcvbuf[j]);
-                       devdata->rcvbuf[j] = NULL;
-               }
-               if (i == 0)
-                       return err;
-               break;
-       }
-
-       return 0;
-}
-
-/* visornic_enable_with_timeout - send enable to IO Partition
- * @netdev:  struct net_device.
- * @timeout: Time to wait for the ACK from the enable.
- *
- * Sends enable to the IOVM, then initializes and posts receive buffers to the
- * IOVM. Timeout is defined in msecs (timeout of 0 specifies infinite wait).
- *
- * Return: 0 on success, negative integer on failure.
- */
-static int visornic_enable_with_timeout(struct net_device *netdev,
-                                       const int timeout)
-{
-       int err = 0;
-       struct visornic_devdata *devdata = netdev_priv(netdev);
-       unsigned long flags;
-       int wait = 0;
-
-       napi_enable(&devdata->napi);
-
-       /* NOTE: the other end automatically unposts the rcv buffers when it
-        * gets a disable.
-        */
-       err = init_rcv_bufs(netdev, devdata);
-       if (err < 0) {
-               dev_err(&netdev->dev,
-                       "%s failed to init rcv bufs\n", __func__);
-               return err;
-       }
-
-       spin_lock_irqsave(&devdata->priv_lock, flags);
-       devdata->enabled = 1;
-       devdata->enab_dis_acked = 0;
-
-       /* now we're ready, let's send an ENB to uisnic but until we get
-        * an ACK back from uisnic, we'll drop the packets
-        */
-       devdata->n_rcv_packets_not_accepted = 0;
-       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
-       /* send enable and wait for ack -- don't hold lock when sending enable
-        * because if the queue is full, insert might sleep. If an error
-        * occurs error out.
-        */
-       err = send_enbdis(netdev, 1, devdata);
-       if (err)
-               return err;
-
-       spin_lock_irqsave(&devdata->priv_lock, flags);
-       while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
-              (wait < timeout)) {
-               if (devdata->enab_dis_acked)
-                       break;
-               if (devdata->server_down || devdata->server_change_state) {
-                       dev_dbg(&netdev->dev, "%s server went away\n",
-                               __func__);
-                       break;
-               }
-               set_current_state(TASK_INTERRUPTIBLE);
-               spin_unlock_irqrestore(&devdata->priv_lock, flags);
-               wait += schedule_timeout(msecs_to_jiffies(10));
-               spin_lock_irqsave(&devdata->priv_lock, flags);
-       }
-
-       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
-       if (!devdata->enab_dis_acked) {
-               dev_err(&netdev->dev, "%s missing ACK\n", __func__);
-               return -EIO;
-       }
-
-       netif_start_queue(netdev);
-       return 0;
-}
-
-/* visornic_timeout_reset - handle xmit timeout resets
- * @work: Work item that scheduled the work.
- *
- * Transmit timeouts are typically handled by resetting the device for our
- * virtual NIC; we will send a disable and enable to the IOVM. If it doesn't
- * respond, we will trigger a serverdown.
- */
-static void visornic_timeout_reset(struct work_struct *work)
-{
-       struct visornic_devdata *devdata;
-       struct net_device *netdev;
-       int response = 0;
-
-       devdata = container_of(work, struct visornic_devdata, timeout_reset);
-       netdev = devdata->netdev;
-
-       rtnl_lock();
-       if (!netif_running(netdev)) {
-               rtnl_unlock();
-               return;
-       }
-
-       response = visornic_disable_with_timeout(netdev,
-                                                VISORNIC_INFINITE_RSP_WAIT);
-       if (response)
-               goto call_serverdown;
-
-       response = visornic_enable_with_timeout(netdev,
-                                               VISORNIC_INFINITE_RSP_WAIT);
-       if (response)
-               goto call_serverdown;
-
-       rtnl_unlock();
-
-       return;
-
-call_serverdown:
-       visornic_serverdown(devdata, NULL);
-       rtnl_unlock();
-}
-
-/* visornic_open - enable the visornic device and mark the queue started
- * @netdev: netdevice to start.
- *
- * Enable the device and start the transmit queue.
- *
- * Return: 0 on success.
- */
-static int visornic_open(struct net_device *netdev)
-{
-       visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
-       return 0;
-}
-
-/* visornic_close - disables the visornic device and stops the queues
- * @netdev: netdevice to stop.
- *
- * Disable the device and stop the transmit queue.
- *
- * Return: 0 on success.
- */
-static int visornic_close(struct net_device *netdev)
-{
-       visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
-       return 0;
-}
-
-/* devdata_xmits_outstanding - compute outstanding xmits
- * @devdata: visornic_devdata for device
- *
- * Return: Long integer representing the number of outstanding xmits.
- */
-static unsigned long devdata_xmits_outstanding(struct visornic_devdata *devdata)
-{
-       if (devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done)
-               return devdata->chstat.sent_xmit -
-                       devdata->chstat.got_xmit_done;
-       return (ULONG_MAX - devdata->chstat.got_xmit_done
-               + devdata->chstat.sent_xmit + 1);
-}
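
The two branches in devdata_xmits_outstanding() are equivalent to a single
unsigned subtraction, since C unsigned arithmetic wraps modulo 2^N. The
standalone sketch below (not from the driver; the counter values are
hypothetical) demonstrates the wraparound case.

#include <limits.h>
#include <stdio.h>

/* same result as the branchy version above: unsigned subtraction wraps mod 2^N */
static unsigned long xmits_outstanding(unsigned long sent, unsigned long done)
{
        return sent - done;
}

int main(void)
{
        /* sent has wrapped past ULONG_MAX while done has not */
        unsigned long sent = 2, done = ULONG_MAX - 1;

        printf("%lu\n", xmits_outstanding(sent, done)); /* prints 4 */
        return 0;
}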
-
-/* vnic_hit_high_watermark - check if xmits reached the high watermark
- * @devdata:       Indicates visornic device we are checking.
- * @high_watermark: Max num of unacked xmits we will tolerate before we will
- *                 start throttling.
- *
- * Return: True iff the number of unacked xmits sent to the IO Partition is >=
- *        high_watermark. False otherwise.
- */
-static bool vnic_hit_high_watermark(struct visornic_devdata *devdata,
-                                   ulong high_watermark)
-{
-       return (devdata_xmits_outstanding(devdata) >= high_watermark);
-}
-
-/* vnic_hit_low_watermark - check if xmits drained to the low watermark
- * @devdata:      Indicates visornic device we are checking.
- * @low_watermark: We will wait until the num of unacked xmits drops to this
- *                value or lower before we start transmitting again.
- *
- * Return: True iff the number of unacked xmits sent to the IO Partition is <=
- *        low_watermark.
- */
-static bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
-                                  ulong low_watermark)
-{
-       return (devdata_xmits_outstanding(devdata) <= low_watermark);
-}
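
Together these two helpers give the transmit path hysteresis: visornic_xmit()
stops the queue once outstanding xmits reach the high watermark, and
service_resp_queue() wakes it only after they drain to the low watermark,
which avoids rapid stop/wake toggling around a single threshold. A compact
standalone sketch of that pattern follows; the struct and function names are
hypothetical, not taken from the driver.

#include <stdbool.h>

struct tx_flow {
        unsigned long sent;
        unsigned long done;
        unsigned long high_wm; /* stop the queue at this many outstanding */
        unsigned long low_wm;  /* wake it again only at or below this     */
        bool stopped;
};

static unsigned long outstanding(const struct tx_flow *f)
{
        return f->sent - f->done;
}

/* called on every transmit attempt */
static void on_xmit(struct tx_flow *f)
{
        f->sent++;
        if (!f->stopped && outstanding(f) >= f->high_wm)
                f->stopped = true;  /* netif_stop_queue() in the driver */
}

/* called when the IO Partition acknowledges a completed transmit */
static void on_xmit_done(struct tx_flow *f)
{
        f->done++;
        if (f->stopped && outstanding(f) <= f->low_wm)
                f->stopped = false; /* netif_wake_queue() in the driver */
}

int main(void)
{
        struct tx_flow f = { .high_wm = 4, .low_wm = 2 };
        int i;

        for (i = 0; i < 5; i++)
                on_xmit(&f);        /* 5 outstanding -> queue stopped */
        for (i = 0; i < 3; i++)
                on_xmit_done(&f);   /* 2 outstanding -> queue woken   */
        return f.stopped;           /* 0: running again               */
}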
-
-/* visornic_xmit - send a packet to the IO Partition
- * @skb:    Packet to be sent.
- * @netdev: Net device the packet is being sent from.
- *
- * Convert the skb to a cmdrsp so the IO Partition can understand it, and send
- * the XMIT command to the IO Partition for processing. This function is
- * protected from concurrent calls by a spinlock xmit_lock in the net_device
- * struct. As soon as the function returns, it can be called again.
- *
- * Return: NETDEV_TX_OK.
- */
-static netdev_tx_t visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
-       struct visornic_devdata *devdata;
-       int len, firstfraglen, padlen;
-       struct uiscmdrsp *cmdrsp = NULL;
-       unsigned long flags;
-       int err;
-
-       devdata = netdev_priv(netdev);
-       spin_lock_irqsave(&devdata->priv_lock, flags);
-
-       if (netif_queue_stopped(netdev) || devdata->server_down ||
-           devdata->server_change_state) {
-               spin_unlock_irqrestore(&devdata->priv_lock, flags);
-               devdata->busy_cnt++;
-               dev_dbg(&netdev->dev,
-                       "%s busy - queue stopped\n", __func__);
-               kfree_skb(skb);
-               return NETDEV_TX_OK;
-       }
-
-       /* sk_buff struct is used to host network data throughout all the
-        * linux network subsystems
-        */
-       len = skb->len;
-
-       /* skb->len is the FULL length of data (including fragmentary portion)
-        * skb->data_len is the length of the fragment portion in frags
-        * skb->len - skb->data_len is size of the 1st fragment in skb->data
-        * calculate the length of the first fragment that skb->data is
-        * pointing to
-        */
-       firstfraglen = skb->len - skb->data_len;
-       if (firstfraglen < ETH_HLEN) {
-               spin_unlock_irqrestore(&devdata->priv_lock, flags);
-               devdata->busy_cnt++;
-               dev_err(&netdev->dev,
-                       "%s busy - first frag too small (%d)\n",
-                       __func__, firstfraglen);
-               kfree_skb(skb);
-               return NETDEV_TX_OK;
-       }
-
-       if (len < ETH_MIN_PACKET_SIZE &&
-           ((skb_end_pointer(skb) - skb->data) >= ETH_MIN_PACKET_SIZE)) {
-               /* pad the packet out to minimum size */
-               padlen = ETH_MIN_PACKET_SIZE - len;
-               skb_put_zero(skb, padlen);
-               len += padlen;
-               firstfraglen += padlen;
-       }
-
-       cmdrsp = devdata->xmit_cmdrsp;
-       /* clear cmdrsp */
-       memset(cmdrsp, 0, SIZEOF_CMDRSP);
-       cmdrsp->net.type = NET_XMIT;
-       cmdrsp->cmdtype = CMD_NET_TYPE;
-
-       /* save the pointer to skb -- we'll need it for completion */
-       cmdrsp->net.buf = skb;
-
-       if (vnic_hit_high_watermark(devdata,
-                                   devdata->max_outstanding_net_xmits)) {
-               /* extra NET_XMITs queued over to IOVM - need to wait */
-               devdata->chstat.reject_count++;
-               if (!devdata->queuefullmsg_logged &&
-                   ((devdata->chstat.reject_count & 0x3ff) == 1))
-                       devdata->queuefullmsg_logged = 1;
-               netif_stop_queue(netdev);
-               spin_unlock_irqrestore(&devdata->priv_lock, flags);
-               devdata->busy_cnt++;
-               dev_dbg(&netdev->dev,
-                       "%s busy - waiting for iovm to catch up\n",
-                       __func__);
-               kfree_skb(skb);
-               return NETDEV_TX_OK;
-       }
-       if (devdata->queuefullmsg_logged)
-               devdata->queuefullmsg_logged = 0;
-
-       if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
-               cmdrsp->net.xmt.lincsum.valid = 1;
-               cmdrsp->net.xmt.lincsum.protocol = skb->protocol;
-               if (skb_transport_header(skb) > skb->data) {
-                       cmdrsp->net.xmt.lincsum.hrawoff =
-                               skb_transport_header(skb) - skb->data;
-                       cmdrsp->net.xmt.lincsum.hrawoffv = 1;
-               }
-               if (skb_network_header(skb) > skb->data) {
-                       cmdrsp->net.xmt.lincsum.nhrawoff =
-                               skb_network_header(skb) - skb->data;
-                       cmdrsp->net.xmt.lincsum.nhrawoffv = 1;
-               }
-               cmdrsp->net.xmt.lincsum.csum = skb->csum;
-       } else {
-               cmdrsp->net.xmt.lincsum.valid = 0;
-       }
-
-       /* save off the length of the entire data packet */
-       cmdrsp->net.xmt.len = len;
-
-       /* copy ethernet header from first frag into cmdrsp
-        * - everything else will be passed in frags & DMA'ed
-        */
-       memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HLEN);
-
-       /* copy frags info - from skb->data we only need to provide access
-        * beyond the eth header
-        */
-       cmdrsp->net.xmt.num_frags =
-               visor_copy_fragsinfo_from_skb(skb, firstfraglen,
-                                             MAX_PHYS_INFO,
-                                             cmdrsp->net.xmt.frags);
-       if (cmdrsp->net.xmt.num_frags < 0) {
-               spin_unlock_irqrestore(&devdata->priv_lock, flags);
-               devdata->busy_cnt++;
-               dev_err(&netdev->dev,
-                       "%s busy - copy frags failed\n", __func__);
-               kfree_skb(skb);
-               return NETDEV_TX_OK;
-       }
-
-       err = visorchannel_signalinsert(devdata->dev->visorchannel,
-                                       IOCHAN_TO_IOPART, cmdrsp);
-       if (err) {
-               netif_stop_queue(netdev);
-               spin_unlock_irqrestore(&devdata->priv_lock, flags);
-               devdata->busy_cnt++;
-               dev_dbg(&netdev->dev,
-                       "%s busy - signalinsert failed\n", __func__);
-               kfree_skb(skb);
-               return NETDEV_TX_OK;
-       }
-
-       /* Track the skbs that have been sent to the IOVM for XMIT */
-       skb_queue_head(&devdata->xmitbufhead, skb);
-
-       /* update xmt stats */
-       devdata->net_stats.tx_packets++;
-       devdata->net_stats.tx_bytes += skb->len;
-       devdata->chstat.sent_xmit++;
-
-       /* check if we have hit the high watermark for netif_stop_queue() */
-       if (vnic_hit_high_watermark(devdata,
-                                   devdata->upper_threshold_net_xmits)) {
-               /* extra NET_XMITs queued over to IOVM - need to wait */
-               /* stop queue - call netif_wake_queue() after lower threshold */
-               netif_stop_queue(netdev);
-               dev_dbg(&netdev->dev,
-                       "%s busy - invoking iovm flow control\n",
-                       __func__);
-               devdata->flow_control_upper_hits++;
-       }
-       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
-       /* skb will be freed when we get back NET_XMIT_DONE */
-       return NETDEV_TX_OK;
-}
-
-/* visornic_get_stats - returns net_stats of the visornic device
- * @netdev: netdevice.
- *
- * Return: Pointer to the net_device_stats struct for the device.
- */
-static struct net_device_stats *visornic_get_stats(struct net_device *netdev)
-{
-       struct visornic_devdata *devdata = netdev_priv(netdev);
-
-       return &devdata->net_stats;
-}
-
-/* visornic_change_mtu - changes mtu of device
- * @netdev: netdevice.
- * @new_mtu: Value of new mtu.
- *
- * The device's MTU cannot be changed by the system; it must be changed via a
- * CONTROLVM message. All vnics and pnics in a switch have to have the same MTU
- * for everything to work. Currently not supported.
- *
- * Return: -EINVAL.
- */
-static int visornic_change_mtu(struct net_device *netdev, int new_mtu)
-{
-       return -EINVAL;
-}
-
-/* visornic_set_multi - set visornic device flags
- * @netdev: netdevice.
- *
- * The only flag we currently support is IFF_PROMISC.
- */
-static void visornic_set_multi(struct net_device *netdev)
-{
-       struct uiscmdrsp *cmdrsp;
-       struct visornic_devdata *devdata = netdev_priv(netdev);
-       int err = 0;
-
-       if (devdata->old_flags == netdev->flags)
-               return;
-
-       if ((netdev->flags & IFF_PROMISC) ==
-           (devdata->old_flags & IFF_PROMISC))
-               goto out_save_flags;
-
-       cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
-       if (!cmdrsp)
-               return;
-       cmdrsp->cmdtype = CMD_NET_TYPE;
-       cmdrsp->net.type = NET_RCV_PROMISC;
-       cmdrsp->net.enbdis.context = netdev;
-       cmdrsp->net.enbdis.enable =
-               netdev->flags & IFF_PROMISC;
-       err = visorchannel_signalinsert(devdata->dev->visorchannel,
-                                       IOCHAN_TO_IOPART,
-                                       cmdrsp);
-       kfree(cmdrsp);
-       if (err)
-               return;
-
-out_save_flags:
-       devdata->old_flags = netdev->flags;
-}
-
-/* visornic_xmit_timeout - request to timeout the xmit
- * @netdev:  netdevice.
- * @txqueue: index of the hung transmit queue (unused).
- *
- * Queue the work and return. Make sure we have not already been informed that
- * the IO Partition is gone; if so, we will have already timed-out the xmits.
- */
-static void visornic_xmit_timeout(struct net_device *netdev, unsigned int txqueue)
-{
-       struct visornic_devdata *devdata = netdev_priv(netdev);
-       unsigned long flags;
-
-       spin_lock_irqsave(&devdata->priv_lock, flags);
-       if (devdata->going_away) {
-               spin_unlock_irqrestore(&devdata->priv_lock, flags);
-               dev_dbg(&devdata->dev->device,
-                       "%s aborting because device removal pending\n",
-                       __func__);
-               return;
-       }
-
-       /* Ensure that a ServerDown message hasn't been received */
-       if (!devdata->enabled ||
-           (devdata->server_down && !devdata->server_change_state)) {
-               dev_dbg(&netdev->dev, "%s no processing\n",
-                       __func__);
-               spin_unlock_irqrestore(&devdata->priv_lock, flags);
-               return;
-       }
-       schedule_work(&devdata->timeout_reset);
-       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-}
-
-/* repost_return - repost rcv bufs that have come back
- * @cmdrsp: IO channel command struct to post.
- * @devdata: Visornic devdata for the device.
- * @skb: Socket buffer.
- * @netdev: netdevice.
- *
- * Repost rcv buffers that have been returned to us when we are finished
- * with them.
- *
- * Return: 0 for success, negative integer on error.
- */
-static int repost_return(struct uiscmdrsp *cmdrsp,
-                        struct visornic_devdata *devdata,
-                        struct sk_buff *skb, struct net_device *netdev)
-{
-       struct net_pkt_rcv copy;
-       int i = 0, cc, numreposted;
-       int found_skb = 0;
-       int status = 0;
-
-       copy = cmdrsp->net.rcv;
-       switch (copy.numrcvbufs) {
-       case 0:
-               devdata->n_rcv0++;
-               break;
-       case 1:
-               devdata->n_rcv1++;
-               break;
-       case 2:
-               devdata->n_rcv2++;
-               break;
-       default:
-               devdata->n_rcvx++;
-               break;
-       }
-       for (cc = 0, numreposted = 0; cc < copy.numrcvbufs; cc++) {
-               for (i = 0; i < devdata->num_rcv_bufs; i++) {
-                       if (devdata->rcvbuf[i] != copy.rcvbuf[cc])
-                               continue;
-
-                       if ((skb) && devdata->rcvbuf[i] == skb) {
-                               devdata->found_repost_rcvbuf_cnt++;
-                               found_skb = 1;
-                               devdata->repost_found_skb_cnt++;
-                       }
-                       devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
-                       if (!devdata->rcvbuf[i]) {
-                               devdata->num_rcv_bufs_could_not_alloc++;
-                               devdata->alloc_failed_in_repost_rtn_cnt++;
-                               status = -ENOMEM;
-                               break;
-                       }
-                       status = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
-                       if (status) {
-                               kfree_skb(devdata->rcvbuf[i]);
-                               devdata->rcvbuf[i] = NULL;
-                               break;
-                       }
-                       numreposted++;
-                       break;
-               }
-       }
-       if (numreposted != copy.numrcvbufs) {
-               devdata->n_repost_deficit++;
-               status = -EINVAL;
-       }
-       if (skb) {
-               if (found_skb) {
-                       kfree_skb(skb);
-               } else {
-                       status = -EINVAL;
-                       devdata->bad_rcv_buf++;
-               }
-       }
-       return status;
-}
-
-/* visornic_rx - handle receive packets coming back from IO Partition
- * @cmdrsp: Receive packet returned from IO Partition.
- *
- * Got a receive packet back from the IO Partition; handle it and send it up
- * the stack.
- *
- * Return: 1 iff an skb was received, otherwise 0.
- */
-static int visornic_rx(struct uiscmdrsp *cmdrsp)
-{
-       struct visornic_devdata *devdata;
-       struct sk_buff *skb, *prev, *curr;
-       struct net_device *netdev;
-       int cc, currsize, off;
-       struct ethhdr *eth;
-       unsigned long flags;
-
-       /* post a new rcv buf to the other end using the cmdrsp we have at
-        * hand; post it without holding the lock, but use the signal lock to
-        * synchronize the queue insert. The cmdrsp that contains the net.rcv
-        * is the one we are using to repost, so copy the info we need from it.
-        */
-       skb = cmdrsp->net.buf;
-       netdev = skb->dev;
-
-       devdata = netdev_priv(netdev);
-
-       spin_lock_irqsave(&devdata->priv_lock, flags);
-       atomic_dec(&devdata->num_rcvbuf_in_iovm);
-
-       /* set length to how much was ACTUALLY received -
-        * NOTE: rcv_done_len includes actual length of data rcvd
-        * including ethhdr
-        */
-       skb->len = cmdrsp->net.rcv.rcv_done_len;
-
-       /* update rcv stats - call it with priv_lock held */
-       devdata->net_stats.rx_packets++;
-       devdata->net_stats.rx_bytes += skb->len;
-
-       /* test enabled while holding lock */
-       if (!(devdata->enabled && devdata->enab_dis_acked)) {
-               /* don't process it unless we're in enable mode and until
-                * we've gotten an ACK saying the other end got our RCV enable
-                */
-               spin_unlock_irqrestore(&devdata->priv_lock, flags);
-               repost_return(cmdrsp, devdata, skb, netdev);
-               return 0;
-       }
-
-       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
-       /* when the skb was allocated, skb->dev, skb->data, skb->len and
-        * skb->data_len were set up, AND the data had already been put into
-        * the skb (both the first frag and the frags pages).
-        * NOTE: firstfraglen is the amount of data in skb->data, i.e. that
-        * which is not in nr_frags or frag_list; this is now simply
-        * RCVPOST_BUF_SIZE. Bump tail to show how much data is in the
-        * first frag, set data_len to show the rest, and see if we have to
-        * chain frag_list.
-        */
-       /* do PRECAUTIONARY check */
-       if (skb->len > RCVPOST_BUF_SIZE) {
-               if (cmdrsp->net.rcv.numrcvbufs < 2) {
-                       if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
-                               dev_err(&devdata->netdev->dev,
-                                       "repost_return failed");
-                       return 0;
-               }
-               /* length rcvd is greater than firstfrag in this skb rcv buf  */
-               /* amount in skb->data */
-               skb->tail += RCVPOST_BUF_SIZE;
-               /* amount that will be in frag_list */
-               skb->data_len = skb->len - RCVPOST_BUF_SIZE;
-       } else {
-               /* data fits in this skb - no chaining - do
-                * PRECAUTIONARY check
-                */
-               /* should be 1 */
-               if (cmdrsp->net.rcv.numrcvbufs != 1) {
-                       if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
-                               dev_err(&devdata->netdev->dev,
-                                       "repost_return failed");
-                       return 0;
-               }
-               skb->tail += skb->len;
-               /* nothing rcvd in frag_list */
-               skb->data_len = 0;
-       }
-       off = skb_tail_pointer(skb) - skb->data;
-
-       /* off is the amount we bumped the tail by in the head skb; it is used
-        * to calculate the size of each chained skb below, and also to index
-        * into bufline to continue the copy (for chansocktwopc).
-        * If necessary, chain the rcv skbs together.
-        * NOTE: index 0 holds the same skb as cmdrsp->net.rcv.skb; we need to
-        * chain the rest to that one.
-        * - do PRECAUTIONARY check
-        */
-       if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
-               if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
-                       dev_err(&devdata->netdev->dev, "repost_return failed");
-               return 0;
-       }
-
-       if (cmdrsp->net.rcv.numrcvbufs > 1) {
-               /* chain the various rcv buffers into the skb's frag_list. */
-               /* Note: off was initialized above  */
-               for (cc = 1, prev = NULL;
-                    cc < cmdrsp->net.rcv.numrcvbufs; cc++) {
-                       curr = (struct sk_buff *)cmdrsp->net.rcv.rcvbuf[cc];
-                       curr->next = NULL;
-                       /* start of list- set head */
-                       if (!prev)
-                               skb_shinfo(skb)->frag_list = curr;
-                       else
-                               prev->next = curr;
-                       prev = curr;
-
-                       /* should we set skb->len and skb->data_len for each
-                        * buffer being chained??? can't hurt!
-                        */
-                       currsize = min(skb->len - off,
-                                      (unsigned int)RCVPOST_BUF_SIZE);
-                       curr->len = currsize;
-                       curr->tail += currsize;
-                       curr->data_len = 0;
-                       off += currsize;
-               }
-               /* assert skb->len == off */
-               if (skb->len != off) {
-                       netdev_err(devdata->netdev,
-                                  "something wrong; skb->len:%d != off:%d\n",
-                                  skb->len, off);
-               }
-       }
-
-       /* set up packet's protocol type using ethernet header - this
-        * sets up skb->pkt_type & it also PULLS out the eth header
-        */
-       skb->protocol = eth_type_trans(skb, netdev);
-       eth = eth_hdr(skb);
-       skb->csum = 0;
-       skb->ip_summed = CHECKSUM_NONE;
-
-       do {
-               /* accept all packets */
-               if (netdev->flags & IFF_PROMISC)
-                       break;
-               if (skb->pkt_type == PACKET_BROADCAST) {
-                       /* accept all broadcast packets */
-                       if (netdev->flags & IFF_BROADCAST)
-                               break;
-               } else if (skb->pkt_type == PACKET_MULTICAST) {
-                       if ((netdev->flags & IFF_MULTICAST) &&
-                           (netdev_mc_count(netdev))) {
-                               struct netdev_hw_addr *ha;
-                               int found_mc = 0;
-
-                               /* only accept multicast packets that we can
-                                * find in our multicast address list
-                                */
-                               netdev_for_each_mc_addr(ha, netdev) {
-                                       if (ether_addr_equal(eth->h_dest,
-                                                            ha->addr)) {
-                                               found_mc = 1;
-                                               break;
-                                       }
-                               }
-                               /* accept pkt, dest matches a multicast addr */
-                               if (found_mc)
-                                       break;
-                       }
-               /* accept packet, h_dest must match vnic  mac address */
-               } else if (skb->pkt_type == PACKET_HOST) {
-                       break;
-               } else if (skb->pkt_type == PACKET_OTHERHOST) {
-                       /* something is not right */
-                       dev_err(&devdata->netdev->dev,
-                               "**** FAILED to deliver rcv packet to OS; name:%s Dest:%pM VNIC:%pM\n",
-                               netdev->name, eth->h_dest, netdev->dev_addr);
-               }
-               /* drop packet - don't forward it up to OS */
-               devdata->n_rcv_packets_not_accepted++;
-               repost_return(cmdrsp, devdata, skb, netdev);
-               return 0;
-       } while (0);
-
-       netif_receive_skb(skb);
-       /* netif_rx returns various values, but "in practice most drivers
-       /* netif_receive_skb returns various values, but "in practice most
-        * drivers ignore the return value".
-        */
-       skb = NULL;
-       /* whether the packet got dropped or handled, the skb is freed by
-        * kernel code, so we shouldn't free it. but we should repost a
-        * new rcv buffer.
-        */
-       repost_return(cmdrsp, devdata, skb, netdev);
-       return 1;
-}
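
When a received packet is larger than one receive buffer, visornic_rx() above
keeps the first RCVPOST_BUF_SIZE bytes in the head skb and chains the
remaining buffers onto frag_list, each holding min(remaining,
RCVPOST_BUF_SIZE) bytes. The standalone sketch below only reproduces that
size bookkeeping; the RCVPOST_BUF_SIZE value and the packet length are
assumptions for illustration, not values from the driver.

#include <stdio.h>

#define RCVPOST_BUF_SIZE 2048u /* assumed buffer size, for illustration only */

int main(void)
{
        unsigned int rcv_done_len = 5000; /* hypothetical received packet length */
        unsigned int off = 0, idx = 0;

        while (off < rcv_done_len) {
                unsigned int chunk = rcv_done_len - off;

                if (chunk > RCVPOST_BUF_SIZE)
                        chunk = RCVPOST_BUF_SIZE;
                /* rcvbuf[0] is the head skb; the rest are chained on frag_list */
                printf("rcvbuf[%u]: %u bytes\n", idx++, chunk);
                off += chunk;
        }
        return 0;
}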
-
-/* devdata_initialize - initialize devdata structure
- * @devdata: visornic_devdata structure to initialize.
- * @dev:     visorbus_device it belongs to.
- *
- * Setup initial values for the visornic, based on channel and default values.
- *
- * Return: A pointer to the devdata structure.
- */
-static struct visornic_devdata *devdata_initialize(
-                                       struct visornic_devdata *devdata,
-                                       struct visor_device *dev)
-{
-       devdata->dev = dev;
-       devdata->incarnation_id = get_jiffies_64();
-       return devdata;
-}
-
-/* devdata_release - free up references in devdata
- * @devdata: Struct to clean up.
- */
-static void devdata_release(struct visornic_devdata *devdata)
-{
-       kfree(devdata->rcvbuf);
-       kfree(devdata->cmdrsp_rcv);
-       kfree(devdata->xmit_cmdrsp);
-}
-
-static const struct net_device_ops visornic_dev_ops = {
-       .ndo_open = visornic_open,
-       .ndo_stop = visornic_close,
-       .ndo_start_xmit = visornic_xmit,
-       .ndo_get_stats = visornic_get_stats,
-       .ndo_change_mtu = visornic_change_mtu,
-       .ndo_tx_timeout = visornic_xmit_timeout,
-       .ndo_set_rx_mode = visornic_set_multi,
-};
-
-/* DebugFS code */
-static ssize_t info_debugfs_read(struct file *file, char __user *buf,
-                                size_t len, loff_t *offset)
-{
-       ssize_t bytes_read = 0;
-       int str_pos = 0;
-       struct visornic_devdata *devdata;
-       struct net_device *dev;
-       char *vbuf;
-
-       if (len > MAX_BUF)
-               len = MAX_BUF;
-       vbuf = kzalloc(len, GFP_KERNEL);
-       if (!vbuf)
-               return -ENOMEM;
-
-       /* for each vnic channel dump out channel specific data */
-       rcu_read_lock();
-       for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
-               /* Only consider netdevs that are visornic, and are open */
-               if (dev->netdev_ops != &visornic_dev_ops ||
-                   (!netif_queue_stopped(dev)))
-                       continue;
-
-               devdata = netdev_priv(dev);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    "netdev = %s (0x%p), MAC Addr %pM\n",
-                                    dev->name,
-                                    dev,
-                                    dev->dev_addr);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    "VisorNic Dev Info = 0x%p\n", devdata);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " num_rcv_bufs = %d\n",
-                                    devdata->num_rcv_bufs);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " max_outstanding_net_xmits = %lu\n",
-                                    devdata->max_outstanding_net_xmits);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " upper_threshold_net_xmits = %lu\n",
-                                    devdata->upper_threshold_net_xmits);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " lower_threshold_net_xmits = %lu\n",
-                                    devdata->lower_threshold_net_xmits);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " queuefullmsg_logged = %d\n",
-                                    devdata->queuefullmsg_logged);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " chstat.got_rcv = %lu\n",
-                                    devdata->chstat.got_rcv);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " chstat.got_enbdisack = %lu\n",
-                                    devdata->chstat.got_enbdisack);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " chstat.got_xmit_done = %lu\n",
-                                    devdata->chstat.got_xmit_done);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " chstat.xmit_fail = %lu\n",
-                                    devdata->chstat.xmit_fail);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " chstat.sent_enbdis = %lu\n",
-                                    devdata->chstat.sent_enbdis);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " chstat.sent_promisc = %lu\n",
-                                    devdata->chstat.sent_promisc);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " chstat.sent_post = %lu\n",
-                                    devdata->chstat.sent_post);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " chstat.sent_post_failed = %lu\n",
-                                    devdata->chstat.sent_post_failed);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " chstat.sent_xmit = %lu\n",
-                                    devdata->chstat.sent_xmit);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " chstat.reject_count = %lu\n",
-                                    devdata->chstat.reject_count);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " chstat.extra_rcvbufs_sent = %lu\n",
-                                    devdata->chstat.extra_rcvbufs_sent);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " n_rcv0 = %lu\n", devdata->n_rcv0);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " n_rcv1 = %lu\n", devdata->n_rcv1);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " n_rcv2 = %lu\n", devdata->n_rcv2);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " n_rcvx = %lu\n", devdata->n_rcvx);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " num_rcvbuf_in_iovm = %d\n",
-                                    atomic_read(&devdata->num_rcvbuf_in_iovm));
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " alloc_failed_in_if_needed_cnt = %lu\n",
-                                    devdata->alloc_failed_in_if_needed_cnt);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " alloc_failed_in_repost_rtn_cnt = %lu\n",
-                                    devdata->alloc_failed_in_repost_rtn_cnt);
-               /* str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                *                   " inner_loop_limit_reached_cnt = %lu\n",
-                *                   devdata->inner_loop_limit_reached_cnt);
-                */
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " found_repost_rcvbuf_cnt = %lu\n",
-                                    devdata->found_repost_rcvbuf_cnt);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " repost_found_skb_cnt = %lu\n",
-                                    devdata->repost_found_skb_cnt);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " n_repost_deficit = %lu\n",
-                                    devdata->n_repost_deficit);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " bad_rcv_buf = %lu\n",
-                                    devdata->bad_rcv_buf);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " n_rcv_packets_not_accepted = %lu\n",
-                                    devdata->n_rcv_packets_not_accepted);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " interrupts_rcvd = %llu\n",
-                                    devdata->interrupts_rcvd);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " interrupts_notme = %llu\n",
-                                    devdata->interrupts_notme);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " interrupts_disabled = %llu\n",
-                                    devdata->interrupts_disabled);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " busy_cnt = %llu\n",
-                                    devdata->busy_cnt);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " flow_control_upper_hits = %llu\n",
-                                    devdata->flow_control_upper_hits);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " flow_control_lower_hits = %llu\n",
-                                    devdata->flow_control_lower_hits);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " netif_queue = %s\n",
-                                    netif_queue_stopped(devdata->netdev) ?
-                                    "stopped" : "running");
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                                    " xmits_outstanding = %lu\n",
-                                    devdata_xmits_outstanding(devdata));
-       }
-       rcu_read_unlock();
-       bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
-       kfree(vbuf);
-       return bytes_read;
-}
-
-static struct dentry *visornic_debugfs_dir;
-static const struct file_operations debugfs_info_fops = {
-       .read = info_debugfs_read,
-};
-
-/* send_rcv_posts_if_needed - send receive buffers to the IO Partition.
- * @devdata: Visornic device.
- */
-static void send_rcv_posts_if_needed(struct visornic_devdata *devdata)
-{
-       int i;
-       struct net_device *netdev;
-       struct uiscmdrsp *cmdrsp = devdata->cmdrsp_rcv;
-       int cur_num_rcv_bufs_to_alloc, rcv_bufs_allocated;
-       int err;
-
-       /* don't do this until vnic is marked ready */
-       if (!(devdata->enabled && devdata->enab_dis_acked))
-               return;
-
-       netdev = devdata->netdev;
-       rcv_bufs_allocated = 0;
-       /* this code tries to prevent getting stuck here forever, while
-        * still retrying later if we can't allocate them all this time.
-        */
-       cur_num_rcv_bufs_to_alloc = devdata->num_rcv_bufs_could_not_alloc;
-       while (cur_num_rcv_bufs_to_alloc > 0) {
-               cur_num_rcv_bufs_to_alloc--;
-               for (i = 0; i < devdata->num_rcv_bufs; i++) {
-                       if (devdata->rcvbuf[i])
-                               continue;
-                       devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
-                       if (!devdata->rcvbuf[i]) {
-                               devdata->alloc_failed_in_if_needed_cnt++;
-                               break;
-                       }
-                       rcv_bufs_allocated++;
-                       err = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
-                       if (err) {
-                               kfree_skb(devdata->rcvbuf[i]);
-                               devdata->rcvbuf[i] = NULL;
-                               break;
-                       }
-                       devdata->chstat.extra_rcvbufs_sent++;
-               }
-       }
-       devdata->num_rcv_bufs_could_not_alloc -= rcv_bufs_allocated;
-}
-
-/* drain_resp_queue - drains and ignores all messages from the resp queue
- * @cmdrsp:  IO channel command response message.
- * @devdata: Visornic device to drain.
- */
-static void drain_resp_queue(struct uiscmdrsp *cmdrsp,
-                            struct visornic_devdata *devdata)
-{
-       while (!visorchannel_signalremove(devdata->dev->visorchannel,
-                                         IOCHAN_FROM_IOPART,
-                                         cmdrsp))
-               ;
-}
-
-/* service_resp_queue - drain the response queue
- * @cmdrsp:  IO channel command response message.
- * @devdata: Visornic device to drain.
- * @rx_work_done: Incremented for each receive packet that is processed.
- * @budget:       NAPI budget; maximum number of receive packets to process.
- *
- * Drain the response queue of any responses from the IO Partition. Process the
- * responses as we get them.
- */
-static void service_resp_queue(struct uiscmdrsp *cmdrsp,
-                              struct visornic_devdata *devdata,
-                              int *rx_work_done, int budget)
-{
-       unsigned long flags;
-       struct net_device *netdev;
-
-       while (*rx_work_done < budget) {
-               /* TODO: CLIENT ACQUIRE -- Don't really need this at the
-                * moment
-                */
-               /* queue empty */
-               if (visorchannel_signalremove(devdata->dev->visorchannel,
-                                             IOCHAN_FROM_IOPART,
-                                             cmdrsp))
-                       break;
-
-               switch (cmdrsp->net.type) {
-               case NET_RCV:
-                       devdata->chstat.got_rcv++;
-                       /* process incoming packet */
-                       *rx_work_done += visornic_rx(cmdrsp);
-                       break;
-               case NET_XMIT_DONE:
-                       spin_lock_irqsave(&devdata->priv_lock, flags);
-                       devdata->chstat.got_xmit_done++;
-                       if (cmdrsp->net.xmtdone.xmt_done_result)
-                               devdata->chstat.xmit_fail++;
-                       /* only call queue wake if we stopped it */
-                       netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
-                       /* ASSERT netdev == vnicinfo->netdev; */
-                       if (netdev == devdata->netdev &&
-                           netif_queue_stopped(netdev)) {
-                               /* check if we have crossed the lower watermark
-                                * for netif_wake_queue()
-                                */
-                               if (vnic_hit_low_watermark
-                                   (devdata,
-                                    devdata->lower_threshold_net_xmits)) {
-                                       /* enough NET_XMITs completed
-                                        * so can restart netif queue
-                                        */
-                                       netif_wake_queue(netdev);
-                                       devdata->flow_control_lower_hits++;
-                               }
-                       }
-                       skb_unlink(cmdrsp->net.buf, &devdata->xmitbufhead);
-                       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-                       kfree_skb(cmdrsp->net.buf);
-                       break;
-               case NET_RCV_ENBDIS_ACK:
-                       devdata->chstat.got_enbdisack++;
-                       netdev = (struct net_device *)
-                                cmdrsp->net.enbdis.context;
-                       spin_lock_irqsave(&devdata->priv_lock, flags);
-                       devdata->enab_dis_acked = 1;
-                       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
-                       if (devdata->server_down &&
-                           devdata->server_change_state) {
-                               /* Inform Linux that the link is up */
-                               devdata->server_down = false;
-                               devdata->server_change_state = false;
-                               netif_wake_queue(netdev);
-                               netif_carrier_on(netdev);
-                       }
-                       break;
-               case NET_CONNECT_STATUS:
-                       netdev = devdata->netdev;
-                       if (cmdrsp->net.enbdis.enable == 1) {
-                               spin_lock_irqsave(&devdata->priv_lock, flags);
-                               devdata->enabled = cmdrsp->net.enbdis.enable;
-                               spin_unlock_irqrestore(&devdata->priv_lock,
-                                                      flags);
-                               netif_wake_queue(netdev);
-                               netif_carrier_on(netdev);
-                       } else {
-                               netif_stop_queue(netdev);
-                               netif_carrier_off(netdev);
-                               spin_lock_irqsave(&devdata->priv_lock, flags);
-                               devdata->enabled = cmdrsp->net.enbdis.enable;
-                               spin_unlock_irqrestore(&devdata->priv_lock,
-                                                      flags);
-                       }
-                       break;
-               default:
-                       break;
-               }
-               /* cmdrsp is now available for reuse  */
-       }
-}
-
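-/* visornic_poll - NAPI poll routine for the visornic device
- * @napi:   NAPI instance embedded in our visornic_devdata.
- * @budget: Maximum number of receive packets to process in this poll.
- *
- * Post any receive buffers that still need posting, then drain the response
- * queue from the IO Partition.
- *
- * Return: Number of receive packets processed.
- */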
-static int visornic_poll(struct napi_struct *napi, int budget)
-{
-       struct visornic_devdata *devdata = container_of(napi,
-                                                       struct visornic_devdata,
-                                                       napi);
-       int rx_count = 0;
-
-       send_rcv_posts_if_needed(devdata);
-       service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);
-
-       /* If there aren't any more packets to receive stop the poll */
-       if (rx_count < budget)
-               napi_complete_done(napi, rx_count);
-
-       return rx_count;
-}
-
-/* visornic_channel_interrupt - checks the status of the response queue
- * @dev: visor device the channel interrupt is for.
- *
- * Check the response queue, and schedule NAPI to drain it if it is not empty.
- */
-static void visornic_channel_interrupt(struct visor_device *dev)
-{
-       struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
-
-       if (!devdata)
-               return;
-
-       if (!visorchannel_signalempty(devdata->dev->visorchannel,
-                                     IOCHAN_FROM_IOPART))
-               napi_schedule(&devdata->napi);
-
-       atomic_set(&devdata->interrupt_rcvd, 0);
-}
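
The interrupt handler above never touches packets itself; it only schedules NAPI, and visornic_poll() then drains up to the budget and calls napi_complete_done() once the queue under-runs it. A minimal sketch of that contract, using hypothetical names that are not part of this driver:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    struct demo_priv {
            struct napi_struct napi;
            struct net_device *netdev;
    };

    /* Hard-IRQ (or bus callback) side: do no packet work here, only
     * hand off to the softirq poller.
     */
    static irqreturn_t demo_isr(int irq, void *data)
    {
            struct demo_priv *priv = data;

            napi_schedule(&priv->napi);
            return IRQ_HANDLED;
    }

    /* Poll side: process at most @budget frames, and stop polling only
     * when the queue ran dry before the budget was exhausted.
     */
    static int demo_poll(struct napi_struct *napi, int budget)
    {
            int done = 0;

            /* ... receive up to @budget frames, incrementing done ... */

            if (done < budget)
                    napi_complete_done(napi, done);

            return done;
    }

Registration would pair with netif_napi_add(), as the probe code below does for visornic_poll().
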
-
-/* visornic_probe - probe function for visornic devices
- * @dev: The visor device discovered.
- *
- * Called when visorbus discovers a visornic device on its bus. It creates a new
- * visornic ethernet adapter.
- *
- * Return: 0 on success, or negative integer on error.
- */
-static int visornic_probe(struct visor_device *dev)
-{
-       struct visornic_devdata *devdata = NULL;
-       struct net_device *netdev = NULL;
-       int err;
-       int channel_offset = 0;
-       u8 addr[ETH_ALEN];
-       u64 features;
-
-       netdev = alloc_etherdev(sizeof(struct visornic_devdata));
-       if (!netdev) {
-               dev_err(&dev->device,
-                       "%s alloc_etherdev failed\n", __func__);
-               return -ENOMEM;
-       }
-
-       netdev->netdev_ops = &visornic_dev_ops;
-       netdev->watchdog_timeo = 5 * HZ;
-       SET_NETDEV_DEV(netdev, &dev->device);
-
-       /* Get MAC address from channel and read it into the device. */
-       netdev->addr_len = ETH_ALEN;
-       channel_offset = offsetof(struct visor_io_channel, vnic.macaddr);
-       err = visorbus_read_channel(dev, channel_offset, addr, ETH_ALEN);
-       if (err < 0) {
-               dev_err(&dev->device,
-                       "%s failed to get mac addr from chan (%d)\n",
-                       __func__, err);
-               goto cleanup_netdev;
-       }
-       eth_hw_addr_set(netdev, addr);
-
-       devdata = devdata_initialize(netdev_priv(netdev), dev);
-       if (!devdata) {
-               dev_err(&dev->device,
-                       "%s devdata_initialize failed\n", __func__);
-               err = -ENOMEM;
-               goto cleanup_netdev;
-       }
-       /* don't trust messages lying around in the channel */
-       drain_resp_queue(devdata->cmdrsp, devdata);
-
-       devdata->netdev = netdev;
-       dev_set_drvdata(&dev->device, devdata);
-       init_waitqueue_head(&devdata->rsp_queue);
-       spin_lock_init(&devdata->priv_lock);
-       /* not yet */
-       devdata->enabled = 0;
-       atomic_set(&devdata->usage, 1);
-
-       /* Setup rcv bufs */
-       channel_offset = offsetof(struct visor_io_channel, vnic.num_rcv_bufs);
-       err = visorbus_read_channel(dev, channel_offset,
-                                   &devdata->num_rcv_bufs, 4);
-       if (err) {
-               dev_err(&dev->device,
-                       "%s failed to get #rcv bufs from chan (%d)\n",
-                       __func__, err);
-               goto cleanup_netdev;
-       }
-
-       devdata->rcvbuf = kcalloc(devdata->num_rcv_bufs,
-                                 sizeof(struct sk_buff *), GFP_KERNEL);
-       if (!devdata->rcvbuf) {
-               err = -ENOMEM;
-               goto cleanup_netdev;
-       }
-
-       /* Set the net_xmit outstanding threshold.
-        * Always leave two slots open, but keep a minimum of 3.
-        * Note that max_outstanding_net_xmits must be > 0.
-        */
-       devdata->max_outstanding_net_xmits =
-               max_t(unsigned long, 3, ((devdata->num_rcv_bufs / 3) - 2));
-       devdata->upper_threshold_net_xmits =
-               max_t(unsigned long,
-                     2, (devdata->max_outstanding_net_xmits - 1));
-       devdata->lower_threshold_net_xmits =
-               max_t(unsigned long,
-                     1, (devdata->max_outstanding_net_xmits / 2));
-
-       skb_queue_head_init(&devdata->xmitbufhead);
-
-       /* create a cmdrsp we can use to post and unpost rcv buffers */
-       devdata->cmdrsp_rcv = kmalloc(SIZEOF_CMDRSP, GFP_KERNEL);
-       if (!devdata->cmdrsp_rcv) {
-               err = -ENOMEM;
-               goto cleanup_rcvbuf;
-       }
-       devdata->xmit_cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_KERNEL);
-       if (!devdata->xmit_cmdrsp) {
-               err = -ENOMEM;
-               goto cleanup_cmdrsp_rcv;
-       }
-       INIT_WORK(&devdata->timeout_reset, visornic_timeout_reset);
-       devdata->server_down = false;
-       devdata->server_change_state = false;
-
-       /* set the default mtu */
-       channel_offset = offsetof(struct visor_io_channel, vnic.mtu);
-       err = visorbus_read_channel(dev, channel_offset, &netdev->mtu, 4);
-       if (err) {
-               dev_err(&dev->device,
-                       "%s failed to get mtu from chan (%d)\n",
-                       __func__, err);
-               goto cleanup_xmit_cmdrsp;
-       }
-
-       /* TODO: Setup Interrupt information */
-       /* Let's start our threads to get responses */
-       netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
-
-       channel_offset = offsetof(struct visor_io_channel,
-                                 channel_header.features);
-       err = visorbus_read_channel(dev, channel_offset, &features, 8);
-       if (err) {
-               dev_err(&dev->device,
-                       "%s failed to get features from chan (%d)\n",
-                       __func__, err);
-               goto cleanup_napi_add;
-       }
-
-       features |= VISOR_CHANNEL_IS_POLLING;
-       features |= VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING;
-       err = visorbus_write_channel(dev, channel_offset, &features, 8);
-       if (err) {
-               dev_err(&dev->device,
-                       "%s failed to set features in chan (%d)\n",
-                       __func__, err);
-               goto cleanup_napi_add;
-       }
-
-       /* Note: Interrupts have to be enabled before the while
-        * loop below because the napi routine is responsible for
-        * setting enab_dis_acked
-        */
-       visorbus_enable_channel_interrupts(dev);
-
-       err = register_netdev(netdev);
-       if (err) {
-               dev_err(&dev->device,
-                       "%s register_netdev failed (%d)\n", __func__, err);
-               goto cleanup_napi_add;
-       }
-
-       /* create debug/sysfs directories */
-       devdata->eth_debugfs_dir = debugfs_create_dir(netdev->name,
-                                                     visornic_debugfs_dir);
-       if (!devdata->eth_debugfs_dir) {
-               dev_err(&dev->device,
-                       "%s debugfs_create_dir %s failed\n",
-                       __func__, netdev->name);
-               err = -ENOMEM;
-               goto cleanup_register_netdev;
-       }
-
-       dev_info(&dev->device, "%s success netdev=%s\n",
-                __func__, netdev->name);
-       return 0;
-
-cleanup_register_netdev:
-       unregister_netdev(netdev);
-
-cleanup_napi_add:
-       visorbus_disable_channel_interrupts(dev);
-       netif_napi_del(&devdata->napi);
-
-cleanup_xmit_cmdrsp:
-       kfree(devdata->xmit_cmdrsp);
-
-cleanup_cmdrsp_rcv:
-       kfree(devdata->cmdrsp_rcv);
-
-cleanup_rcvbuf:
-       kfree(devdata->rcvbuf);
-
-cleanup_netdev:
-       free_netdev(netdev);
-       return err;
-}
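
The xmit-threshold comment in the probe above is easier to check with numbers plugged in. A hedged worked example of the same formulas (the buffer counts of 60 and 6 are made up):

    #include <linux/minmax.h>

    /* With num_rcv_bufs = 60 (hypothetical):
     *   max_outstanding = max(3, 60 / 3 - 2) = 18
     *   upper threshold = max(2, 18 - 1)     = 17
     *   lower threshold = max(1, 18 / 2)     = 9
     * With a tiny num_rcv_bufs such as 6, the clamps take over:
     *   max_outstanding = 3, upper = 2, lower = 1
     */
    static void demo_xmit_thresholds(unsigned long num_rcv_bufs,
                                     unsigned long *max_out,
                                     unsigned long *upper,
                                     unsigned long *lower)
    {
            *max_out = max_t(unsigned long, 3, (num_rcv_bufs / 3) - 2);
            *upper = max_t(unsigned long, 2, *max_out - 1);
            *lower = max_t(unsigned long, 1, *max_out / 2);
    }
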
-
-/* host_side_disappeared - IO Partition is gone
- * @devdata: Device object.
- *
- * IO partition servicing this device is gone; do cleanup.
- */
-static void host_side_disappeared(struct visornic_devdata *devdata)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&devdata->priv_lock, flags);
-       /* indicate device destroyed */
-       devdata->dev = NULL;
-       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-}
-
-/* visornic_remove - called when visornic dev goes away
- * @dev: Visornic device that is being removed.
- *
- * Called when DEVICE_DESTROY gets called to remove device.
- */
-static void visornic_remove(struct visor_device *dev)
-{
-       struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
-       struct net_device *netdev;
-       unsigned long flags;
-
-       if (!devdata) {
-               dev_err(&dev->device, "%s no devdata\n", __func__);
-               return;
-       }
-       spin_lock_irqsave(&devdata->priv_lock, flags);
-       if (devdata->going_away) {
-               spin_unlock_irqrestore(&devdata->priv_lock, flags);
-               dev_err(&dev->device, "%s already being removed\n", __func__);
-               return;
-       }
-       devdata->going_away = true;
-       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-       netdev = devdata->netdev;
-       if (!netdev) {
-               dev_err(&dev->device, "%s not net device\n", __func__);
-               return;
-       }
-
-       /* going_away prevents new items being added to the workqueues */
-       cancel_work_sync(&devdata->timeout_reset);
-
-       debugfs_remove_recursive(devdata->eth_debugfs_dir);
-       /* this will call visornic_close() */
-       unregister_netdev(netdev);
-
-       visorbus_disable_channel_interrupts(devdata->dev);
-       netif_napi_del(&devdata->napi);
-
-       dev_set_drvdata(&dev->device, NULL);
-       host_side_disappeared(devdata);
-       devdata_release(devdata);
-       free_netdev(netdev);
-}
-
-/* visornic_pause - called when IO Part disappears
- * @dev:          Visornic device that is being serviced.
- * @complete_func: Call when finished.
- *
- * Called when the IO Partition has gone down. Need to free up resources and
- * wait for IO partition to come back. Mark link as down and don't attempt any
- * DMA. When we have freed memory, call the complete_func so that Command knows
- * we are done. If we don't call complete_func, the IO Partition will never
- * come back.
- *
- * Return: 0 on success.
- */
-static int visornic_pause(struct visor_device *dev,
-                         visorbus_state_complete_func complete_func)
-{
-       struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
-
-       visornic_serverdown(devdata, complete_func);
-       return 0;
-}
-
-/* visornic_resume - called when IO Partition has recovered
- * @dev:          Visornic device that is being serviced.
- * @complete_func: Call when finished.
- *
- * Called when the IO partition has recovered. Re-establish connection to the IO
- * Partition and set the link up. Okay to do DMA again.
- *
- * Return: 0 on success, negative integer on error.
- */
-static int visornic_resume(struct visor_device *dev,
-                          visorbus_state_complete_func complete_func)
-{
-       struct visornic_devdata *devdata;
-       struct net_device *netdev;
-       unsigned long flags;
-
-       devdata = dev_get_drvdata(&dev->device);
-       if (!devdata) {
-               dev_err(&dev->device, "%s no devdata\n", __func__);
-               return -EINVAL;
-       }
-
-       netdev = devdata->netdev;
-
-       spin_lock_irqsave(&devdata->priv_lock, flags);
-       if (devdata->server_change_state) {
-               spin_unlock_irqrestore(&devdata->priv_lock, flags);
-               dev_err(&dev->device, "%s server already changing state\n",
-                       __func__);
-               return -EINVAL;
-       }
-       if (!devdata->server_down) {
-               spin_unlock_irqrestore(&devdata->priv_lock, flags);
-               dev_err(&dev->device, "%s server not down\n", __func__);
-               complete_func(dev, 0);
-               return 0;
-       }
-       devdata->server_change_state = true;
-       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
-       /* Must transition channel to ATTACHED state BEFORE
-        * we can start using the device again.
-        * TODO: State transitions
-        */
-       visorbus_enable_channel_interrupts(dev);
-
-       rtnl_lock();
-       dev_open(netdev, NULL);
-       rtnl_unlock();
-
-       complete_func(dev, 0);
-       return 0;
-}
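
Reopening the interface from the resume path has to happen under the RTNL lock, which is why the code above wraps dev_open() in rtnl_lock()/rtnl_unlock(). A minimal sketch of that pattern on its own, with a hypothetical helper name:

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    /* Bring a registered net_device back up from driver context.
     * dev_open() invokes the driver's ndo_open callback and must be
     * called with the RTNL lock held; the NULL extack just skips
     * extended error reporting.
     */
    static int demo_bring_up(struct net_device *netdev)
    {
            int err;

            rtnl_lock();
            err = dev_open(netdev, NULL);
            rtnl_unlock();

            return err;
    }
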
-
-/* This is used to tell the visorbus driver which types of visor devices
- * we support, and what functions to call when a visor device that we support
- * is attached or removed.
- */
-static struct visor_driver visornic_driver = {
-       .name = "visornic",
-       .owner = THIS_MODULE,
-       .channel_types = visornic_channel_types,
-       .probe = visornic_probe,
-       .remove = visornic_remove,
-       .pause = visornic_pause,
-       .resume = visornic_resume,
-       .channel_interrupt = visornic_channel_interrupt,
-};
-
-/* visornic_init - init function
- *
- * Init function for the visornic driver. Do initial driver setup and wait
- * for devices.
- *
- * Return: 0 on success, negative integer on error.
- */
-static int visornic_init(void)
-{
-       int err;
-
-       visornic_debugfs_dir = debugfs_create_dir("visornic", NULL);
-
-       debugfs_create_file("info", 0400, visornic_debugfs_dir, NULL,
-                           &debugfs_info_fops);
-       debugfs_create_file("enable_ints", 0200, visornic_debugfs_dir, NULL,
-                           &debugfs_enable_ints_fops);
-
-       err = visorbus_register_visor_driver(&visornic_driver);
-       if (err)
-               debugfs_remove_recursive(visornic_debugfs_dir);
-
-       return err;
-}
-
-/* visornic_cleanup - driver exit routine
- *
- * Unregister driver from the bus and free up memory.
- */
-static void visornic_cleanup(void)
-{
-       visorbus_unregister_visor_driver(&visornic_driver);
-       debugfs_remove_recursive(visornic_debugfs_dir);
-}
-
-module_init(visornic_init);
-module_exit(visornic_cleanup);
-
-MODULE_AUTHOR("Unisys");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("s-Par NIC driver for virtual network devices");
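
visornic_init() and visornic_cleanup() above follow the common register-then-unwind shape: create the debugfs directory, register with the bus, and tear the directory down again if registration fails (and on exit, in reverse order). A hedged skeleton of that lifecycle with stand-in registration hooks:

    #include <linux/module.h>
    #include <linux/debugfs.h>

    static struct dentry *demo_debugfs_dir;

    /* Stand-ins for the real bus registration calls. */
    static int demo_register_driver(void)
    {
            return 0;
    }

    static void demo_unregister_driver(void)
    {
    }

    static int __init demo_init(void)
    {
            int err;

            /* debugfs_create_dir() returns an ERR_PTR on failure; later
             * debugfs calls tolerate that, so no explicit check is needed.
             */
            demo_debugfs_dir = debugfs_create_dir("demo", NULL);

            err = demo_register_driver();
            if (err)
                    debugfs_remove_recursive(demo_debugfs_dir);
            return err;
    }

    static void __exit demo_exit(void)
    {
            demo_unregister_driver();
            debugfs_remove_recursive(demo_debugfs_dir);
    }

    module_init(demo_init);
    module_exit(demo_exit);

    MODULE_DESCRIPTION("Skeleton of the init/cleanup pattern above");
    MODULE_LICENSE("GPL");
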
index cb7c824..31e58c9 100644 (file)
@@ -13,6 +13,7 @@ if BCM_VIDEOCORE
 
 config BCM2835_VCHIQ
        tristate "BCM2835 VCHIQ"
+       depends on HAS_DMA
        imply VCHIQ_CDEV
        help
                Broadcom BCM2835 and similar SoCs have a VPU called VideoCore. This config
index d32ea34..7f22f6c 100644 (file)
@@ -3,7 +3,9 @@ config SND_BCM2835
        tristate "BCM2835 Audio"
        depends on (ARCH_BCM2835 || COMPILE_TEST) && SND
        select SND_PCM
-       select BCM2835_VCHIQ
+       select BCM2835_VCHIQ if HAS_DMA
        help
-         Say Y or M if you want to support BCM2835 built in audio
-
+         Say Y or M if you want to support BCM2835 built-in audio.
+         This driver handles both 3.5mm and HDMI audio by leveraging
+         the VCHIQ messaging interface between the kernel and the firmware
+         running on VideoCore.
\ No newline at end of file
diff --git a/drivers/staging/vc04_services/bcm2835-audio/TODO b/drivers/staging/vc04_services/bcm2835-audio/TODO
deleted file mode 100644 (file)
index b854512..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-*****************************************************************************
-*                                                                           *
-*                           TODO: BCM2835-AUDIO                             *
-*                                                                           *
-*****************************************************************************
-
-1) Revisit multi-cards options and PCM route mixer control (as per comment
-https://lore.kernel.org/lkml/s5hd0to5598.wl-tiwai@suse.de)
-
-2) Fix the remaining checkpatch.pl errors and warnings.
index 3703409..1c1f040 100644 (file)
@@ -117,15 +117,6 @@ static const struct snd_kcontrol_new snd_bcm2835_ctl[] = {
                .get = snd_bcm2835_ctl_get,
                .put = snd_bcm2835_ctl_put,
        },
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "PCM Playback Route",
-               .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
-               .private_value = PCM_PLAYBACK_DEVICE,
-               .info = snd_bcm2835_ctl_info,
-               .get = snd_bcm2835_ctl_get,
-               .put = snd_bcm2835_ctl_put,
-       },
 };
 
 static int snd_bcm2835_spdif_default_info(struct snd_kcontrol *kcontrol,
@@ -220,7 +211,14 @@ static int create_ctls(struct bcm2835_chip *chip, size_t size,
        return 0;
 }
 
-int snd_bcm2835_new_ctl(struct bcm2835_chip *chip)
+int snd_bcm2835_new_headphones_ctl(struct bcm2835_chip *chip)
+{
+       strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
+       return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_ctl),
+                          snd_bcm2835_ctl);
+}
+
+int snd_bcm2835_new_hdmi_ctl(struct bcm2835_chip *chip)
 {
        int err;
 
@@ -232,71 +230,3 @@ int snd_bcm2835_new_ctl(struct bcm2835_chip *chip)
                           snd_bcm2835_spdif);
 }
 
-static const struct snd_kcontrol_new snd_bcm2835_headphones_ctl[] = {
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Headphone Playback Volume",
-               .index = 0,
-               .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
-                         SNDRV_CTL_ELEM_ACCESS_TLV_READ,
-               .private_value = PCM_PLAYBACK_VOLUME,
-               .info = snd_bcm2835_ctl_info,
-               .get = snd_bcm2835_ctl_get,
-               .put = snd_bcm2835_ctl_put,
-               .count = 1,
-               .tlv = {.p = snd_bcm2835_db_scale}
-       },
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Headphone Playback Switch",
-               .index = 0,
-               .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
-               .private_value = PCM_PLAYBACK_MUTE,
-               .info = snd_bcm2835_ctl_info,
-               .get = snd_bcm2835_ctl_get,
-               .put = snd_bcm2835_ctl_put,
-               .count = 1,
-       }
-};
-
-int snd_bcm2835_new_headphones_ctl(struct bcm2835_chip *chip)
-{
-       strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
-       return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_headphones_ctl),
-                          snd_bcm2835_headphones_ctl);
-}
-
-static const struct snd_kcontrol_new snd_bcm2835_hdmi[] = {
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "HDMI Playback Volume",
-               .index = 0,
-               .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
-                         SNDRV_CTL_ELEM_ACCESS_TLV_READ,
-               .private_value = PCM_PLAYBACK_VOLUME,
-               .info = snd_bcm2835_ctl_info,
-               .get = snd_bcm2835_ctl_get,
-               .put = snd_bcm2835_ctl_put,
-               .count = 1,
-               .tlv = {.p = snd_bcm2835_db_scale}
-       },
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "HDMI Playback Switch",
-               .index = 0,
-               .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
-               .private_value = PCM_PLAYBACK_MUTE,
-               .info = snd_bcm2835_ctl_info,
-               .get = snd_bcm2835_ctl_get,
-               .put = snd_bcm2835_ctl_put,
-               .count = 1,
-       }
-};
-
-int snd_bcm2835_new_hdmi_ctl(struct bcm2835_chip *chip)
-{
-       strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
-       return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_hdmi),
-                          snd_bcm2835_hdmi);
-}
-
index f2ef1d6..68e8d49 100644 (file)
@@ -82,8 +82,7 @@ void bcm2835_playback_fifo(struct bcm2835_alsa_stream *alsa_stream,
 }
 
 /* open callback */
-static int snd_bcm2835_playback_open_generic(
-       struct snd_pcm_substream *substream, int spdif)
+static int snd_bcm2835_playback_open_generic(struct snd_pcm_substream *substream, int spdif)
 {
        struct bcm2835_chip *chip = snd_pcm_substream_chip(substream);
        struct snd_pcm_runtime *runtime = substream->runtime;
@@ -237,7 +236,7 @@ static void snd_bcm2835_pcm_transfer(struct snd_pcm_substream *substream,
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
-       void *src = (void *) (substream->runtime->dma_area + rec->sw_data);
+       void *src = (void *)(substream->runtime->dma_area + rec->sw_data);
 
        bcm2835_audio_write(alsa_stream, bytes, src);
 }
index d567a2e..e429b33 100644 (file)
@@ -11,7 +11,7 @@ struct bcm2835_audio_instance {
        struct device *dev;
        unsigned int service_handle;
        struct completion msg_avail_comp;
-       struct mutex vchi_mutex;
+       struct mutex vchi_mutex; /* Serialize vchiq access */
        struct bcm2835_alsa_stream *alsa_stream;
        int result;
        unsigned int max_packet;
index 628732d..00bc898 100644 (file)
 #include "bcm2835.h"
 
 static bool enable_hdmi;
-static bool enable_headphones;
-static bool enable_compat_alsa = true;
+static bool enable_headphones = true;
 static int num_channels = MAX_SUBSTREAMS;
 
 module_param(enable_hdmi, bool, 0444);
 MODULE_PARM_DESC(enable_hdmi, "Enables HDMI virtual audio device");
 module_param(enable_headphones, bool, 0444);
 MODULE_PARM_DESC(enable_headphones, "Enables Headphones virtual audio device");
-module_param(enable_compat_alsa, bool, 0444);
-MODULE_PARM_DESC(enable_compat_alsa,
-                "Enables ALSA compatibility virtual audio device");
 module_param(num_channels, int, 0644);
 MODULE_PARM_DESC(num_channels, "Number of audio channels (default: 8)");
 
@@ -63,19 +59,20 @@ struct bcm2835_audio_driver {
        enum snd_bcm2835_route route;
 };
 
-static int bcm2835_audio_alsa_newpcm(struct bcm2835_chip *chip,
+static int bcm2835_audio_dual_newpcm(struct bcm2835_chip *chip,
                                     const char *name,
                                     enum snd_bcm2835_route route,
                                     u32 numchannels)
 {
        int err;
 
-       err = snd_bcm2835_new_pcm(chip, "bcm2835 ALSA", 0, AUDIO_DEST_AUTO,
-                                 numchannels - 1, false);
+       err = snd_bcm2835_new_pcm(chip, name, 0, route,
+                                 numchannels, false);
+
        if (err)
                return err;
 
-       err = snd_bcm2835_new_pcm(chip, "bcm2835 IEC958/HDMI", 1, 0, 1, true);
+       err = snd_bcm2835_new_pcm(chip, "IEC958", 1, route, 1, true);
        if (err)
                return err;
 
@@ -90,18 +87,6 @@ static int bcm2835_audio_simple_newpcm(struct bcm2835_chip *chip,
        return snd_bcm2835_new_pcm(chip, name, 0, route, numchannels, false);
 }
 
-static struct bcm2835_audio_driver bcm2835_audio_alsa = {
-       .driver = {
-               .name = "bcm2835_alsa",
-               .owner = THIS_MODULE,
-       },
-       .shortname = "bcm2835 ALSA",
-       .longname  = "bcm2835 ALSA",
-       .minchannels = 2,
-       .newpcm = bcm2835_audio_alsa_newpcm,
-       .newctl = snd_bcm2835_new_ctl,
-};
-
 static struct bcm2835_audio_driver bcm2835_audio_hdmi = {
        .driver = {
                .name = "bcm2835_hdmi",
@@ -110,7 +95,7 @@ static struct bcm2835_audio_driver bcm2835_audio_hdmi = {
        .shortname = "bcm2835 HDMI",
        .longname  = "bcm2835 HDMI",
        .minchannels = 1,
-       .newpcm = bcm2835_audio_simple_newpcm,
+       .newpcm = bcm2835_audio_dual_newpcm,
        .newctl = snd_bcm2835_new_hdmi_ctl,
        .route = AUDIO_DEST_HDMI
 };
@@ -135,10 +120,6 @@ struct bcm2835_audio_drivers {
 
 static struct bcm2835_audio_drivers children_devices[] = {
        {
-               .audio_driver = &bcm2835_audio_alsa,
-               .is_enabled = &enable_compat_alsa,
-       },
-       {
                .audio_driver = &bcm2835_audio_hdmi,
                .is_enabled = &enable_hdmi,
        },
index 51066ac..38b7451 100644 (file)
@@ -61,7 +61,7 @@ struct bcm2835_chip {
 
        unsigned int opened;
        unsigned int spdif_status;
-       struct mutex audio_mutex;
+       struct mutex audio_mutex; /* Serialize chip data access */
 
        struct bcm2835_vchi_ctx *vchi_ctx;
 };
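
These bcm2835-audio hunks only add a comment after each struct mutex; checkpatch expects every lock embedded in a structure to say what it protects. A minimal sketch of the convention, using made-up names:

    #include <linux/mutex.h>

    struct demo_state {
            struct mutex lock;      /* Serializes access to @count */
            unsigned int count;
    };

    static void demo_bump(struct demo_state *s)
    {
            mutex_lock(&s->lock);
            s->count++;
            mutex_unlock(&s->lock);
    }
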
index dcda565..870c9af 100644 (file)
@@ -3,8 +3,8 @@ config VIDEO_BCM2835
        tristate "BCM2835 Camera"
        depends on MEDIA_SUPPORT
        depends on VIDEO_DEV && (ARCH_BCM2835 || COMPILE_TEST)
-       select BCM2835_VCHIQ
-       select BCM2835_VCHIQ_MMAL
+       select BCM2835_VCHIQ if HAS_DMA
+       select BCM2835_VCHIQ_MMAL if HAS_DMA
        select VIDEOBUF2_VMALLOC
        select BTREE
        help
index 88b1878..fd456d1 100644 (file)
@@ -1033,9 +1033,9 @@ static int mmal_setup_video_component(struct bcm2835_mmal_dev *dev,
        preview_port->es.video.crop.y = 0;
        preview_port->es.video.crop.width = f->fmt.pix.width;
        preview_port->es.video.crop.height = f->fmt.pix.height;
-       preview_port->es.video.frame_rate.num =
+       preview_port->es.video.frame_rate.numerator =
                                  dev->capture.timeperframe.denominator;
-       preview_port->es.video.frame_rate.den =
+       preview_port->es.video.frame_rate.denominator =
                                  dev->capture.timeperframe.numerator;
        ret = vchiq_mmal_port_set_format(dev->instance, preview_port);
 
@@ -1084,9 +1084,9 @@ static int mmal_setup_encode_component(struct bcm2835_mmal_dev *dev,
        port->es.video.crop.y = 0;
        port->es.video.crop.width = f->fmt.pix.width;
        port->es.video.crop.height = f->fmt.pix.height;
-       port->es.video.frame_rate.num =
+       port->es.video.frame_rate.numerator =
                  dev->capture.timeperframe.denominator;
-       port->es.video.frame_rate.den =
+       port->es.video.frame_rate.denominator =
                  dev->capture.timeperframe.numerator;
 
        port->format.encoding = mfmt->mmal;
@@ -1225,8 +1225,8 @@ static int mmal_setup_components(struct bcm2835_mmal_dev *dev,
        camera_port->es.video.crop.y = 0;
        camera_port->es.video.crop.width = f->fmt.pix.width;
        camera_port->es.video.crop.height = f->fmt.pix.height;
-       camera_port->es.video.frame_rate.num = 0;
-       camera_port->es.video.frame_rate.den = 1;
+       camera_port->es.video.frame_rate.numerator = 0;
+       camera_port->es.video.frame_rate.denominator = 1;
        camera_port->es.video.color_space = MMAL_COLOR_SPACE_JPEG_JFIF;
 
        ret = vchiq_mmal_port_set_format(dev->instance, camera_port);
@@ -1629,8 +1629,8 @@ static int mmal_init(struct bcm2835_mmal_dev *dev)
        format->es->video.crop.y = 0;
        format->es->video.crop.width = 1024;
        format->es->video.crop.height = 768;
-       format->es->video.frame_rate.num = 0; /* Rely on fps_range */
-       format->es->video.frame_rate.den = 1;
+       format->es->video.frame_rate.numerator = 0; /* Rely on fps_range */
+       format->es->video.frame_rate.denominator = 1;
 
        format = &camera->output[CAM_PORT_VIDEO].format;
 
@@ -1643,8 +1643,8 @@ static int mmal_init(struct bcm2835_mmal_dev *dev)
        format->es->video.crop.y = 0;
        format->es->video.crop.width = 1024;
        format->es->video.crop.height = 768;
-       format->es->video.frame_rate.num = 0; /* Rely on fps_range */
-       format->es->video.frame_rate.den = 1;
+       format->es->video.frame_rate.numerator = 0; /* Rely on fps_range */
+       format->es->video.frame_rate.denominator = 1;
 
        format = &camera->output[CAM_PORT_CAPTURE].format;
 
@@ -1656,8 +1656,8 @@ static int mmal_init(struct bcm2835_mmal_dev *dev)
        format->es->video.crop.y = 0;
        format->es->video.crop.width = 2592;
        format->es->video.crop.height = 1944;
-       format->es->video.frame_rate.num = 0; /* Rely on fps_range */
-       format->es->video.frame_rate.den = 1;
+       format->es->video.frame_rate.numerator = 0; /* Rely on fps_range */
+       format->es->video.frame_rate.denominator = 1;
 
        dev->capture.width = format->es->video.width;
        dev->capture.height = format->es->video.height;
index eb722f1..5644d1d 100644 (file)
@@ -154,13 +154,13 @@ static int ctrl_set_rational(struct bcm2835_mmal_dev *dev,
                             struct v4l2_ctrl *ctrl,
                             const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
 {
-       struct mmal_parameter_rational rational_value;
+       struct s32_fract rational_value;
        struct vchiq_mmal_port *control;
 
        control = &dev->component[COMP_CAMERA]->control;
 
-       rational_value.num = ctrl->val;
-       rational_value.den = 100;
+       rational_value.numerator = ctrl->val;
+       rational_value.denominator = 100;
 
        return vchiq_mmal_port_parameter_set(dev->instance, control,
                                             mmal_ctrl->mmal_id,
@@ -489,9 +489,10 @@ static int ctrl_set_awb_gains(struct bcm2835_mmal_dev *dev,
        else if (ctrl->id == V4L2_CID_BLUE_BALANCE)
                dev->blue_gain = ctrl->val;
 
-       gains.r_gain.num = dev->red_gain;
-       gains.b_gain.num = dev->blue_gain;
-       gains.r_gain.den = gains.b_gain.den = 1000;
+       gains.r_gain.numerator = dev->red_gain;
+       gains.r_gain.denominator = 1000;
+       gains.b_gain.numerator = dev->blue_gain;
+       gains.b_gain.denominator = 1000;
 
        return vchiq_mmal_port_parameter_set(dev->instance, control,
                                             mmal_ctrl->mmal_id,
@@ -1271,26 +1272,26 @@ int set_framerate_params(struct bcm2835_mmal_dev *dev)
        struct mmal_parameter_fps_range fps_range;
        int ret;
 
-       fps_range.fps_high.num = dev->capture.timeperframe.denominator;
-       fps_range.fps_high.den = dev->capture.timeperframe.numerator;
+       fps_range.fps_high.numerator = dev->capture.timeperframe.denominator;
+       fps_range.fps_high.denominator = dev->capture.timeperframe.numerator;
 
        if ((dev->exposure_mode_active != MMAL_PARAM_EXPOSUREMODE_OFF) &&
            (dev->exp_auto_priority)) {
                /* Variable FPS. Define min FPS as 1fps. */
-               fps_range.fps_low.num = 1;
-               fps_range.fps_low.den = 1;
+               fps_range.fps_low.numerator = 1;
+               fps_range.fps_low.denominator = 1;
        } else {
                /* Fixed FPS - set min and max to be the same */
-               fps_range.fps_low.num = fps_range.fps_high.num;
-               fps_range.fps_low.den = fps_range.fps_high.den;
+               fps_range.fps_low.numerator = fps_range.fps_high.numerator;
+               fps_range.fps_low.denominator = fps_range.fps_high.denominator;
        }
 
        v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
                 "Set fps range to %d/%d to %d/%d\n",
-                fps_range.fps_low.num,
-                fps_range.fps_low.den,
-                fps_range.fps_high.num,
-                fps_range.fps_high.den);
+                fps_range.fps_low.numerator,
+                fps_range.fps_low.denominator,
+                fps_range.fps_high.numerator,
+                fps_range.fps_high.denominator);
 
        ret = vchiq_mmal_port_parameter_set(dev->instance,
                                            &dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW],
index f0bfacf..0596ac6 100644 (file)
@@ -431,21 +431,18 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
                        if (head_bytes > actual)
                                head_bytes = actual;
 
-                       memcpy((char *)kmap(pages[0]) +
+                       memcpy_to_page(pages[0],
                                pagelist->offset,
                                fragments,
                                head_bytes);
-                       kunmap(pages[0]);
                }
                if ((actual >= 0) && (head_bytes < actual) &&
-                   (tail_bytes != 0)) {
-                       memcpy((char *)kmap(pages[num_pages - 1]) +
-                               ((pagelist->offset + actual) &
-                               (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
+                   (tail_bytes != 0))
+                       memcpy_to_page(pages[num_pages - 1],
+                               (pagelist->offset + actual) &
+                               (PAGE_SIZE - 1) & ~(g_cache_line_size - 1),
                                fragments + g_cache_line_size,
                                tail_bytes);
-                       kunmap(pages[num_pages - 1]);
-               }
 
                down(&g_free_fragments_mutex);
                *(char **)fragments = g_free_fragments;
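
The hunk above folds an open-coded kmap()/memcpy()/kunmap() sequence into memcpy_to_page(), which does the copy through a short-lived local mapping and flushes the destination page. A hedged sketch of the helper outside this driver:

    #include <linux/highmem.h>

    /* Equivalent of:
     *
     *      memcpy((char *)kmap(page) + offset, src, len);
     *      kunmap(page);
     *
     * but using a short-lived local mapping and a dcache flush of the
     * destination page, which is what the diff above switches to.
     */
    static void demo_copy_to_page(struct page *page, size_t offset,
                                  const void *src, size_t len)
    {
            memcpy_to_page(page, offset, src, len);
    }
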
@@ -918,8 +915,7 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
        struct vchiq_instance *instance;
        struct vchiq_service *service;
        enum vchiq_status status;
-       struct bulk_waiter_node *waiter = NULL;
-       bool found = false;
+       struct bulk_waiter_node *waiter = NULL, *iter;
 
        service = find_service_by_handle(handle);
        if (!service)
@@ -930,16 +926,16 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
        vchiq_service_put(service);
 
        mutex_lock(&instance->bulk_waiter_list_mutex);
-       list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
-               if (waiter->pid == current->pid) {
-                       list_del(&waiter->list);
-                       found = true;
+       list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
+               if (iter->pid == current->pid) {
+                       list_del(&iter->list);
+                       waiter = iter;
                        break;
                }
        }
        mutex_unlock(&instance->bulk_waiter_list_mutex);
 
-       if (found) {
+       if (waiter) {
                struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
 
                if (bulk) {
index 82b7bd7..1ddc661 100644 (file)
@@ -79,7 +79,6 @@
 #define BITSET_BIT(b)         (1 << (b & 31))
 #define BITSET_IS_SET(bs, b)  (bs[BITSET_WORD(b)] & BITSET_BIT(b))
 #define BITSET_SET(bs, b)     (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
-#define BITSET_CLR(bs, b)     (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
 
 enum {
        DEBUG_ENTRIES,
index b41c2a2..66bbfec 100644 (file)
@@ -289,8 +289,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
                                      enum vchiq_bulk_mode __user *mode)
 {
        struct vchiq_service *service;
-       struct bulk_waiter_node *waiter = NULL;
-       bool found = false;
+       struct bulk_waiter_node *waiter = NULL, *iter;
        void *userdata;
        int status = 0;
        int ret;
@@ -309,16 +308,16 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
                userdata = &waiter->bulk_waiter;
        } else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
                mutex_lock(&instance->bulk_waiter_list_mutex);
-               list_for_each_entry(waiter, &instance->bulk_waiter_list,
+               list_for_each_entry(iter, &instance->bulk_waiter_list,
                                    list) {
-                       if (waiter->pid == current->pid) {
-                               list_del(&waiter->list);
-                               found = true;
+                       if (iter->pid == current->pid) {
+                               list_del(&iter->list);
+                               waiter = iter;
                                break;
                        }
                }
                mutex_unlock(&instance->bulk_waiter_list_mutex);
-               if (!found) {
+               if (!waiter) {
                        vchiq_log_error(vchiq_arm_log_level,
                                        "no bulk_waiter found for pid %d", current->pid);
                        ret = -ESRCH;
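
Both vchiq hunks that walk bulk_waiter_list make the same change: the list_for_each_entry() cursor gets its own variable (iter), and the result pointer (waiter) is set only on a match, so nothing after the loop can dereference the head-derived cursor left behind when no entry matched. A generic sketch of the pattern, with hypothetical names:

    #include <linux/list.h>

    struct demo_waiter {
            struct list_head list;
            pid_t pid;
    };

    /* Find-and-remove the entry for @pid, or return NULL.
     * Using a dedicated cursor (@iter) keeps @found either NULL or a
     * genuine entry; the cursor itself is never used after the loop.
     */
    static struct demo_waiter *demo_take_waiter(struct list_head *head, pid_t pid)
    {
            struct demo_waiter *found = NULL, *iter;

            list_for_each_entry(iter, head, list) {
                    if (iter->pid == pid) {
                            list_del(&iter->list);
                            found = iter;
                            break;
                    }
            }
            return found;
    }
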
index d77e15f..492d4c5 100644 (file)
@@ -14,6 +14,8 @@
 #ifndef MMAL_MSG_COMMON_H
 #define MMAL_MSG_COMMON_H
 
+#include <linux/types.h>
+
 enum mmal_msg_status {
        MMAL_MSG_STATUS_SUCCESS = 0, /**< Success */
        MMAL_MSG_STATUS_ENOMEM,      /**< Out of memory */
@@ -40,9 +42,4 @@ struct mmal_rect {
        s32 height; /**< height */
 };
 
-struct mmal_rational {
-       s32 num;    /**< Numerator */
-       s32 den;    /**< Denominator */
-};
-
 #endif /* MMAL_MSG_COMMON_H */
index 1e996d8..5569876 100644 (file)
@@ -14,6 +14,8 @@
 #ifndef MMAL_MSG_FORMAT_H
 #define MMAL_MSG_FORMAT_H
 
+#include <linux/math.h>
+
 #include "mmal-msg-common.h"
 
 /* MMAL_ES_FORMAT_T */
@@ -30,8 +32,8 @@ struct mmal_video_format {
        u32 width;              /* Width of frame in pixels */
        u32 height;             /* Height of frame in rows of pixels */
        struct mmal_rect crop;  /* Visible region of the frame */
-       struct mmal_rational frame_rate;        /* Frame rate */
-       struct mmal_rational par;               /* Pixel aspect ratio */
+       struct s32_fract frame_rate;    /* Frame rate */
+       struct s32_fract par;           /* Pixel aspect ratio */
 
        /*
         * FourCC specifying the color space of the video stream. See the
index 2277e05..a0cdd28 100644 (file)
@@ -22,6 +22,8 @@
 #ifndef MMAL_PARAMETERS_H
 #define MMAL_PARAMETERS_H
 
+#include <linux/math.h>
+
 /** Common parameter ID group, used with many types of component. */
 #define MMAL_PARAMETER_GROUP_COMMON            (0 << 16)
 /** Camera-specific parameter ID group. */
@@ -223,11 +225,6 @@ enum mmal_parameter_camera_type {
        MMAL_PARAMETER_CUSTOM_AWB_GAINS,
 };
 
-struct mmal_parameter_rational {
-       s32 num;    /**< Numerator */
-       s32 den;    /**< Denominator */
-};
-
 enum mmal_parameter_camera_config_timestamp_mode {
        MMAL_PARAM_TIMESTAMP_MODE_ZERO = 0, /* Always timestamp frames as 0 */
        MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,  /* Use the raw STC value
@@ -243,9 +240,9 @@ enum mmal_parameter_camera_config_timestamp_mode {
 
 struct mmal_parameter_fps_range {
        /**< Low end of the permitted framerate range */
-       struct mmal_parameter_rational  fps_low;
+       struct s32_fract        fps_low;
        /**< High end of the permitted framerate range */
-       struct mmal_parameter_rational  fps_high;
+       struct s32_fract        fps_high;
 };
 
 /* camera configuration parameter */
@@ -350,8 +347,8 @@ enum MMAL_PARAM_FLICKERAVOID {
 };
 
 struct mmal_parameter_awbgains {
-       struct mmal_parameter_rational r_gain;  /**< Red gain */
-       struct mmal_parameter_rational b_gain;  /**< Blue gain */
+       struct s32_fract r_gain;        /**< Red gain */
+       struct s32_fract b_gain;        /**< Blue gain */
 };
 
 /** Manner of video rate control */
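
All of the mmal hunks above are one mechanical conversion: the driver-private rational types (struct mmal_rational / struct mmal_parameter_rational, fields num/den) are replaced by the generic struct s32_fract from <linux/math.h>, whose fields are numerator and denominator. A hedged usage sketch:

    #include <linux/math.h>

    /* struct s32_fract { __s32 numerator; __s32 denominator; };
     * Express 30000/1001 (~29.97 fps) as a fraction and read it back
     * as milli-frames-per-second.
     */
    static int demo_fps_millis(void)
    {
            struct s32_fract rate = {
                    .numerator = 30000,
                    .denominator = 1001,
            };

            return rate.numerator * 1000 / rate.denominator;        /* 29970 */
    }
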
index 70c9d55..845b20e 100644 (file)
@@ -744,9 +744,9 @@ static void dump_port_info(struct vchiq_mmal_port *port)
                         port->es.video.crop.y,
                         port->es.video.crop.width, port->es.video.crop.height);
                pr_debug("               : framerate %d/%d  aspect %d/%d\n",
-                        port->es.video.frame_rate.num,
-                        port->es.video.frame_rate.den,
-                        port->es.video.par.num, port->es.video.par.den);
+                        port->es.video.frame_rate.numerator,
+                        port->es.video.frame_rate.denominator,
+                        port->es.video.par.numerator, port->es.video.par.denominator);
        }
 }
 
@@ -1549,8 +1549,8 @@ int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
        dst->es.video.crop.y = src->es.video.crop.y;
        dst->es.video.crop.width = src->es.video.crop.width;
        dst->es.video.crop.height = src->es.video.crop.height;
-       dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
-       dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
+       dst->es.video.frame_rate.numerator = src->es.video.frame_rate.numerator;
+       dst->es.video.frame_rate.denominator = src->es.video.frame_rate.denominator;
 
        /* set new format */
        ret = port_info_set(instance, dst);
@@ -1841,7 +1841,6 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
        mutex_unlock(&instance->vchiq_mutex);
 
        vchiq_shutdown(instance->vchiq_instance);
-       flush_workqueue(instance->bulk_wq);
        destroy_workqueue(instance->bulk_wq);
 
        idr_destroy(&instance->context_map);
similarity index 93%
rename from drivers/staging/vme/devices/Kconfig
rename to drivers/staging/vme_user/Kconfig
index 5651bb1..e8b4461 100644 (file)
@@ -3,7 +3,7 @@ comment "VME Device Drivers"
 
 config VME_USER
        tristate "VME user space access driver"
-       depends on STAGING
+       depends on STAGING && VME_BUS
        help
          If you say Y here you want to be able to access a limited number of
          VME windows in a manner at least semi-compatible with the interface
similarity index 99%
rename from drivers/staging/vme/devices/vme_user.c
rename to drivers/staging/vme_user/vme_user.c
index e3fa38b..859af79 100644 (file)
@@ -773,7 +773,7 @@ MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
 module_param_array(bus, int, &bus_num, 0000);
 
 MODULE_DESCRIPTION("VME User Space Access Driver");
-MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
+MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
 MODULE_LICENSE("GPL");
 
 module_init(vme_user_init);
index dfdb0eb..577a38f 100644 (file)
@@ -29,7 +29,6 @@
  *
  */
 
-#include "tmacro.h"
 #include "mac.h"
 #include "baseband.h"
 #include "srom.h"
@@ -1910,19 +1909,19 @@ bool bb_read_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
        unsigned char by_value;
 
        /* BB reg offset */
-       VNSvOutPortB(iobase + MAC_REG_BBREGADR, by_bb_addr);
+       iowrite8(by_bb_addr, iobase + MAC_REG_BBREGADR);
 
        /* turn on REGR */
        MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR);
        /* W_MAX_TIMEOUT is the timeout period */
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
-               VNSvInPortB(iobase + MAC_REG_BBREGCTL, &by_value);
+               by_value = ioread8(iobase + MAC_REG_BBREGCTL);
                if (by_value & BBREGCTL_DONE)
                        break;
        }
 
        /* get BB data */
-       VNSvInPortB(iobase + MAC_REG_BBREGDATA, pby_data);
+       *pby_data = ioread8(iobase + MAC_REG_BBREGDATA);
 
        if (ww == W_MAX_TIMEOUT) {
                pr_debug(" DBG_PORT80(0x30)\n");
@@ -1953,15 +1952,15 @@ bool bb_write_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
        unsigned char by_value;
 
        /* BB reg offset */
-       VNSvOutPortB(iobase + MAC_REG_BBREGADR, by_bb_addr);
+       iowrite8(by_bb_addr, iobase + MAC_REG_BBREGADR);
        /* set BB data */
-       VNSvOutPortB(iobase + MAC_REG_BBREGDATA, by_data);
+       iowrite8(by_data, iobase + MAC_REG_BBREGDATA);
 
        /* turn on BBREGCTL_REGW */
        MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGW);
        /* W_MAX_TIMEOUT is the timeout period */
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
-               VNSvInPortB(iobase + MAC_REG_BBREGCTL, &by_value);
+               by_value = ioread8(iobase + MAC_REG_BBREGCTL);
                if (by_value & BBREGCTL_DONE)
                        break;
        }
@@ -2054,7 +2053,7 @@ bool bb_vt3253_init(struct vnt_private *priv)
                                byVT3253B0_AGC[ii][0],
                                byVT3253B0_AGC[ii][1]);
 
-               VNSvOutPortB(iobase + MAC_REG_ITRTMSET, 0x23);
+               iowrite8(0x23, iobase + MAC_REG_ITRTMSET);
                MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0));
 
                priv->abyBBVGA[0] = 0x14;
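
The baseband hunks replace the old VNSvInPortB()/VNSvOutPortB() wrappers with plain ioread8()/iowrite8() while keeping the bounded W_MAX_TIMEOUT polling loop on the DONE bit. For comparison only (not what this commit does), the same wait can be written with the generic readb_poll_timeout() helper from <linux/iopoll.h>; the register offset and bit below are hypothetical:

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/iopoll.h>

    #define DEMO_REG_CTL    0x00            /* hypothetical register offset */
    #define DEMO_CTL_DONE   BIT(7)          /* hypothetical done bit */

    /* Poll the DONE bit every 10 us for at most 1 ms; returns 0 on
     * success or -ETIMEDOUT, leaving the last value read in @val.
     */
    static int demo_wait_done(void __iomem *iobase)
    {
            u8 val;

            return readb_poll_timeout(iobase + DEMO_REG_CTL, val,
                                      val & DEMO_CTL_DONE, 10, 1000);
    }
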
index 1110366..2cde008 100644 (file)
@@ -11,7 +11,7 @@
  *      CARDbAddBasicRate - Add to BasicRateSet
  *      CARDbIsOFDMinBasicRate - Check if any OFDM rate is in BasicRateSet
  *      CARDqGetTSFOffset - Calculate TSFOffset
- *      CARDbGetCurrentTSF - Read Current NIC TSF counter
+ *      vt6655_get_current_tsf - Read Current NIC TSF counter
  *      CARDqGetNextTBTT - Calculate Next Beacon TSF counter
  *      CARDvSetFirstNextTBTT - Set NIC Beacon time
  *      CARDvUpdateNextTBTT - Sync. NIC Beacon time
@@ -24,7 +24,6 @@
  *
  */
 
-#include "tmacro.h"
 #include "card.h"
 #include "baseband.h"
 #include "mac.h"
@@ -239,26 +238,25 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
 
        if (priv->bySIFS != bySIFS) {
                priv->bySIFS = bySIFS;
-               VNSvOutPortB(priv->port_offset + MAC_REG_SIFS, priv->bySIFS);
+               iowrite8(priv->bySIFS, priv->port_offset + MAC_REG_SIFS);
        }
        if (priv->byDIFS != byDIFS) {
                priv->byDIFS = byDIFS;
-               VNSvOutPortB(priv->port_offset + MAC_REG_DIFS, priv->byDIFS);
+               iowrite8(priv->byDIFS, priv->port_offset + MAC_REG_DIFS);
        }
        if (priv->byEIFS != C_EIFS) {
                priv->byEIFS = C_EIFS;
-               VNSvOutPortB(priv->port_offset + MAC_REG_EIFS, priv->byEIFS);
+               iowrite8(priv->byEIFS, priv->port_offset + MAC_REG_EIFS);
        }
        if (priv->bySlot != bySlot) {
                priv->bySlot = bySlot;
-               VNSvOutPortB(priv->port_offset + MAC_REG_SLOT, priv->bySlot);
+               iowrite8(priv->bySlot, priv->port_offset + MAC_REG_SLOT);
 
                bb_set_short_slot_time(priv);
        }
        if (priv->byCWMaxMin != byCWMaxMin) {
                priv->byCWMaxMin = byCWMaxMin;
-               VNSvOutPortB(priv->port_offset + MAC_REG_CWMAXMIN0,
-                            priv->byCWMaxMin);
+               iowrite8(priv->byCWMaxMin, priv->port_offset + MAC_REG_CWMAXMIN0);
        }
 
        priv->byPacketType = CARDbyGetPktType(priv);
@@ -289,7 +287,7 @@ bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
        u64 local_tsf;
        u64 qwTSFOffset = 0;
 
-       CARDbGetCurrentTSF(priv, &local_tsf);
+       local_tsf = vt6655_get_current_tsf(priv);
 
        if (qwBSSTimestamp != local_tsf) {
                qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp,
@@ -321,9 +319,9 @@ bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
 bool CARDbSetBeaconPeriod(struct vnt_private *priv,
                          unsigned short wBeaconInterval)
 {
-       u64 qwNextTBTT = 0;
+       u64 qwNextTBTT;
 
-       CARDbGetCurrentTSF(priv, &qwNextTBTT); /* Get Local TSF counter */
+       qwNextTBTT = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
 
        qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
 
@@ -740,24 +738,24 @@ u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
  *
  * Return Value: true if success; otherwise false
  */
-bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF)
+u64 vt6655_get_current_tsf(struct vnt_private *priv)
 {
        void __iomem *iobase = priv->port_offset;
        unsigned short ww;
        unsigned char data;
+       u32 low, high;
 
        MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
-               VNSvInPortB(iobase + MAC_REG_TFTCTL, &data);
+               data = ioread8(iobase + MAC_REG_TFTCTL);
                if (!(data & TFTCTL_TSFCNTRRD))
                        break;
        }
        if (ww == W_MAX_TIMEOUT)
-               return false;
-       VNSvInPortD(iobase + MAC_REG_TSFCNTR, (u32 *)pqwCurrTSF);
-       VNSvInPortD(iobase + MAC_REG_TSFCNTR + 4, (u32 *)pqwCurrTSF + 1);
-
-       return true;
+               return 0;
+       low = ioread32(iobase + MAC_REG_TSFCNTR);
+       high = ioread32(iobase + MAC_REG_TSFCNTR + 4);
+       return le64_to_cpu(low + ((u64)high << 32));
 }
 
 /*
@@ -804,9 +802,9 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv,
                           unsigned short wBeaconInterval)
 {
        void __iomem *iobase = priv->port_offset;
-       u64 qwNextTBTT = 0;
+       u64 qwNextTBTT;
 
-       CARDbGetCurrentTSF(priv, &qwNextTBTT); /* Get Local TSF counter */
+       qwNextTBTT = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
 
        qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
        /* Set NextTBTT */
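
vt6655_get_current_tsf() above builds the 64-bit TSF out of two 32-bit reads; the preceding TFTCTL_TSFCNTRRD handshake appears to latch the counter first, so a single pass suffices there. When no such latch exists, the usual guard against the low word rolling over between the two reads is to re-read the high word, as in this hedged generic sketch:

    #include <linux/io.h>

    /* Read a free-running 64-bit counter exposed as two 32-bit MMIO
     * registers (low word at @reg, high word at @reg + 4) when the
     * hardware provides no latch: re-read the high word to detect a
     * low-word wrap between the two accesses.
     */
    static u64 demo_read_counter64(void __iomem *reg)
    {
            u32 hi, lo, hi2;

            do {
                    hi = ioread32(reg + 4);
                    lo = ioread32(reg);
                    hi2 = ioread32(reg + 4);
            } while (hi != hi2);

            return ((u64)hi << 32) | lo;
    }
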
index 09e7f3f..22dc359 100644 (file)
@@ -46,7 +46,7 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv,
                           unsigned short wBeaconInterval);
 void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
                         unsigned short wBeaconInterval);
-bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF);
+u64 vt6655_get_current_tsf(struct vnt_private *priv);
 u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval);
 u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2);
 unsigned char CARDbyGetPktType(struct vnt_private *priv);
index abe8678..652dcaf 100644 (file)
@@ -118,11 +118,9 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
                /* set HW default power register */
                MACvSelectPage1(priv->port_offset);
                RFbSetPower(priv, RATE_1M, priv->byCurrentCh);
-               VNSvOutPortB(priv->port_offset + MAC_REG_PWRCCK,
-                            priv->byCurPwr);
+               iowrite8(priv->byCurPwr, priv->port_offset + MAC_REG_PWRCCK);
                RFbSetPower(priv, RATE_6M, priv->byCurrentCh);
-               VNSvOutPortB(priv->port_offset + MAC_REG_PWROFDM,
-                            priv->byCurPwr);
+               iowrite8(priv->byCurPwr, priv->port_offset + MAC_REG_PWROFDM);
                MACvSelectPage0(priv->port_offset);
 
                spin_unlock_irqrestore(&priv->lock, flags);
index 897d70c..afaf331 100644 (file)
@@ -219,7 +219,7 @@ static void device_init_registers(struct vnt_private *priv)
        MACvInitialize(priv);
 
        /* Get Local ID */
-       VNSvInPortB(priv->port_offset + MAC_REG_LOCALID, &priv->local_id);
+       priv->local_id = ioread8(priv->port_offset + MAC_REG_LOCALID);
 
        spin_lock_irqsave(&priv->lock, flags);
 
@@ -334,8 +334,7 @@ static void device_init_registers(struct vnt_private *priv)
        if (priv->local_id > REV_ID_VT3253_B1) {
                MACvSelectPage1(priv->port_offset);
 
-               VNSvOutPortB(priv->port_offset + MAC_REG_MSRCTL + 1,
-                            (MSRCTL1_TXPWR | MSRCTL1_CSAPAREN));
+               iowrite8(MSRCTL1_TXPWR | MSRCTL1_CSAPAREN, priv->port_offset + MAC_REG_MSRCTL + 1);
 
                MACvSelectPage0(priv->port_offset);
        }
@@ -349,9 +348,9 @@ static void device_init_registers(struct vnt_private *priv)
        MACvSetLongRetryLimit(priv, priv->byLongRetryLimit);
 
        /* reset TSF counter */
-       VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
+       iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
        /* enable TSF counter */
-       VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
+       iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
 
        /* initialize BBP registers */
        bb_vt3253_init(priv);
@@ -377,7 +376,7 @@ static void device_init_registers(struct vnt_private *priv)
 
        if (priv->byRadioCtl & EEP_RADIOCTL_ENABLE) {
                /* Get GPIO */
-               MACvGPIOIn(priv->port_offset, &priv->byGPIO);
+               priv->byGPIO = ioread8(priv->port_offset + MAC_REG_GPIOCTL1);
 
                if (((priv->byGPIO & GPIO0_DATA) &&
                     !(priv->byRadioCtl & EEP_RADIOCTL_INV)) ||
@@ -406,7 +405,7 @@ static void device_init_registers(struct vnt_private *priv)
        MACvReceive1(priv->port_offset);
 
        /* start the adapter */
-       MACvStart(priv->port_offset);
+       iowrite8(HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON, priv->port_offset + MAC_REG_HOSTCR);
 }
 
 static void device_print_info(struct vnt_private *priv)
@@ -1029,7 +1028,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
        u32 isr;
        unsigned long flags;
 
-       MACvReadISR(priv->port_offset, &isr);
+       isr = ioread32(priv->port_offset + MAC_REG_ISR);
 
        if (isr == 0)
                return;
@@ -1042,7 +1041,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
        spin_lock_irqsave(&priv->lock, flags);
 
        /* Read low level stats */
-       MACvReadMIBCounter(priv->port_offset, &mib_counter);
+       mib_counter = ioread32(priv->port_offset + MAC_REG_MIBCNTR);
 
        low_stats->dot11RTSSuccessCount += mib_counter & 0xff;
        low_stats->dot11RTSFailureCount += (mib_counter >> 8) & 0xff;
@@ -1060,7 +1059,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
 
                if (isr & ISR_FETALERR) {
                        pr_debug(" ISR_FETALERR\n");
-                       VNSvOutPortB(priv->port_offset + MAC_REG_SOFTPWRCTL, 0);
+                       iowrite8(0, priv->port_offset + MAC_REG_SOFTPWRCTL);
                        VNSvOutPortW(priv->port_offset +
                                     MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPECTI);
                        device_error(priv, isr);
@@ -1116,7 +1115,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
                    ieee80211_queue_stopped(priv->hw, 0))
                        ieee80211_wake_queues(priv->hw);
 
-               MACvReadISR(priv->port_offset, &isr);
+               isr = ioread32(priv->port_offset + MAC_REG_ISR);
 
                MACvReceive0(priv->port_offset);
                MACvReceive1(priv->port_offset);
@@ -1407,7 +1406,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 
                spin_lock_irqsave(&priv->lock, flags);
 
-               MACvWriteBSSIDAddress(priv->port_offset, (u8 *)conf->bssid);
+               MACvWriteBSSIDAddress(priv->port_offset, conf->bssid);
 
                spin_unlock_irqrestore(&priv->lock, flags);
        }
@@ -1477,10 +1476,8 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 
                        CARDvSetFirstNextTBTT(priv, conf->beacon_int);
                } else {
-                       VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL,
-                                    TFTCTL_TSFCNTRST);
-                       VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL,
-                                    TFTCTL_TSFCNTREN);
+                       iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
+                       iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
                }
        }
 }
@@ -1513,7 +1510,7 @@ static void vnt_configure(struct ieee80211_hw *hw,
 
        *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
 
-       VNSvInPortB(priv->port_offset + MAC_REG_RCR, &rx_mode);
+       rx_mode = ioread8(priv->port_offset + MAC_REG_RCR);
 
        dev_dbg(&priv->pcid->dev, "rx mode in = %x\n", rx_mode);
 
@@ -1561,7 +1558,7 @@ static void vnt_configure(struct ieee80211_hw *hw,
                        rx_mode |= RCR_BSSID;
        }
 
-       VNSvOutPortB(priv->port_offset + MAC_REG_RCR, rx_mode);
+       iowrite8(rx_mode, priv->port_offset + MAC_REG_RCR);
 
        dev_dbg(&priv->pcid->dev, "rx mode out= %x\n", rx_mode);
 }
@@ -1603,7 +1600,7 @@ static u64 vnt_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        struct vnt_private *priv = hw->priv;
        u64 tsf;
 
-       CARDbGetCurrentTSF(priv, &tsf);
+       tsf = vt6655_get_current_tsf(priv);
 
        return tsf;
 }
@@ -1621,7 +1618,7 @@ static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        struct vnt_private *priv = hw->priv;
 
        /* reset TSF counter */
-       VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
+       iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
 }
 
 static const struct ieee80211_ops vnt_mac_ops = {
index f843966..1469015 100644 (file)
@@ -11,7 +11,6 @@
  *
  */
 
-#include "tmacro.h"
 #include "key.h"
 #include "mac.h"
 
index 80cced7..88ddd06 100644 (file)
@@ -36,7 +36,6 @@
  *
  */
 
-#include "tmacro.h"
 #include "mac.h"
 
 /*
index 550dc4d..57ae3bd 100644 (file)
@@ -18,7 +18,6 @@
 #ifndef __MAC_H__
 #define __MAC_H__
 
-#include "tmacro.h"
 #include "upc.h"
 
 /*---------------------  Export Definitions -------------------------*/
 #define TFTCTL_TSFCNTREN    0x01
 
 /* Bits in the EnhanceCFG register */
-#define EnCFG_BarkerPream   0x00020000
-#define EnCFG_NXTBTTCFPSTR  0x00010000
-#define EnCFG_BcnSusClr     0x00000200
-#define EnCFG_BcnSusInd     0x00000100
-#define EnCFG_CFP_ProtectEn 0x00000040
-#define EnCFG_ProtectMd     0x00000020
-#define EnCFG_HwParCFP      0x00000010
-#define EnCFG_CFNULRSP      0x00000004
-#define EnCFG_BBType_MASK   0x00000003
-#define EnCFG_BBType_g      0x00000002
-#define EnCFG_BBType_b      0x00000001
-#define EnCFG_BBType_a      0x00000000
+#define ENCFG_BARKERPREAM   0x00020000
+#define ENCFG_NXTBTTCFPSTR  0x00010000
+#define ENCFG_BCNSUSCLR     0x00000200
+#define ENCFG_BCNSUSIND     0x00000100
+#define ENCFG_CFP_PROTECTEN 0x00000040
+#define ENCFG_PROTECTMD     0x00000020
+#define ENCFG_HWPARCFP      0x00000010
+#define ENCFG_CFNULRSP      0x00000004
+#define ENCFG_BBTYPE_MASK   0x00000003
+#define ENCFG_BBTYPE_G      0x00000002
+#define ENCFG_BBTYPE_B      0x00000001
+#define ENCFG_BBTYPE_A      0x00000000
 
 /* Bits in the Page1Sel register */
 #define PAGE1_SEL           0x01
 #define MAC_LB_INTERNAL     0x01
 #define MAC_LB_NONE         0x00
 
-#define Default_BI              0x200
+#define DEFAULT_BI          0x200
 
 /* MiscFIFO Offset */
 #define MISCFIFO_KEYETRY0       32
 #define MACvRegBitsOn(iobase, byRegOfs, byBits)                        \
 do {                                                                   \
        unsigned char byData;                                           \
-       VNSvInPortB(iobase + byRegOfs, &byData);                        \
-       VNSvOutPortB(iobase + byRegOfs, byData | (byBits));             \
+       byData = ioread8(iobase + byRegOfs);                            \
+       iowrite8(byData | (byBits), iobase + byRegOfs);                 \
 } while (0)
 
 #define MACvWordRegBitsOn(iobase, byRegOfs, wBits)                     \
 do {                                                                   \
        unsigned short wData;                                           \
-       VNSvInPortW(iobase + byRegOfs, &wData);                 \
+       wData = ioread16(iobase + byRegOfs);                            \
        VNSvOutPortW(iobase + byRegOfs, wData | (wBits));               \
 } while (0)
 
-#define MACvDWordRegBitsOn(iobase, byRegOfs, dwBits)                   \
-do {                                                                   \
-       unsigned long dwData;                                           \
-       VNSvInPortD(iobase + byRegOfs, &dwData);                        \
-       VNSvOutPortD(iobase + byRegOfs, dwData | (dwBits));             \
-} while (0)
-
-#define MACvRegBitsOnEx(iobase, byRegOfs, byMask, byBits)              \
-do {                                                                   \
-       unsigned char byData;                                           \
-       VNSvInPortB(iobase + byRegOfs, &byData);                        \
-       byData &= byMask;                                               \
-       VNSvOutPortB(iobase + byRegOfs, byData | (byBits));             \
-} while (0)
-
 #define MACvRegBitsOff(iobase, byRegOfs, byBits)                       \
 do {                                                                   \
        unsigned char byData;                                           \
-       VNSvInPortB(iobase + byRegOfs, &byData);                        \
-       VNSvOutPortB(iobase + byRegOfs, byData & ~(byBits));            \
+       byData = ioread8(iobase + byRegOfs);                            \
+       iowrite8(byData & ~(byBits), iobase + byRegOfs);                \
 } while (0)
 
 #define MACvWordRegBitsOff(iobase, byRegOfs, wBits)                    \
 do {                                                                   \
        unsigned short wData;                                           \
-       VNSvInPortW(iobase + byRegOfs, &wData);                 \
+       wData = ioread16(iobase + byRegOfs);                            \
        VNSvOutPortW(iobase + byRegOfs, wData & ~(wBits));              \
 } while (0)
 
-#define MACvDWordRegBitsOff(iobase, byRegOfs, dwBits)                  \
-do {                                                                   \
-       unsigned long dwData;                                           \
-       VNSvInPortD(iobase + byRegOfs, &dwData);                        \
-       VNSvOutPortD(iobase + byRegOfs, dwData & ~(dwBits));            \
-} while (0)
-
-#define MACvGetCurrRx0DescAddr(iobase, pdwCurrDescAddr)        \
-       VNSvInPortD(iobase + MAC_REG_RXDMAPTR0,         \
-                   (unsigned long *)pdwCurrDescAddr)
-
-#define MACvGetCurrRx1DescAddr(iobase, pdwCurrDescAddr)        \
-       VNSvInPortD(iobase + MAC_REG_RXDMAPTR1,         \
-                   (unsigned long *)pdwCurrDescAddr)
-
-#define MACvGetCurrTx0DescAddr(iobase, pdwCurrDescAddr)        \
-       VNSvInPortD(iobase + MAC_REG_TXDMAPTR0,         \
-                   (unsigned long *)pdwCurrDescAddr)
-
-#define MACvGetCurrAC0DescAddr(iobase, pdwCurrDescAddr)        \
-       VNSvInPortD(iobase + MAC_REG_AC0DMAPTR,         \
-                   (unsigned long *)pdwCurrDescAddr)
-
-#define MACvGetCurrSyncDescAddr(iobase, pdwCurrDescAddr)       \
-       VNSvInPortD(iobase + MAC_REG_SYNCDMAPTR,                \
-                   (unsigned long *)pdwCurrDescAddr)
-
-#define MACvGetCurrATIMDescAddr(iobase, pdwCurrDescAddr)       \
-       VNSvInPortD(iobase + MAC_REG_ATIMDMAPTR,                \
-                   (unsigned long *)pdwCurrDescAddr)
-
 /* set the chip with current BCN tx descriptor address */
 #define MACvSetCurrBCNTxDescAddr(iobase, dwCurrDescAddr)       \
        VNSvOutPortD(iobase + MAC_REG_BCNDMAPTR,                \
@@ -622,104 +575,40 @@ do {                                                                     \
        VNSvOutPortW(iobase + MAC_REG_BCNDMACTL + 2,            \
                     wCurrBCNLength)
 
-#define MACvReadBSSIDAddress(iobase, pbyEtherAddr)             \
-do {                                                           \
-       VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1);             \
-       VNSvInPortB(iobase + MAC_REG_BSSID0,                    \
-                   (unsigned char *)pbyEtherAddr);             \
-       VNSvInPortB(iobase + MAC_REG_BSSID0 + 1,                \
-                   pbyEtherAddr + 1);                          \
-       VNSvInPortB(iobase + MAC_REG_BSSID0 + 2,                \
-                   pbyEtherAddr + 2);                          \
-       VNSvInPortB(iobase + MAC_REG_BSSID0 + 3,                \
-                   pbyEtherAddr + 3);                          \
-       VNSvInPortB(iobase + MAC_REG_BSSID0 + 4,                \
-                   pbyEtherAddr + 4);                          \
-       VNSvInPortB(iobase + MAC_REG_BSSID0 + 5,                \
-                   pbyEtherAddr + 5);                          \
-       VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0);             \
-} while (0)
-
 #define MACvWriteBSSIDAddress(iobase, pbyEtherAddr)            \
 do {                                                           \
-       VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1);             \
-       VNSvOutPortB(iobase + MAC_REG_BSSID0,                   \
-                    *(pbyEtherAddr));                          \
-       VNSvOutPortB(iobase + MAC_REG_BSSID0 + 1,               \
-                    *(pbyEtherAddr + 1));                      \
-       VNSvOutPortB(iobase + MAC_REG_BSSID0 + 2,               \
-                    *(pbyEtherAddr + 2));                      \
-       VNSvOutPortB(iobase + MAC_REG_BSSID0 + 3,               \
-                    *(pbyEtherAddr + 3));                      \
-       VNSvOutPortB(iobase + MAC_REG_BSSID0 + 4,               \
-                    *(pbyEtherAddr + 4));                      \
-       VNSvOutPortB(iobase + MAC_REG_BSSID0 + 5,               \
-                    *(pbyEtherAddr + 5));                      \
-       VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0);             \
+       iowrite8(1, iobase + MAC_REG_PAGE1SEL);                 \
+       iowrite8(pbyEtherAddr[0], iobase + MAC_REG_BSSID0);     \
+       iowrite8(pbyEtherAddr[1], iobase + MAC_REG_BSSID0 + 1); \
+       iowrite8(pbyEtherAddr[2], iobase + MAC_REG_BSSID0 + 2); \
+       iowrite8(pbyEtherAddr[3], iobase + MAC_REG_BSSID0 + 3); \
+       iowrite8(pbyEtherAddr[4], iobase + MAC_REG_BSSID0 + 4); \
+       iowrite8(pbyEtherAddr[5], iobase + MAC_REG_BSSID0 + 5); \
+       iowrite8(0, iobase + MAC_REG_PAGE1SEL);                 \
 } while (0)
 
 #define MACvReadEtherAddress(iobase, pbyEtherAddr)             \
 do {                                                           \
-       VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1);             \
-       VNSvInPortB(iobase + MAC_REG_PAR0,                      \
-                   (unsigned char *)pbyEtherAddr);             \
-       VNSvInPortB(iobase + MAC_REG_PAR0 + 1,          \
-                   pbyEtherAddr + 1);                          \
-       VNSvInPortB(iobase + MAC_REG_PAR0 + 2,          \
-                   pbyEtherAddr + 2);                          \
-       VNSvInPortB(iobase + MAC_REG_PAR0 + 3,          \
-                   pbyEtherAddr + 3);                          \
-       VNSvInPortB(iobase + MAC_REG_PAR0 + 4,          \
-                   pbyEtherAddr + 4);                          \
-       VNSvInPortB(iobase + MAC_REG_PAR0 + 5,          \
-                   pbyEtherAddr + 5);                          \
-       VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0);             \
+       iowrite8(1, iobase + MAC_REG_PAGE1SEL);                 \
+       pbyEtherAddr[0] = ioread8(iobase + MAC_REG_PAR0);       \
+       pbyEtherAddr[1] = ioread8(iobase + MAC_REG_PAR0 + 1);   \
+       pbyEtherAddr[2] = ioread8(iobase + MAC_REG_PAR0 + 2);   \
+       pbyEtherAddr[3] = ioread8(iobase + MAC_REG_PAR0 + 3);   \
+       pbyEtherAddr[4] = ioread8(iobase + MAC_REG_PAR0 + 4);   \
+       pbyEtherAddr[5] = ioread8(iobase + MAC_REG_PAR0 + 5);   \
+       iowrite8(0, iobase + MAC_REG_PAGE1SEL);                 \
 } while (0)
 
-#define MACvWriteEtherAddress(iobase, pbyEtherAddr)            \
-do {                                                           \
-       VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1);             \
-       VNSvOutPortB(iobase + MAC_REG_PAR0,                     \
-                    *pbyEtherAddr);                            \
-       VNSvOutPortB(iobase + MAC_REG_PAR0 + 1,         \
-                    *(pbyEtherAddr + 1));                      \
-       VNSvOutPortB(iobase + MAC_REG_PAR0 + 2,         \
-                    *(pbyEtherAddr + 2));                      \
-       VNSvOutPortB(iobase + MAC_REG_PAR0 + 3,         \
-                    *(pbyEtherAddr + 3));                      \
-       VNSvOutPortB(iobase + MAC_REG_PAR0 + 4,         \
-                    *(pbyEtherAddr + 4));                      \
-       VNSvOutPortB(iobase + MAC_REG_PAR0 + 5,         \
-                    *(pbyEtherAddr + 5));                      \
-       VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0);             \
-} while (0)
-
-#define MACvClearISR(iobase)                                           \
-       VNSvOutPortD(iobase + MAC_REG_ISR, IMR_MASK_VALUE)
-
-#define MACvStart(iobase)                                              \
-       VNSvOutPortB(iobase + MAC_REG_HOSTCR,                           \
-                    (HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON))
-
 #define MACvRx0PerPktMode(iobase)                                      \
        VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKT)
 
-#define MACvRx0BufferFillMode(iobase)                                  \
-       VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKTCLR)
-
 #define MACvRx1PerPktMode(iobase)                                      \
        VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKT)
 
-#define MACvRx1BufferFillMode(iobase)                                  \
-       VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKTCLR)
-
-#define MACvRxOn(iobase)                                               \
-       MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_RXON)
-
 #define MACvReceive0(iobase)                                           \
 do {                                                                   \
        unsigned long dwData;                                           \
-       VNSvInPortD(iobase + MAC_REG_RXDMACTL0, &dwData);               \
+       dwData = ioread32(iobase + MAC_REG_RXDMACTL0);                  \
        if (dwData & DMACTL_RUN)                                        \
                VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_WAKE); \
        else                                                            \
@@ -729,20 +618,17 @@ do {                                                                      \
 #define MACvReceive1(iobase)                                           \
 do {                                                                   \
        unsigned long dwData;                                           \
-       VNSvInPortD(iobase + MAC_REG_RXDMACTL1, &dwData);               \
+       dwData = ioread32(iobase + MAC_REG_RXDMACTL1);                  \
        if (dwData & DMACTL_RUN)                                        \
                VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_WAKE); \
        else                                                            \
                VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_RUN); \
 } while (0)
 
-#define MACvTxOn(iobase)                                               \
-       MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_TXON)
-
 #define MACvTransmit0(iobase)                                          \
 do {                                                                   \
        unsigned long dwData;                                           \
-       VNSvInPortD(iobase + MAC_REG_TXDMACTL0, &dwData);               \
+       dwData = ioread32(iobase + MAC_REG_TXDMACTL0);                  \
        if (dwData & DMACTL_RUN)                                        \
                VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_WAKE); \
        else                                                            \
@@ -752,47 +638,21 @@ do {                                                                      \
 #define MACvTransmitAC0(iobase)                                        \
 do {                                                                   \
        unsigned long dwData;                                           \
-       VNSvInPortD(iobase + MAC_REG_AC0DMACTL, &dwData);               \
+       dwData = ioread32(iobase + MAC_REG_AC0DMACTL);                  \
        if (dwData & DMACTL_RUN)                                        \
                VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_WAKE); \
        else                                                            \
                VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_RUN); \
 } while (0)
 
-#define MACvTransmitSYNC(iobase)                                       \
-do {                                                                   \
-       unsigned long dwData;                                           \
-       VNSvInPortD(iobase + MAC_REG_SYNCDMACTL, &dwData);              \
-       if (dwData & DMACTL_RUN)                                        \
-               VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_WAKE); \
-       else                                                            \
-               VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_RUN); \
-} while (0)
-
-#define MACvTransmitATIM(iobase)                                       \
-do {                                                                   \
-       unsigned long dwData;                                           \
-       VNSvInPortD(iobase + MAC_REG_ATIMDMACTL, &dwData);              \
-       if (dwData & DMACTL_RUN)                                        \
-               VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_WAKE); \
-       else                                                            \
-               VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_RUN); \
-} while (0)
-
-#define MACvTransmitBCN(iobase)                                        \
-       VNSvOutPortB(iobase + MAC_REG_BCNDMACTL, BEACON_READY)
-
 #define MACvClearStckDS(iobase)                                        \
 do {                                                                   \
        unsigned char byOrgValue;                                       \
-       VNSvInPortB(iobase + MAC_REG_STICKHW, &byOrgValue);             \
+       byOrgValue = ioread8(iobase + MAC_REG_STICKHW);                 \
        byOrgValue = byOrgValue & 0xFC;                                 \
-       VNSvOutPortB(iobase + MAC_REG_STICKHW, byOrgValue);             \
+       iowrite8(byOrgValue, iobase + MAC_REG_STICKHW);                 \
 } while (0)
 
-#define MACvReadISR(iobase, pdwValue)                          \
-       VNSvInPortD(iobase + MAC_REG_ISR, pdwValue)
-
 #define MACvWriteISR(iobase, dwValue)                          \
        VNSvOutPortD(iobase + MAC_REG_ISR, dwValue)
 
@@ -803,77 +663,58 @@ do {                                                                      \
        VNSvOutPortD(iobase + MAC_REG_IMR, 0)
 
 #define MACvSelectPage0(iobase)                                \
-               VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0)
+       iowrite8(0, iobase + MAC_REG_PAGE1SEL)
 
 #define MACvSelectPage1(iobase)                                \
-       VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1)
-
-#define MACvReadMIBCounter(iobase, pdwCounter)                 \
-       VNSvInPortD(iobase + MAC_REG_MIBCNTR, pdwCounter)
-
-#define MACvPwrEvntDisable(iobase)                                     \
-       VNSvOutPortW(iobase + MAC_REG_WAKEUPEN0, 0x0000)
+       iowrite8(1, iobase + MAC_REG_PAGE1SEL)
 
 #define MACvEnableProtectMD(iobase)                                    \
 do {                                                                   \
        unsigned long dwOrgValue;                                       \
-       VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue);               \
-       dwOrgValue = dwOrgValue | EnCFG_ProtectMd;                      \
+       dwOrgValue = ioread32(iobase + MAC_REG_ENCFG);                  \
+       dwOrgValue = dwOrgValue | ENCFG_PROTECTMD;                      \
        VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue);               \
 } while (0)
 
 #define MACvDisableProtectMD(iobase)                                   \
 do {                                                                   \
        unsigned long dwOrgValue;                                       \
-       VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue);               \
-       dwOrgValue = dwOrgValue & ~EnCFG_ProtectMd;                     \
+       dwOrgValue = ioread32(iobase + MAC_REG_ENCFG);                  \
+       dwOrgValue = dwOrgValue & ~ENCFG_PROTECTMD;                     \
        VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue);               \
 } while (0)
 
 #define MACvEnableBarkerPreambleMd(iobase)                             \
 do {                                                                   \
        unsigned long dwOrgValue;                                       \
-       VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue);               \
-       dwOrgValue = dwOrgValue | EnCFG_BarkerPream;                    \
+       dwOrgValue = ioread32(iobase + MAC_REG_ENCFG);                  \
+       dwOrgValue = dwOrgValue | ENCFG_BARKERPREAM;                    \
        VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue);               \
 } while (0)
 
 #define MACvDisableBarkerPreambleMd(iobase)                            \
 do {                                                                   \
        unsigned long dwOrgValue;                                       \
-       VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue);               \
-       dwOrgValue = dwOrgValue & ~EnCFG_BarkerPream;                   \
+       dwOrgValue = ioread32(iobase + MAC_REG_ENCFG);                  \
+       dwOrgValue = dwOrgValue & ~ENCFG_BARKERPREAM;                   \
        VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue);               \
 } while (0)
 
 #define MACvSetBBType(iobase, byTyp)                                   \
 do {                                                                   \
        unsigned long dwOrgValue;                                       \
-       VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue);               \
-       dwOrgValue = dwOrgValue & ~EnCFG_BBType_MASK;                   \
+       dwOrgValue = ioread32(iobase + MAC_REG_ENCFG);                  \
+       dwOrgValue = dwOrgValue & ~ENCFG_BBTYPE_MASK;                   \
        dwOrgValue = dwOrgValue | (unsigned long)byTyp;                 \
        VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue);               \
 } while (0)
 
-#define MACvReadATIMW(iobase, pwCounter)                               \
-       VNSvInPortW(iobase + MAC_REG_AIDATIM, pwCounter)
-
-#define MACvWriteATIMW(iobase, wCounter)                               \
-       VNSvOutPortW(iobase + MAC_REG_AIDATIM, wCounter)
-
-#define MACvWriteCRC16_128(iobase, byRegOfs, wCRC)             \
-do {                                                           \
-       VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1);             \
-       VNSvOutPortW(iobase + byRegOfs, wCRC);          \
-       VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0);             \
-} while (0)
-
-#define MACvGPIOIn(iobase, pbyValue)                                   \
-       VNSvInPortB(iobase + MAC_REG_GPIOCTL1, pbyValue)
-
 #define MACvSetRFLE_LatchBase(iobase)                                 \
        MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
 
+#define MAKEWORD(lb, hb) \
+       ((unsigned short)(((unsigned char)(lb)) | (((unsigned short)((unsigned char)(hb))) << 8)))
+
 bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs,
                      unsigned char byTestBits);
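
The converted MACvRegBitsOn()/MACvRegBitsOff() macros above are plain read-modify-write accessors; a minimal sketch of the same pattern as static inline helpers, using hypothetical vt6655_* names and assuming <linux/io.h> and <linux/types.h>:

static inline void vt6655_mac_reg_bits_on(void __iomem *iobase, u8 reg, u8 bits)
{
        u8 val;

        /* read, set the requested bits, write back; other bits untouched */
        val = ioread8(iobase + reg);
        iowrite8(val | bits, iobase + reg);
}

static inline void vt6655_mac_reg_bits_off(void __iomem *iobase, u8 reg, u8 bits)
{
        u8 val;

        /* read, clear the requested bits, write back */
        val = ioread8(iobase + reg);
        iowrite8(val & ~bits, iobase + reg);
}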
 
index 4498c9d..ee5e2e0 100644 (file)
@@ -175,7 +175,7 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
 
        /* W_MAX_TIMEOUT is the timeout period */
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
-               VNSvInPortD(iobase + MAC_REG_IFREGCTL, &dwValue);
+               dwValue = ioread32(iobase + MAC_REG_IFREGCTL);
                if (dwValue & IFREGCTL_DONE)
                        break;
        }
@@ -207,7 +207,7 @@ static bool RFbAL2230Init(struct vnt_private *priv)
        ret = true;
 
        /* 3-wire control for normal mode */
-       VNSvOutPortB(iobase + MAC_REG_SOFTPWRCTL, 0);
+       iowrite8(0, iobase + MAC_REG_SOFTPWRCTL);
 
        MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI  |
                                                         SOFTPWRCTL_TXPEINV));
@@ -238,7 +238,7 @@ static bool RFbAL2230Init(struct vnt_private *priv)
                                                         SOFTPWRCTL_TXPEINV));
 
        /* 3-wire control for power saving mode */
-       VNSvOutPortB(iobase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
+       iowrite8(PSSIG_WPE3 | PSSIG_WPE2, iobase + MAC_REG_PSPWRSIG);
 
        return ret;
 }
@@ -254,10 +254,10 @@ static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byCha
        ret &= IFRFbWriteEmbedded(priv, al2230_channel_table1[byChannel - 1]);
 
        /* Set Channel[7] = 0 to tell H/W channel is changing now. */
-       VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel & 0x7F));
+       iowrite8(byChannel & 0x7F, iobase + MAC_REG_CHANNEL);
        MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL2230);
        /* Set Channel[7] = 1 to tell H/W channel change is done. */
-       VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel | 0x80));
+       iowrite8(byChannel | 0x80, iobase + MAC_REG_CHANNEL);
 
        return ret;
 }
index 53506e2..71cbfa6 100644 (file)
@@ -1426,7 +1426,7 @@ static int vnt_beacon_xmit(struct vnt_private *priv,
        /* Set auto Transmit on */
        MACvRegBitsOn(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
        /* Poll Transmit the adapter */
-       MACvTransmitBCN(priv->port_offset);
+       iowrite8(BEACON_READY, priv->port_offset + MAC_REG_BCNDMACTL);
 
        return 0;
 }
@@ -1450,9 +1450,9 @@ int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif)
 int vnt_beacon_enable(struct vnt_private *priv, struct ieee80211_vif *vif,
                      struct ieee80211_bss_conf *conf)
 {
-       VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
+       iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
 
-       VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
+       iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
 
        CARDvSetFirstNextTBTT(priv, conf->beacon_int);
 
index 5cdbc24..722a2cc 100644 (file)
@@ -28,7 +28,6 @@
  */
 
 #include "upc.h"
-#include "tmacro.h"
 #include "mac.h"
 #include "srom.h"
 
@@ -66,29 +65,29 @@ unsigned char SROMbyReadEmbedded(void __iomem *iobase,
        unsigned char byOrg;
 
        byData = 0xFF;
-       VNSvInPortB(iobase + MAC_REG_I2MCFG, &byOrg);
+       byOrg = ioread8(iobase + MAC_REG_I2MCFG);
        /* turn off hardware retry for getting NACK */
-       VNSvOutPortB(iobase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY)));
+       iowrite8(byOrg & (~I2MCFG_NORETRY), iobase + MAC_REG_I2MCFG);
        for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) {
-               VNSvOutPortB(iobase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID);
-               VNSvOutPortB(iobase + MAC_REG_I2MTGAD, byContntOffset);
+               iowrite8(EEP_I2C_DEV_ID, iobase + MAC_REG_I2MTGID);
+               iowrite8(byContntOffset, iobase + MAC_REG_I2MTGAD);
 
                /* issue read command */
-               VNSvOutPortB(iobase + MAC_REG_I2MCSR, I2MCSR_EEMR);
+               iowrite8(I2MCSR_EEMR, iobase + MAC_REG_I2MCSR);
                /* wait DONE be set */
                for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) {
-                       VNSvInPortB(iobase + MAC_REG_I2MCSR, &byWait);
+                       byWait = ioread8(iobase + MAC_REG_I2MCSR);
                        if (byWait & (I2MCSR_DONE | I2MCSR_NACK))
                                break;
-                       PCAvDelayByIO(CB_DELAY_LOOP_WAIT);
+                       udelay(CB_DELAY_LOOP_WAIT);
                }
                if ((wDelay < W_MAX_TIMEOUT) &&
                    (!(byWait & I2MCSR_NACK))) {
                        break;
                }
        }
-       VNSvInPortB(iobase + MAC_REG_I2MDIPT, &byData);
-       VNSvOutPortB(iobase + MAC_REG_I2MCFG, byOrg);
+       byData = ioread8(iobase + MAC_REG_I2MDIPT);
+       iowrite8(byOrg, iobase + MAC_REG_I2MCFG);
        return byData;
 }
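
The DONE/NACK wait above is an open-coded poll loop around ioread8() and udelay(); assuming the caller is allowed to sleep, the same wait could be expressed with read_poll_timeout() from <linux/iopoll.h> (the timings below are illustrative, not taken from the driver):

        u8 status;
        int ret;

        /* poll I2MCSR until DONE or NACK is set, sleeping ~10us between
         * reads and giving up after 2ms; both figures are illustrative
         */
        ret = read_poll_timeout(ioread8, status,
                                status & (I2MCSR_DONE | I2MCSR_NACK),
                                10, 2000, false, iobase + MAC_REG_I2MCSR);
        /* ret is -ETIMEDOUT if neither bit was seen in time, 0 otherwise */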
 
diff --git a/drivers/staging/vt6655/tmacro.h b/drivers/staging/vt6655/tmacro.h
deleted file mode 100644 (file)
index 1582c03..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * Purpose: define basic common types and macros
- *
- * Author: Tevin Chen
- *
- * Date: May 21, 1996
- *
- */
-
-#ifndef __TMACRO_H__
-#define __TMACRO_H__
-
-/****** Common helper macros ***********************************************/
-
-#if !defined(LOBYTE)
-#define LOBYTE(w)           ((unsigned char)(w))
-#endif
-#if !defined(HIBYTE)
-#define HIBYTE(w)           ((unsigned char)(((unsigned short)(w) >> 8) & 0xFF))
-#endif
-
-#if !defined(LOWORD)
-#define LOWORD(d)           ((unsigned short)(d))
-#endif
-#if !defined(HIWORD)
-#define HIWORD(d)           ((unsigned short)((((unsigned long)(d)) >> 16) & 0xFFFF))
-#endif
-
-#define LODWORD(q)          ((q).u.dwLowDword)
-#define HIDWORD(q)          ((q).u.dwHighDword)
-
-#if !defined(MAKEWORD)
-#define MAKEWORD(lb, hb)    ((unsigned short)(((unsigned char)(lb)) | (((unsigned short)((unsigned char)(hb))) << 8)))
-#endif
-#if !defined(MAKEDWORD)
-#define MAKEDWORD(lw, hw)   ((unsigned long)(((unsigned short)(lw)) | (((unsigned long)((unsigned short)(hw))) << 16)))
-#endif
-
-#endif /* __TMACRO_H__ */
index b374db5..2a47f57 100644 (file)
 
 /* For memory mapped IO */
 
-#define VNSvInPortB(dwIOAddress, pbyData) \
-       (*(pbyData) = ioread8(dwIOAddress))
-
-#define VNSvInPortW(dwIOAddress, pwData) \
-       (*(pwData) = ioread16(dwIOAddress))
-
-#define VNSvInPortD(dwIOAddress, pdwData) \
-       (*(pdwData) = ioread32(dwIOAddress))
-
-#define VNSvOutPortB(dwIOAddress, byData) \
-       iowrite8((u8)(byData), dwIOAddress)
-
 #define VNSvOutPortW(dwIOAddress, wData) \
        iowrite16((u16)(wData), dwIOAddress)
 
 #define VNSvOutPortD(dwIOAddress, dwData) \
        iowrite32((u32)(dwData), dwIOAddress)
 
-#define PCAvDelayByIO(uDelayUnit)                              \
-do {                                                           \
-       unsigned char __maybe_unused byData;                    \
-       unsigned long ii;                                       \
-                                                               \
-       if (uDelayUnit <= 50) {                                 \
-               udelay(uDelayUnit);                             \
-       } else {                                                \
-               for (ii = 0; ii < (uDelayUnit); ii++)           \
-                       byData = inb(0x61);                     \
-       }                                                       \
-} while (0)
-
 /*---------------------  Export Classes  ----------------------------*/
 
 /*---------------------  Export Variables  --------------------------*/
index aca0030..413e2fc 100644 (file)
@@ -55,7 +55,6 @@ static struct ieee80211_channel vnt_channels_2ghz[] = {
        { .center_freq = 2484, .hw_value = 14 }
 };
 
-
 static struct ieee80211_supported_band vnt_supported_2ghz_band = {
        .channels = vnt_channels_2ghz,
        .n_channels = ARRAY_SIZE(vnt_channels_2ghz),
index acbbf8a..464602c 100644 (file)
@@ -82,7 +82,6 @@ static u8 al2230_channel_table1[CB_MAX_CHANNEL_24G][3] = {
        {0x06, 0x66, 0x61}
 };
 
-
 static u8 vt3226_init_table[CB_VT3226_INIT_SEQ][3] = {
        {0x03, 0xff, 0x80},
        {0x02, 0x82, 0xa1},
index 7951bd6..87379ed 100644 (file)
@@ -328,8 +328,7 @@ static int prism2_scan(struct wiphy *wiphy,
                (i < request->n_channels) && i < ARRAY_SIZE(prism2_channels);
                i++)
                msg1.channellist.data.data[i] =
-                       ieee80211_frequency_to_channel(
-                               request->channels[i]->center_freq);
+                       ieee80211_frequency_to_channel(request->channels[i]->center_freq);
        msg1.channellist.data.len = request->n_channels;
 
        msg1.maxchanneltime.data = 250;
@@ -476,14 +475,13 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
                                return -EINVAL;
 
                        result = prism2_domibset_uint32(wlandev,
-                               DIDMIB_DOT11SMT_PRIVACYTABLE_WEPDEFAULTKEYID,
+                                                       DIDMIB_DOT11SMT_PRIVACYTABLE_WEPDEFAULTKEYID,
                                sme->key_idx);
                        if (result)
                                goto exit;
 
                        /* send key to driver */
-                       did = didmib_dot11smt_wepdefaultkeystable_key(
-                                       sme->key_idx + 1);
+                       did = didmib_dot11smt_wepdefaultkeystable_key(sme->key_idx + 1);
                        result = prism2_domibset_pstr32(wlandev,
                                                        did, sme->key_len,
                                                        (u8 *)sme->key);
@@ -589,7 +587,7 @@ static int prism2_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
                data = MBM_TO_DBM(mbm);
 
        result = prism2_domibset_uint32(wlandev,
-               DIDMIB_DOT11PHY_TXPOWERTABLE_CURRENTTXPOWERLEVEL,
+                                       DIDMIB_DOT11PHY_TXPOWERTABLE_CURRENTTXPOWERLEVEL,
                data);
 
        if (result) {
index 98c154a..0611e37 100644 (file)
@@ -1227,8 +1227,8 @@ struct hfa384x {
 
        struct timer_list throttle;
 
-       struct tasklet_struct reaper_bh;
-       struct tasklet_struct completion_bh;
+       struct work_struct reaper_bh;
+       struct work_struct completion_bh;
 
        struct work_struct usb_work;
 
index 938e11a..3384452 100644 (file)
@@ -191,9 +191,9 @@ static void hfa384x_usbctlx_resptimerfn(struct timer_list *t);
 
 static void hfa384x_usb_throttlefn(struct timer_list *t);
 
-static void hfa384x_usbctlx_completion_task(struct tasklet_struct *t);
+static void hfa384x_usbctlx_completion_task(struct work_struct *work);
 
-static void hfa384x_usbctlx_reaper_task(struct tasklet_struct *t);
+static void hfa384x_usbctlx_reaper_task(struct work_struct *work);
 
 static int hfa384x_usbctlx_submit(struct hfa384x *hw,
                                  struct hfa384x_usbctlx *ctlx);
@@ -539,8 +539,8 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
        /* Initialize the authentication queue */
        skb_queue_head_init(&hw->authq);
 
-       tasklet_setup(&hw->reaper_bh, hfa384x_usbctlx_reaper_task);
-       tasklet_setup(&hw->completion_bh, hfa384x_usbctlx_completion_task);
+       INIT_WORK(&hw->reaper_bh, hfa384x_usbctlx_reaper_task);
+       INIT_WORK(&hw->completion_bh, hfa384x_usbctlx_completion_task);
        INIT_WORK(&hw->link_bh, prism2sta_processing_defer);
        INIT_WORK(&hw->usb_work, hfa384x_usb_defer);
 
@@ -2585,20 +2585,20 @@ void hfa384x_tx_timeout(struct wlandevice *wlandev)
 /*----------------------------------------------------------------
  * hfa384x_usbctlx_reaper_task
  *
- * Tasklet to delete dead CTLX objects
+ * Deferred work callback to delete dead CTLX objects
  *
  * Arguments:
- *     data    ptr to a struct hfa384x
+ *     work    contains ptr to a struct hfa384x
  *
  * Returns:
  *
  * Call context:
- *     Interrupt
+ *     Task
  *----------------------------------------------------------------
  */
-static void hfa384x_usbctlx_reaper_task(struct tasklet_struct *t)
+static void hfa384x_usbctlx_reaper_task(struct work_struct *work)
 {
-       struct hfa384x *hw = from_tasklet(hw, t, reaper_bh);
+       struct hfa384x *hw = container_of(work, struct hfa384x, reaper_bh);
        struct hfa384x_usbctlx *ctlx, *temp;
        unsigned long flags;
 
@@ -2618,21 +2618,21 @@ static void hfa384x_usbctlx_reaper_task(struct tasklet_struct *t)
 /*----------------------------------------------------------------
  * hfa384x_usbctlx_completion_task
  *
- * Tasklet to call completion handlers for returned CTLXs
+ * Deferred work callback to call completion handlers for returned CTLXs
  *
  * Arguments:
- *     data    ptr to struct hfa384x
+ *     work    contains ptr to a struct hfa384x
  *
  * Returns:
  *     Nothing
  *
  * Call context:
- *     Interrupt
+ *     Task
  *----------------------------------------------------------------
  */
-static void hfa384x_usbctlx_completion_task(struct tasklet_struct *t)
+static void hfa384x_usbctlx_completion_task(struct work_struct *work)
 {
-       struct hfa384x *hw = from_tasklet(hw, t, completion_bh);
+       struct hfa384x *hw = container_of(work, struct hfa384x, completion_bh);
        struct hfa384x_usbctlx *ctlx, *temp;
        unsigned long flags;
 
@@ -2686,7 +2686,7 @@ static void hfa384x_usbctlx_completion_task(struct tasklet_struct *t)
        spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
 
        if (reap)
-               tasklet_schedule(&hw->reaper_bh);
+               schedule_work(&hw->reaper_bh);
 }
 
 /*----------------------------------------------------------------
@@ -2743,7 +2743,7 @@ static int unlocked_usbctlx_cancel_async(struct hfa384x *hw,
  * aren't active and the timers should have been stopped.
  *
  * The CTLX is migrated to the "completing" queue, and the completing
- * tasklet is scheduled.
+ * work is scheduled.
  *
  * Arguments:
  *     hw              ptr to a struct hfa384x structure
@@ -2766,7 +2766,7 @@ static void unlocked_usbctlx_complete(struct hfa384x *hw,
         * queue.
         */
        list_move_tail(&ctlx->list, &hw->ctlxq.completing);
-       tasklet_schedule(&hw->completion_bh);
+       schedule_work(&hw->completion_bh);
 
        switch (ctlx->state) {
        case CTLX_COMPLETE:
index dc0749b..e13da7f 100644 (file)
@@ -165,8 +165,8 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
                spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
 
                /* There's no hardware to shutdown, but the driver
-                * might have some tasks or tasklets that must be
-                * stopped before we can tear everything down.
+                * might have some tasks that must be stopped before
+                * we can tear everything down.
                 */
                prism2sta_ifstate(wlandev, P80211ENUM_ifstate_disable);
 
@@ -181,8 +181,8 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
                usb_kill_urb(&hw->tx_urb);
                usb_kill_urb(&hw->ctlx_urb);
 
-               tasklet_kill(&hw->completion_bh);
-               tasklet_kill(&hw->reaper_bh);
+               cancel_work_sync(&hw->completion_bh);
+               cancel_work_sync(&hw->reaper_bh);
 
                cancel_work_sync(&hw->link_bh);
                cancel_work_sync(&hw->commsqual_bh);
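
The hfa384x changes above follow the standard tasklet-to-workqueue recipe; a minimal, self-contained sketch of that recipe with hypothetical foo_* names (not the driver's own symbols):

#include <linux/workqueue.h>

struct foo_dev {
        struct work_struct reaper_work;
};

static void foo_reaper_fn(struct work_struct *work)
{
        /* recover the containing object from the embedded work item */
        struct foo_dev *dev = container_of(work, struct foo_dev, reaper_work);

        /* runs in process context, so sleeping locks are allowed here,
         * unlike in the softirq context of the tasklet it replaces
         */
        (void)dev;
}

/* setup:    INIT_WORK(&dev->reaper_work, foo_reaper_fn);  */
/* trigger:  schedule_work(&dev->reaper_work);             */
/* teardown: cancel_work_sync(&dev->reaper_work);          */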
index bb3fb18..e6a967d 100644 (file)
@@ -972,8 +972,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
 
        cmd->priv = scmd->cmnd;
 
-       blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
-                       pscsi_req_done);
+       blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG);
 
        return 0;
 
index 6554e06..28f87cd 100644 (file)
@@ -512,7 +512,7 @@ int optee_check_mem_type(unsigned long start, size_t num_pages)
         * Allow kernel address to register with OP-TEE as kernel
         * pages are configured as normal memory only.
         */
-       if (virt_addr_valid(start) || is_vmalloc_addr((void *)start))
+       if (virt_addr_valid((void *)start) || is_vmalloc_addr((void *)start))
                return 0;
 
        mmap_read_lock(mm);
index 770d2b0..80d4e06 100644 (file)
@@ -663,6 +663,7 @@ static const struct acpi_device_id int3400_thermal_match[] = {
        {"INT3400", 0},
        {"INTC1040", 0},
        {"INTC1041", 0},
+       {"INTC1042", 0},
        {"INTC10A0", 0},
        {}
 };
index 07e2532..71d084c 100644 (file)
@@ -285,6 +285,7 @@ static const struct acpi_device_id int3403_device_ids[] = {
        {"INT3403", 0},
        {"INTC1043", 0},
        {"INTC1046", 0},
+       {"INTC1062", 0},
        {"INTC10A1", 0},
        {"", 0},
 };
index 49932a6..7d52fcf 100644 (file)
@@ -24,6 +24,7 @@
 #define PCI_DEVICE_ID_INTEL_HSB_THERMAL        0x0A03
 #define PCI_DEVICE_ID_INTEL_ICL_THERMAL        0x8a03
 #define PCI_DEVICE_ID_INTEL_JSL_THERMAL        0x4E03
+#define PCI_DEVICE_ID_INTEL_MTLP_THERMAL       0x7D03
 #define PCI_DEVICE_ID_INTEL_RPL_THERMAL        0xA71D
 #define PCI_DEVICE_ID_INTEL_SKL_THERMAL        0x1903
 #define PCI_DEVICE_ID_INTEL_TGL_THERMAL        0x9A03
index ca40b09..c2dc4c1 100644 (file)
@@ -358,6 +358,7 @@ static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend,
 
 static const struct pci_device_id proc_thermal_pci_ids[] = {
        { PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
+       { PCI_DEVICE_DATA(INTEL, MTLP_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
        { PCI_DEVICE_DATA(INTEL, RPL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
        { },
 };
index 4986edf..e92c658 100644 (file)
@@ -158,21 +158,20 @@ static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
 static struct tb_cfg_request *
 tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
 {
-       struct tb_cfg_request *req;
-       bool found = false;
+       struct tb_cfg_request *req = NULL, *iter;
 
        mutex_lock(&pkg->ctl->request_queue_lock);
-       list_for_each_entry(req, &pkg->ctl->request_queue, list) {
-               tb_cfg_request_get(req);
-               if (req->match(req, pkg)) {
-                       found = true;
+       list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
+               tb_cfg_request_get(iter);
+               if (iter->match(iter, pkg)) {
+                       req = iter;
                        break;
                }
-               tb_cfg_request_put(req);
+               tb_cfg_request_put(iter);
        }
        mutex_unlock(&pkg->ctl->request_queue_lock);
 
-       return found ? req : NULL;
+       return req;
 }
 
 /* utility functions */
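
The tb_cfg_request_find() rework above keeps the looked-up pointer separate from the list iterator, so the result is either NULL or a matched entry; a minimal sketch of that pattern with hypothetical item/key names:

#include <linux/list.h>

struct item {
        struct list_head list;
        int key;
};

static struct item *find_item(struct list_head *head, int key)
{
        struct item *found = NULL, *iter;

        list_for_each_entry(iter, head, list) {
                if (iter->key == key) {
                        found = iter;   /* take a reference here if the list can change */
                        break;
                }
        }
        /* only 'found' leaves the function; the iterator never escapes the loop */
        return found;
}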
index 7018d95..2889a21 100644 (file)
@@ -7,9 +7,7 @@
  */
 
 #include <linux/device.h>
-#include <linux/dmar.h>
 #include <linux/idr.h>
-#include <linux/iommu.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
@@ -257,13 +255,9 @@ static ssize_t iommu_dma_protection_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
 {
-       /*
-        * Kernel DMA protection is a feature where Thunderbolt security is
-        * handled natively using IOMMU. It is enabled when IOMMU is
-        * enabled and ACPI DMAR table has DMAR_PLATFORM_OPT_IN set.
-        */
-       return sprintf(buf, "%d\n",
-                      iommu_present(&pci_bus_type) && dmar_platform_optin());
+       struct tb *tb = container_of(dev, struct tb, dev);
+
+       return sysfs_emit(buf, "%d\n", tb->nhi->iommu_dma_protection);
 }
 static DEVICE_ATTR_RO(iommu_dma_protection);
 
index 4a58218..1333b15 100644 (file)
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
+#include <linux/iommu.h>
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/property.h>
+#include <linux/string_helpers.h>
 
 #include "nhi.h"
 #include "nhi_regs.h"
@@ -1103,6 +1105,47 @@ static void nhi_check_quirks(struct tb_nhi *nhi)
                nhi->quirks |= QUIRK_AUTO_CLEAR_INT;
 }
 
+static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data)
+{
+       if (!pdev->external_facing ||
+           !device_iommu_capable(&pdev->dev, IOMMU_CAP_PRE_BOOT_PROTECTION))
+               return 0;
+       *(bool *)data = true;
+       return 1; /* Stop walking */
+}
+
+static void nhi_check_iommu(struct tb_nhi *nhi)
+{
+       struct pci_bus *bus = nhi->pdev->bus;
+       bool port_ok = false;
+
+       /*
+        * Ideally what we'd do here is grab every PCI device that
+        * represents a tunnelling adapter for this NHI and check their
+        * status directly, but unfortunately USB4 seems to make it
+        * obnoxiously difficult to reliably make any correlation.
+        *
+        * So for now we'll have to bodge it... Hoping that the system
+        * is at least sane enough that an adapter is in the same PCI
+        * segment as its NHI, if we can find *something* on that segment
+        * which meets the requirements for Kernel DMA Protection, we'll
+        * take that to imply that firmware is aware and has (hopefully)
+        * done the right thing in general. We need to know that the PCI
+        * layer has seen the ExternalFacingPort property which will then
+        * inform the IOMMU layer to enforce the complete "untrusted DMA"
+        * flow, but also that the IOMMU driver itself can be trusted not
+        * to have been subverted by a pre-boot DMA attack.
+        */
+       while (bus->parent)
+               bus = bus->parent;
+
+       pci_walk_bus(bus, nhi_check_iommu_pdev, &port_ok);
+
+       nhi->iommu_dma_protection = port_ok;
+       dev_dbg(&nhi->pdev->dev, "IOMMU DMA protection is %s\n",
+               str_enabled_disabled(port_ok));
+}
+
 static int nhi_init_msi(struct tb_nhi *nhi)
 {
        struct pci_dev *pdev = nhi->pdev;
@@ -1207,7 +1250,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        nhi->pdev = pdev;
        nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
-       /* cannot fail - table is allocated bin pcim_iomap_regions */
+       /* cannot fail - table is allocated in pcim_iomap_regions */
        nhi->iobase = pcim_iomap_table(pdev)[0];
        nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
        dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
@@ -1220,6 +1263,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                return -ENOMEM;
 
        nhi_check_quirks(nhi);
+       nhi_check_iommu(nhi);
 
        res = nhi_init_msi(nhi);
        if (res) {
index 299712a..ee03fd7 100644 (file)
@@ -166,6 +166,9 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
                return NULL;
        }
 
+       tb_dbg(path->tb, "discovering %s path starting from %llx:%u\n",
+              path->name, tb_route(src->sw), src->port);
+
        p = src;
        h = src_hopid;
 
@@ -198,10 +201,13 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
                path->hops[i].out_port = out_port;
                path->hops[i].next_hop_index = next_hop;
 
+               tb_dump_hop(&path->hops[i], &hop);
+
                h = next_hop;
                p = out_port->remote;
        }
 
+       tb_dbg(path->tb, "path discovery complete\n");
        return path;
 
 err:
index ac87e8b..561e1d7 100644 (file)
@@ -693,8 +693,14 @@ static int __tb_port_enable(struct tb_port *port, bool enable)
        else
                phy |= LANE_ADP_CS_1_LD;
 
-       return tb_port_write(port, &phy, TB_CFG_PORT,
-                            port->cap_phy + LANE_ADP_CS_1, 1);
+
+       ret = tb_port_write(port, &phy, TB_CFG_PORT,
+                           port->cap_phy + LANE_ADP_CS_1, 1);
+       if (ret)
+               return ret;
+
+       tb_port_dbg(port, "lane %sabled\n", enable ? "en" : "dis");
+       return 0;
 }
 
 /**
@@ -993,7 +999,17 @@ static bool tb_port_is_width_supported(struct tb_port *port, int width)
        return !!(widths & width);
 }
 
-static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
+/**
+ * tb_port_set_link_width() - Set target link width of the lane adapter
+ * @port: Lane adapter
+ * @width: Target link width (%1 or %2)
+ *
+ * Sets the target link width of the lane adapter to @width. Does not
+ * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_port_set_link_width(struct tb_port *port, unsigned int width)
 {
        u32 val;
        int ret;
@@ -1020,13 +1036,59 @@ static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
                return -EINVAL;
        }
 
-       val |= LANE_ADP_CS_1_LB;
-
        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_phy + LANE_ADP_CS_1, 1);
 }
 
 /**
+ * tb_port_set_lane_bonding() - Enable/disable lane bonding
+ * @port: Lane adapter
+ * @bonding: enable/disable bonding
+ *
+ * Enables or disables lane bonding. This should be called after target
+ * link width has been set (tb_port_set_link_width()). Note in most
+ * cases one should use tb_port_lane_bonding_enable() instead to enable
+ * lane bonding.
+ *
+ * As a side effect sets @port->bonding accordingly (and does the same
+ * for lane 1 too).
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
+{
+       u32 val;
+       int ret;
+
+       if (!port->cap_phy)
+               return -EINVAL;
+
+       ret = tb_port_read(port, &val, TB_CFG_PORT,
+                          port->cap_phy + LANE_ADP_CS_1, 1);
+       if (ret)
+               return ret;
+
+       if (bonding)
+               val |= LANE_ADP_CS_1_LB;
+       else
+               val &= ~LANE_ADP_CS_1_LB;
+
+       ret = tb_port_write(port, &val, TB_CFG_PORT,
+                           port->cap_phy + LANE_ADP_CS_1, 1);
+       if (ret)
+               return ret;
+
+       /*
+        * When lane 0 bonding is set it will affect lane 1 too so
+        * update both.
+        */
+       port->bonded = bonding;
+       port->dual_link_port->bonded = bonding;
+
+       return 0;
+}
+
+/**
  * tb_port_lane_bonding_enable() - Enable bonding on port
  * @port: port to enable
  *
@@ -1050,22 +1112,27 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
        if (ret == 1) {
                ret = tb_port_set_link_width(port, 2);
                if (ret)
-                       return ret;
+                       goto err_lane0;
        }
 
        ret = tb_port_get_link_width(port->dual_link_port);
        if (ret == 1) {
                ret = tb_port_set_link_width(port->dual_link_port, 2);
-               if (ret) {
-                       tb_port_set_link_width(port, 1);
-                       return ret;
-               }
+               if (ret)
+                       goto err_lane0;
        }
 
-       port->bonded = true;
-       port->dual_link_port->bonded = true;
+       ret = tb_port_set_lane_bonding(port, true);
+       if (ret)
+               goto err_lane1;
 
        return 0;
+
+err_lane1:
+       tb_port_set_link_width(port->dual_link_port, 1);
+err_lane0:
+       tb_port_set_link_width(port, 1);
+       return ret;
 }
 
 /**
@@ -1074,13 +1141,10 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
  *
  * Disable bonding by setting the link width of the port and the
  * other port in case of dual link port.
- *
  */
 void tb_port_lane_bonding_disable(struct tb_port *port)
 {
-       port->dual_link_port->bonded = false;
-       port->bonded = false;
-
+       tb_port_set_lane_bonding(port, false);
        tb_port_set_link_width(port->dual_link_port, 1);
        tb_port_set_link_width(port, 1);
 }
@@ -1104,10 +1168,17 @@ int tb_port_wait_for_link_width(struct tb_port *port, int width,
 
        do {
                ret = tb_port_get_link_width(port);
-               if (ret < 0)
-                       return ret;
-               else if (ret == width)
+               if (ret < 0) {
+                       /*
+                        * Sometimes we get port locked error when
+                        * polling the lanes so we can ignore it and
+                        * retry.
+                        */
+                       if (ret != -EACCES)
+                               return ret;
+               } else if (ret == width) {
                        return 0;
+               }
 
                usleep_range(1000, 2000);
        } while (ktime_before(ktime_get(), timeout));
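
Per the kernel-doc above, enabling bonding is now a two-step operation: program the target width on both lane adapters, then set the bonding bit. A condensed sketch of the calling order, mirroring tb_port_lane_bonding_enable() with error handling trimmed and an illustrative timeout:

        /* 1. program the target width on both lane adapters */
        tb_port_set_link_width(port, 2);
        tb_port_set_link_width(port->dual_link_port, 2);

        /* 2. only then flip the bonding bit (also updates port->bonded) */
        tb_port_set_lane_bonding(port, true);

        /* 3. the caller then waits for the link to come up at the new width */
        tb_port_wait_for_link_width(port, 2, 100);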
index 9beb47b..9a3214f 100644 (file)
@@ -169,12 +169,6 @@ static void tb_discover_tunnels(struct tb *tb)
 
 static int tb_port_configure_xdomain(struct tb_port *port)
 {
-       /*
-        * XDomain paths currently only support single lane so we must
-        * disable the other lane according to USB4 spec.
-        */
-       tb_port_disable(port->dual_link_port);
-
        if (tb_switch_is_usb4(port->sw))
                return usb4_port_configure_xdomain(port);
        return tb_lc_configure_xdomain(port);
@@ -867,7 +861,7 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
 
 static void tb_tunnel_dp(struct tb *tb)
 {
-       int available_up, available_down, ret;
+       int available_up, available_down, ret, link_nr;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *port, *in, *out;
        struct tb_tunnel *tunnel;
@@ -913,6 +907,20 @@ static void tb_tunnel_dp(struct tb *tb)
        }
 
        /*
+        * This is only applicable to links that are not bonded (so
+        * when Thunderbolt 1 hardware is involved somewhere in the
+        * topology). For these try to share the DP bandwidth between
+        * the two lanes.
+        */
+       link_nr = 1;
+       list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+               if (tb_tunnel_is_dp(tunnel)) {
+                       link_nr = 0;
+                       break;
+               }
+       }
+
+       /*
         * DP stream needs the domain to be active so runtime resume
         * both ends of the tunnel.
         *
@@ -943,7 +951,8 @@ static void tb_tunnel_dp(struct tb *tb)
        tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
               available_up, available_down);
 
-       tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
+       tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
+                                   available_down);
        if (!tunnel) {
                tb_port_dbg(out, "could not allocate DP tunnel\n");
                goto err_reclaim;
index b6fcd8d..4602c69 100644 (file)
@@ -674,7 +674,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
 #define __TB_PORT_PRINT(level, _port, fmt, arg...)                      \
        do {                                                            \
                const struct tb_port *__port = (_port);                 \
-               level(__port->sw->tb, "%llx:%x: " fmt,                  \
+               level(__port->sw->tb, "%llx:%u: " fmt,                  \
                      tb_route(__port->sw), __port->port, ## arg);      \
        } while (0)
 #define tb_port_WARN(port, fmt, arg...) \
@@ -991,6 +991,7 @@ int tb_switch_pcie_l1_enable(struct tb_switch *sw);
 int tb_switch_xhci_connect(struct tb_switch *sw);
 void tb_switch_xhci_disconnect(struct tb_switch *sw);
 
+int tb_port_state(struct tb_port *port);
 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
 int tb_port_add_nfc_credits(struct tb_port *port, int credits);
 int tb_port_clear_counter(struct tb_port *port, int counter);
@@ -1023,7 +1024,8 @@ static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
 
 int tb_port_get_link_speed(struct tb_port *port);
 int tb_port_get_link_width(struct tb_port *port);
-int tb_port_state(struct tb_port *port);
+int tb_port_set_link_width(struct tb_port *port, unsigned int width);
+int tb_port_set_lane_bonding(struct tb_port *port, bool bonding);
 int tb_port_lane_bonding_enable(struct tb_port *port);
 void tb_port_lane_bonding_disable(struct tb_port *port);
 int tb_port_wait_for_link_width(struct tb_port *port, int width,
index fe1afa4..33c4c7a 100644 (file)
@@ -527,6 +527,10 @@ enum tb_xdp_type {
        PROPERTIES_CHANGED_RESPONSE,
        ERROR_RESPONSE,
        UUID_REQUEST = 12,
+       LINK_STATE_STATUS_REQUEST = 15,
+       LINK_STATE_STATUS_RESPONSE,
+       LINK_STATE_CHANGE_REQUEST,
+       LINK_STATE_CHANGE_RESPONSE,
 };
 
 struct tb_xdp_header {
@@ -540,6 +544,41 @@ struct tb_xdp_error_response {
        u32 error;
 };
 
+struct tb_xdp_link_state_status {
+       struct tb_xdp_header hdr;
+};
+
+struct tb_xdp_link_state_status_response {
+       union {
+               struct tb_xdp_error_response err;
+               struct {
+                       struct tb_xdp_header hdr;
+                       u32 status;
+                       u8 slw;
+                       u8 tlw;
+                       u8 sls;
+                       u8 tls;
+               };
+       };
+};
+
+struct tb_xdp_link_state_change {
+       struct tb_xdp_header hdr;
+       u8 tlw;
+       u8 tls;
+       u16 reserved;
+};
+
+struct tb_xdp_link_state_change_response {
+       union {
+               struct tb_xdp_error_response err;
+               struct {
+                       struct tb_xdp_header hdr;
+                       u32 status;
+               };
+       };
+};
+
 struct tb_xdp_uuid {
        struct tb_xdp_header hdr;
 };
index b301eeb..6a16f61 100644 (file)
@@ -311,11 +311,16 @@ struct tb_regs_port_header {
 
 /* Lane adapter registers */
 #define LANE_ADP_CS_0                          0x00
+#define LANE_ADP_CS_0_SUPPORTED_SPEED_MASK     GENMASK(19, 16)
+#define LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT    16
 #define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK     GENMASK(25, 20)
 #define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT    20
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL     0x2
 #define LANE_ADP_CS_0_CL0S_SUPPORT             BIT(26)
 #define LANE_ADP_CS_0_CL1_SUPPORT              BIT(27)
 #define LANE_ADP_CS_1                          0x01
+#define LANE_ADP_CS_1_TARGET_SPEED_MASK                GENMASK(3, 0)
+#define LANE_ADP_CS_1_TARGET_SPEED_GEN3                0xc
 #define LANE_ADP_CS_1_TARGET_WIDTH_MASK                GENMASK(9, 4)
 #define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT       4
 #define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE      0x1
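
The new speed fields follow the same mask/shift convention as the existing width fields; a minimal sketch of reading them out of LANE_ADP_CS_0 (assumes a struct tb_port *port in scope, return value check omitted):

        u32 val;
        unsigned int speeds, widths;

        tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_0, 1);

        speeds = (val & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
                 LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
        widths = (val & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
                 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;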
index be9b1d7..ee37f8b 100644 (file)
@@ -341,6 +341,47 @@ static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
        return sw;
 }
 
+static struct tb_switch *alloc_dev_without_dp(struct kunit *test,
+                                             struct tb_switch *parent,
+                                             u64 route, bool bonded)
+{
+       struct tb_switch *sw;
+       int i;
+
+       sw = alloc_dev_default(test, parent, route, bonded);
+       if (!sw)
+               return NULL;
+       /*
+        * Device with:
+        * 2x USB4 Adapters (adapters 1,2 and 3,4),
+        * 1x PCIe Upstream (adapter 9),
+        * 1x PCIe Downstream (adapter 10),
+        * 1x USB3 Upstream (adapter 16),
+        * 1x USB3 Downstream (adapter 17)
+        */
+       for (i = 5; i <= 8; i++)
+               sw->ports[i].disabled = true;
+
+       for (i = 11; i <= 14; i++)
+               sw->ports[i].disabled = true;
+
+       sw->ports[13].cap_adap = 0;
+       sw->ports[14].cap_adap = 0;
+
+       for (i = 18; i <= 19; i++)
+               sw->ports[i].disabled = true;
+
+       sw->generation = 4;
+       sw->credit_allocation = true;
+       sw->max_usb3_credits = 109;
+       sw->min_dp_aux_credits = 0;
+       sw->min_dp_main_credits = 0;
+       sw->max_pcie_credits = 30;
+       sw->max_dma_credits = 1;
+
+       return sw;
+}
+
 static struct tb_switch *alloc_dev_usb4(struct kunit *test,
                                        struct tb_switch *parent,
                                        u64 route, bool bonded)
@@ -1348,7 +1389,7 @@ static void tb_test_tunnel_dp(struct kunit *test)
        in = &host->ports[5];
        out = &dev->ports[13];
 
-       tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+       tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1394,7 +1435,7 @@ static void tb_test_tunnel_dp_chain(struct kunit *test)
        in = &host->ports[5];
        out = &dev4->ports[14];
 
-       tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+       tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1444,7 +1485,7 @@ static void tb_test_tunnel_dp_tree(struct kunit *test)
        in = &dev2->ports[13];
        out = &dev5->ports[13];
 
-       tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+       tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1509,7 +1550,7 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
        in = &dev6->ports[13];
        out = &dev12->ports[13];
 
-       tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+       tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1627,7 +1668,7 @@ static void tb_test_tunnel_port_on_path(struct kunit *test)
        in = &dev2->ports[13];
        out = &dev5->ports[13];
 
-       dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+       dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);
 
        KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
@@ -1996,6 +2037,56 @@ static void tb_test_credit_alloc_pcie(struct kunit *test)
        tb_tunnel_free(tunnel);
 }
 
+static void tb_test_credit_alloc_without_dp(struct kunit *test)
+{
+       struct tb_switch *host, *dev;
+       struct tb_port *up, *down;
+       struct tb_tunnel *tunnel;
+       struct tb_path *path;
+
+       host = alloc_host_usb4(test);
+       dev = alloc_dev_without_dp(test, host, 0x1, true);
+
+       /*
+        * The device has no DP therefore baMinDPmain = baMinDPaux = 0
+        *
+        * Create PCIe path with buffers less than baMaxPCIe.
+        *
+        * For a device with buffers configurations:
+        * baMaxUSB3 = 109
+        * baMinDPaux = 0
+        * baMinDPmain = 0
+        * baMaxPCIe = 30
+        * baMaxHI = 1
+        * Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
+        * PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3))
+        *              = Max(6, Min(30, 9)) = 9
+        */
+       down = &host->ports[8];
+       up = &dev->ports[9];
+       tunnel = tb_tunnel_alloc_pci(NULL, up, down);
+       KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+       KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
+
+       /* PCIe downstream path */
+       path = tunnel->paths[0];
+       KUNIT_ASSERT_EQ(test, path->path_length, 2);
+       KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+       KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+       KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+       KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);
+
+       /* PCIe upstream path */
+       path = tunnel->paths[1];
+       KUNIT_ASSERT_EQ(test, path->path_length, 2);
+       KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+       KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+       KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+       KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
+
+       tb_tunnel_free(tunnel);
+}
+
 static void tb_test_credit_alloc_dp(struct kunit *test)
 {
        struct tb_switch *host, *dev;
@@ -2009,7 +2100,7 @@ static void tb_test_credit_alloc_dp(struct kunit *test)
        in = &host->ports[5];
        out = &dev->ports[14];
 
-       tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+       tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
 
@@ -2245,7 +2336,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
 
        in = &host->ports[5];
        out = &dev->ports[13];
-       dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+       dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
        KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
 
@@ -2282,7 +2373,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
 
        in = &host->ports[6];
        out = &dev->ports[14];
-       dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+       dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
        KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
 
@@ -2709,6 +2800,7 @@ static struct kunit_case tb_test_cases[] = {
        KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
        KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
        KUNIT_CASE(tb_test_credit_alloc_pcie),
+       KUNIT_CASE(tb_test_credit_alloc_without_dp),
        KUNIT_CASE(tb_test_credit_alloc_dp),
        KUNIT_CASE(tb_test_credit_alloc_usb3),
        KUNIT_CASE(tb_test_credit_alloc_dma),
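
As a side note on the arithmetic spelled out in the tb_test_credit_alloc_without_dp() comment above, here is the same calculation as a standalone, plain C sketch; the helper name and the sample values are illustrative only, not part of the driver.

#include <stdio.h>

static unsigned int pcie_credits(unsigned int total, unsigned int cp,
                                 unsigned int dp, unsigned int max_usb3,
                                 unsigned int max_pcie)
{
        unsigned int remaining = total - (cp + dp);
        unsigned int avail = remaining > max_usb3 ? remaining - max_usb3 : 0;
        unsigned int credits = avail < max_pcie ? avail : max_pcie;

        /* PCIe Credits = Max(6, Min(baMaxPCIe, Remaining - baMaxUSB3)) */
        return credits > 6 ? credits : 6;
}

int main(void)
{
        /* Values from the comment: 120 total, CP = 2, DP = 0,
         * baMaxUSB3 = 109, baMaxPCIe = 30 -> prints 9 */
        printf("PCIe credits = %u\n", pcie_credits(120, 2, 0, 109, 30));
        return 0;
}
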
index 118742e..2c3cf7f 100644 (file)
@@ -102,8 +102,11 @@ static unsigned int tb_available_credits(const struct tb_port *port,
                 * Maximum number of DP streams possible through the
                 * lane adapter.
                 */
-               ndp = (credits - (usb3 + pcie + spare)) /
-                     (sw->min_dp_aux_credits + sw->min_dp_main_credits);
+               if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
+                       ndp = (credits - (usb3 + pcie + spare)) /
+                             (sw->min_dp_aux_credits + sw->min_dp_main_credits);
+               else
+                       ndp = 0;
        } else {
                ndp = 0;
        }
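
The guard above avoids a divide-by-zero when a device advertises no DP buffers at all (baMinDPaux = baMinDPmain = 0). A plain C sketch of the same computation, with illustrative values:

#include <stdio.h>

static unsigned int dp_streams(unsigned int credits, unsigned int usb3,
                               unsigned int pcie, unsigned int spare,
                               unsigned int dp_aux, unsigned int dp_main)
{
        unsigned int per_stream = dp_aux + dp_main;

        if (!per_stream)
                return 0;       /* no DP support: nothing to divide by */
        return (credits - (usb3 + pcie + spare)) / per_stream;
}

int main(void)
{
        printf("%u\n", dp_streams(120, 109, 6, 1, 0, 0));  /* 0: no DP buffers */
        printf("%u\n", dp_streams(120, 20, 6, 1, 1, 18));  /* 4 streams */
        return 0;
}
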
@@ -858,6 +861,7 @@ err_free:
  * @tb: Pointer to the domain structure
  * @in: DP in adapter port
  * @out: DP out adapter port
+ * @link_nr: Preferred lane adapter when the link is not bonded
  * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
  *         if not limited)
  * @max_down: Maximum available downstream bandwidth for the DP tunnel
@@ -869,8 +873,8 @@ err_free:
  * Return: Returns a tb_tunnel on success or NULL on failure.
  */
 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
-                                    struct tb_port *out, int max_up,
-                                    int max_down)
+                                    struct tb_port *out, int link_nr,
+                                    int max_up, int max_down)
 {
        struct tb_tunnel *tunnel;
        struct tb_path **paths;
@@ -894,21 +898,21 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
        paths = tunnel->paths;
 
        path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
-                            1, "Video");
+                            link_nr, "Video");
        if (!path)
                goto err_free;
        tb_dp_init_video_path(path);
        paths[TB_DP_VIDEO_PATH_OUT] = path;
 
        path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
-                            TB_DP_AUX_TX_HOPID, 1, "AUX TX");
+                            TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
        if (!path)
                goto err_free;
        tb_dp_init_aux_path(path);
        paths[TB_DP_AUX_PATH_OUT] = path;
 
        path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
-                            TB_DP_AUX_RX_HOPID, 1, "AUX RX");
+                            TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
        if (!path)
                goto err_free;
        tb_dp_init_aux_path(path);
index 03e5607..bb4d1f1 100644 (file)
@@ -71,8 +71,8 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
 struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
                                        bool alloc_hopid);
 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
-                                    struct tb_port *out, int max_up,
-                                    int max_down);
+                                    struct tb_port *out, int link_nr,
+                                    int max_up, int max_down);
 struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
                                      struct tb_port *dst, int transmit_path,
                                      int transmit_ring, int receive_path,
index 29e2a4f..6b02945 100644 (file)
@@ -7,9 +7,37 @@
  */
 
 #include <linux/pm_runtime.h>
+#include <linux/component.h>
+#include <linux/property.h>
 
 #include "tb.h"
 
+static int connector_bind(struct device *dev, struct device *connector, void *data)
+{
+       int ret;
+
+       ret = sysfs_create_link(&dev->kobj, &connector->kobj, "connector");
+       if (ret)
+               return ret;
+
+       ret = sysfs_create_link(&connector->kobj, &dev->kobj, dev_name(dev));
+       if (ret)
+               sysfs_remove_link(&dev->kobj, "connector");
+
+       return ret;
+}
+
+static void connector_unbind(struct device *dev, struct device *connector, void *data)
+{
+       sysfs_remove_link(&connector->kobj, dev_name(dev));
+       sysfs_remove_link(&dev->kobj, "connector");
+}
+
+static const struct component_ops connector_ops = {
+       .bind = connector_bind,
+       .unbind = connector_unbind,
+};
+
 static ssize_t link_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
@@ -246,6 +274,14 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port)
                return ERR_PTR(ret);
        }
 
+       if (dev_fwnode(&usb4->dev)) {
+               ret = component_add(&usb4->dev, &connector_ops);
+               if (ret) {
+                       dev_err(&usb4->dev, "failed to add component\n");
+                       device_unregister(&usb4->dev);
+               }
+       }
+
        pm_runtime_no_callbacks(&usb4->dev);
        pm_runtime_set_active(&usb4->dev);
        pm_runtime_enable(&usb4->dev);
@@ -265,6 +301,8 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port)
  */
 void usb4_port_device_remove(struct usb4_port *usb4)
 {
+       if (dev_fwnode(&usb4->dev))
+               component_del(&usb4->dev, &connector_ops);
        device_unregister(&usb4->dev);
 }
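
The connector bind callback added above creates a pair of symmetric sysfs links and tears the first one down if the second cannot be created. The same unwinding pattern, reduced to a self-contained C sketch with stand-in operations (the function names here are hypothetical):

#include <stdbool.h>
#include <stdio.h>

static bool link_port_to_connector(void) { return true; }
static bool link_connector_to_port(void) { return false; }  /* pretend this fails */
static void unlink_port_to_connector(void) { puts("rolled back port->connector"); }

static int bind_pair(void)
{
        if (!link_port_to_connector())
                return -1;
        if (!link_connector_to_port()) {
                unlink_port_to_connector();     /* undo the half-created state */
                return -1;
        }
        return 0;
}

int main(void)
{
        printf("bind_pair() = %d\n", bind_pair());
        return 0;
}
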
 
index 01d6b72..c31c0d9 100644 (file)
 
 #include "tb.h"
 
-#define XDOMAIN_DEFAULT_TIMEOUT                        1000 /* ms */
-#define XDOMAIN_UUID_RETRIES                   10
-#define XDOMAIN_PROPERTIES_RETRIES             10
-#define XDOMAIN_PROPERTIES_CHANGED_RETRIES     10
-#define XDOMAIN_BONDING_WAIT                   100  /* ms */
+#define XDOMAIN_SHORT_TIMEOUT                  100     /* ms */
+#define XDOMAIN_DEFAULT_TIMEOUT                        1000    /* ms */
+#define XDOMAIN_BONDING_TIMEOUT                        10000   /* ms */
+#define XDOMAIN_RETRIES                                10
 #define XDOMAIN_DEFAULT_MAX_HOPID              15
 
+enum {
+       XDOMAIN_STATE_INIT,
+       XDOMAIN_STATE_UUID,
+       XDOMAIN_STATE_LINK_STATUS,
+       XDOMAIN_STATE_LINK_STATE_CHANGE,
+       XDOMAIN_STATE_LINK_STATUS2,
+       XDOMAIN_STATE_BONDING_UUID_LOW,
+       XDOMAIN_STATE_BONDING_UUID_HIGH,
+       XDOMAIN_STATE_PROPERTIES,
+       XDOMAIN_STATE_ENUMERATED,
+       XDOMAIN_STATE_ERROR,
+};
+
+static const char * const state_names[] = {
+       [XDOMAIN_STATE_INIT] = "INIT",
+       [XDOMAIN_STATE_UUID] = "UUID",
+       [XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS",
+       [XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE",
+       [XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2",
+       [XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW",
+       [XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH",
+       [XDOMAIN_STATE_PROPERTIES] = "PROPERTIES",
+       [XDOMAIN_STATE_ENUMERATED] = "ENUMERATED",
+       [XDOMAIN_STATE_ERROR] = "ERROR",
+};
+
 struct xdomain_request_work {
        struct work_struct work;
        struct tb_xdp_header *pkg;
@@ -235,7 +260,7 @@ static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
 }
 
 static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
-                              uuid_t *uuid)
+                              uuid_t *uuid, u64 *remote_route)
 {
        struct tb_xdp_uuid_response res;
        struct tb_xdp_uuid req;
@@ -258,6 +283,8 @@ static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
                return ret;
 
        uuid_copy(uuid, &res.src_uuid);
+       *remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo;
+
        return 0;
 }
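
The remote route added to the UUID exchange above arrives as two 32-bit words; combining them is a shift and an OR. A tiny standalone illustration:

#include <stdint.h>
#include <stdio.h>

static uint64_t make_route(uint32_t hi, uint32_t lo)
{
        return (uint64_t)hi << 32 | lo;
}

int main(void)
{
        /* Prints 0x100030201 */
        printf("%#llx\n", (unsigned long long)make_route(0x1, 0x00030201));
        return 0;
}
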
 
@@ -473,6 +500,112 @@ tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
                                     TB_CFG_PKG_XDOMAIN_RESP);
 }
 
+static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
+                                           u8 sequence, u8 *slw, u8 *tlw,
+                                           u8 *sls, u8 *tls)
+{
+       struct tb_xdp_link_state_status_response res;
+       struct tb_xdp_link_state_status req;
+       int ret;
+
+       memset(&req, 0, sizeof(req));
+       tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST,
+                          sizeof(req));
+
+       memset(&res, 0, sizeof(res));
+       ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
+                                  &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
+                                  XDOMAIN_DEFAULT_TIMEOUT);
+       if (ret)
+               return ret;
+
+       ret = tb_xdp_handle_error(&res.err);
+       if (ret)
+               return ret;
+
+       if (res.status != 0)
+               return -EREMOTEIO;
+
+       *slw = res.slw;
+       *tlw = res.tlw;
+       *sls = res.sls;
+       *tls = res.tls;
+
+       return 0;
+}
+
+static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
+                                            struct tb_xdomain *xd, u8 sequence)
+{
+       struct tb_switch *sw = tb_to_switch(xd->dev.parent);
+       struct tb_xdp_link_state_status_response res;
+       struct tb_port *port = tb_port_at(xd->route, sw);
+       u32 val[2];
+       int ret;
+
+       memset(&res, 0, sizeof(res));
+       tb_xdp_fill_header(&res.hdr, xd->route, sequence,
+                          LINK_STATE_STATUS_RESPONSE, sizeof(res));
+
+       ret = tb_port_read(port, val, TB_CFG_PORT,
+                          port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val));
+       if (ret)
+               return ret;
+
+       res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
+                       LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
+       res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
+                       LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
+       res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;
+       res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
+                       LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+
+       return __tb_xdomain_response(ctl, &res, sizeof(res),
+                                    TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route,
+                                           u8 sequence, u8 tlw, u8 tls)
+{
+       struct tb_xdp_link_state_change_response res;
+       struct tb_xdp_link_state_change req;
+       int ret;
+
+       memset(&req, 0, sizeof(req));
+       tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST,
+                          sizeof(req));
+       req.tlw = tlw;
+       req.tls = tls;
+
+       memset(&res, 0, sizeof(res));
+       ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
+                                  &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
+                                  XDOMAIN_DEFAULT_TIMEOUT);
+       if (ret)
+               return ret;
+
+       ret = tb_xdp_handle_error(&res.err);
+       if (ret)
+               return ret;
+
+       return res.status != 0 ? -EREMOTEIO : 0;
+}
+
+static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
+                                            u8 sequence, u32 status)
+{
+       struct tb_xdp_link_state_change_response res;
+
+       memset(&res, 0, sizeof(res));
+       tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE,
+                          sizeof(res));
+
+       res.status = status;
+
+       return __tb_xdomain_response(ctl, &res, sizeof(res),
+                                    TB_CFG_PKG_XDOMAIN_RESP);
+}
+
 /**
  * tb_register_protocol_handler() - Register protocol handler
  * @handler: Handler to register
@@ -600,14 +733,13 @@ static void tb_xdp_handle_request(struct work_struct *work)
                goto out;
        }
 
-       tb_dbg(tb, "%llx: received XDomain request %#x\n", route, pkg->type);
-
        xd = tb_xdomain_find_by_route_locked(tb, route);
        if (xd)
                update_property_block(xd);
 
        switch (pkg->type) {
        case PROPERTIES_REQUEST:
+               tb_dbg(tb, "%llx: received XDomain properties request\n", route);
                if (xd) {
                        ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
                                (const struct tb_xdp_properties *)pkg);
@@ -615,6 +747,9 @@ static void tb_xdp_handle_request(struct work_struct *work)
                break;
 
        case PROPERTIES_CHANGED_REQUEST:
+               tb_dbg(tb, "%llx: received XDomain properties changed request\n",
+                      route);
+
                ret = tb_xdp_properties_changed_response(ctl, route, sequence);
 
                /*
@@ -622,18 +757,51 @@ static void tb_xdp_handle_request(struct work_struct *work)
                 * the xdomain related to this connection as well in
                 * case there is a change in services it offers.
                 */
-               if (xd && device_is_registered(&xd->dev)) {
-                       queue_delayed_work(tb->wq, &xd->get_properties_work,
-                                          msecs_to_jiffies(50));
-               }
+               if (xd && device_is_registered(&xd->dev))
+                       queue_delayed_work(tb->wq, &xd->state_work,
+                                          msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
                break;
 
        case UUID_REQUEST_OLD:
        case UUID_REQUEST:
+               tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
                ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
                break;
 
+       case LINK_STATE_STATUS_REQUEST:
+               tb_dbg(tb, "%llx: received XDomain link state status request\n",
+                      route);
+
+               if (xd) {
+                       ret = tb_xdp_link_state_status_response(tb, ctl, xd,
+                                                               sequence);
+               } else {
+                       tb_xdp_error_response(ctl, route, sequence,
+                                             ERROR_NOT_READY);
+               }
+               break;
+
+       case LINK_STATE_CHANGE_REQUEST:
+               tb_dbg(tb, "%llx: received XDomain link state change request\n",
+                      route);
+
+               if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) {
+                       const struct tb_xdp_link_state_change *lsc =
+                               (const struct tb_xdp_link_state_change *)pkg;
+
+                       ret = tb_xdp_link_state_change_response(ctl, route,
+                                                               sequence, 0);
+                       xd->target_link_width = lsc->tlw;
+                       queue_delayed_work(tb->wq, &xd->state_work,
+                                          msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+               } else {
+                       tb_xdp_error_response(ctl, route, sequence,
+                                             ERROR_NOT_READY);
+               }
+               break;
+
        default:
+               tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type);
                tb_xdp_error_response(ctl, route, sequence,
                                      ERROR_NOT_SUPPORTED);
                break;
@@ -1000,32 +1168,38 @@ static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
        return 0;
 }
 
-static void tb_xdomain_get_uuid(struct work_struct *work)
+static int tb_xdomain_get_uuid(struct tb_xdomain *xd)
 {
-       struct tb_xdomain *xd = container_of(work, typeof(*xd),
-                                            get_uuid_work.work);
        struct tb *tb = xd->tb;
        uuid_t uuid;
+       u64 route;
        int ret;
 
        dev_dbg(&xd->dev, "requesting remote UUID\n");
 
-       ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
+       ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid,
+                                 &route);
        if (ret < 0) {
-               if (xd->uuid_retries-- > 0) {
+               if (xd->state_retries-- > 0) {
                        dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
-                       queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
-                                          msecs_to_jiffies(100));
+                       return -EAGAIN;
                } else {
                        dev_dbg(&xd->dev, "failed to read remote UUID\n");
                }
-               return;
+               return ret;
        }
 
        dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);
 
-       if (uuid_equal(&uuid, xd->local_uuid))
-               dev_dbg(&xd->dev, "intra-domain loop detected\n");
+       if (uuid_equal(&uuid, xd->local_uuid)) {
+               if (route == xd->route)
+                       dev_dbg(&xd->dev, "loopback detected\n");
+               else
+                       dev_dbg(&xd->dev, "intra-domain loop detected\n");
+
+               /* Don't bond lanes automatically for loops */
+               xd->bonding_possible = false;
+       }
 
        /*
         * If the UUID is different, there is another domain connected
@@ -1035,27 +1209,152 @@ static void tb_xdomain_get_uuid(struct work_struct *work)
        if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
                dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
                xd->is_unplugged = true;
-               return;
+               return -ENODEV;
        }
 
        /* First time fill in the missing UUID */
        if (!xd->remote_uuid) {
                xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
                if (!xd->remote_uuid)
-                       return;
+                       return -ENOMEM;
        }
 
-       /* Now we can start the normal properties exchange */
-       queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
-                          msecs_to_jiffies(100));
-       queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
-                          msecs_to_jiffies(1000));
+       return 0;
 }
 
-static void tb_xdomain_get_properties(struct work_struct *work)
+static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
+{
+       struct tb *tb = xd->tb;
+       u8 slw, tlw, sls, tls;
+       int ret;
+
+       dev_dbg(&xd->dev, "sending link state status request to %pUb\n",
+               xd->remote_uuid);
+
+       ret = tb_xdp_link_state_status_request(tb->ctl, xd->route,
+                                              xd->state_retries, &slw, &tlw, &sls,
+                                              &tls);
+       if (ret) {
+               if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
+                       dev_dbg(&xd->dev,
+                               "failed to request remote link status, retrying\n");
+                       return -EAGAIN;
+               }
+               dev_dbg(&xd->dev, "failed to receive remote link status\n");
+               return ret;
+       }
+
+       dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls);
+
+       if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) {
+               dev_dbg(&xd->dev, "remote adapter is single lane only\n");
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
+                                       unsigned int width)
+{
+       struct tb_switch *sw = tb_to_switch(xd->dev.parent);
+       struct tb_port *port = tb_port_at(xd->route, sw);
+       struct tb *tb = xd->tb;
+       u8 tlw, tls;
+       u32 val;
+       int ret;
+
+       if (width == 2)
+               tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL;
+       else if (width == 1)
+               tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE;
+       else
+               return -EINVAL;
+
+       /* Use the current target speed */
+       ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1);
+       if (ret)
+               return ret;
+       tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;
+
+       dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n",
+               tlw, tls);
+
+       ret = tb_xdp_link_state_change_request(tb->ctl, xd->route,
+                                              xd->state_retries, tlw, tls);
+       if (ret) {
+               if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
+                       dev_dbg(&xd->dev,
+                               "failed to change remote link state, retrying\n");
+                       return -EAGAIN;
+               }
+               dev_err(&xd->dev, "failed to request link state change, aborting\n");
+               return ret;
+       }
+
+       dev_dbg(&xd->dev, "received link state change response\n");
+       return 0;
+}
+
+static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
+{
+       struct tb_port *port;
+       int ret, width;
+
+       if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
+               width = 1;
+       } else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
+               width = 2;
+       } else {
+               if (xd->state_retries-- > 0) {
+                       dev_dbg(&xd->dev,
+                               "link state change request not received yet, retrying\n");
+                       return -EAGAIN;
+               }
+               dev_dbg(&xd->dev, "timeout waiting for link change request\n");
+               return -ETIMEDOUT;
+       }
+
+       port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+
+       /*
+        * We can't use tb_xdomain_lane_bonding_enable() here because it
+        * is the other side that initiates lane bonding. So here we
+        * just set the width on both lane adapters and wait for the
+        * link to transition to the bonded state.
+        */
+       ret = tb_port_set_link_width(port->dual_link_port, width);
+       if (ret) {
+               tb_port_warn(port->dual_link_port,
+                            "failed to set link width to %d\n", width);
+               return ret;
+       }
+
+       ret = tb_port_set_link_width(port, width);
+       if (ret) {
+               tb_port_warn(port, "failed to set link width to %d\n", width);
+               return ret;
+       }
+
+       ret = tb_port_wait_for_link_width(port, width, XDOMAIN_BONDING_TIMEOUT);
+       if (ret) {
+               dev_warn(&xd->dev, "error waiting for link width to become %d\n",
+                        width);
+               return ret;
+       }
+
+       port->bonded = width == 2;
+       port->dual_link_port->bonded = width == 2;
+
+       tb_port_update_credits(port);
+       tb_xdomain_update_link_attributes(xd);
+
+       dev_dbg(&xd->dev, "lane bonding %sabled\n", width == 2 ? "en" : "dis");
+       return 0;
+}
+
+static int tb_xdomain_get_properties(struct tb_xdomain *xd)
 {
-       struct tb_xdomain *xd = container_of(work, typeof(*xd),
-                                            get_properties_work.work);
        struct tb_property_dir *dir;
        struct tb *tb = xd->tb;
        bool update = false;
@@ -1066,34 +1365,35 @@ static void tb_xdomain_get_properties(struct work_struct *work)
        dev_dbg(&xd->dev, "requesting remote properties\n");
 
        ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
-                                       xd->remote_uuid, xd->properties_retries,
+                                       xd->remote_uuid, xd->state_retries,
                                        &block, &gen);
        if (ret < 0) {
-               if (xd->properties_retries-- > 0) {
+               if (xd->state_retries-- > 0) {
                        dev_dbg(&xd->dev,
                                "failed to request remote properties, retrying\n");
-                       queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
-                                          msecs_to_jiffies(1000));
+                       return -EAGAIN;
                } else {
                        /* Give up now */
                        dev_err(&xd->dev,
                                "failed read XDomain properties from %pUb\n",
                                xd->remote_uuid);
                }
-               return;
-       }
 
-       xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
+               return ret;
+       }
 
        mutex_lock(&xd->lock);
 
        /* Only accept newer generation properties */
-       if (xd->remote_properties && gen <= xd->remote_property_block_gen)
+       if (xd->remote_properties && gen <= xd->remote_property_block_gen) {
+               ret = 0;
                goto err_free_block;
+       }
 
        dir = tb_property_parse_dir(block, ret);
        if (!dir) {
                dev_err(&xd->dev, "failed to parse XDomain properties\n");
+               ret = -ENOMEM;
                goto err_free_block;
        }
 
@@ -1124,9 +1424,16 @@ static void tb_xdomain_get_properties(struct work_struct *work)
         * registered, we notify the userspace that it has changed.
         */
        if (!update) {
+               struct tb_port *port;
+
+               /* Now disable lane 1 if bonding was not enabled */
+               port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+               if (!port->bonded)
+                       tb_port_disable(port->dual_link_port);
+
                if (device_add(&xd->dev)) {
                        dev_err(&xd->dev, "failed to add XDomain device\n");
-                       return;
+                       return -ENODEV;
                }
                dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
                         xd->vendor, xd->device);
@@ -1138,13 +1445,193 @@ static void tb_xdomain_get_properties(struct work_struct *work)
        }
 
        enumerate_services(xd);
-       return;
+       return 0;
 
 err_free_dir:
        tb_property_free_dir(dir);
 err_free_block:
        kfree(block);
        mutex_unlock(&xd->lock);
+
+       return ret;
+}
+
+static void tb_xdomain_queue_uuid(struct tb_xdomain *xd)
+{
+       xd->state = XDOMAIN_STATE_UUID;
+       xd->state_retries = XDOMAIN_RETRIES;
+       queue_delayed_work(xd->tb->wq, &xd->state_work,
+                          msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_link_status(struct tb_xdomain *xd)
+{
+       xd->state = XDOMAIN_STATE_LINK_STATUS;
+       xd->state_retries = XDOMAIN_RETRIES;
+       queue_delayed_work(xd->tb->wq, &xd->state_work,
+                          msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd)
+{
+       xd->state = XDOMAIN_STATE_LINK_STATUS2;
+       xd->state_retries = XDOMAIN_RETRIES;
+       queue_delayed_work(xd->tb->wq, &xd->state_work,
+                          msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_bonding(struct tb_xdomain *xd)
+{
+       if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) {
+               dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n");
+               xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH;
+       } else {
+               dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n");
+               xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE;
+       }
+
+       xd->state_retries = XDOMAIN_RETRIES;
+       queue_delayed_work(xd->tb->wq, &xd->state_work,
+                          msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd)
+{
+       xd->state = XDOMAIN_STATE_BONDING_UUID_LOW;
+       xd->state_retries = XDOMAIN_RETRIES;
+       queue_delayed_work(xd->tb->wq, &xd->state_work,
+                          msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_properties(struct tb_xdomain *xd)
+{
+       xd->state = XDOMAIN_STATE_PROPERTIES;
+       xd->state_retries = XDOMAIN_RETRIES;
+       queue_delayed_work(xd->tb->wq, &xd->state_work,
+                          msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
+{
+       xd->properties_changed_retries = XDOMAIN_RETRIES;
+       queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+                          msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+}
+
+static void tb_xdomain_state_work(struct work_struct *work)
+{
+       struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
+       int ret, state = xd->state;
+
+       if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT ||
+                        state > XDOMAIN_STATE_ERROR))
+               return;
+
+       dev_dbg(&xd->dev, "running state %s\n", state_names[state]);
+
+       switch (state) {
+       case XDOMAIN_STATE_INIT:
+               if (xd->needs_uuid) {
+                       tb_xdomain_queue_uuid(xd);
+               } else {
+                       tb_xdomain_queue_properties_changed(xd);
+                       tb_xdomain_queue_properties(xd);
+               }
+               break;
+
+       case XDOMAIN_STATE_UUID:
+               ret = tb_xdomain_get_uuid(xd);
+               if (ret) {
+                       if (ret == -EAGAIN)
+                               goto retry_state;
+                       xd->state = XDOMAIN_STATE_ERROR;
+               } else {
+                       tb_xdomain_queue_properties_changed(xd);
+                       if (xd->bonding_possible)
+                               tb_xdomain_queue_link_status(xd);
+                       else
+                               tb_xdomain_queue_properties(xd);
+               }
+               break;
+
+       case XDOMAIN_STATE_LINK_STATUS:
+               ret = tb_xdomain_get_link_status(xd);
+               if (ret) {
+                       if (ret == -EAGAIN)
+                               goto retry_state;
+
+                       /*
+                        * If any of the lane bonding states fail we skip
+                        * bonding completely and try to continue from
+                        * reading properties.
+                        */
+                       tb_xdomain_queue_properties(xd);
+               } else {
+                       tb_xdomain_queue_bonding(xd);
+               }
+               break;
+
+       case XDOMAIN_STATE_LINK_STATE_CHANGE:
+               ret = tb_xdomain_link_state_change(xd, 2);
+               if (ret) {
+                       if (ret == -EAGAIN)
+                               goto retry_state;
+                       tb_xdomain_queue_properties(xd);
+               } else {
+                       tb_xdomain_queue_link_status2(xd);
+               }
+               break;
+
+       case XDOMAIN_STATE_LINK_STATUS2:
+               ret = tb_xdomain_get_link_status(xd);
+               if (ret) {
+                       if (ret == -EAGAIN)
+                               goto retry_state;
+                       tb_xdomain_queue_properties(xd);
+               } else {
+                       tb_xdomain_queue_bonding_uuid_low(xd);
+               }
+               break;
+
+       case XDOMAIN_STATE_BONDING_UUID_LOW:
+               tb_xdomain_lane_bonding_enable(xd);
+               tb_xdomain_queue_properties(xd);
+               break;
+
+       case XDOMAIN_STATE_BONDING_UUID_HIGH:
+               if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN)
+                       goto retry_state;
+               tb_xdomain_queue_properties(xd);
+               break;
+
+       case XDOMAIN_STATE_PROPERTIES:
+               ret = tb_xdomain_get_properties(xd);
+               if (ret) {
+                       if (ret == -EAGAIN)
+                               goto retry_state;
+                       xd->state = XDOMAIN_STATE_ERROR;
+               } else {
+                       xd->state = XDOMAIN_STATE_ENUMERATED;
+               }
+               break;
+
+       case XDOMAIN_STATE_ENUMERATED:
+               tb_xdomain_queue_properties(xd);
+               break;
+
+       case XDOMAIN_STATE_ERROR:
+               break;
+
+       default:
+               dev_warn(&xd->dev, "unexpected state %d\n", state);
+               break;
+       }
+
+       return;
+
+retry_state:
+       queue_delayed_work(xd->tb->wq, &xd->state_work,
+                          msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
 }
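
A simplified, self-contained sketch (assumptions only, not driver code) of the retry pattern tb_xdomain_state_work() above relies on: each step either succeeds and advances, asks to be re-queued with -EAGAIN against a bounded retry budget, or fails and parks the machine in an error state. In the driver the re-queue is a delayed work item rather than a loop, but the per-state handling has the same shape.

#include <errno.h>
#include <stdio.h>

enum state { ST_UUID, ST_LINK_STATUS, ST_PROPERTIES, ST_ENUMERATED, ST_ERROR };

#define RETRIES 10

/* Pretend every step needs two attempts before it succeeds */
static int run_step(enum state s, int attempt)
{
        (void)s;
        return attempt < 2 ? -EAGAIN : 0;
}

int main(void)
{
        enum state s = ST_UUID;
        int retries = RETRIES, attempt = 0;

        while (s != ST_ENUMERATED && s != ST_ERROR) {
                int ret = run_step(s, attempt++);

                if (ret == -EAGAIN && retries-- > 0)
                        continue;                /* re-queue the same state */
                if (ret) {
                        s = ST_ERROR;            /* out of retries or hard error */
                        break;
                }
                s = (enum state)(s + 1);         /* advance to the next state */
                retries = RETRIES;
                attempt = 0;
        }

        printf("final state %d\n", (int)s);
        return 0;
}
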
 
 static void tb_xdomain_properties_changed(struct work_struct *work)
@@ -1163,13 +1650,13 @@ static void tb_xdomain_properties_changed(struct work_struct *work)
                                "failed to send properties changed notification, retrying\n");
                        queue_delayed_work(xd->tb->wq,
                                           &xd->properties_changed_work,
-                                          msecs_to_jiffies(1000));
+                                          msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
                }
                dev_err(&xd->dev, "failed to send properties changed notification\n");
                return;
        }
 
-       xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
+       xd->properties_changed_retries = XDOMAIN_RETRIES;
 }
 
 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
@@ -1304,31 +1791,17 @@ static void tb_xdomain_release(struct device *dev)
 
 static void start_handshake(struct tb_xdomain *xd)
 {
-       xd->uuid_retries = XDOMAIN_UUID_RETRIES;
-       xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
-       xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
-
-       if (xd->needs_uuid) {
-               queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
-                                  msecs_to_jiffies(100));
-       } else {
-               /* Start exchanging properties with the other host */
-               queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
-                                  msecs_to_jiffies(100));
-               queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
-                                  msecs_to_jiffies(1000));
-       }
+       xd->state = XDOMAIN_STATE_INIT;
+       queue_delayed_work(xd->tb->wq, &xd->state_work,
+                          msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
 }
 
 static void stop_handshake(struct tb_xdomain *xd)
 {
-       xd->uuid_retries = 0;
-       xd->properties_retries = 0;
-       xd->properties_changed_retries = 0;
-
-       cancel_delayed_work_sync(&xd->get_uuid_work);
-       cancel_delayed_work_sync(&xd->get_properties_work);
        cancel_delayed_work_sync(&xd->properties_changed_work);
+       cancel_delayed_work_sync(&xd->state_work);
+       xd->properties_changed_retries = 0;
+       xd->state_retries = 0;
 }
 
 static int __maybe_unused tb_xdomain_suspend(struct device *dev)
@@ -1389,8 +1862,7 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
        ida_init(&xd->in_hopids);
        ida_init(&xd->out_hopids);
        mutex_init(&xd->lock);
-       INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
-       INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
+       INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work);
        INIT_DELAYED_WORK(&xd->properties_changed_work,
                          tb_xdomain_properties_changed);
 
@@ -1405,6 +1877,7 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
                        goto err_free_local_uuid;
        } else {
                xd->needs_uuid = true;
+               xd->bonding_possible = !!down->dual_link_port;
        }
 
        device_initialize(&xd->dev);
@@ -1523,9 +1996,9 @@ int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
                return ret;
        }
 
-       ret = tb_port_wait_for_link_width(port, 2, 100);
+       ret = tb_port_wait_for_link_width(port, 2, XDOMAIN_BONDING_TIMEOUT);
        if (ret) {
-               tb_port_warn(port, "timeout enabling lane bonding\n");
+               tb_port_warn(port, "failed to enable lane bonding\n");
                return ret;
        }
 
index 533d02b..afb2d37 100644 (file)
@@ -588,10 +588,8 @@ static void change_speed(struct tty_struct *tty, struct serial_state *info,
        }
        if (!(cflag & PARODD))
                cval |= UART_LCR_EPAR;
-#ifdef CMSPAR
        if (cflag & CMSPAR)
                cval |= UART_LCR_SPAR;
-#endif
 
        /* Determine divisor based on baud rate */
        baud = tty_get_baud_rate(tty);
index 9e8ccb8..c7968ae 100644 (file)
@@ -405,6 +405,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
 err_tty_register_device_failed:
        free_irq(irq, qtty);
 err_dec_line_count:
+       tty_port_destroy(&qtty->port);
        goldfish_tty_current_line_count--;
        if (goldfish_tty_current_line_count == 0)
                goldfish_tty_delete_driver();
@@ -426,6 +427,7 @@ static int goldfish_tty_remove(struct platform_device *pdev)
        iounmap(qtty->base);
        qtty->base = NULL;
        free_irq(qtty->irq, pdev);
+       tty_port_destroy(&qtty->port);
        goldfish_tty_current_line_count--;
        if (goldfish_tty_current_line_count == 0)
                goldfish_tty_delete_driver();
index 8d60e0f..4f9264d 100644 (file)
@@ -87,6 +87,25 @@ config HVC_DCC
          driver. This console is used through a JTAG only on ARM. If you don't have
          a JTAG then you probably don't want this option.
 
+config HVC_DCC_SERIALIZE_SMP
+       bool "Use DCC only on CPU core 0"
+       depends on SMP && HVC_DCC
+       help
+         This is a DEBUG option to serialize all console input and output to CPU 0.
+         Some external debuggers do not handle reads/writes from/to DCC on more
+         than one CPU core. Each core has its own DCC device registers, so when a
+         CPU core reads or writes from/to DCC, it only accesses its own DCC device.
+         Since kernel code can run on any CPU core, every time the kernel wants to
+         write to the console, it might write to a different DCC.
+
+         In SMP mode, external debuggers create multiple windows, and each window
+         shows the DCC output only from that core's DCC. The result is that
+         console output is either lost or scattered across windows.
+
+         Enable this option only if you are sure that you do not need features like
+         CPU hotplug to work, for example during early chipset bring-up when no
+         debug serial console is available. If unsure, say N.
+
 config HVC_RISCV_SBI
        bool "RISC-V SBI console support"
        depends on RISCV_SBI_V01
index bd61f93..1751108 100644 (file)
@@ -1,10 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved.  */
+/* Copyright (c) 2010, 2014, 2022 The Linux Foundation. All rights reserved.  */
 
 #include <linux/console.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
 #include <linux/init.h>
+#include <linux/kfifo.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
 
 #include <asm/dcc.h>
 #include <asm/processor.h>
 #define DCC_STATUS_RX          (1 << 30)
 #define DCC_STATUS_TX          (1 << 29)
 
+#define DCC_INBUF_SIZE         128
+#define DCC_OUTBUF_SIZE                1024
+
+/* Lock to serialize access to DCC fifo */
+static DEFINE_SPINLOCK(dcc_lock);
+
+static DEFINE_KFIFO(inbuf, unsigned char, DCC_INBUF_SIZE);
+static DEFINE_KFIFO(outbuf, unsigned char, DCC_OUTBUF_SIZE);
+
 static void dcc_uart_console_putchar(struct uart_port *port, unsigned char ch)
 {
        while (__dcc_getstatus() & DCC_STATUS_TX)
@@ -67,24 +81,176 @@ static int hvc_dcc_get_chars(uint32_t vt, char *buf, int count)
        return i;
 }
 
+/*
+ * Check if the DCC is enabled. If CONFIG_HVC_DCC_SERIALIZE_SMP is enabled,
+ * we assume this function will be called first on core 0. That way,
+ * dcc_core0_available will be true only if DCC is available on core 0.
+ */
 static bool hvc_dcc_check(void)
 {
        unsigned long time = jiffies + (HZ / 10);
+       static bool dcc_core0_available;
+
+       /*
+        * If we're not on core 0, but we previously confirmed that DCC is
+        * active, then just return true.
+        */
+       int cpu = get_cpu();
+
+       if (IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP) && cpu && dcc_core0_available) {
+               put_cpu();
+               return true;
+       }
+
+       put_cpu();
 
        /* Write a test character to check if it is handled */
        __dcc_putchar('\n');
 
        while (time_is_after_jiffies(time)) {
-               if (!(__dcc_getstatus() & DCC_STATUS_TX))
+               if (!(__dcc_getstatus() & DCC_STATUS_TX)) {
+                       dcc_core0_available = true;
                        return true;
+               }
        }
 
        return false;
 }
 
+/*
+ * Workqueue function that writes the output FIFO to the DCC on core 0.
+ */
+static void dcc_put_work(struct work_struct *work)
+{
+       unsigned char ch;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dcc_lock, irqflags);
+
+       /* While there's data in the output FIFO, write it to the DCC */
+       while (kfifo_get(&outbuf, &ch))
+               hvc_dcc_put_chars(0, &ch, 1);
+
+       /* While we're at it, check for any input characters */
+       while (!kfifo_is_full(&inbuf)) {
+               if (!hvc_dcc_get_chars(0, &ch, 1))
+                       break;
+               kfifo_put(&inbuf, ch);
+       }
+
+       spin_unlock_irqrestore(&dcc_lock, irqflags);
+}
+
+static DECLARE_WORK(dcc_pwork, dcc_put_work);
+
+/*
+ * Workqueue function that reads characters from DCC and puts them into the
+ * input FIFO.
+ */
+static void dcc_get_work(struct work_struct *work)
+{
+       unsigned char ch;
+       unsigned long irqflags;
+
+       /*
+        * Read characters from DCC and put them into the input FIFO, as
+        * long as there is room and we have characters to read.
+        */
+       spin_lock_irqsave(&dcc_lock, irqflags);
+
+       while (!kfifo_is_full(&inbuf)) {
+               if (!hvc_dcc_get_chars(0, &ch, 1))
+                       break;
+               kfifo_put(&inbuf, ch);
+       }
+       spin_unlock_irqrestore(&dcc_lock, irqflags);
+}
+
+static DECLARE_WORK(dcc_gwork, dcc_get_work);
+
+/*
+ * Write characters directly to the DCC if we're on core 0 and the FIFO
+ * is empty, or write them to the FIFO if we're not.
+ */
+static int hvc_dcc0_put_chars(u32 vt, const char *buf, int count)
+{
+       int len;
+       unsigned long irqflags;
+
+       if (!IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP))
+               return hvc_dcc_put_chars(vt, buf, count);
+
+       spin_lock_irqsave(&dcc_lock, irqflags);
+       if (smp_processor_id() || (!kfifo_is_empty(&outbuf))) {
+               len = kfifo_in(&outbuf, buf, count);
+               spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+               /*
+                * We just pushed data to the output FIFO, so schedule the
+                * workqueue that will actually write that data to the DCC.
+                * CPU hotplug is disabled in hvc_dcc_init() so CPU 0 cannot
+                * be offlined after the cpu_online() check.
+                */
+               if (cpu_online(0))
+                       schedule_work_on(0, &dcc_pwork);
+
+               return len;
+       }
+
+       /*
+        * If we're already on core 0, and the FIFO is empty, then just
+        * write the data to DCC.
+        */
+       len = hvc_dcc_put_chars(vt, buf, count);
+       spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+       return len;
+}
+
+/*
+ * Read characters directly from the DCC if we're on core 0 and the FIFO
+ * is empty, or read them from the FIFO if we're not.
+ */
+static int hvc_dcc0_get_chars(u32 vt, char *buf, int count)
+{
+       int len;
+       unsigned long irqflags;
+
+       if (!IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP))
+               return hvc_dcc_get_chars(vt, buf, count);
+
+       spin_lock_irqsave(&dcc_lock, irqflags);
+
+       if (smp_processor_id() || (!kfifo_is_empty(&inbuf))) {
+               len = kfifo_out(&inbuf, buf, count);
+               spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+               /*
+                * If the FIFO was empty, there may be characters in the DCC
+                * that we haven't read yet.  Schedule a workqueue to fill
+                * the input FIFO, so that the next time this function is
+                * called, we'll have data. CPU hotplug is disabled in
+                * hvc_dcc_init() so CPU 0 cannot be offlined after the
+                * cpu_online() check.
+                */
+               if (!len && cpu_online(0))
+                       schedule_work_on(0, &dcc_gwork);
+
+               return len;
+       }
+
+       /*
+        * If we're already on core 0, and the FIFO is empty, then just
+        * read the data from DCC.
+        */
+       len = hvc_dcc_get_chars(vt, buf, count);
+       spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+       return len;
+}
+
 static const struct hv_ops hvc_dcc_get_put_ops = {
-       .get_chars = hvc_dcc_get_chars,
-       .put_chars = hvc_dcc_put_chars,
+       .get_chars = hvc_dcc0_get_chars,
+       .put_chars = hvc_dcc0_put_chars,
 };
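
A userspace analogue (assumptions, not the driver) of the queuing rule implemented by hvc_dcc0_put_chars() above: a writer that is not on the owning CPU, or that finds earlier data still queued, appends to a FIFO that is later flushed from the owning CPU, as dcc_put_work() does; only the owner with an empty FIFO writes straight to the device. Locking is omitted here for brevity.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define FIFO_SIZE 1024

static char fifo[FIFO_SIZE];
static size_t fifo_len;

static void device_write(const char *buf, size_t len)
{
        fwrite(buf, 1, len, stdout);    /* stands in for the real DCC write */
}

static size_t put_chars(bool on_owner_cpu, const char *buf, size_t len)
{
        if (!on_owner_cpu || fifo_len) {
                size_t room = FIFO_SIZE - fifo_len;
                size_t n = len < room ? len : room;

                memcpy(fifo + fifo_len, buf, n);
                fifo_len += n;          /* a flush work item drains this later */
                return n;
        }
        device_write(buf, len);         /* owner CPU, nothing queued: go direct */
        return len;
}

int main(void)
{
        put_chars(false, "queued\n", 7);
        put_chars(true, "direct? no, FIFO not empty\n", 27);
        device_write(fifo, fifo_len);   /* flush, as the work item would on CPU 0 */
        fifo_len = 0;
        put_chars(true, "direct\n", 7);
        return 0;
}
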
 
 static int __init hvc_dcc_console_init(void)
@@ -108,6 +274,26 @@ static int __init hvc_dcc_init(void)
        if (!hvc_dcc_check())
                return -ENODEV;
 
+       if (IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP)) {
+               pr_warn("\n");
+               pr_warn("********************************************************************\n");
+               pr_warn("**     NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE           **\n");
+               pr_warn("**                                                                **\n");
+               pr_warn("**  HVC_DCC_SERIALIZE_SMP SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
+               pr_warn("**                                                                **\n");
+               pr_warn("** This means that this is a DEBUG kernel and unsafe for          **\n");
+               pr_warn("** production use and has important features like CPU hotplug     **\n");
+               pr_warn("** disabled.                                                      **\n");
+               pr_warn("**                                                                **\n");
+               pr_warn("** If you see this message and you are not debugging the          **\n");
+               pr_warn("** kernel, report this immediately to your vendor!                **\n");
+               pr_warn("**                                                                **\n");
+               pr_warn("**     NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE           **\n");
+               pr_warn("********************************************************************\n");
+
+               cpu_hotplug_disable();
+       }
+
        p = hvc_alloc(0, 0, &hvc_dcc_get_put_ops, 128);
 
        return PTR_ERR_OR_ZERO(p);
index 056ae21..794c7b1 100644 (file)
 #include <linux/slab.h>
 #include <linux/console.h>
 #include <linux/of.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/export.h>
 #include <linux/interrupt.h>
 
 #include <asm/hvconsole.h>
-#include <asm/prom.h>
 #include <asm/firmware.h>
 #include <asm/hvsi.h>
 #include <asm/udbg.h>
@@ -342,9 +342,9 @@ void __init hvc_opal_init_early(void)
                 * path, so we hard wire it
                 */
                opal = of_find_node_by_path("/ibm,opal/consoles");
-               if (opal)
+               if (opal) {
                        pr_devel("hvc_opal: Found consoles in new location\n");
-               if (!opal) {
+               } else {
                        opal = of_find_node_by_path("/ibm,opal");
                        if (opal)
                                pr_devel("hvc_opal: "
index 72b11aa..736b230 100644 (file)
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/console.h>
+#include <linux/of.h>
 
 #include <asm/hvconsole.h>
 #include <asm/vio.h>
-#include <asm/prom.h>
 #include <asm/hvsi.h>
 #include <asm/udbg.h>
 #include <asm/machdep.h>
index ebaf750..7c23112 100644 (file)
@@ -253,7 +253,7 @@ static int xen_hvm_console_init(void)
        if (r < 0 || v == 0)
                goto err;
        gfn = v;
-       info->intf = xen_remap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE);
+       info->intf = memremap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB);
        if (info->intf == NULL)
                goto err;
        info->vtermno = HVC_COOKIE;
index 245da1d..9b7e824 100644 (file)
@@ -581,10 +581,9 @@ static int hvcs_io(struct hvcs_struct *hvcsd)
 
        spin_unlock_irqrestore(&hvcsd->lock, flags);
        /* This is synch -- FIXME :js: it is not! */
-       if(got)
+       if (got)
                tty_flip_buffer_push(&hvcsd->port);
-
-       if (!got) {
+       else {
                /* Do this _after_ the flip_buffer_push */
                spin_lock_irqsave(&hvcsd->lock, flags);
                vio_enable_interrupts(hvcsd->vdev);
index aa81f48..a200d01 100644 (file)
 #include <linux/module.h>
 #include <linux/major.h>
 #include <linux/kernel.h>
+#include <linux/of_irq.h>
 #include <linux/spinlock.h>
 #include <linux/sysrq.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <asm/hvcall.h>
 #include <asm/hvconsole.h>
-#include <asm/prom.h>
 #include <linux/uaccess.h>
 #include <asm/vio.h>
 #include <asm/param.h>
index 6ebd3e4..70b982b 100644 (file)
@@ -528,7 +528,6 @@ static int mxser_set_baud(struct tty_struct *tty, speed_t newspd)
        outb(quot >> 8, info->ioaddr + UART_DLM);       /* MS of divisor */
        outb(cval, info->ioaddr + UART_LCR);    /* reset DLAB */
 
-#ifdef BOTHER
        if (C_BAUD(tty) == BOTHER) {
                quot = MXSER_BAUD_BASE % newspd;
                quot *= 8;
@@ -539,9 +538,9 @@ static int mxser_set_baud(struct tty_struct *tty, speed_t newspd)
                        quot /= newspd;
 
                mxser_set_must_enum_value(info->ioaddr, quot);
-       } else
-#endif
+       } else {
                mxser_set_must_enum_value(info->ioaddr, 0);
+       }
 
        return 0;
 }
index fd8b86d..137eebd 100644 (file)
@@ -444,6 +444,25 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
        return modembits;
 }
 
+static void gsm_hex_dump_bytes(const char *fname, const u8 *data,
+                              unsigned long len)
+{
+       char *prefix;
+
+       if (!fname) {
+               print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, data, len,
+                              true);
+               return;
+       }
+
+       prefix = kasprintf(GFP_KERNEL, "%s: ", fname);
+       if (!prefix)
+               return;
+       print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, data, len,
+                      true);
+       kfree(prefix);
+}
+
 /**
  *     gsm_print_packet        -       display a frame for debug
  *     @hdr: header to print before decode
@@ -508,7 +527,7 @@ static void gsm_print_packet(const char *hdr, int addr, int cr,
        else
                pr_cont("(F)");
 
-       print_hex_dump_bytes("", DUMP_PREFIX_NONE, data, dlen);
+       gsm_hex_dump_bytes(NULL, data, dlen);
 }
 
 
@@ -698,9 +717,7 @@ static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci)
                }
 
                if (debug & 4)
-                       print_hex_dump_bytes("gsm_data_kick: ",
-                                            DUMP_PREFIX_OFFSET,
-                                            gsm->txframe, len);
+                       gsm_hex_dump_bytes(__func__, gsm->txframe, len);
                if (gsmld_output(gsm, gsm->txframe, len) <= 0)
                        break;
                /* FIXME: Can eliminate one SOF in many more cases */
@@ -749,7 +766,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
 
        *--dp = msg->ctrl;
        if (gsm->initiator)
-               *--dp = (msg->addr << 2) | 2 | EA;
+               *--dp = (msg->addr << 2) | CR | EA;
        else
                *--dp = (msg->addr << 2) | EA;
        *fcs = gsm_fcs_add_block(INIT_FCS, dp , msg->data - dp);
@@ -1907,10 +1924,6 @@ static void gsm_queue(struct gsm_mux *gsm)
        case UI|PF:
        case UIH:
        case UIH|PF:
-#if 0
-               if (cr)
-                       goto invalid;
-#endif
                if (dlci == NULL || dlci->state != DLCI_OPEN) {
                        gsm_command(gsm, address, DM|PF);
                        return;
@@ -2448,8 +2461,7 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
                return -ENOSPC;
        }
        if (debug & 4)
-               print_hex_dump_bytes("gsmld_output: ", DUMP_PREFIX_OFFSET,
-                                    data, len);
+               gsm_hex_dump_bytes(__func__, data, len);
        return gsm->tty->ops->write(gsm->tty, data, len);
 }
 
@@ -2525,8 +2537,7 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
        char flags = TTY_NORMAL;
 
        if (debug & 4)
-               print_hex_dump_bytes("gsmld_receive: ", DUMP_PREFIX_OFFSET,
-                                    cp, count);
+               gsm_hex_dump_bytes(__func__, cp, count);
 
        for (; count; count--, cp++) {
                if (fp)
index efc7210..640c9e8 100644 (file)
@@ -1220,21 +1220,34 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
                process_echoes(tty);
 }
 
+static bool n_tty_is_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
+{
+       return c == START_CHAR(tty) || c == STOP_CHAR(tty);
+}
+
+/* Returns true if c is consumed as a flow-control character */
+static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
+{
+       if (!n_tty_is_char_flow_ctrl(tty, c))
+               return false;
+
+       if (c == START_CHAR(tty)) {
+               start_tty(tty);
+               process_echoes(tty);
+               return true;
+       }
+
+       /* STOP_CHAR */
+       stop_tty(tty);
+       return true;
+}
+
 static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
 {
        struct n_tty_data *ldata = tty->disc_data;
 
-       if (I_IXON(tty)) {
-               if (c == START_CHAR(tty)) {
-                       start_tty(tty);
-                       process_echoes(tty);
-                       return;
-               }
-               if (c == STOP_CHAR(tty)) {
-                       stop_tty(tty);
-                       return;
-               }
-       }
+       if (I_IXON(tty) && n_tty_receive_char_flow_ctrl(tty, c))
+               return;
 
        if (L_ISIG(tty)) {
                if (c == INTR_CHAR(tty)) {
@@ -1975,6 +1988,35 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
        return ldata->read_tail != canon_head;
 }
 
+/*
+ * If we finished a read at the exact location of an
+ * EOF (special EOL character that's a __DISABLED_CHAR)
+ * in the stream, silently eat the EOF.
+ */
+static void canon_skip_eof(struct tty_struct *tty)
+{
+       struct n_tty_data *ldata = tty->disc_data;
+       size_t tail, canon_head;
+
+       canon_head = smp_load_acquire(&ldata->canon_head);
+       tail = ldata->read_tail;
+
+       // No data?
+       if (tail == canon_head)
+               return;
+
+       // See if the tail position is EOF in the circular buffer
+       tail &= (N_TTY_BUF_SIZE - 1);
+       if (!test_bit(tail, ldata->read_flags))
+               return;
+       if (read_buf(ldata, tail) != __DISABLED_CHAR)
+               return;
+
+       // Clear the EOL bit, skip the EOF char.
+       clear_bit(tail, ldata->read_flags);
+       smp_store_release(&ldata->read_tail, ldata->read_tail + 1);
+}
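+
+An illustrative note (not part of the merge, inferred from the comments above and the continuation-read hunk below) on the case this helper covers:
+
+/*
+ * Sketch of the scenario: a canonical-mode read() whose buffer fills up
+ * exactly at the end of a line that is immediately followed by EOF (^D,
+ * stored internally as __DISABLED_CHAR).  On the continuation read the
+ * leftover EOF marker is consumed here rather than being surfaced as a
+ * zero-length read, i.e. a spurious end-of-file.
+ */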
+
 /**
  * job_control         -       check job control
  * @tty: tty
@@ -2045,7 +2087,14 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
         */
        if (*cookie) {
                if (ldata->icanon && !L_EXTPROC(tty)) {
-                       if (canon_copy_from_read_buf(tty, &kb, &nr))
+                       /*
+                        * If we have filled the user buffer, see
+                        * if we should skip an EOF character before
+                        * releasing the lock and returning done.
+                        */
+                       if (!nr)
+                               canon_skip_eof(tty);
+                       else if (canon_copy_from_read_buf(tty, &kb, &nr))
                                return kb - kbuf;
                } else {
                        if (copy_from_read_buf(tty, &kb, &nr))
index a8830e1..696030c 100644 (file)
@@ -17,6 +17,8 @@
 struct uart_8250_dma {
        int (*tx_dma)(struct uart_8250_port *p);
        int (*rx_dma)(struct uart_8250_port *p);
+       void (*prepare_tx_dma)(struct uart_8250_port *p);
+       void (*prepare_rx_dma)(struct uart_8250_port *p);
 
        /* Filter function */
        dma_filter_fn           fn;
@@ -83,6 +85,7 @@ struct serial8250_config {
 #define UART_CAP_MINI  BIT(17) /* Mini UART on BCM283X family lacks:
                                         * STOP PARITY EPAR SPAR WLEN5 WLEN6
                                         */
+#define UART_CAP_NOTEMT        BIT(18) /* UART without interrupt on TEMT available */
 
 #define UART_BUG_QUOT  BIT(0)  /* UART has buggy quot LSB */
 #define UART_BUG_TXEN  BIT(1)  /* UART has buggy TX IIR status */
@@ -120,6 +123,28 @@ static inline void serial_out(struct uart_8250_port *up, int offset, int value)
        up->port.serial_out(&up->port, offset, value);
 }
 
+/*
+ * For the 16C950
+ */
+static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
+{
+       serial_out(up, UART_SCR, offset);
+       serial_out(up, UART_ICR, value);
+}
+
+static unsigned int __maybe_unused serial_icr_read(struct uart_8250_port *up,
+                                                  int offset)
+{
+       unsigned int value;
+
+       serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
+       serial_out(up, UART_SCR, offset);
+       value = serial_in(up, UART_ICR);
+       serial_icr_write(up, UART_ACR, up->acr);
+
+       return value;
+}
+
 void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p);
 
 static inline int serial_dl_read(struct uart_8250_port *up)
@@ -302,6 +327,22 @@ extern int serial8250_rx_dma(struct uart_8250_port *);
 extern void serial8250_rx_dma_flush(struct uart_8250_port *);
 extern int serial8250_request_dma(struct uart_8250_port *);
 extern void serial8250_release_dma(struct uart_8250_port *);
+
+static inline void serial8250_do_prepare_tx_dma(struct uart_8250_port *p)
+{
+       struct uart_8250_dma *dma = p->dma;
+
+       if (dma->prepare_tx_dma)
+               dma->prepare_tx_dma(p);
+}
+
+static inline void serial8250_do_prepare_rx_dma(struct uart_8250_port *p)
+{
+       struct uart_8250_dma *dma = p->dma;
+
+       if (dma->prepare_rx_dma)
+               dma->prepare_rx_dma(p);
+}
 #else
 static inline int serial8250_tx_dma(struct uart_8250_port *p)
 {
index 93fe10c..9d2a785 100644 (file)
@@ -429,6 +429,8 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
        timer_setup(&vuart->unthrottle_timer, aspeed_vuart_unthrottle_exp, 0);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -EINVAL;
 
        memset(&port, 0, sizeof(port));
        port.port.private_data = vuart;
index 01d30f6..cfbd2de 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
-#include <linux/pm_runtime.h>
 #include <linux/io.h>
 #ifdef CONFIG_SPARC
 #include <linux/sunserialcore.h>
index b3c3f7e..7133fce 100644 (file)
@@ -34,7 +34,7 @@ static void __dma_tx_complete(void *param)
                uart_write_wakeup(&p->port);
 
        ret = serial8250_tx_dma(p);
-       if (ret)
+       if (ret || !dma->tx_running)
                serial8250_set_THRI(p);
 
        spin_unlock_irqrestore(&p->port.lock, flags);
@@ -80,12 +80,13 @@ int serial8250_tx_dma(struct uart_8250_port *p)
 
        if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
                /* We have been called from __dma_tx_complete() */
-               serial8250_rpm_put_tx(p);
                return 0;
        }
 
        dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
 
+       serial8250_do_prepare_tx_dma(p);
+
        desc = dmaengine_prep_slave_single(dma->txchan,
                                           dma->tx_addr + xmit->tail,
                                           dma->tx_size, DMA_MEM_TO_DEV,
@@ -123,6 +124,8 @@ int serial8250_rx_dma(struct uart_8250_port *p)
        if (dma->rx_running)
                return 0;
 
+       serial8250_do_prepare_rx_dma(p);
+
        desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
                                           dma->rx_size, DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
index 1769808..f57bbd3 100644 (file)
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/io.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/serial_8250.h>
 #include <linux/serial_reg.h>
 #include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/workqueue.h>
 #include <linux/notifier.h>
 #include <linux/slab.h>
 
 /* Offsets for the DesignWare specific registers */
 #define DW_UART_USR    0x1f /* UART Status Register */
+#define DW_UART_DMASA  0xa8 /* DMA Software Ack */
+
+#define OCTEON_UART_USR        0x27 /* UART Status Register */
+
+#define RZN1_UART_TDMACR 0x10c /* DMA Control Register Transmit Mode */
+#define RZN1_UART_RDMACR 0x110 /* DMA Control Register Receive Mode */
 
 /* DesignWare specific register fields */
 #define DW_UART_MCR_SIRE               BIT(6)
 
-struct dw8250_data {
-       struct dw8250_port_data data;
+/* Renesas specific register fields */
+#define RZN1_UART_xDMACR_DMA_EN                BIT(0)
+#define RZN1_UART_xDMACR_1_WORD_BURST  (0 << 1)
+#define RZN1_UART_xDMACR_4_WORD_BURST  (1 << 1)
+#define RZN1_UART_xDMACR_8_WORD_BURST  (3 << 1)
+#define RZN1_UART_xDMACR_BLK_SZ(x)     ((x) << 3)
 
-       u8                      usr_reg;
-       int                     msr_mask_on;
-       int                     msr_mask_off;
-       struct clk              *clk;
-       struct clk              *pclk;
-       struct notifier_block   clk_notifier;
-       struct work_struct      clk_work;
-       struct reset_control    *rst;
-
-       unsigned int            skip_autocfg:1;
-       unsigned int            uart_16550_compatible:1;
-};
-
-static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
-{
-       return container_of(data, struct dw8250_data, data);
-}
+/* Quirks */
+#define DW_UART_QUIRK_OCTEON           BIT(0)
+#define DW_UART_QUIRK_ARMADA_38X       BIT(1)
+#define DW_UART_QUIRK_SKIP_SET_RATE    BIT(2)
+#define DW_UART_QUIRK_IS_DMA_FC                BIT(3)
 
 static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb)
 {
@@ -238,6 +236,8 @@ static int dw8250_handle_irq(struct uart_port *p)
        struct uart_8250_port *up = up_to_u8250p(p);
        struct dw8250_data *d = to_dw8250_data(p->private_data);
        unsigned int iir = p->serial_in(p, UART_IIR);
+       bool rx_timeout = (iir & 0x3f) == UART_IIR_RX_TIMEOUT;
+       unsigned int quirks = d->pdata->quirks;
        unsigned int status;
        unsigned long flags;
 
@@ -251,7 +251,7 @@ static int dw8250_handle_irq(struct uart_port *p)
         * This problem has only been observed so far when not in DMA mode
         * so we limit the workaround only to non-DMA mode.
         */
-       if (!up->dma && ((iir & 0x3f) == UART_IIR_RX_TIMEOUT)) {
+       if (!up->dma && rx_timeout) {
                spin_lock_irqsave(&p->lock, flags);
                status = p->serial_in(p, UART_LSR);
 
@@ -261,12 +261,21 @@ static int dw8250_handle_irq(struct uart_port *p)
                spin_unlock_irqrestore(&p->lock, flags);
        }
 
+       /* Manually stop the Rx DMA transfer when acting as flow controller */
+       if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) {
+               status = p->serial_in(p, UART_LSR);
+               if (status & (UART_LSR_DR | UART_LSR_BI)) {
+                       dw8250_writel_ext(p, RZN1_UART_RDMACR, 0);
+                       dw8250_writel_ext(p, DW_UART_DMASA, 1);
+               }
+       }
+
        if (serial8250_handle_irq(p, iir))
                return 1;
 
        if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
                /* Clear the USR */
-               (void)p->serial_in(p, d->usr_reg);
+               (void)p->serial_in(p, d->pdata->usr_reg);
 
                return 1;
        }
@@ -384,11 +393,48 @@ static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
        return param == chan->device->dev;
 }
 
+static u32 dw8250_rzn1_get_dmacr_burst(int max_burst)
+{
+       if (max_burst >= 8)
+               return RZN1_UART_xDMACR_8_WORD_BURST;
+       else if (max_burst >= 4)
+               return RZN1_UART_xDMACR_4_WORD_BURST;
+       else
+               return RZN1_UART_xDMACR_1_WORD_BURST;
+}
+
+static void dw8250_prepare_tx_dma(struct uart_8250_port *p)
+{
+       struct uart_port *up = &p->port;
+       struct uart_8250_dma *dma = p->dma;
+       u32 val;
+
+       dw8250_writel_ext(up, RZN1_UART_TDMACR, 0);
+       val = dw8250_rzn1_get_dmacr_burst(dma->txconf.dst_maxburst) |
+             RZN1_UART_xDMACR_BLK_SZ(dma->tx_size) |
+             RZN1_UART_xDMACR_DMA_EN;
+       dw8250_writel_ext(up, RZN1_UART_TDMACR, val);
+}
+
+static void dw8250_prepare_rx_dma(struct uart_8250_port *p)
+{
+       struct uart_port *up = &p->port;
+       struct uart_8250_dma *dma = p->dma;
+       u32 val;
+
+       dw8250_writel_ext(up, RZN1_UART_RDMACR, 0);
+       val = dw8250_rzn1_get_dmacr_burst(dma->rxconf.src_maxburst) |
+             RZN1_UART_xDMACR_BLK_SZ(dma->rx_size) |
+             RZN1_UART_xDMACR_DMA_EN;
+       dw8250_writel_ext(up, RZN1_UART_RDMACR, val);
+}
+
 static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
 {
        struct device_node *np = p->dev->of_node;
 
        if (np) {
+               unsigned int quirks = data->pdata->quirks;
                int id;
 
                /* get index of serial line, if found in DT aliases */
@@ -396,12 +442,11 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
                if (id >= 0)
                        p->line = id;
 #ifdef CONFIG_64BIT
-               if (of_device_is_compatible(np, "cavium,octeon-3860-uart")) {
+               if (quirks & DW_UART_QUIRK_OCTEON) {
                        p->serial_in = dw8250_serial_inq;
                        p->serial_out = dw8250_serial_outq;
                        p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
                        p->type = PORT_OCTEON;
-                       data->usr_reg = 0x27;
                        data->skip_autocfg = true;
                }
 #endif
@@ -412,10 +457,16 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
                        p->serial_out = dw8250_serial_out32be;
                }
 
-               if (of_device_is_compatible(np, "marvell,armada-38x-uart"))
+               if (quirks & DW_UART_QUIRK_ARMADA_38X)
                        p->serial_out = dw8250_serial_out38x;
-               if (of_device_is_compatible(np, "starfive,jh7100-uart"))
+               if (quirks & DW_UART_QUIRK_SKIP_SET_RATE)
                        p->set_termios = dw8250_do_set_termios;
+               if (quirks & DW_UART_QUIRK_IS_DMA_FC) {
+                       data->data.dma.txconf.device_fc = 1;
+                       data->data.dma.rxconf.device_fc = 1;
+                       data->data.dma.prepare_tx_dma = dw8250_prepare_tx_dma;
+                       data->data.dma.prepare_rx_dma = dw8250_prepare_rx_dma;
+               }
 
        } else if (acpi_dev_present("APMC0D08", NULL, -1)) {
                p->iotype = UPIO_MEM32;
@@ -433,21 +484,30 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
        }
 }
 
+static void dw8250_clk_disable_unprepare(void *data)
+{
+       clk_disable_unprepare(data);
+}
+
+static void dw8250_reset_control_assert(void *data)
+{
+       reset_control_assert(data);
+}
+
 static int dw8250_probe(struct platform_device *pdev)
 {
        struct uart_8250_port uart = {}, *up = &uart;
-       struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct uart_port *p = &up->port;
        struct device *dev = &pdev->dev;
        struct dw8250_data *data;
+       struct resource *regs;
        int irq;
        int err;
        u32 val;
 
-       if (!regs) {
-               dev_err(dev, "no registers defined\n");
-               return -EINVAL;
-       }
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!regs)
+               return dev_err_probe(dev, -EINVAL, "no registers defined\n");
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
@@ -476,7 +536,7 @@ static int dw8250_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        data->data.dma.fn = dw8250_fallback_dma_filter;
-       data->usr_reg = DW_UART_USR;
+       data->pdata = device_get_match_data(p->dev);
        p->private_data = &data->data;
 
        data->uart_16550_compatible = device_property_read_bool(dev,
@@ -532,37 +592,41 @@ static int dw8250_probe(struct platform_device *pdev)
 
        err = clk_prepare_enable(data->clk);
        if (err)
-               dev_warn(dev, "could not enable optional baudclk: %d\n", err);
+               return dev_err_probe(dev, err, "could not enable optional baudclk\n");
+
+       err = devm_add_action_or_reset(dev, dw8250_clk_disable_unprepare, data->clk);
+       if (err)
+               return err;
 
        if (data->clk)
                p->uartclk = clk_get_rate(data->clk);
 
        /* If no clock rate is defined, fail. */
-       if (!p->uartclk) {
-               dev_err(dev, "clock rate not defined\n");
-               err = -EINVAL;
-               goto err_clk;
-       }
+       if (!p->uartclk)
+               return dev_err_probe(dev, -EINVAL, "clock rate not defined\n");
 
        data->pclk = devm_clk_get_optional(dev, "apb_pclk");
-       if (IS_ERR(data->pclk)) {
-               err = PTR_ERR(data->pclk);
-               goto err_clk;
-       }
+       if (IS_ERR(data->pclk))
+               return PTR_ERR(data->pclk);
 
        err = clk_prepare_enable(data->pclk);
-       if (err) {
-               dev_err(dev, "could not enable apb_pclk\n");
-               goto err_clk;
-       }
+       if (err)
+               return dev_err_probe(dev, err, "could not enable apb_pclk\n");
+
+       err = devm_add_action_or_reset(dev, dw8250_clk_disable_unprepare, data->pclk);
+       if (err)
+               return err;
 
        data->rst = devm_reset_control_get_optional_exclusive(dev, NULL);
-       if (IS_ERR(data->rst)) {
-               err = PTR_ERR(data->rst);
-               goto err_pclk;
-       }
+       if (IS_ERR(data->rst))
+               return PTR_ERR(data->rst);
+
        reset_control_deassert(data->rst);
 
+       err = devm_add_action_or_reset(dev, dw8250_reset_control_assert, data->rst);
+       if (err)
+               return err;
+
        dw8250_quirks(p, data);
 
        /* If the Busy Functionality is not implemented, don't handle it */
@@ -580,10 +644,8 @@ static int dw8250_probe(struct platform_device *pdev)
        }
 
        data->data.line = serial8250_register_8250_port(up);
-       if (data->data.line < 0) {
-               err = data->data.line;
-               goto err_reset;
-       }
+       if (data->data.line < 0)
+               return data->data.line;
 
        /*
         * Some platforms may provide a reference clock shared between several
@@ -593,9 +655,8 @@ static int dw8250_probe(struct platform_device *pdev)
        if (data->clk) {
                err = clk_notifier_register(data->clk, &data->clk_notifier);
                if (err)
-                       dev_warn(p->dev, "Failed to set the clock notifier\n");
-               else
-                       queue_work(system_unbound_wq, &data->clk_work);
+                       return dev_err_probe(dev, err, "Failed to set the clock notifier\n");
+               queue_work(system_unbound_wq, &data->clk_work);
        }
 
        platform_set_drvdata(pdev, data);
@@ -604,17 +665,6 @@ static int dw8250_probe(struct platform_device *pdev)
        pm_runtime_enable(dev);
 
        return 0;
-
-err_reset:
-       reset_control_assert(data->rst);
-
-err_pclk:
-       clk_disable_unprepare(data->pclk);
-
-err_clk:
-       clk_disable_unprepare(data->clk);
-
-       return err;
 }
 
 static int dw8250_remove(struct platform_device *pdev)
@@ -632,12 +682,6 @@ static int dw8250_remove(struct platform_device *pdev)
 
        serial8250_unregister_port(data->data.line);
 
-       reset_control_assert(data->rst);
-
-       clk_disable_unprepare(data->pclk);
-
-       clk_disable_unprepare(data->clk);
-
        pm_runtime_disable(dev);
        pm_runtime_put_noidle(dev);
 
@@ -693,12 +737,37 @@ static const struct dev_pm_ops dw8250_pm_ops = {
        SET_RUNTIME_PM_OPS(dw8250_runtime_suspend, dw8250_runtime_resume, NULL)
 };
 
+static const struct dw8250_platform_data dw8250_dw_apb = {
+       .usr_reg = DW_UART_USR,
+};
+
+static const struct dw8250_platform_data dw8250_octeon_3860_data = {
+       .usr_reg = OCTEON_UART_USR,
+       .quirks = DW_UART_QUIRK_OCTEON,
+};
+
+static const struct dw8250_platform_data dw8250_armada_38x_data = {
+       .usr_reg = DW_UART_USR,
+       .quirks = DW_UART_QUIRK_ARMADA_38X,
+};
+
+static const struct dw8250_platform_data dw8250_renesas_rzn1_data = {
+       .usr_reg = DW_UART_USR,
+       .cpr_val = 0x00012f32,
+       .quirks = DW_UART_QUIRK_IS_DMA_FC,
+};
+
+static const struct dw8250_platform_data dw8250_starfive_jh7100_data = {
+       .usr_reg = DW_UART_USR,
+       .quirks = DW_UART_QUIRK_SKIP_SET_RATE,
+};
+
 static const struct of_device_id dw8250_of_match[] = {
-       { .compatible = "snps,dw-apb-uart" },
-       { .compatible = "cavium,octeon-3860-uart" },
-       { .compatible = "marvell,armada-38x-uart" },
-       { .compatible = "renesas,rzn1-uart" },
-       { .compatible = "starfive,jh7100-uart" },
+       { .compatible = "snps,dw-apb-uart", .data = &dw8250_dw_apb },
+       { .compatible = "cavium,octeon-3860-uart", .data = &dw8250_octeon_3860_data },
+       { .compatible = "marvell,armada-38x-uart", .data = &dw8250_armada_38x_data },
+       { .compatible = "renesas,rzn1-uart", .data = &dw8250_renesas_rzn1_data },
+       { .compatible = "starfive,jh7100-uart", .data = &dw8250_starfive_jh7100_data },
        { /* Sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, dw8250_of_match);
index 622d3b0..fbabfdd 100644 (file)
@@ -2,19 +2,32 @@
 /* Synopsys DesignWare 8250 library. */
 
 #include <linux/bitops.h>
+#include <linux/bitfield.h>
 #include <linux/device.h>
-#include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/property.h>
 #include <linux/serial_8250.h>
 #include <linux/serial_core.h>
 
 #include "8250_dwlib.h"
 
 /* Offsets for the DesignWare specific registers */
+#define DW_UART_TCR    0xac /* Transceiver Control Register (RS485) */
+#define DW_UART_DE_EN  0xb0 /* Driver Output Enable Register */
+#define DW_UART_RE_EN  0xb4 /* Receiver Output Enable Register */
 #define DW_UART_DLF    0xc0 /* Divisor Latch Fraction Register */
 #define DW_UART_CPR    0xf4 /* Component Parameter Register */
 #define DW_UART_UCV    0xf8 /* UART Component Version */
 
+/* Transceiver Control Register bits */
+#define DW_UART_TCR_RS485_EN           BIT(0)
+#define DW_UART_TCR_RE_POL             BIT(1)
+#define DW_UART_TCR_DE_POL             BIT(2)
+#define DW_UART_TCR_XFER_MODE          GENMASK(4, 3)
+#define DW_UART_TCR_XFER_MODE_DE_DURING_RE     FIELD_PREP(DW_UART_TCR_XFER_MODE, 0)
+#define DW_UART_TCR_XFER_MODE_SW_DE_OR_RE      FIELD_PREP(DW_UART_TCR_XFER_MODE, 1)
+#define DW_UART_TCR_XFER_MODE_DE_OR_RE         FIELD_PREP(DW_UART_TCR_XFER_MODE, 2)
+
 /* Component Parameter Register bits */
 #define DW_UART_CPR_ABP_DATA_WIDTH     (3 << 0)
 #define DW_UART_CPR_AFCE_MODE          (1 << 4)
 /* Helper for FIFO size calculation */
 #define DW_UART_CPR_FIFO_SIZE(a)       (((a >> 16) & 0xff) * 16)
 
-static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
-{
-       if (p->iotype == UPIO_MEM32BE)
-               return ioread32be(p->membase + offset);
-       return readl(p->membase + offset);
-}
-
-static inline void dw8250_writel_ext(struct uart_port *p, int offset, u32 reg)
-{
-       if (p->iotype == UPIO_MEM32BE)
-               iowrite32be(reg, p->membase + offset);
-       else
-               writel(reg, p->membase + offset);
-}
-
 /*
  * divisor = div(I) + div(F)
  * "I" means integer, "F" means fractional
@@ -87,11 +85,87 @@ void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, struct
 }
 EXPORT_SYMBOL_GPL(dw8250_do_set_termios);
 
+static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485)
+{
+       u32 tcr;
+
+       tcr = dw8250_readl_ext(p, DW_UART_TCR);
+       tcr &= ~DW_UART_TCR_XFER_MODE;
+
+       if (rs485->flags & SER_RS485_ENABLED) {
+               /* Clear unsupported flags. */
+               rs485->flags &= SER_RS485_ENABLED | SER_RS485_RX_DURING_TX |
+                               SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND;
+               tcr |= DW_UART_TCR_RS485_EN;
+
+               if (rs485->flags & SER_RS485_RX_DURING_TX) {
+                       tcr |= DW_UART_TCR_XFER_MODE_DE_DURING_RE;
+               } else {
+                       /* HW does not support same DE level for tx and rx */
+                       if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+                           !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
+                               return -EINVAL;
+
+                       tcr |= DW_UART_TCR_XFER_MODE_DE_OR_RE;
+               }
+               dw8250_writel_ext(p, DW_UART_DE_EN, 1);
+               dw8250_writel_ext(p, DW_UART_RE_EN, 1);
+       } else {
+               rs485->flags = 0;
+
+               tcr &= ~DW_UART_TCR_RS485_EN;
+       }
+
+       /* Reset to default polarity */
+       tcr |= DW_UART_TCR_DE_POL;
+       tcr &= ~DW_UART_TCR_RE_POL;
+
+       if (!(rs485->flags & SER_RS485_RTS_ON_SEND))
+               tcr &= ~DW_UART_TCR_DE_POL;
+       if (device_property_read_bool(p->dev, "rs485-rx-active-high"))
+               tcr |= DW_UART_TCR_RE_POL;
+
+       dw8250_writel_ext(p, DW_UART_TCR, tcr);
+
+       rs485->delay_rts_before_send = 0;
+       rs485->delay_rts_after_send = 0;
+
+       p->rs485 = *rs485;
+
+       return 0;
+}
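+
+For reference (not part of the merge), the rs485_config callback above is reached through the standard TIOCSRS485 ioctl; a minimal userspace sketch, assuming a port such as /dev/ttyS1 (hypothetical) is served by this driver:
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <linux/serial.h>
+
+int main(void)
+{
+	struct serial_rs485 rs485 = { 0 };
+	int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY);	/* hypothetical port */
+
+	if (fd < 0)
+		return 1;
+
+	/* Drive DE while transmitting; RTS_ON_SEND and RTS_AFTER_SEND must differ here. */
+	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
+	if (ioctl(fd, TIOCSRS485, &rs485) < 0)
+		perror("TIOCSRS485");
+
+	close(fd);
+	return 0;
+}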
+
+/*
+ * Tests whether the RE_EN register can hold a non-zero value, which indicates
+ * that RS-485 hardware support is present.
+ */
+static bool dw8250_detect_rs485_hw(struct uart_port *p)
+{
+       u32 reg;
+
+       dw8250_writel_ext(p, DW_UART_RE_EN, 1);
+       reg = dw8250_readl_ext(p, DW_UART_RE_EN);
+       dw8250_writel_ext(p, DW_UART_RE_EN, 0);
+       return reg;
+}
+
 void dw8250_setup_port(struct uart_port *p)
 {
+       struct dw8250_port_data *pd = p->private_data;
+       struct dw8250_data *data = to_dw8250_data(pd);
        struct uart_8250_port *up = up_to_u8250p(p);
        u32 reg;
 
+       pd->hw_rs485_support = dw8250_detect_rs485_hw(p);
+       if (pd->hw_rs485_support) {
+               p->rs485_config = dw8250_rs485_config;
+       } else {
+               p->rs485_config = serial8250_em485_config;
+               up->rs485_start_tx = serial8250_em485_start_tx;
+               up->rs485_stop_tx = serial8250_em485_stop_tx;
+       }
+       up->capabilities |= UART_CAP_NOTEMT;
+
        /*
         * If the Component Version Register returns zero, we know that
         * ADDITIONAL_FEATURES are not enabled. No need to go any further.
@@ -108,14 +182,16 @@ void dw8250_setup_port(struct uart_port *p)
        dw8250_writel_ext(p, DW_UART_DLF, 0);
 
        if (reg) {
-               struct dw8250_port_data *d = p->private_data;
-
-               d->dlf_size = fls(reg);
+               pd->dlf_size = fls(reg);
                p->get_divisor = dw8250_get_divisor;
                p->set_divisor = dw8250_set_divisor;
        }
 
        reg = dw8250_readl_ext(p, DW_UART_CPR);
+       if (!reg) {
+               reg = data->pdata->cpr_val;
+               dev_dbg(p->dev, "CPR is not available, using 0x%08x instead\n", reg);
+       }
        if (!reg)
                return;
 
@@ -124,7 +200,7 @@ void dw8250_setup_port(struct uart_port *p)
                p->type = PORT_16550A;
                p->flags |= UPF_FIXED_TYPE;
                p->fifosize = DW_UART_CPR_FIFO_SIZE(reg);
-               up->capabilities = UART_CAP_FIFO;
+               up->capabilities = UART_CAP_FIFO | UART_CAP_NOTEMT;
        }
 
        if (reg & DW_UART_CPR_AFCE_MODE)
index 83d528e..055bfdc 100644 (file)
@@ -1,10 +1,16 @@
 /* SPDX-License-Identifier: GPL-2.0+ */
 /* Synopsys DesignWare 8250 library header file. */
 
+#include <linux/io.h>
+#include <linux/notifier.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 
 #include "8250.h"
 
+struct clk;
+struct reset_control;
+
 struct dw8250_port_data {
        /* Port properties */
        int                     line;
@@ -14,7 +20,52 @@ struct dw8250_port_data {
 
        /* Hardware configuration */
        u8                      dlf_size;
+
+       /* RS485 variables */
+       bool                    hw_rs485_support;
+};
+
+struct dw8250_platform_data {
+       u8 usr_reg;
+       u32 cpr_val;
+       unsigned int quirks;
+};
+
+struct dw8250_data {
+       struct dw8250_port_data data;
+       const struct dw8250_platform_data *pdata;
+
+       int                     msr_mask_on;
+       int                     msr_mask_off;
+       struct clk              *clk;
+       struct clk              *pclk;
+       struct notifier_block   clk_notifier;
+       struct work_struct      clk_work;
+       struct reset_control    *rst;
+
+       unsigned int            skip_autocfg:1;
+       unsigned int            uart_16550_compatible:1;
 };
 
 void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, struct ktermios *old);
 void dw8250_setup_port(struct uart_port *p);
+
+static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
+{
+       return container_of(data, struct dw8250_data, data);
+}
+
+static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
+{
+       if (p->iotype == UPIO_MEM32BE)
+               return ioread32be(p->membase + offset);
+       return readl(p->membase + offset);
+}
+
+static inline void dw8250_writel_ext(struct uart_port *p, int offset, u32 reg)
+{
+       if (p->iotype == UPIO_MEM32BE)
+               iowrite32be(reg, p->membase + offset);
+       else
+               writel(reg, p->membase + offset);
+}
index 251f001..dba5950 100644 (file)
@@ -200,12 +200,12 @@ static int fintek_8250_rs485_config(struct uart_port *port,
        if (!pdata)
                return -EINVAL;
 
-       /* Hardware do not support same RTS level on send and receive */
-       if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
-                       !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
-               return -EINVAL;
 
        if (rs485->flags & SER_RS485_ENABLED) {
+               /* Hardware does not support the same RTS level on send and receive */
+               if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+                   !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
+                       return -EINVAL;
                memset(rs485->padding, 0, sizeof(rs485->padding));
                config |= RS485_URA;
        } else {
index 21053db..54051ec 100644 (file)
@@ -54,9 +54,6 @@
 #define MTK_UART_TX_TRIGGER    1
 #define MTK_UART_RX_TRIGGER    MTK_UART_RX_SIZE
 
-#define MTK_UART_FEATURE_SEL   39      /* Feature Selection register */
-#define MTK_UART_FEAT_NEWRMAP  BIT(0)  /* Use new register map */
-
 #define MTK_UART_XON1          40      /* I/O: Xon character 1 */
 #define MTK_UART_XOFF1         42      /* I/O: Xoff character 1 */
 
@@ -575,10 +572,6 @@ static int mtk8250_probe(struct platform_device *pdev)
                uart.dma = data->dma;
 #endif
 
-       /* Set AP UART new register map */
-       writel(MTK_UART_FEAT_NEWRMAP, uart.port.membase +
-              (MTK_UART_FEATURE_SEL << uart.port.regshift));
-
        /* Disable Rate Fix function */
        writel(0x0, uart.port.membase +
                        (MTK_UART_RATE_FIX << uart.port.regshift));
index be86262..5a699a1 100644 (file)
@@ -326,6 +326,8 @@ static const struct of_device_id of_platform_serial_table[] = {
                .data = (void *)PORT_ALTR_16550_F64, },
        { .compatible = "altr,16550-FIFO128",
                .data = (void *)PORT_ALTR_16550_F128, },
+       { .compatible = "fsl,16550-FIFO64",
+               .data = (void *)PORT_16550A_FSL64, },
        { .compatible = "mediatek,mtk-btif",
                .data = (void *)PORT_MTK_BTIF, },
        { .compatible = "mrvl,mmp-uart",
index a293e9f..a17619d 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/pci.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/math.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/tty.h>
@@ -994,41 +995,29 @@ static void pci_ite887x_exit(struct pci_dev *dev)
 }
 
 /*
- * EndRun Technologies.
- * Determine the number of ports available on the device.
+ * Oxford Semiconductor Inc.
+ * Check if an OxSemi device is part of the Tornado range of devices.
  */
 #define PCI_VENDOR_ID_ENDRUN                   0x7401
 #define PCI_DEVICE_ID_ENDRUN_1588      0xe100
 
-static int pci_endrun_init(struct pci_dev *dev)
+static bool pci_oxsemi_tornado_p(struct pci_dev *dev)
 {
-       u8 __iomem *p;
-       unsigned long deviceID;
-       unsigned int  number_uarts = 0;
+       /* OxSemi Tornado devices are all 0xCxxx */
+       if (dev->vendor == PCI_VENDOR_ID_OXSEMI &&
+           (dev->device & 0xf000) != 0xc000)
+               return false;
 
-       /* EndRun device is all 0xexxx */
+       /* EndRun devices are all 0xExxx */
        if (dev->vendor == PCI_VENDOR_ID_ENDRUN &&
-               (dev->device & 0xf000) != 0xe000)
-               return 0;
-
-       p = pci_iomap(dev, 0, 5);
-       if (p == NULL)
-               return -ENOMEM;
+           (dev->device & 0xf000) != 0xe000)
+               return false;
 
-       deviceID = ioread32(p);
-       /* EndRun device */
-       if (deviceID == 0x07000200) {
-               number_uarts = ioread8(p + 4);
-               pci_dbg(dev, "%d ports detected on EndRun PCI Express device\n", number_uarts);
-       }
-       pci_iounmap(dev, p);
-       return number_uarts;
+       return true;
 }
 
 /*
- * Oxford Semiconductor Inc.
- * Check that device is part of the Tornado range of devices, then determine
- * the number of ports available on the device.
+ * Determine the number of ports available on a Tornado device.
  */
 static int pci_oxsemi_tornado_init(struct pci_dev *dev)
 {
@@ -1036,9 +1025,7 @@ static int pci_oxsemi_tornado_init(struct pci_dev *dev)
        unsigned long deviceID;
        unsigned int  number_uarts = 0;
 
-       /* OxSemi Tornado devices are all 0xCxxx */
-       if (dev->vendor == PCI_VENDOR_ID_OXSEMI &&
-           (dev->device & 0xF000) != 0xC000)
+       if (!pci_oxsemi_tornado_p(dev))
                return 0;
 
        p = pci_iomap(dev, 0, 5);
@@ -1049,12 +1036,217 @@ static int pci_oxsemi_tornado_init(struct pci_dev *dev)
        /* Tornado device */
        if (deviceID == 0x07000200) {
                number_uarts = ioread8(p + 4);
-               pci_dbg(dev, "%d ports detected on Oxford PCI Express device\n", number_uarts);
+               pci_dbg(dev, "%d ports detected on %s PCI Express device\n",
+                       number_uarts,
+                       dev->vendor == PCI_VENDOR_ID_ENDRUN ?
+                       "EndRun" : "Oxford");
        }
        pci_iounmap(dev, p);
        return number_uarts;
 }
 
+/* Tornado-specific constants for the TCR and CPR registers; see below.  */
+#define OXSEMI_TORNADO_TCR_MASK        0xf
+#define OXSEMI_TORNADO_CPR_MASK        0x1ff
+#define OXSEMI_TORNADO_CPR_MIN 0x008
+#define OXSEMI_TORNADO_CPR_DEF 0x10f
+
+/*
+ * Determine the oversampling rate, the clock prescaler, and the clock
+ * divisor for the requested baud rate.  The clock rate is 62.5 MHz,
+ * which is four times the baud base, and the prescaler increments in
+ * steps of 1/8.  Therefore to make calculations on integers we need
+ * to use a scaled clock rate, which is the baud base multiplied by 32
+ * (or our assumed UART clock rate multiplied by 2).
+ *
+ * The allowed oversampling rates are from 4 up to 16 inclusive (values
+ * from 0 to 3 inclusive map to 16).  Likewise the clock prescaler allows
+ * values between 1.000 and 63.875 inclusive (operation for values from
+ * 0.000 to 0.875 has not been specified).  The clock divisor is the usual
+ * unsigned 16-bit integer.
+ *
+ * For the most accurate baud rate we use a table of predetermined
+ * oversampling rates and clock prescalers that records all possible
+ * products of the two parameters in the range from 4 up to 255 inclusive,
+ * and additionally 335 for the 1500000bps rate, with the prescaler scaled
+ * by 8.  The table is sorted by the decreasing value of the oversampling
+ * rate and ties are resolved by sorting by the decreasing value of the
+ * product.  This way preference is given to higher oversampling rates.
+ *
+ * We iterate over the table and choose the product of an oversampling
+ * rate and a clock prescaler that gives the lowest integer division
+ * result deviation, or if an exact integer divider is found we stop
+ * looking for it right away.  We do some fixup if the resulting clock
+ * divisor required would be out of its unsigned 16-bit integer range.
+ *
+ * Finally we abuse the supposed fractional part returned to encode the
+ * 4-bit value of the oversampling rate and the 9-bit value of the clock
+ * prescaler which will end up in the TCR and CPR/CPR2 registers.
+ */
+static unsigned int pci_oxsemi_tornado_get_divisor(struct uart_port *port,
+                                                  unsigned int baud,
+                                                  unsigned int *frac)
+{
+       static u8 p[][2] = {
+               { 16, 14, }, { 16, 13, }, { 16, 12, }, { 16, 11, },
+               { 16, 10, }, { 16,  9, }, { 16,  8, }, { 15, 17, },
+               { 15, 16, }, { 15, 15, }, { 15, 14, }, { 15, 13, },
+               { 15, 12, }, { 15, 11, }, { 15, 10, }, { 15,  9, },
+               { 15,  8, }, { 14, 18, }, { 14, 17, }, { 14, 14, },
+               { 14, 13, }, { 14, 12, }, { 14, 11, }, { 14, 10, },
+               { 14,  9, }, { 14,  8, }, { 13, 19, }, { 13, 18, },
+               { 13, 17, }, { 13, 13, }, { 13, 12, }, { 13, 11, },
+               { 13, 10, }, { 13,  9, }, { 13,  8, }, { 12, 19, },
+               { 12, 18, }, { 12, 17, }, { 12, 11, }, { 12,  9, },
+               { 12,  8, }, { 11, 23, }, { 11, 22, }, { 11, 21, },
+               { 11, 20, }, { 11, 19, }, { 11, 18, }, { 11, 17, },
+               { 11, 11, }, { 11, 10, }, { 11,  9, }, { 11,  8, },
+               { 10, 25, }, { 10, 23, }, { 10, 20, }, { 10, 19, },
+               { 10, 17, }, { 10, 10, }, { 10,  9, }, { 10,  8, },
+               {  9, 27, }, {  9, 23, }, {  9, 21, }, {  9, 19, },
+               {  9, 18, }, {  9, 17, }, {  9,  9, }, {  9,  8, },
+               {  8, 31, }, {  8, 29, }, {  8, 23, }, {  8, 19, },
+               {  8, 17, }, {  8,  8, }, {  7, 35, }, {  7, 31, },
+               {  7, 29, }, {  7, 25, }, {  7, 23, }, {  7, 21, },
+               {  7, 19, }, {  7, 17, }, {  7, 15, }, {  7, 14, },
+               {  7, 13, }, {  7, 12, }, {  7, 11, }, {  7, 10, },
+               {  7,  9, }, {  7,  8, }, {  6, 41, }, {  6, 37, },
+               {  6, 31, }, {  6, 29, }, {  6, 23, }, {  6, 19, },
+               {  6, 17, }, {  6, 13, }, {  6, 11, }, {  6, 10, },
+               {  6,  9, }, {  6,  8, }, {  5, 67, }, {  5, 47, },
+               {  5, 43, }, {  5, 41, }, {  5, 37, }, {  5, 31, },
+               {  5, 29, }, {  5, 25, }, {  5, 23, }, {  5, 19, },
+               {  5, 17, }, {  5, 15, }, {  5, 13, }, {  5, 11, },
+               {  5, 10, }, {  5,  9, }, {  5,  8, }, {  4, 61, },
+               {  4, 59, }, {  4, 53, }, {  4, 47, }, {  4, 43, },
+               {  4, 41, }, {  4, 37, }, {  4, 31, }, {  4, 29, },
+               {  4, 23, }, {  4, 19, }, {  4, 17, }, {  4, 13, },
+               {  4,  9, }, {  4,  8, },
+       };
+       /* Scale the quotient for comparison to get the fractional part.  */
+       const unsigned int quot_scale = 65536;
+       unsigned int sclk = port->uartclk * 2;
+       unsigned int sdiv = DIV_ROUND_CLOSEST(sclk, baud);
+       unsigned int best_squot;
+       unsigned int squot;
+       unsigned int quot;
+       u16 cpr;
+       u8 tcr;
+       int i;
+
+       /* Old custom speed handling.  */
+       if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST) {
+               unsigned int cust_div = port->custom_divisor;
+
+               quot = cust_div & UART_DIV_MAX;
+               tcr = (cust_div >> 16) & OXSEMI_TORNADO_TCR_MASK;
+               cpr = (cust_div >> 20) & OXSEMI_TORNADO_CPR_MASK;
+               if (cpr < OXSEMI_TORNADO_CPR_MIN)
+                       cpr = OXSEMI_TORNADO_CPR_DEF;
+       } else {
+               best_squot = quot_scale;
+               for (i = 0; i < ARRAY_SIZE(p); i++) {
+                       unsigned int spre;
+                       unsigned int srem;
+                       u8 cp;
+                       u8 tc;
+
+                       tc = p[i][0];
+                       cp = p[i][1];
+                       spre = tc * cp;
+
+                       srem = sdiv % spre;
+                       if (srem > spre / 2)
+                               srem = spre - srem;
+                       squot = DIV_ROUND_CLOSEST(srem * quot_scale, spre);
+
+                       if (srem == 0) {
+                               tcr = tc;
+                               cpr = cp;
+                               quot = sdiv / spre;
+                               break;
+                       } else if (squot < best_squot) {
+                               best_squot = squot;
+                               tcr = tc;
+                               cpr = cp;
+                               quot = DIV_ROUND_CLOSEST(sdiv, spre);
+                       }
+               }
+               while (tcr <= (OXSEMI_TORNADO_TCR_MASK + 1) >> 1 &&
+                      quot % 2 == 0) {
+                       quot >>= 1;
+                       tcr <<= 1;
+               }
+               while (quot > UART_DIV_MAX) {
+                       if (tcr <= (OXSEMI_TORNADO_TCR_MASK + 1) >> 1) {
+                               quot >>= 1;
+                               tcr <<= 1;
+                       } else if (cpr <= OXSEMI_TORNADO_CPR_MASK >> 1) {
+                               quot >>= 1;
+                               cpr <<= 1;
+                       } else {
+                               quot = quot * cpr / OXSEMI_TORNADO_CPR_MASK;
+                               cpr = OXSEMI_TORNADO_CPR_MASK;
+                       }
+               }
+       }
+
+       *frac = (cpr << 8) | (tcr & OXSEMI_TORNADO_TCR_MASK);
+       return quot;
+}
+
+/*
+ * Set the oversampling rate in the transmitter clock cycle register (TCR),
+ * the clock prescaler in the clock prescaler register (CPR and CPR2), and
+ * the clock divisor in the divisor latch (DLL and DLM).  Note that for
+ * backwards compatibility any write to CPR clears CPR2 and therefore CPR
+ * has to be written first, followed by CPR2, which occupies the location
+ * of CKS used with earlier UART designs.
+ */
+static void pci_oxsemi_tornado_set_divisor(struct uart_port *port,
+                                          unsigned int baud,
+                                          unsigned int quot,
+                                          unsigned int quot_frac)
+{
+       struct uart_8250_port *up = up_to_u8250p(port);
+       u8 cpr2 = quot_frac >> 16;
+       u8 cpr = quot_frac >> 8;
+       u8 tcr = quot_frac;
+
+       serial_icr_write(up, UART_TCR, tcr);
+       serial_icr_write(up, UART_CPR, cpr);
+       serial_icr_write(up, UART_CKS, cpr2);
+       serial8250_do_set_divisor(port, baud, quot, 0);
+}
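+
+To make the encoding above easier to follow, here is a worked trace (illustrative only, not part of the merge), using the 500 MHz scaled clock derived in the comment before pci_oxsemi_tornado_get_divisor():
+
+/*
+ * Worked example (hypothetical values): requesting 115200 baud gives
+ *   sdiv = DIV_ROUND_CLOSEST(500000000, 115200) = 4340
+ * The first table entry that divides it exactly is { 14, 10 }
+ * (14 * 10 * 31 = 4340), so tcr = 14, cpr = 10, quot = 31; the
+ * halve-quot/double-tcr loop is a no-op since tcr already exceeds 8, and
+ *   *frac = (10 << 8) | 14 = 0x0a0e.
+ * pci_oxsemi_tornado_set_divisor() unpacks that as cpr2 = 0x00,
+ * cpr = 0x0a, tcr = 0x0e and programs TCR, CPR, CPR2 in that order
+ * before handing quot to serial8250_do_set_divisor().
+ */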
+
+/*
+ * For Tornado devices we force MCR[7] set for the Divide-by-M N/8 baud rate
+ * generator prescaler (CPR and CPR2).  Otherwise no prescaler would be used.
+ */
+static void pci_oxsemi_tornado_set_mctrl(struct uart_port *port,
+                                        unsigned int mctrl)
+{
+       struct uart_8250_port *up = up_to_u8250p(port);
+
+       up->mcr |= UART_MCR_CLKSEL;
+       serial8250_do_set_mctrl(port, mctrl);
+}
+
+static int pci_oxsemi_tornado_setup(struct serial_private *priv,
+                                   const struct pciserial_board *board,
+                                   struct uart_8250_port *up, int idx)
+{
+       struct pci_dev *dev = priv->dev;
+
+       if (pci_oxsemi_tornado_p(dev)) {
+               up->port.get_divisor = pci_oxsemi_tornado_get_divisor;
+               up->port.set_divisor = pci_oxsemi_tornado_set_divisor;
+               up->port.set_mctrl = pci_oxsemi_tornado_set_mctrl;
+       }
+
+       return pci_default_setup(priv, board, up, idx);
+}
+
 static int pci_asix_setup(struct serial_private *priv,
                  const struct pciserial_board *board,
                  struct uart_8250_port *port, int idx)
@@ -2244,7 +2436,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
                .device         = PCI_ANY_ID,
                .subvendor      = PCI_ANY_ID,
                .subdevice      = PCI_ANY_ID,
-               .init           = pci_endrun_init,
+               .init           = pci_oxsemi_tornado_init,
                .setup          = pci_default_setup,
        },
        /*
@@ -2256,7 +2448,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
                .subvendor      = PCI_ANY_ID,
                .subdevice      = PCI_ANY_ID,
                .init           = pci_oxsemi_tornado_init,
-               .setup          = pci_default_setup,
+               .setup          = pci_oxsemi_tornado_setup,
        },
        {
                .vendor         = PCI_VENDOR_ID_MAINPINE,
@@ -2264,7 +2456,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
                .subvendor      = PCI_ANY_ID,
                .subdevice      = PCI_ANY_ID,
                .init           = pci_oxsemi_tornado_init,
-               .setup          = pci_default_setup,
+               .setup          = pci_oxsemi_tornado_setup,
        },
        {
                .vendor         = PCI_VENDOR_ID_DIGI,
@@ -2272,7 +2464,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
                .subvendor              = PCI_SUBVENDOR_ID_IBM,
                .subdevice              = PCI_ANY_ID,
                .init                   = pci_oxsemi_tornado_init,
-               .setup          = pci_default_setup,
+               .setup          = pci_oxsemi_tornado_setup,
        },
        {
                .vendor         = PCI_VENDOR_ID_INTEL,
@@ -2589,7 +2781,7 @@ enum pci_board_num_t {
        pbn_b0_2_1843200,
        pbn_b0_4_1843200,
 
-       pbn_b0_1_3906250,
+       pbn_b0_1_15625000,
 
        pbn_b0_bt_1_115200,
        pbn_b0_bt_2_115200,
@@ -2667,12 +2859,11 @@ enum pci_board_num_t {
        pbn_panacom2,
        pbn_panacom4,
        pbn_plx_romulus,
-       pbn_endrun_2_3906250,
        pbn_oxsemi,
-       pbn_oxsemi_1_3906250,
-       pbn_oxsemi_2_3906250,
-       pbn_oxsemi_4_3906250,
-       pbn_oxsemi_8_3906250,
+       pbn_oxsemi_1_15625000,
+       pbn_oxsemi_2_15625000,
+       pbn_oxsemi_4_15625000,
+       pbn_oxsemi_8_15625000,
        pbn_intel_i960,
        pbn_sgi_ioc3,
        pbn_computone_4,
@@ -2815,10 +3006,10 @@ static struct pciserial_board pci_boards[] = {
                .uart_offset    = 8,
        },
 
-       [pbn_b0_1_3906250] = {
+       [pbn_b0_1_15625000] = {
                .flags          = FL_BASE0,
                .num_ports      = 1,
-               .base_baud      = 3906250,
+               .base_baud      = 15625000,
                .uart_offset    = 8,
        },
 
@@ -3190,20 +3381,6 @@ static struct pciserial_board pci_boards[] = {
        },
 
        /*
-        * EndRun Technologies
-       * Uses the size of PCI Base region 0 to
-       * signal now many ports are available
-       * 2 port 952 Uart support
-       */
-       [pbn_endrun_2_3906250] = {
-               .flags          = FL_BASE0,
-               .num_ports      = 2,
-               .base_baud      = 3906250,
-               .uart_offset    = 0x200,
-               .first_offset   = 0x1000,
-       },
-
-       /*
         * This board uses the size of PCI Base region 0 to
         * signal now many ports are available
         */
@@ -3213,31 +3390,31 @@ static struct pciserial_board pci_boards[] = {
                .base_baud      = 115200,
                .uart_offset    = 8,
        },
-       [pbn_oxsemi_1_3906250] = {
+       [pbn_oxsemi_1_15625000] = {
                .flags          = FL_BASE0,
                .num_ports      = 1,
-               .base_baud      = 3906250,
+               .base_baud      = 15625000,
                .uart_offset    = 0x200,
                .first_offset   = 0x1000,
        },
-       [pbn_oxsemi_2_3906250] = {
+       [pbn_oxsemi_2_15625000] = {
                .flags          = FL_BASE0,
                .num_ports      = 2,
-               .base_baud      = 3906250,
+               .base_baud      = 15625000,
                .uart_offset    = 0x200,
                .first_offset   = 0x1000,
        },
-       [pbn_oxsemi_4_3906250] = {
+       [pbn_oxsemi_4_15625000] = {
                .flags          = FL_BASE0,
                .num_ports      = 4,
-               .base_baud      = 3906250,
+               .base_baud      = 15625000,
                .uart_offset    = 0x200,
                .first_offset   = 0x1000,
        },
-       [pbn_oxsemi_8_3906250] = {
+       [pbn_oxsemi_8_15625000] = {
                .flags          = FL_BASE0,
                .num_ports      = 8,
-               .base_baud      = 3906250,
+               .base_baud      = 15625000,
                .uart_offset    = 0x200,
                .first_offset   = 0x1000,
        },
@@ -3518,6 +3695,12 @@ static struct pciserial_board pci_boards[] = {
        },
 };
 
+#define REPORT_CONFIG(option) \
+       (IS_ENABLED(CONFIG_##option) ? 0 : (kernel_ulong_t)&#option)
+#define REPORT_8250_CONFIG(option) \
+       (IS_ENABLED(CONFIG_SERIAL_8250_##option) ? \
+        0 : (kernel_ulong_t)&"SERIAL_8250_"#option)
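+
+An expansion note (illustrative, not part of the merge) for the helpers above:
+
+/*
+ * For example, REPORT_CONFIG(PARPORT_SERIAL) expands to
+ *   (IS_ENABLED(CONFIG_PARPORT_SERIAL) ? 0 : (kernel_ulong_t)&"PARPORT_SERIAL")
+ * With the option enabled the entry's driver_data is 0 and the device is
+ * excluded quietly (parport_serial handles it); with it disabled,
+ * driver_data carries the address of the "PARPORT_SERIAL" string and
+ * pciserial_init_one() below warns
+ *   "ignoring port, enable PARPORT_SERIAL to handle".
+ */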
+
 static const struct pci_device_id blacklist[] = {
        /* softmodems */
        { PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */
@@ -3525,40 +3708,43 @@ static const struct pci_device_id blacklist[] = {
        { PCI_DEVICE(0x1543, 0x3052), }, /* Si3052-based modem, default IDs */
 
        /* multi-io cards handled by parport_serial */
-       { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
-       { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
-       { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
+       /* WCH CH353 2S1P */
+       { PCI_DEVICE(0x4348, 0x7053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
+       /* WCH CH353 1S1P */
+       { PCI_DEVICE(0x4348, 0x5053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
+       /* WCH CH382 2S1P */
+       { PCI_DEVICE(0x1c00, 0x3250), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
 
        /* Intel platforms with MID UART */
-       { PCI_VDEVICE(INTEL, 0x081b), },
-       { PCI_VDEVICE(INTEL, 0x081c), },
-       { PCI_VDEVICE(INTEL, 0x081d), },
-       { PCI_VDEVICE(INTEL, 0x1191), },
-       { PCI_VDEVICE(INTEL, 0x18d8), },
-       { PCI_VDEVICE(INTEL, 0x19d8), },
+       { PCI_VDEVICE(INTEL, 0x081b), REPORT_8250_CONFIG(MID), },
+       { PCI_VDEVICE(INTEL, 0x081c), REPORT_8250_CONFIG(MID), },
+       { PCI_VDEVICE(INTEL, 0x081d), REPORT_8250_CONFIG(MID), },
+       { PCI_VDEVICE(INTEL, 0x1191), REPORT_8250_CONFIG(MID), },
+       { PCI_VDEVICE(INTEL, 0x18d8), REPORT_8250_CONFIG(MID), },
+       { PCI_VDEVICE(INTEL, 0x19d8), REPORT_8250_CONFIG(MID), },
 
        /* Intel platforms with DesignWare UART */
-       { PCI_VDEVICE(INTEL, 0x0936), },
-       { PCI_VDEVICE(INTEL, 0x0f0a), },
-       { PCI_VDEVICE(INTEL, 0x0f0c), },
-       { PCI_VDEVICE(INTEL, 0x228a), },
-       { PCI_VDEVICE(INTEL, 0x228c), },
-       { PCI_VDEVICE(INTEL, 0x4b96), },
-       { PCI_VDEVICE(INTEL, 0x4b97), },
-       { PCI_VDEVICE(INTEL, 0x4b98), },
-       { PCI_VDEVICE(INTEL, 0x4b99), },
-       { PCI_VDEVICE(INTEL, 0x4b9a), },
-       { PCI_VDEVICE(INTEL, 0x4b9b), },
-       { PCI_VDEVICE(INTEL, 0x9ce3), },
-       { PCI_VDEVICE(INTEL, 0x9ce4), },
+       { PCI_VDEVICE(INTEL, 0x0936), REPORT_8250_CONFIG(LPSS), },
+       { PCI_VDEVICE(INTEL, 0x0f0a), REPORT_8250_CONFIG(LPSS), },
+       { PCI_VDEVICE(INTEL, 0x0f0c), REPORT_8250_CONFIG(LPSS), },
+       { PCI_VDEVICE(INTEL, 0x228a), REPORT_8250_CONFIG(LPSS), },
+       { PCI_VDEVICE(INTEL, 0x228c), REPORT_8250_CONFIG(LPSS), },
+       { PCI_VDEVICE(INTEL, 0x4b96), REPORT_8250_CONFIG(LPSS), },
+       { PCI_VDEVICE(INTEL, 0x4b97), REPORT_8250_CONFIG(LPSS), },
+       { PCI_VDEVICE(INTEL, 0x4b98), REPORT_8250_CONFIG(LPSS), },
+       { PCI_VDEVICE(INTEL, 0x4b99), REPORT_8250_CONFIG(LPSS), },
+       { PCI_VDEVICE(INTEL, 0x4b9a), REPORT_8250_CONFIG(LPSS), },
+       { PCI_VDEVICE(INTEL, 0x4b9b), REPORT_8250_CONFIG(LPSS), },
+       { PCI_VDEVICE(INTEL, 0x9ce3), REPORT_8250_CONFIG(LPSS), },
+       { PCI_VDEVICE(INTEL, 0x9ce4), REPORT_8250_CONFIG(LPSS), },
 
        /* Exar devices */
-       { PCI_VDEVICE(EXAR, PCI_ANY_ID), },
-       { PCI_VDEVICE(COMMTECH, PCI_ANY_ID), },
+       { PCI_VDEVICE(EXAR, PCI_ANY_ID), REPORT_8250_CONFIG(EXAR), },
+       { PCI_VDEVICE(COMMTECH, PCI_ANY_ID), REPORT_8250_CONFIG(EXAR), },
 
        /* Pericom devices */
-       { PCI_VDEVICE(PERICOM, PCI_ANY_ID), },
-       { PCI_VDEVICE(ACCESSIO, PCI_ANY_ID), },
+       { PCI_VDEVICE(PERICOM, PCI_ANY_ID), REPORT_8250_CONFIG(PERICOM), },
+       { PCI_VDEVICE(ACCESSIO, PCI_ANY_ID), REPORT_8250_CONFIG(PERICOM), },
 
        /* End of the black list */
        { }
@@ -3840,8 +4026,12 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
        board = &pci_boards[ent->driver_data];
 
        exclude = pci_match_id(blacklist, dev);
-       if (exclude)
+       if (exclude) {
+               if (exclude->driver_data)
+                       pci_warn(dev, "ignoring port, enable %s to handle\n",
+                                (const char *)exclude->driver_data);
                return -ENODEV;
+       }
 
        rc = pcim_enable_device(dev);
        pci_save_state(dev);
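
The REPORT_CONFIG()/REPORT_8250_CONFIG() macros added above stash the name of the Kconfig option that would normally claim the device in the match entry's driver_data whenever that option is disabled, and the new pci_warn() in pciserial_init_one() prints it instead of silently skipping the port. A minimal user-space sketch of the same trick, with IS_ENABLED() mocked out (in the kernel it tests the real CONFIG_* symbol):

#include <stdio.h>

typedef unsigned long kernel_ulong_t;

/* Mocked: pretend the option is disabled.  In the kernel, IS_ENABLED()
 * tests the real CONFIG_* symbol. */
#define IS_ENABLED(option)	0

#define REPORT_CONFIG(option) \
	(IS_ENABLED(CONFIG_##option) ? 0 : (kernel_ulong_t)&#option)

struct fake_id {
	kernel_ulong_t driver_data;
};

int main(void)
{
	struct fake_id exclude = {
		.driver_data = REPORT_CONFIG(PARPORT_SERIAL),
	};

	if (exclude.driver_data)
		printf("ignoring port, enable %s to handle\n",
		       (const char *)exclude.driver_data);
	return 0;
}

Here this prints "ignoring port, enable PARPORT_SERIAL to handle"; with the option enabled, driver_data stays 0 and the blacklist entry behaves exactly as before.
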
@@ -4110,13 +4300,6 @@ static const struct pci_device_id serial_pci_tbl[] = {
                0x10b5, 0x106a, 0, 0,
                pbn_plx_romulus },
        /*
-       * EndRun Technologies. PCI express device range.
-       *    EndRun PTP/1588 has 2 Native UARTs.
-       */
-       {       PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_endrun_2_3906250 },
-       /*
         * Quatech cards. These actually have configurable clocks but for
         * now we just use the default.
         *
@@ -4225,158 +4408,165 @@ static const struct pci_device_id serial_pci_tbl[] = {
         */
        {       PCI_VENDOR_ID_OXSEMI, 0xc101,    /* OXPCIe952 1 Legacy UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_b0_1_3906250 },
+               pbn_b0_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc105,    /* OXPCIe952 1 Legacy UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_b0_1_3906250 },
+               pbn_b0_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc11b,    /* OXPCIe952 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc11f,    /* OXPCIe952 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc120,    /* OXPCIe952 1 Legacy UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_b0_1_3906250 },
+               pbn_b0_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc124,    /* OXPCIe952 1 Legacy UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_b0_1_3906250 },
+               pbn_b0_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc138,    /* OXPCIe952 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc13d,    /* OXPCIe952 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc140,    /* OXPCIe952 1 Legacy UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_b0_1_3906250 },
+               pbn_b0_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc141,    /* OXPCIe952 1 Legacy UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_b0_1_3906250 },
+               pbn_b0_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc144,    /* OXPCIe952 1 Legacy UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_b0_1_3906250 },
+               pbn_b0_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc145,    /* OXPCIe952 1 Legacy UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_b0_1_3906250 },
+               pbn_b0_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc158,    /* OXPCIe952 2 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_2_3906250 },
+               pbn_oxsemi_2_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc15d,    /* OXPCIe952 2 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_2_3906250 },
+               pbn_oxsemi_2_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc208,    /* OXPCIe954 4 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_4_3906250 },
+               pbn_oxsemi_4_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc20d,    /* OXPCIe954 4 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_4_3906250 },
+               pbn_oxsemi_4_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc308,    /* OXPCIe958 8 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_8_3906250 },
+               pbn_oxsemi_8_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc30d,    /* OXPCIe958 8 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_8_3906250 },
+               pbn_oxsemi_8_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc40b,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc40f,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc41b,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc41f,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc42b,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc42f,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc43b,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc43f,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc44b,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc44f,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc45b,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc45f,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc46b,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc46f,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc47b,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc47f,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc48b,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc48f,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc49b,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc49f,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc4ab,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc4af,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc4bb,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc4bf,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc4cb,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_OXSEMI, 0xc4cf,    /* OXPCIe200 1 Native UART */
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        /*
         * Mainpine Inc. IQ Express "Rev3" utilizing OxSemi Tornado
         */
        {       PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 1 Port V.34 Super-G3 Fax */
                PCI_VENDOR_ID_MAINPINE, 0x4001, 0, 0,
-               pbn_oxsemi_1_3906250 },
+               pbn_oxsemi_1_15625000 },
        {       PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 2 Port V.34 Super-G3 Fax */
                PCI_VENDOR_ID_MAINPINE, 0x4002, 0, 0,
-               pbn_oxsemi_2_3906250 },
+               pbn_oxsemi_2_15625000 },
        {       PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 4 Port V.34 Super-G3 Fax */
                PCI_VENDOR_ID_MAINPINE, 0x4004, 0, 0,
-               pbn_oxsemi_4_3906250 },
+               pbn_oxsemi_4_15625000 },
        {       PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 8 Port V.34 Super-G3 Fax */
                PCI_VENDOR_ID_MAINPINE, 0x4008, 0, 0,
-               pbn_oxsemi_8_3906250 },
+               pbn_oxsemi_8_15625000 },
 
        /*
         * Digi/IBM PCIe 2-port Async EIA-232 Adapter utilizing OxSemi Tornado
         */
        {       PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_2_OX_IBM,
                PCI_SUBVENDOR_ID_IBM, PCI_ANY_ID, 0, 0,
-               pbn_oxsemi_2_3906250 },
+               pbn_oxsemi_2_15625000 },
+       /*
+        * EndRun Technologies. PCI express device range.
+        * EndRun PTP/1588 has 2 Native UARTs utilizing OxSemi 952.
+        */
+       {       PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_oxsemi_2_15625000 },
 
        /*
         * SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards,
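
The pbn_*_3906250 -> pbn_*_15625000 switches above raise the baud base used for the OxSemi Tornado (OXPCIe) UARTs, and the relocated EndRun PTP/1588 entry gets the same value now that it is grouped with the OxSemi 952 parts. Assuming the 62.5 MHz reference clock commonly cited for these devices (an assumption here, not stated in the diff), the old and new constants line up with a divide-by-16 versus divide-by-4 baud generator, which is easy to sanity-check:

#include <stdio.h>

int main(void)
{
	const unsigned int clk = 62500000;	/* assumed Tornado reference clock */

	printf("clk / 16 = %u\n", clk / 16);	/* 3906250, the old baud base  */
	printf("clk / 4  = %u\n", clk / 4);	/* 15625000, the new baud base */
	return 0;
}
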
index 1fbd5bf..78b6ded 100644 (file)
@@ -263,7 +263,7 @@ static const struct serial8250_config uart_config[] = {
                .tx_loadsz      = 63,
                .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
                                  UART_FCR7_64BYTE,
-               .flags          = UART_CAP_FIFO,
+               .flags          = UART_CAP_FIFO | UART_CAP_NOTEMT,
        },
        [PORT_RT2880] = {
                .name           = "Palmchip BK-3103",
@@ -538,27 +538,6 @@ serial_port_out_sync(struct uart_port *p, int offset, int value)
 }
 
 /*
- * For the 16C950
- */
-static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
-{
-       serial_out(up, UART_SCR, offset);
-       serial_out(up, UART_ICR, value);
-}
-
-static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
-{
-       unsigned int value;
-
-       serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
-       serial_out(up, UART_SCR, offset);
-       value = serial_in(up, UART_ICR);
-       serial_icr_write(up, UART_ACR, up->acr);
-
-       return value;
-}
-
-/*
  * FIFO support.
  */
 static void serial8250_clear_fifos(struct uart_8250_port *p)
@@ -1504,18 +1483,19 @@ static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec)
        hrtimer_start(hrt, ms_to_ktime(msec), HRTIMER_MODE_REL);
 }
 
-static void __stop_tx_rs485(struct uart_8250_port *p)
+static void __stop_tx_rs485(struct uart_8250_port *p, u64 stop_delay)
 {
        struct uart_8250_em485 *em485 = p->em485;
 
+       stop_delay += (u64)p->port.rs485.delay_rts_after_send * NSEC_PER_MSEC;
+
        /*
         * rs485_stop_tx() is going to set RTS according to config
         * AND flush RX FIFO if required.
         */
-       if (p->port.rs485.delay_rts_after_send > 0) {
+       if (stop_delay > 0) {
                em485->active_timer = &em485->stop_tx_timer;
-               start_hrtimer_ms(&em485->stop_tx_timer,
-                                  p->port.rs485.delay_rts_after_send);
+               hrtimer_start(&em485->stop_tx_timer, ns_to_ktime(stop_delay), HRTIMER_MODE_REL);
        } else {
                p->rs485_stop_tx(p);
                em485->active_timer = NULL;
@@ -1535,16 +1515,32 @@ static inline void __stop_tx(struct uart_8250_port *p)
 
        if (em485) {
                unsigned char lsr = serial_in(p, UART_LSR);
+               u64 stop_delay = 0;
+
+               if (!(lsr & UART_LSR_THRE))
+                       return;
                /*
                 * To provide required timing and allow FIFO transfer,
                 * __stop_tx_rs485() must be called only when both FIFO and
-                * shift register are empty. It is for device driver to enable
-                * interrupt on TEMT.
+                * shift register are empty. The device driver should either
+                * enable interrupt on TEMT or set UART_CAP_NOTEMT, which
+                * enlarges stop_tx_timer by the tx time of one frame to
+                * cover the emptying of the shift register.
                 */
-               if ((lsr & BOTH_EMPTY) != BOTH_EMPTY)
-                       return;
+               if (!(lsr & UART_LSR_TEMT)) {
+                       if (!(p->capabilities & UART_CAP_NOTEMT))
+                               return;
+                       /*
+                        * RTS might get deasserted too early with the normal
+                        * With the normal frame timing formula RTS might get
+                        * deasserted too early: THRE appears to get asserted
+                        * already during tx of the stop bit rather than after
+                        * it has been fully sent.
+                        * Roughly estimate one extra bit here with / 7.
+                       stop_delay = p->port.frame_time + DIV_ROUND_UP(p->port.frame_time, 7);
+               }
 
-               __stop_tx_rs485(p);
+               __stop_tx_rs485(p, stop_delay);
        }
        __do_stop_tx(p);
 }
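
With UART_CAP_NOTEMT set, __stop_tx() above can no longer wait for TEMT, so it pads the rs485 stop timer by one character time plus the DIV_ROUND_UP(frame_time, 7) term the comment describes as roughly one extra bit. A rough stand-alone calculation of that delay, assuming port->frame_time is the nanosecond length of one character frame as the serial core computes it (the rounding here is illustrative, not the core's exact formula):

#include <stdio.h>

#define NSEC_PER_SEC		1000000000ULL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* One character frame in ns at the given baud rate. */
static unsigned long long frame_time_ns(unsigned int baud, unsigned int bits)
{
	return DIV_ROUND_UP((unsigned long long)bits * NSEC_PER_SEC, baud);
}

int main(void)
{
	/* 115200 baud, 8N1: start + 8 data + stop = 10 bits per frame */
	unsigned long long ft = frame_time_ns(115200, 10);
	unsigned long long stop_delay = ft + DIV_ROUND_UP(ft, 7);

	printf("frame_time ~%llu ns, NOTEMT stop_delay ~%llu ns\n",
	       ft, stop_delay);
	return 0;
}

At that rate this works out to just under 100 us of extra RTS hold time, on top of any delay_rts_after_send configured by the user.
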
@@ -1948,9 +1944,12 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
                        status = serial8250_rx_chars(up, status);
        }
        serial8250_modem_status(up);
-       if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE) &&
-               (up->ier & UART_IER_THRI))
-               serial8250_tx_chars(up);
+       if ((status & UART_LSR_THRE) && (up->ier & UART_IER_THRI)) {
+               if (!up->dma || up->dma->tx_err)
+                       serial8250_tx_chars(up);
+               else
+                       __stop_tx(up);
+       }
 
        uart_unlock_and_check_sysrq_irqrestore(port, flags);
 
@@ -2077,10 +2076,7 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
        serial8250_rpm_put(up);
 }
 
-/*
- *     Wait for transmitter & holding register to empty
- */
-static void wait_for_xmitr(struct uart_8250_port *up, int bits)
+static void wait_for_lsr(struct uart_8250_port *up, int bits)
 {
        unsigned int status, tmout = 10000;
 
@@ -2097,6 +2093,16 @@ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
                udelay(1);
                touch_nmi_watchdog();
        }
+}
+
+/*
+ *     Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct uart_8250_port *up, int bits)
+{
+       unsigned int tmout;
+
+       wait_for_lsr(up, bits);
 
        /* Wait up to 1s for flow control if necessary */
        if (up->port.flags & UPF_CONS_FLOW) {
@@ -2614,10 +2620,8 @@ static unsigned char serial8250_compute_lcr(struct uart_8250_port *up,
        }
        if (!(c_cflag & PARODD))
                cval |= UART_LCR_EPAR;
-#ifdef CMSPAR
        if (c_cflag & CMSPAR)
                cval |= UART_LCR_SPAR;
-#endif
 
        return cval;
 }
@@ -3333,6 +3337,35 @@ static void serial8250_console_restore(struct uart_8250_port *up)
 }
 
 /*
+ * Print a string to the serial port using the device FIFO
+ *
+ * It waits for the FIFO to drain (THRE) and then refills it
+ * with up to fifosize bytes at a time, repeating until the
+ * whole string has been sent.
+ */
+static void serial8250_console_fifo_write(struct uart_8250_port *up,
+                                         const char *s, unsigned int count)
+{
+       int i;
+       const char *end = s + count;
+       unsigned int fifosize = up->tx_loadsz;
+       bool cr_sent = false;
+
+       while (s != end) {
+               wait_for_lsr(up, UART_LSR_THRE);
+
+               for (i = 0; i < fifosize && s != end; ++i) {
+                       if (*s == '\n' && !cr_sent) {
+                               serial_out(up, UART_TX, '\r');
+                               cr_sent = true;
+                       } else {
+                               serial_out(up, UART_TX, *s++);
+                               cr_sent = false;
+                       }
+               }
+       }
+}
+
+/*
  *     Print a string to the serial port trying not to disturb
  *     any possible real use of the port...
  *
@@ -3347,7 +3380,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
        struct uart_8250_em485 *em485 = up->em485;
        struct uart_port *port = &up->port;
        unsigned long flags;
-       unsigned int ier;
+       unsigned int ier, use_fifo;
        int locked = 1;
 
        touch_nmi_watchdog();
@@ -3379,7 +3412,30 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
                mdelay(port->rs485.delay_rts_before_send);
        }
 
-       uart_console_write(port, s, count, serial8250_console_putchar);
+       use_fifo = (up->capabilities & UART_CAP_FIFO) &&
+               /*
+                * BCM283x requires checking the fifo
+                * after each byte.
+                */
+               !(up->capabilities & UART_CAP_MINI) &&
+               /*
+                * tx_loadsz contains the transmit fifo size
+                */
+               up->tx_loadsz > 1 &&
+               (up->fcr & UART_FCR_ENABLE_FIFO) &&
+               port->state &&
+               test_bit(TTY_PORT_INITIALIZED, &port->state->port.iflags) &&
+               /*
+                * After we put data in the fifo, the controller will send
+                * it regardless of the CTS state. Therefore, only use the
+                * fifo if we don't use hardware flow control.
+                */
+               !(up->port.flags & UPF_CONS_FLOW);
+
+       if (likely(use_fifo))
+               serial8250_console_fifo_write(up, s, count);
+       else
+               uart_console_write(port, s, count, serial8250_console_putchar);
 
        /*
         *      Finally, wait for transmitter to become empty
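
Taken together, these hunks let the console path burst-fill the FIFO instead of pushing one character at a time whenever the use_fifo conditions hold. A user-space rendering of serial8250_console_fifo_write()'s loop, with the UART register writes replaced by putchar() and wait_for_thre() standing in for wait_for_lsr(up, UART_LSR_THRE), shows the refill-per-burst and LF -> CRLF behaviour:

#include <stdio.h>
#include <string.h>

static void wait_for_thre(void)
{
	/* On real hardware: poll LSR until THRE says the FIFO has drained. */
}

static void console_fifo_write(const char *s, size_t count, unsigned int fifosize)
{
	const char *end = s + count;
	int cr_sent = 0;

	while (s != end) {
		wait_for_thre();

		/* Refill the FIFO in one burst; injected '\r' slots count too. */
		for (unsigned int i = 0; i < fifosize && s != end; i++) {
			if (*s == '\n' && !cr_sent) {
				putchar('\r');
				cr_sent = 1;
			} else {
				putchar(*s++);
				cr_sent = 0;
			}
		}
	}
}

int main(void)
{
	const char *msg = "console: hello\nworld\n";

	console_fifo_write(msg, strlen(msg), 16);
	return 0;
}
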
index 33ca98b..795e551 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/clk.h>
-#include <linux/pm_runtime.h>
 
 #include "8250.h"
 
index cd93ea6..fdb6c41 100644 (file)
@@ -380,7 +380,7 @@ config SERIAL_8250_DW
 config SERIAL_8250_EM
        tristate "Support for Emma Mobile integrated serial port"
        depends on SERIAL_8250 && HAVE_CLK
-       depends on (ARM && ARCH_RENESAS) || COMPILE_TEST
+       depends on ARCH_RENESAS || COMPILE_TEST
        help
          Selecting this option will add support for the integrated serial
          port hardware found on the Emma Mobile line of processors.
index dbac90e..a452748 100644 (file)
@@ -782,7 +782,7 @@ config SERIAL_PMACZILOG_CONSOLE
 
 config SERIAL_CPM
        tristate "CPM SCC/SMC serial port support"
-       depends on CPM2 || CPM1
+       depends on CPM2 || CPM1 || (PPC32 && COMPILE_TEST)
        select SERIAL_CORE
        help
          This driver supports the SCC and SMC serial ports on Motorola 
@@ -806,7 +806,7 @@ config SERIAL_CPM_CONSOLE
 
 config SERIAL_PIC32
        tristate "Microchip PIC32 serial support"
-       depends on MACH_PIC32
+       depends on MACH_PIC32 || (MIPS && COMPILE_TEST)
        select SERIAL_CORE
        help
          If you have a PIC32, this driver supports the serial ports.
@@ -817,7 +817,7 @@ config SERIAL_PIC32
 
 config SERIAL_PIC32_CONSOLE
        bool "PIC32 serial console support"
-       depends on SERIAL_PIC32
+       depends on SERIAL_PIC32=y
        select SERIAL_CORE_CONSOLE
        help
          If you have a PIC32, this driver supports putting a console on one
@@ -1246,7 +1246,7 @@ config SERIAL_XILINX_PS_UART_CONSOLE
 
 config SERIAL_AR933X
        tristate "AR933X serial port support"
-       depends on HAVE_CLK && ATH79
+       depends on (HAVE_CLK && ATH79) || (MIPS && COMPILE_TEST)
        select SERIAL_CORE
        select SERIAL_MCTRL_GPIO if GPIOLIB
        help
@@ -1442,6 +1442,7 @@ config SERIAL_STM32_CONSOLE
        bool "Support for console on STM32"
        depends on SERIAL_STM32=y
        select SERIAL_CORE_CONSOLE
+       select SERIAL_EARLYCON
 
 config SERIAL_MVEBU_UART
        bool "Marvell EBU serial port support"
index 1c16345..cb791c5 100644 (file)
@@ -168,10 +168,8 @@ static void altera_jtaguart_tx_chars(struct altera_jtaguart *pp)
                }
        }
 
-       if (pending == 0) {
-               pp->imr &= ~ALTERA_JTAGUART_CONTROL_WE_MSK;
-               writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
-       }
+       if (pending == 0)
+               altera_jtaguart_stop_tx(port);
 }
 
 static irqreturn_t altera_jtaguart_interrupt(int irq, void *data)
index 4d11a3e..97ef41c 100644 (file)
@@ -42,8 +42,6 @@
 #include <linux/io.h>
 #include <linux/acpi.h>
 
-#include "amba-pl011.h"
-
 #define UART_NR                        14
 
 #define SERIAL_AMBA_MAJOR      204
 #define UART_DR_ERROR          (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
 #define UART_DUMMY_DR_RX       (1 << 16)
 
+enum {
+       REG_DR,
+       REG_ST_DMAWM,
+       REG_ST_TIMEOUT,
+       REG_FR,
+       REG_LCRH_RX,
+       REG_LCRH_TX,
+       REG_IBRD,
+       REG_FBRD,
+       REG_CR,
+       REG_IFLS,
+       REG_IMSC,
+       REG_RIS,
+       REG_MIS,
+       REG_ICR,
+       REG_DMACR,
+       REG_ST_XFCR,
+       REG_ST_XON1,
+       REG_ST_XON2,
+       REG_ST_XOFF1,
+       REG_ST_XOFF2,
+       REG_ST_ITCR,
+       REG_ST_ITIP,
+       REG_ST_ABCR,
+       REG_ST_ABIMSC,
+
+       /* The size of the array - must be last */
+       REG_ARRAY_SIZE,
+};
+
 static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
        [REG_DR] = UART01x_DR,
        [REG_FR] = UART01x_FR,
@@ -2175,25 +2203,11 @@ static int pl011_rs485_config(struct uart_port *port,
        struct uart_amba_port *uap =
                container_of(port, struct uart_amba_port, port);
 
-       /* pick sane settings if the user hasn't */
-       if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
-           !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
-               rs485->flags |= SER_RS485_RTS_ON_SEND;
-               rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
-       }
-       /* clamp the delays to [0, 100ms] */
-       rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
-       rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
-       memset(rs485->padding, 0, sizeof(rs485->padding));
-
        if (port->rs485.flags & SER_RS485_ENABLED)
                pl011_rs485_tx_stop(uap);
 
-       /* Set new configuration */
-       port->rs485 = *rs485;
-
        /* Make sure auto RTS is disabled */
-       if (port->rs485.flags & SER_RS485_ENABLED) {
+       if (rs485->flags & SER_RS485_ENABLED) {
                u32 cr = pl011_read(uap, REG_CR);
 
                cr &= ~UART011_CR_RTSEN;
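
This hunk, like the atmel and fsl_lpuart ones further down, drops the driver-local sanitizing of the RS485 flags and delays together with the port->rs485 assignment; the expectation in this series is that the serial core applies those defaults once before calling the driver's rs485_config(). The removed checks, gathered in one place as a sketch (struct and function names here are illustrative, not the core's API):

#include <string.h>

struct rs485_sketch {			/* stand-in for struct serial_rs485 */
	unsigned int flags;
#define RS485_RTS_ON_SEND	(1 << 1)
#define RS485_RTS_AFTER_SEND	(1 << 2)
	unsigned int delay_rts_before_send;	/* ms */
	unsigned int delay_rts_after_send;	/* ms */
	unsigned int padding[5];
};

/* Roughly the checks the drivers used to repeat before applying a config. */
void sanitize_rs485(struct rs485_sketch *rs485)
{
	/* Pick sane settings if the user asked for both or neither. */
	if (!(rs485->flags & RS485_RTS_ON_SEND) ==
	    !(rs485->flags & RS485_RTS_AFTER_SEND)) {
		rs485->flags |= RS485_RTS_ON_SEND;
		rs485->flags &= ~RS485_RTS_AFTER_SEND;
	}

	/* Clamp the delays to [0, 100] ms. */
	if (rs485->delay_rts_before_send > 100)
		rs485->delay_rts_before_send = 100;
	if (rs485->delay_rts_after_send > 100)
		rs485->delay_rts_after_send = 100;

	memset(rs485->padding, 0, sizeof(rs485->padding));
}
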
diff --git a/drivers/tty/serial/amba-pl011.h b/drivers/tty/serial/amba-pl011.h
deleted file mode 100644 (file)
index 077eb12..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef AMBA_PL011_H
-#define AMBA_PL011_H
-
-enum {
-       REG_DR,
-       REG_ST_DMAWM,
-       REG_ST_TIMEOUT,
-       REG_FR,
-       REG_LCRH_RX,
-       REG_LCRH_TX,
-       REG_IBRD,
-       REG_FBRD,
-       REG_CR,
-       REG_IFLS,
-       REG_IMSC,
-       REG_RIS,
-       REG_MIS,
-       REG_ICR,
-       REG_DMACR,
-       REG_ST_XFCR,
-       REG_ST_XON1,
-       REG_ST_XON2,
-       REG_ST_XOFF1,
-       REG_ST_XOFF2,
-       REG_ST_ITCR,
-       REG_ST_ITIP,
-       REG_ST_ABCR,
-       REG_ST_ABIMSC,
-
-       /* The size of the array - must be last */
-       REG_ARRAY_SIZE,
-};
-
-#endif
index 3a45e4f..dd1c7e4 100644 (file)
@@ -299,11 +299,9 @@ static int atmel_config_rs485(struct uart_port *port,
        /* Resetting serial mode to RS232 (0x0) */
        mode &= ~ATMEL_US_USMODE;
 
-       port->rs485 = *rs485conf;
-
        if (rs485conf->flags & SER_RS485_ENABLED) {
                dev_dbg(port->dev, "Setting UART to RS485\n");
-               if (port->rs485.flags & SER_RS485_RX_DURING_TX)
+               if (rs485conf->flags & SER_RS485_RX_DURING_TX)
                        atmel_port->tx_done_mask = ATMEL_US_TXRDY;
                else
                        atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
index 6113b95..8c58277 100644 (file)
@@ -19,6 +19,8 @@ struct gpio_desc;
 #include "cpm_uart_cpm2.h"
 #elif defined(CONFIG_CPM1)
 #include "cpm_uart_cpm1.h"
+#elif defined(CONFIG_COMPILE_TEST)
+#include "cpm_uart_cpm2.h"
 #endif
 
 #define SERIAL_CPM_MAJOR       204
index d6d3db9..db07d6a 100644 (file)
@@ -1247,7 +1247,7 @@ static int cpm_uart_init_port(struct device_node *np,
        }
 
 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
-#ifdef CONFIG_CONSOLE_POLL
+#if defined(CONFIG_CONSOLE_POLL) && defined(CONFIG_SERIAL_CPM_CONSOLE)
        if (!udbg_port)
 #endif
                udbg_putc = NULL;
index 6a1cd03..108af25 100644 (file)
@@ -25,7 +25,6 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/fs_pd.h>
-#include <asm/prom.h>
 
 #include <linux/serial_core.h>
 #include <linux/kernel.h>
index e37a917..af951e6 100644 (file)
@@ -309,6 +309,8 @@ static void digicolor_uart_set_termios(struct uart_port *port,
        case CS8:
        default:
                config |= UA_CONFIG_CHAR_LEN;
+               termios->c_cflag &= ~CSIZE;
+               termios->c_cflag |= CS8;
                break;
        }
 
index be12fee..0d6e62f 100644 (file)
 /* IMX lpuart has four extra unused regs located at the beginning */
 #define IMX_REG_OFF    0x10
 
-static DEFINE_IDA(fsl_lpuart_ida);
-
 enum lpuart_type {
        VF610_LPUART,
        LS1021A_LPUART,
@@ -276,7 +274,6 @@ struct lpuart_port {
        int                     rx_dma_rng_buf_len;
        unsigned int            dma_tx_nents;
        wait_queue_head_t       dma_wait;
-       bool                    id_allocated;
 };
 
 struct lpuart_soc_data {
@@ -1118,7 +1115,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
        struct dma_chan *chan = sport->dma_rx_chan;
        struct circ_buf *ring = &sport->rx_ring;
        unsigned long flags;
-       int count = 0, copied;
+       int count, copied;
 
        if (lpuart_is_32(sport)) {
                unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
@@ -1378,19 +1375,6 @@ static int lpuart_config_rs485(struct uart_port *port,
                modem |= UARTMODEM_TXRTSE;
 
                /*
-                * RTS needs to be logic HIGH either during transfer _or_ after
-                * transfer, other variants are not supported by the hardware.
-                */
-
-               if (!(rs485->flags & (SER_RS485_RTS_ON_SEND |
-                               SER_RS485_RTS_AFTER_SEND)))
-                       rs485->flags |= SER_RS485_RTS_ON_SEND;
-
-               if (rs485->flags & SER_RS485_RTS_ON_SEND &&
-                               rs485->flags & SER_RS485_RTS_AFTER_SEND)
-                       rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
-
-               /*
                 * The hardware defaults to RTS logic HIGH while transfer.
                 * Switch polarity in case RTS shall be logic HIGH
                 * after transfer.
@@ -1402,9 +1386,6 @@ static int lpuart_config_rs485(struct uart_port *port,
                        modem |= UARTMODEM_TXRTSPOL;
        }
 
-       /* Store the new configuration */
-       sport->port.rs485 = *rs485;
-
        writeb(modem, sport->port.membase + UARTMODEM);
        return 0;
 }
@@ -1429,19 +1410,6 @@ static int lpuart32_config_rs485(struct uart_port *port,
                modem |= UARTMODEM_TXRTSE;
 
                /*
-                * RTS needs to be logic HIGH either during transfer _or_ after
-                * transfer, other variants are not supported by the hardware.
-                */
-
-               if (!(rs485->flags & (SER_RS485_RTS_ON_SEND |
-                               SER_RS485_RTS_AFTER_SEND)))
-                       rs485->flags |= SER_RS485_RTS_ON_SEND;
-
-               if (rs485->flags & SER_RS485_RTS_ON_SEND &&
-                               rs485->flags & SER_RS485_RTS_AFTER_SEND)
-                       rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
-
-               /*
                 * The hardware defaults to RTS logic HIGH while transfer.
                 * Switch polarity in case RTS shall be logic HIGH
                 * after transfer.
@@ -1453,9 +1421,6 @@ static int lpuart32_config_rs485(struct uart_port *port,
                        modem |= UARTMODEM_TXRTSPOL;
        }
 
-       /* Store the new configuration */
-       sport->port.rs485 = *rs485;
-
        lpuart32_write(&sport->port, modem, UARTMODIR);
        return 0;
 }
@@ -2145,12 +2110,10 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
        if (sport->port.rs485.flags & SER_RS485_ENABLED)
                termios->c_cflag &= ~CRTSCTS;
 
-       if (termios->c_cflag & CRTSCTS) {
-               modem |= (UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
-       } else {
-               termios->c_cflag &= ~CRTSCTS;
+       if (termios->c_cflag & CRTSCTS)
+               modem |= UARTMODIR_RXRTSE | UARTMODIR_TXCTSE;
+       else
                modem &= ~(UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
-       }
 
        if (termios->c_cflag & CSTOPB)
                bd |= UARTBAUD_SBNS;
@@ -2717,23 +2680,18 @@ static int lpuart_probe(struct platform_device *pdev)
 
        ret = of_alias_get_id(np, "serial");
        if (ret < 0) {
-               ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
-               if (ret < 0) {
-                       dev_err(&pdev->dev, "port line is full, add device failed\n");
-                       return ret;
-               }
-               sport->id_allocated = true;
+               dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+               return ret;
        }
        if (ret >= ARRAY_SIZE(lpuart_ports)) {
                dev_err(&pdev->dev, "serial%d out of range\n", ret);
-               ret = -EINVAL;
-               goto failed_out_of_range;
+               return -EINVAL;
        }
        sport->port.line = ret;
 
        ret = lpuart_enable_clks(sport);
        if (ret)
-               goto failed_clock_enable;
+               return ret;
        sport->port.uartclk = lpuart_get_baud_clk_rate(sport);
 
        lpuart_ports[sport->port.line] = sport;
@@ -2781,10 +2739,6 @@ failed_reset:
        uart_remove_one_port(&lpuart_reg, &sport->port);
 failed_attach_port:
        lpuart_disable_clks(sport);
-failed_clock_enable:
-failed_out_of_range:
-       if (sport->id_allocated)
-               ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
        return ret;
 }
 
@@ -2794,9 +2748,6 @@ static int lpuart_remove(struct platform_device *pdev)
 
        uart_remove_one_port(&lpuart_reg, &sport->port);
 
-       if (sport->id_allocated)
-               ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
-
        lpuart_disable_clks(sport);
 
        if (sport->dma_tx_chan)
@@ -2926,7 +2877,6 @@ static int __init lpuart_serial_init(void)
 
 static void __exit lpuart_serial_exit(void)
 {
-       ida_destroy(&fsl_lpuart_ida);
        platform_driver_unregister(&lpuart_driver);
        uart_unregister_driver(&lpuart_reg);
 }
index 03a2fe9..45df299 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/fs.h>
 #include <linux/tty_flip.h>
 #include <linux/serial.h>
+#include <linux/serial_core.h>
 #include <linux/serial_reg.h>
 #include <linux/major.h>
 #include <linux/string.h>
 #include <asm/irq.h>
 #include <linux/uaccess.h>
 
-#include "icom.h"
-
 /*#define ICOM_TRACE            enable port trace capabilities */
 
 #define ICOM_DRIVER_NAME "icom"
-#define ICOM_VERSION_STR "1.3.1"
 #define NR_PORTS              128
-#define ICOM_PORT ((struct icom_port *)port)
-#define to_icom_adapter(d) container_of(d, struct icom_adapter, kref)
+
+static const unsigned int icom_acfg_baud[] = {
+       300,
+       600,
+       900,
+       1200,
+       1800,
+       2400,
+       3600,
+       4800,
+       7200,
+       9600,
+       14400,
+       19200,
+       28800,
+       38400,
+       57600,
+       76800,
+       115200,
+       153600,
+       230400,
+       307200,
+       460800,
+};
+#define BAUD_TABLE_LIMIT       (ARRAY_SIZE(icom_acfg_baud) - 1)
+
+struct icom_regs {
+       u32 control;            /* Adapter Control Register     */
+       u32 interrupt;          /* Adapter Interrupt Register   */
+       u32 int_mask;           /* Adapter Interrupt Mask Reg   */
+       u32 int_pri;            /* Adapter Interrupt Priority r */
+       u32 int_reg_b;          /* Adapter non-masked Interrupt */
+       u32 resvd01;
+       u32 resvd02;
+       u32 resvd03;
+       u32 control_2;          /* Adapter Control Register 2   */
+       u32 interrupt_2;        /* Adapter Interrupt Register 2 */
+       u32 int_mask_2;         /* Adapter Interrupt Mask 2     */
+       u32 int_pri_2;          /* Adapter Interrupt Prior 2    */
+       u32 int_reg_2b;         /* Adapter non-masked 2         */
+};
+
+struct func_dram {
+       u32 reserved[108];      /* 0-1B0   reserved by personality code */
+       u32 RcvStatusAddr;      /* 1B0-1B3 Status Address for Next rcv */
+       u8 RcvStnAddr;          /* 1B4     Receive Station Addr */
+       u8 IdleState;           /* 1B5     Idle State */
+       u8 IdleMonitor;         /* 1B6     Idle Monitor */
+       u8 FlagFillIdleTimer;   /* 1B7     Flag Fill Idle Timer */
+       u32 XmitStatusAddr;     /* 1B8-1BB Transmit Status Address */
+       u8 StartXmitCmd;        /* 1BC     Start Xmit Command */
+       u8 HDLCConfigReg;       /* 1BD     Reserved */
+       u8 CauseCode;           /* 1BE     Cause code for fatal error */
+       u8 xchar;               /* 1BF     High priority send */
+       u32 reserved3;          /* 1C0-1C3 Reserved */
+       u8 PrevCmdReg;          /* 1C4     Reserved */
+       u8 CmdReg;              /* 1C5     Command Register */
+       u8 async_config2;       /* 1C6     Async Config Byte 2 */
+       u8 async_config3;       /* 1C7     Async Config Byte 3 */
+       u8 dce_resvd[20];       /* 1C8-1DB DCE Rsvd           */
+       u8 dce_resvd21;         /* 1DC     DCE Rsvd (21st byte) */
+       u8 misc_flags;          /* 1DD     misc flags         */
+#define V2_HARDWARE     0x40
+#define ICOM_HDW_ACTIVE 0x01
+       u8 call_length;         /* 1DE     Phone #/CFI buff ln */
+       u8 call_length2;        /* 1DF     Upper byte (unused) */
+       u32 call_addr;          /* 1E0-1E3 Phn #/CFI buff addr */
+       u16 timer_value;        /* 1E4-1E5 general timer value */
+       u8 timer_command;       /* 1E6     general timer cmd  */
+       u8 dce_command;         /* 1E7     dce command reg    */
+       u8 dce_cmd_status;      /* 1E8     dce command stat   */
+       u8 x21_r1_ioff;         /* 1E9     dce ready counter  */
+       u8 x21_r0_ioff;         /* 1EA     dce not ready ctr  */
+       u8 x21_ralt_ioff;       /* 1EB     dce CNR counter    */
+       u8 x21_r1_ion;          /* 1EC     dce ready I on ctr */
+       u8 rsvd_ier;            /* 1ED     Rsvd for IER (if ne */
+       u8 ier;                 /* 1EE     Interrupt Enable   */
+       u8 isr;                 /* 1EF     Input Signal Reg   */
+       u8 osr;                 /* 1F0     Output Signal Reg  */
+       u8 reset;               /* 1F1     Reset/Reload Reg   */
+       u8 disable;             /* 1F2     Disable Reg        */
+       u8 sync;                /* 1F3     Sync Reg           */
+       u8 error_stat;          /* 1F4     Error Status       */
+       u8 cable_id;            /* 1F5     Cable ID           */
+       u8 cs_length;           /* 1F6     CS Load Length     */
+       u8 mac_length;          /* 1F7     Mac Load Length    */
+       u32 cs_load_addr;       /* 1F8-1FB Call Load PCI Addr */
+       u32 mac_load_addr;      /* 1FC-1FF Mac Load PCI Addr  */
+};
+
+/*
+ * adapter defines and structures
+ */
+#define ICOM_CONTROL_START_A         0x00000008
+#define ICOM_CONTROL_STOP_A          0x00000004
+#define ICOM_CONTROL_START_B         0x00000002
+#define ICOM_CONTROL_STOP_B          0x00000001
+#define ICOM_CONTROL_START_C         0x00000008
+#define ICOM_CONTROL_STOP_C          0x00000004
+#define ICOM_CONTROL_START_D         0x00000002
+#define ICOM_CONTROL_STOP_D          0x00000001
+#define ICOM_IRAM_OFFSET             0x1000
+#define ICOM_IRAM_SIZE               0x0C00
+#define ICOM_DCE_IRAM_OFFSET         0x0A00
+#define ICOM_CABLE_ID_VALID          0x01
+#define ICOM_CABLE_ID_MASK           0xF0
+#define ICOM_DISABLE                 0x80
+#define CMD_XMIT_RCV_ENABLE          0xC0
+#define CMD_XMIT_ENABLE              0x40
+#define CMD_RCV_DISABLE              0x00
+#define CMD_RCV_ENABLE               0x80
+#define CMD_RESTART                  0x01
+#define CMD_HOLD_XMIT                0x02
+#define CMD_SND_BREAK                0x04
+#define RS232_CABLE                  0x06
+#define V24_CABLE                    0x0E
+#define V35_CABLE                    0x0C
+#define V36_CABLE                    0x02
+#define NO_CABLE                     0x00
+#define START_DOWNLOAD               0x80
+#define ICOM_INT_MASK_PRC_A          0x00003FFF
+#define ICOM_INT_MASK_PRC_B          0x3FFF0000
+#define ICOM_INT_MASK_PRC_C          0x00003FFF
+#define ICOM_INT_MASK_PRC_D          0x3FFF0000
+#define INT_RCV_COMPLETED            0x1000
+#define INT_XMIT_COMPLETED           0x2000
+#define INT_IDLE_DETECT              0x0800
+#define INT_RCV_DISABLED             0x0400
+#define INT_XMIT_DISABLED            0x0200
+#define INT_RCV_XMIT_SHUTDOWN        0x0100
+#define INT_FATAL_ERROR              0x0080
+#define INT_CABLE_PULL               0x0020
+#define INT_SIGNAL_CHANGE            0x0010
+#define HDLC_PPP_PURE_ASYNC          0x02
+#define HDLC_FF_FILL                 0x00
+#define HDLC_HDW_FLOW                0x01
+#define START_XMIT                   0x80
+#define ICOM_ACFG_DRIVE1             0x20
+#define ICOM_ACFG_NO_PARITY          0x00
+#define ICOM_ACFG_PARITY_ENAB        0x02
+#define ICOM_ACFG_PARITY_ODD         0x01
+#define ICOM_ACFG_8BPC               0x00
+#define ICOM_ACFG_7BPC               0x04
+#define ICOM_ACFG_6BPC               0x08
+#define ICOM_ACFG_5BPC               0x0C
+#define ICOM_ACFG_1STOP_BIT          0x00
+#define ICOM_ACFG_2STOP_BIT          0x10
+#define ICOM_DTR                     0x80
+#define ICOM_RTS                     0x40
+#define ICOM_RI                      0x08
+#define ICOM_DSR                     0x80
+#define ICOM_DCD                     0x20
+#define ICOM_CTS                     0x40
+
+#define NUM_XBUFFS 1
+#define NUM_RBUFFS 2
+#define RCV_BUFF_SZ 0x0200
+#define XMIT_BUFF_SZ 0x1000
+struct statusArea {
+    /**********************************************/
+       /* Transmit Status Area                       */
+    /**********************************************/
+       struct xmit_status_area{
+               __le32 leNext;  /* Next entry in Little Endian on Adapter */
+               __le32 leNextASD;
+               __le32 leBuffer;        /* Buffer for entry in LE for Adapter */
+               __le16 leLengthASD;
+               __le16 leOffsetASD;
+               __le16 leLength;        /* Length of data in segment */
+               __le16 flags;
+#define SA_FLAGS_DONE           0x0080 /* Done with Segment */
+#define SA_FLAGS_CONTINUED      0x8000 /* More Segments */
+#define SA_FLAGS_IDLE           0x4000 /* Mark IDLE after frm */
+#define SA_FLAGS_READY_TO_XMIT  0x0800
+#define SA_FLAGS_STAT_MASK      0x007F
+       } xmit[NUM_XBUFFS];
+
+    /**********************************************/
+       /* Receive Status Area                        */
+    /**********************************************/
+       struct {
+               __le32 leNext;  /* Next entry in Little Endian on Adapter */
+               __le32 leNextASD;
+               __le32 leBuffer;        /* Buffer for entry in LE for Adapter */
+               __le16 WorkingLength;   /* size of segment */
+               __le16 reserv01;
+               __le16 leLength;        /* Length of data in segment */
+               __le16 flags;
+#define SA_FL_RCV_DONE           0x0010        /* Data ready */
+#define SA_FLAGS_OVERRUN         0x0040
+#define SA_FLAGS_PARITY_ERROR    0x0080
+#define SA_FLAGS_FRAME_ERROR     0x0001
+#define SA_FLAGS_FRAME_TRUNC     0x0002
+#define SA_FLAGS_BREAK_DET       0x0004        /* set conditionally by device driver, not hardware */
+#define SA_FLAGS_RCV_MASK        0xFFE6
+       } rcv[NUM_RBUFFS];
+};
+
+struct icom_adapter;
+
+
+#define ICOM_MAJOR       243
+#define ICOM_MINOR_START 0
+
+struct icom_port {
+       struct uart_port uart_port;
+       unsigned char cable_id;
+       unsigned char read_status_mask;
+       unsigned char ignore_status_mask;
+       void __iomem * int_reg;
+       struct icom_regs __iomem *global_reg;
+       struct func_dram __iomem *dram;
+       int port;
+       struct statusArea *statStg;
+       dma_addr_t statStg_pci;
+       __le32 *xmitRestart;
+       dma_addr_t xmitRestart_pci;
+       unsigned char *xmit_buf;
+       dma_addr_t xmit_buf_pci;
+       unsigned char *recv_buf;
+       dma_addr_t recv_buf_pci;
+       int next_rcv;
+       int status;
+#define ICOM_PORT_ACTIVE       1       /* Port exists. */
+#define ICOM_PORT_OFF          0       /* Port does not exist. */
+       struct icom_adapter *adapter;
+};
+
+struct icom_adapter {
+       void __iomem * base_addr;
+       unsigned long base_addr_pci;
+       struct pci_dev *pci_dev;
+       struct icom_port port_info[4];
+       int index;
+       int version;
+#define ADAPTER_V1     0x0001
+#define ADAPTER_V2     0x0002
+       u32 subsystem_id;
+#define FOUR_PORT_MODEL                                0x0252
+#define V2_TWO_PORTS_RVX                       0x021A
+#define V2_ONE_PORT_RVX_ONE_PORT_IMBED_MDM     0x0251
+       int numb_ports;
+       struct list_head icom_adapter_entry;
+       struct kref kref;
+};
+
+/* prototype */
+extern void iCom_sercons_init(void);
+
+struct lookup_proc_table {
+       u32     __iomem *global_control_reg;
+       unsigned long   processor_id;
+};
+
+struct lookup_int_table {
+       u32     __iomem *global_int_mask;
+       unsigned long   processor_id;
+};
+
+static inline struct icom_port *to_icom_port(struct uart_port *port)
+{
+       return container_of(port, struct icom_port, uart_port);
+}
 
 static const struct pci_device_id icom_pci_table[] = {
        {
@@ -222,7 +481,7 @@ static int get_port_memory(struct icom_port *icom_port)
                if (index < (NUM_XBUFFS - 1)) {
                        memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area));
                        icom_port->statStg->xmit[index].leLengthASD =
-                           (unsigned short int) cpu_to_le16(XMIT_BUFF_SZ);
+                           cpu_to_le16(XMIT_BUFF_SZ);
                        trace(icom_port, "FOD_ADDR", stgAddr);
                        trace(icom_port, "FOD_XBUFF",
                              (unsigned long) icom_port->xmit_buf);
@@ -231,7 +490,7 @@ static int get_port_memory(struct icom_port *icom_port)
                } else if (index == (NUM_XBUFFS - 1)) {
                        memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area));
                        icom_port->statStg->xmit[index].leLengthASD =
-                           (unsigned short int) cpu_to_le16(XMIT_BUFF_SZ);
+                           cpu_to_le16(XMIT_BUFF_SZ);
                        trace(icom_port, "FOD_XBUFF",
                              (unsigned long) icom_port->xmit_buf);
                        icom_port->statStg->xmit[index].leBuffer =
@@ -249,7 +508,7 @@ static int get_port_memory(struct icom_port *icom_port)
                stgAddr = stgAddr + sizeof(icom_port->statStg->rcv[0]);
                icom_port->statStg->rcv[index].leLength = 0;
                icom_port->statStg->rcv[index].WorkingLength =
-                   (unsigned short int) cpu_to_le16(RCV_BUFF_SZ);
+                   cpu_to_le16(RCV_BUFF_SZ);
                if (index < (NUM_RBUFFS - 1) ) {
                        offset = stgAddr - (unsigned long) icom_port->statStg;
                        icom_port->statStg->rcv[index].leNext =
@@ -617,16 +876,17 @@ unlock:
 
 static int icom_write(struct uart_port *port)
 {
+       struct icom_port *icom_port = to_icom_port(port);
        unsigned long data_count;
        unsigned char cmdReg;
        unsigned long offset;
        int temp_tail = port->state->xmit.tail;
 
-       trace(ICOM_PORT, "WRITE", 0);
+       trace(icom_port, "WRITE", 0);
 
-       if (cpu_to_le16(ICOM_PORT->statStg->xmit[0].flags) &
+       if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
            SA_FLAGS_READY_TO_XMIT) {
-               trace(ICOM_PORT, "WRITE_FULL", 0);
+               trace(icom_port, "WRITE_FULL", 0);
                return 0;
        }
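
The icom conversions in this and the following hunks replace cpu_to_le16()/cpu_to_le32() on values read back from the adapter's status areas with le16_to_cpu()/le32_to_cpu(). The generated byte swap is identical either way, so the change is about expressing the conversion direction correctly for the __le16/__le32 annotations rather than altering behaviour. A small host-side sketch of what a little-endian read means, independent of CPU endianness:

#include <stdint.h>
#include <stdio.h>

/* Interpret two bytes stored in little-endian order, whatever the host is. */
static uint16_t le16_to_cpu_sketch(const uint8_t *b)
{
	return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
	/* 0x0800 (SA_FLAGS_READY_TO_XMIT) as it sits in adapter memory */
	uint8_t raw[2] = { 0x00, 0x08 };

	printf("flags = 0x%04x\n", le16_to_cpu_sketch(raw));
	return 0;
}
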
 
@@ -634,7 +894,7 @@ static int icom_write(struct uart_port *port)
        while ((port->state->xmit.head != temp_tail) &&
               (data_count <= XMIT_BUFF_SZ)) {
 
-               ICOM_PORT->xmit_buf[data_count++] =
+               icom_port->xmit_buf[data_count++] =
                    port->state->xmit.buf[temp_tail];
 
                temp_tail++;
@@ -642,22 +902,22 @@ static int icom_write(struct uart_port *port)
        }
 
        if (data_count) {
-               ICOM_PORT->statStg->xmit[0].flags =
+               icom_port->statStg->xmit[0].flags =
                    cpu_to_le16(SA_FLAGS_READY_TO_XMIT);
-               ICOM_PORT->statStg->xmit[0].leLength =
+               icom_port->statStg->xmit[0].leLength =
                    cpu_to_le16(data_count);
                offset =
-                   (unsigned long) &ICOM_PORT->statStg->xmit[0] -
-                   (unsigned long) ICOM_PORT->statStg;
-               *ICOM_PORT->xmitRestart =
-                   cpu_to_le32(ICOM_PORT->statStg_pci + offset);
-               cmdReg = readb(&ICOM_PORT->dram->CmdReg);
+                   (unsigned long) &icom_port->statStg->xmit[0] -
+                   (unsigned long) icom_port->statStg;
+               *icom_port->xmitRestart =
+                   cpu_to_le32(icom_port->statStg_pci + offset);
+               cmdReg = readb(&icom_port->dram->CmdReg);
                writeb(cmdReg | CMD_XMIT_RCV_ENABLE,
-                      &ICOM_PORT->dram->CmdReg);
-               writeb(START_XMIT, &ICOM_PORT->dram->StartXmitCmd);
-               trace(ICOM_PORT, "WRITE_START", data_count);
+                      &icom_port->dram->CmdReg);
+               writeb(START_XMIT, &icom_port->dram->StartXmitCmd);
+               trace(icom_port, "WRITE_START", data_count);
                /* write flush */
-               readb(&ICOM_PORT->dram->StartXmitCmd);
+               readb(&icom_port->dram->StartXmitCmd);
        }
 
        return data_count;
@@ -696,8 +956,7 @@ static inline void check_modem_status(struct icom_port *icom_port)
 
 static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
 {
-       unsigned short int count;
-       int i;
+       u16 count, i;
 
        if (port_int_reg & (INT_XMIT_COMPLETED)) {
                trace(icom_port, "XMIT_COMPLETE", 0);
@@ -706,8 +965,7 @@ static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
                icom_port->statStg->xmit[0].flags &=
                        cpu_to_le16(~SA_FLAGS_READY_TO_XMIT);
 
-               count = (unsigned short int)
-                       cpu_to_le16(icom_port->statStg->xmit[0].leLength);
+               count = le16_to_cpu(icom_port->statStg->xmit[0].leLength);
                icom_port->uart_port.icount.tx += count;
 
                for (i=0; i<count &&
@@ -729,7 +987,7 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
 {
        short int count, rcv_buff;
        struct tty_port *port = &icom_port->uart_port.state->port;
-       unsigned short int status;
+       u16 status;
        struct uart_icount *icount;
        unsigned long offset;
        unsigned char flag;
@@ -737,19 +995,18 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
        trace(icom_port, "RCV_COMPLETE", 0);
        rcv_buff = icom_port->next_rcv;
 
-       status = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].flags);
+       status = le16_to_cpu(icom_port->statStg->rcv[rcv_buff].flags);
        while (status & SA_FL_RCV_DONE) {
                int first = -1;
 
                trace(icom_port, "FID_STATUS", status);
-               count = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].leLength);
+               count = le16_to_cpu(icom_port->statStg->rcv[rcv_buff].leLength);
 
                trace(icom_port, "RCV_COUNT", count);
 
                trace(icom_port, "REAL_COUNT", count);
 
-               offset =
-                       cpu_to_le32(icom_port->statStg->rcv[rcv_buff].leBuffer) -
+               offset = le32_to_cpu(icom_port->statStg->rcv[rcv_buff].leBuffer) -
                        icom_port->recv_buf_pci;
 
                /* Block copy all but the last byte as this may have status */
@@ -819,13 +1076,13 @@ ignore_char:
                icom_port->statStg->rcv[rcv_buff].flags = 0;
                icom_port->statStg->rcv[rcv_buff].leLength = 0;
                icom_port->statStg->rcv[rcv_buff].WorkingLength =
-                       (unsigned short int) cpu_to_le16(RCV_BUFF_SZ);
+                       cpu_to_le16(RCV_BUFF_SZ);
 
                rcv_buff++;
                if (rcv_buff == NUM_RBUFFS)
                        rcv_buff = 0;
 
-               status = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].flags);
+               status = le16_to_cpu(icom_port->statStg->rcv[rcv_buff].flags);
        }
        icom_port->next_rcv = rcv_buff;
 
@@ -925,11 +1182,12 @@ static irqreturn_t icom_interrupt(int irq, void *dev_id)
  */
 static unsigned int icom_tx_empty(struct uart_port *port)
 {
+       struct icom_port *icom_port = to_icom_port(port);
        int ret;
        unsigned long flags;
 
        spin_lock_irqsave(&port->lock, flags);
-       if (cpu_to_le16(ICOM_PORT->statStg->xmit[0].flags) &
+       if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
            SA_FLAGS_READY_TO_XMIT)
                ret = TIOCSER_TEMT;
        else
@@ -941,38 +1199,40 @@ static unsigned int icom_tx_empty(struct uart_port *port)
 
 static void icom_set_mctrl(struct uart_port *port, unsigned int mctrl)
 {
+       struct icom_port *icom_port = to_icom_port(port);
        unsigned char local_osr;
 
-       trace(ICOM_PORT, "SET_MODEM", 0);
-       local_osr = readb(&ICOM_PORT->dram->osr);
+       trace(icom_port, "SET_MODEM", 0);
+       local_osr = readb(&icom_port->dram->osr);
 
        if (mctrl & TIOCM_RTS) {
-               trace(ICOM_PORT, "RAISE_RTS", 0);
+               trace(icom_port, "RAISE_RTS", 0);
                local_osr |= ICOM_RTS;
        } else {
-               trace(ICOM_PORT, "LOWER_RTS", 0);
+               trace(icom_port, "LOWER_RTS", 0);
                local_osr &= ~ICOM_RTS;
        }
 
        if (mctrl & TIOCM_DTR) {
-               trace(ICOM_PORT, "RAISE_DTR", 0);
+               trace(icom_port, "RAISE_DTR", 0);
                local_osr |= ICOM_DTR;
        } else {
-               trace(ICOM_PORT, "LOWER_DTR", 0);
+               trace(icom_port, "LOWER_DTR", 0);
                local_osr &= ~ICOM_DTR;
        }
 
-       writeb(local_osr, &ICOM_PORT->dram->osr);
+       writeb(local_osr, &icom_port->dram->osr);
 }
 
 static unsigned int icom_get_mctrl(struct uart_port *port)
 {
+       struct icom_port *icom_port = to_icom_port(port);
        unsigned char status;
        unsigned int result;
 
-       trace(ICOM_PORT, "GET_MODEM", 0);
+       trace(icom_port, "GET_MODEM", 0);
 
-       status = readb(&ICOM_PORT->dram->isr);
+       status = readb(&icom_port->dram->isr);
 
        result = ((status & ICOM_DCD) ? TIOCM_CAR : 0)
            | ((status & ICOM_RI) ? TIOCM_RNG : 0)
@@ -983,44 +1243,47 @@ static unsigned int icom_get_mctrl(struct uart_port *port)
 
 static void icom_stop_tx(struct uart_port *port)
 {
+       struct icom_port *icom_port = to_icom_port(port);
        unsigned char cmdReg;
 
-       trace(ICOM_PORT, "STOP", 0);
-       cmdReg = readb(&ICOM_PORT->dram->CmdReg);
-       writeb(cmdReg | CMD_HOLD_XMIT, &ICOM_PORT->dram->CmdReg);
+       trace(icom_port, "STOP", 0);
+       cmdReg = readb(&icom_port->dram->CmdReg);
+       writeb(cmdReg | CMD_HOLD_XMIT, &icom_port->dram->CmdReg);
 }
 
 static void icom_start_tx(struct uart_port *port)
 {
+       struct icom_port *icom_port = to_icom_port(port);
        unsigned char cmdReg;
 
-       trace(ICOM_PORT, "START", 0);
-       cmdReg = readb(&ICOM_PORT->dram->CmdReg);
+       trace(icom_port, "START", 0);
+       cmdReg = readb(&icom_port->dram->CmdReg);
        if ((cmdReg & CMD_HOLD_XMIT) == CMD_HOLD_XMIT)
                writeb(cmdReg & ~CMD_HOLD_XMIT,
-                      &ICOM_PORT->dram->CmdReg);
+                      &icom_port->dram->CmdReg);
 
        icom_write(port);
 }
 
 static void icom_send_xchar(struct uart_port *port, char ch)
 {
+       struct icom_port *icom_port = to_icom_port(port);
        unsigned char xdata;
        int index;
        unsigned long flags;
 
-       trace(ICOM_PORT, "SEND_XCHAR", ch);
+       trace(icom_port, "SEND_XCHAR", ch);
 
        /* wait .1 sec to send char */
        for (index = 0; index < 10; index++) {
                spin_lock_irqsave(&port->lock, flags);
-               xdata = readb(&ICOM_PORT->dram->xchar);
+               xdata = readb(&icom_port->dram->xchar);
                if (xdata == 0x00) {
-                       trace(ICOM_PORT, "QUICK_WRITE", 0);
-                       writeb(ch, &ICOM_PORT->dram->xchar);
+                       trace(icom_port, "QUICK_WRITE", 0);
+                       writeb(ch, &icom_port->dram->xchar);
 
                        /* flush write operation */
-                       xdata = readb(&ICOM_PORT->dram->xchar);
+                       xdata = readb(&icom_port->dram->xchar);
                        spin_unlock_irqrestore(&port->lock, flags);
                        break;
                }
@@ -1031,38 +1294,41 @@ static void icom_send_xchar(struct uart_port *port, char ch)
 
 static void icom_stop_rx(struct uart_port *port)
 {
+       struct icom_port *icom_port = to_icom_port(port);
        unsigned char cmdReg;
 
-       cmdReg = readb(&ICOM_PORT->dram->CmdReg);
-       writeb(cmdReg & ~CMD_RCV_ENABLE, &ICOM_PORT->dram->CmdReg);
+       cmdReg = readb(&icom_port->dram->CmdReg);
+       writeb(cmdReg & ~CMD_RCV_ENABLE, &icom_port->dram->CmdReg);
 }
 
 static void icom_break(struct uart_port *port, int break_state)
 {
+       struct icom_port *icom_port = to_icom_port(port);
        unsigned char cmdReg;
        unsigned long flags;
 
        spin_lock_irqsave(&port->lock, flags);
-       trace(ICOM_PORT, "BREAK", 0);
-       cmdReg = readb(&ICOM_PORT->dram->CmdReg);
+       trace(icom_port, "BREAK", 0);
+       cmdReg = readb(&icom_port->dram->CmdReg);
        if (break_state == -1) {
-               writeb(cmdReg | CMD_SND_BREAK, &ICOM_PORT->dram->CmdReg);
+               writeb(cmdReg | CMD_SND_BREAK, &icom_port->dram->CmdReg);
        } else {
-               writeb(cmdReg & ~CMD_SND_BREAK, &ICOM_PORT->dram->CmdReg);
+               writeb(cmdReg & ~CMD_SND_BREAK, &icom_port->dram->CmdReg);
        }
        spin_unlock_irqrestore(&port->lock, flags);
 }
 
 static int icom_open(struct uart_port *port)
 {
+       struct icom_port *icom_port = to_icom_port(port);
        int retval;
 
-       kref_get(&ICOM_PORT->adapter->kref);
-       retval = startup(ICOM_PORT);
+       kref_get(&icom_port->adapter->kref);
+       retval = startup(icom_port);
 
        if (retval) {
-               kref_put(&ICOM_PORT->adapter->kref, icom_kref_release);
-               trace(ICOM_PORT, "STARTUP_ERROR", 0);
+               kref_put(&icom_port->adapter->kref, icom_kref_release);
+               trace(icom_port, "STARTUP_ERROR", 0);
                return retval;
        }
 
@@ -1071,23 +1337,25 @@ static int icom_open(struct uart_port *port)
 
 static void icom_close(struct uart_port *port)
 {
+       struct icom_port *icom_port = to_icom_port(port);
        unsigned char cmdReg;
 
-       trace(ICOM_PORT, "CLOSE", 0);
+       trace(icom_port, "CLOSE", 0);
 
        /* stop receiver */
-       cmdReg = readb(&ICOM_PORT->dram->CmdReg);
-       writeb(cmdReg & ~CMD_RCV_ENABLE, &ICOM_PORT->dram->CmdReg);
+       cmdReg = readb(&icom_port->dram->CmdReg);
+       writeb(cmdReg & ~CMD_RCV_ENABLE, &icom_port->dram->CmdReg);
 
-       shutdown(ICOM_PORT);
+       shutdown(icom_port);
 
-       kref_put(&ICOM_PORT->adapter->kref, icom_kref_release);
+       kref_put(&icom_port->adapter->kref, icom_kref_release);
 }
 
 static void icom_set_termios(struct uart_port *port,
                             struct ktermios *termios,
                             struct ktermios *old_termios)
 {
+       struct icom_port *icom_port = to_icom_port(port);
        int baud;
        unsigned cflag, iflag;
        char new_config2;
@@ -1099,7 +1367,7 @@ static void icom_set_termios(struct uart_port *port,
        unsigned long flags;
 
        spin_lock_irqsave(&port->lock, flags);
-       trace(ICOM_PORT, "CHANGE_SPEED", 0);
+       trace(icom_port, "CHANGE_SPEED", 0);
 
        cflag = termios->c_cflag;
        iflag = termios->c_iflag;
@@ -1130,12 +1398,12 @@ static void icom_set_termios(struct uart_port *port,
        if (cflag & PARENB) {
                /* parity bit enabled */
                new_config2 |= ICOM_ACFG_PARITY_ENAB;
-               trace(ICOM_PORT, "PARENB", 0);
+               trace(icom_port, "PARENB", 0);
        }
        if (cflag & PARODD) {
                /* odd parity */
                new_config2 |= ICOM_ACFG_PARITY_ODD;
-               trace(ICOM_PORT, "PARODD", 0);
+               trace(icom_port, "PARODD", 0);
        }
 
        /* Determine divisor based on baud rate */
@@ -1155,100 +1423,99 @@ static void icom_set_termios(struct uart_port *port,
        uart_update_timeout(port, cflag, baud);
 
        /* CTS flow control flag and modem status interrupts */
-       tmp_byte = readb(&(ICOM_PORT->dram->HDLCConfigReg));
+       tmp_byte = readb(&(icom_port->dram->HDLCConfigReg));
        if (cflag & CRTSCTS)
                tmp_byte |= HDLC_HDW_FLOW;
        else
                tmp_byte &= ~HDLC_HDW_FLOW;
-       writeb(tmp_byte, &(ICOM_PORT->dram->HDLCConfigReg));
+       writeb(tmp_byte, &(icom_port->dram->HDLCConfigReg));
 
        /*
         * Set up parity check flag
         */
-       ICOM_PORT->read_status_mask = SA_FLAGS_OVERRUN | SA_FL_RCV_DONE;
+       icom_port->read_status_mask = SA_FLAGS_OVERRUN | SA_FL_RCV_DONE;
        if (iflag & INPCK)
-               ICOM_PORT->read_status_mask |=
+               icom_port->read_status_mask |=
                    SA_FLAGS_FRAME_ERROR | SA_FLAGS_PARITY_ERROR;
 
        if ((iflag & BRKINT) || (iflag & PARMRK))
-               ICOM_PORT->read_status_mask |= SA_FLAGS_BREAK_DET;
+               icom_port->read_status_mask |= SA_FLAGS_BREAK_DET;
 
        /*
         * Characters to ignore
         */
-       ICOM_PORT->ignore_status_mask = 0;
+       icom_port->ignore_status_mask = 0;
        if (iflag & IGNPAR)
-               ICOM_PORT->ignore_status_mask |=
+               icom_port->ignore_status_mask |=
                    SA_FLAGS_PARITY_ERROR | SA_FLAGS_FRAME_ERROR;
        if (iflag & IGNBRK) {
-               ICOM_PORT->ignore_status_mask |= SA_FLAGS_BREAK_DET;
+               icom_port->ignore_status_mask |= SA_FLAGS_BREAK_DET;
                /*
                 * If we're ignoring parity and break indicators, ignore
                 * overruns too.  (For real raw support).
                 */
                if (iflag & IGNPAR)
-                       ICOM_PORT->ignore_status_mask |= SA_FLAGS_OVERRUN;
+                       icom_port->ignore_status_mask |= SA_FLAGS_OVERRUN;
        }
 
        /*
         * !!! ignore all characters if CREAD is not set
         */
        if ((cflag & CREAD) == 0)
-               ICOM_PORT->ignore_status_mask |= SA_FL_RCV_DONE;
+               icom_port->ignore_status_mask |= SA_FL_RCV_DONE;
 
        /* Turn off Receiver to prepare for reset */
-       writeb(CMD_RCV_DISABLE, &ICOM_PORT->dram->CmdReg);
+       writeb(CMD_RCV_DISABLE, &icom_port->dram->CmdReg);
 
        for (index = 0; index < 10; index++) {
-               if (readb(&ICOM_PORT->dram->PrevCmdReg) == 0x00) {
+               if (readb(&icom_port->dram->PrevCmdReg) == 0x00) {
                        break;
                }
        }
 
        /* clear all current buffers of data */
        for (rcv_buff = 0; rcv_buff < NUM_RBUFFS; rcv_buff++) {
-               ICOM_PORT->statStg->rcv[rcv_buff].flags = 0;
-               ICOM_PORT->statStg->rcv[rcv_buff].leLength = 0;
-               ICOM_PORT->statStg->rcv[rcv_buff].WorkingLength =
-                   (unsigned short int) cpu_to_le16(RCV_BUFF_SZ);
+               icom_port->statStg->rcv[rcv_buff].flags = 0;
+               icom_port->statStg->rcv[rcv_buff].leLength = 0;
+               icom_port->statStg->rcv[rcv_buff].WorkingLength =
+                   cpu_to_le16(RCV_BUFF_SZ);
        }
 
        for (xmit_buff = 0; xmit_buff < NUM_XBUFFS; xmit_buff++) {
-               ICOM_PORT->statStg->xmit[xmit_buff].flags = 0;
+               icom_port->statStg->xmit[xmit_buff].flags = 0;
        }
 
        /* activate changes and start xmit and receiver here */
        /* Enable the receiver */
-       writeb(new_config3, &(ICOM_PORT->dram->async_config3));
-       writeb(new_config2, &(ICOM_PORT->dram->async_config2));
-       tmp_byte = readb(&(ICOM_PORT->dram->HDLCConfigReg));
+       writeb(new_config3, &(icom_port->dram->async_config3));
+       writeb(new_config2, &(icom_port->dram->async_config2));
+       tmp_byte = readb(&(icom_port->dram->HDLCConfigReg));
        tmp_byte |= HDLC_PPP_PURE_ASYNC | HDLC_FF_FILL;
-       writeb(tmp_byte, &(ICOM_PORT->dram->HDLCConfigReg));
-       writeb(0x04, &(ICOM_PORT->dram->FlagFillIdleTimer));    /* 0.5 seconds */
-       writeb(0xFF, &(ICOM_PORT->dram->ier));  /* enable modem signal interrupts */
+       writeb(tmp_byte, &(icom_port->dram->HDLCConfigReg));
+       writeb(0x04, &(icom_port->dram->FlagFillIdleTimer));    /* 0.5 seconds */
+       writeb(0xFF, &(icom_port->dram->ier));  /* enable modem signal interrupts */
 
        /* reset processor */
-       writeb(CMD_RESTART, &ICOM_PORT->dram->CmdReg);
+       writeb(CMD_RESTART, &icom_port->dram->CmdReg);
 
        for (index = 0; index < 10; index++) {
-               if (readb(&ICOM_PORT->dram->CmdReg) == 0x00) {
+               if (readb(&icom_port->dram->CmdReg) == 0x00) {
                        break;
                }
        }
 
        /* Enable Transmitter and Receiver */
        offset =
-           (unsigned long) &ICOM_PORT->statStg->rcv[0] -
-           (unsigned long) ICOM_PORT->statStg;
-       writel(ICOM_PORT->statStg_pci + offset,
-              &ICOM_PORT->dram->RcvStatusAddr);
-       ICOM_PORT->next_rcv = 0;
-       ICOM_PORT->put_length = 0;
-       *ICOM_PORT->xmitRestart = 0;
-       writel(ICOM_PORT->xmitRestart_pci,
-              &ICOM_PORT->dram->XmitStatusAddr);
-       trace(ICOM_PORT, "XR_ENAB", 0);
-       writeb(CMD_XMIT_RCV_ENABLE, &ICOM_PORT->dram->CmdReg);
+           (unsigned long) &icom_port->statStg->rcv[0] -
+           (unsigned long) icom_port->statStg;
+       writel(icom_port->statStg_pci + offset,
+              &icom_port->dram->RcvStatusAddr);
+       icom_port->next_rcv = 0;
+       *icom_port->xmitRestart = 0;
+       writel(icom_port->xmitRestart_pci,
+              &icom_port->dram->XmitStatusAddr);
+       trace(icom_port, "XR_ENAB", 0);
+       writeb(CMD_XMIT_RCV_ENABLE, &icom_port->dram->CmdReg);
 
        spin_unlock_irqrestore(&port->lock, flags);
 }
@@ -1258,15 +1525,6 @@ static const char *icom_type(struct uart_port *port)
        return "icom";
 }
 
-static void icom_release_port(struct uart_port *port)
-{
-}
-
-static int icom_request_port(struct uart_port *port)
-{
-       return 0;
-}
-
 static void icom_config_port(struct uart_port *port, int flags)
 {
        port->type = PORT_ICOM;
@@ -1285,8 +1543,6 @@ static const struct uart_ops icom_ops = {
        .shutdown = icom_close,
        .set_termios = icom_set_termios,
        .type = icom_type,
-       .release_port = icom_release_port,
-       .request_port = icom_request_port,
        .config_port = icom_config_port,
 };
 
@@ -1315,7 +1571,6 @@ static int icom_init_ports(struct icom_adapter *icom_adapter)
                        icom_port = &icom_adapter->port_info[i];
                        icom_port->port = i;
                        icom_port->status = ICOM_PORT_ACTIVE;
-                       icom_port->imbed_modem = ICOM_UNKNOWN;
                }
        } else {
                if (subsystem_id == PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL) {
@@ -1326,26 +1581,15 @@ static int icom_init_ports(struct icom_adapter *icom_adapter)
 
                                icom_port->port = i;
                                icom_port->status = ICOM_PORT_ACTIVE;
-                               icom_port->imbed_modem = ICOM_IMBED_MODEM;
                        }
                } else {
                        icom_adapter->numb_ports = 4;
 
                        icom_adapter->port_info[0].port = 0;
                        icom_adapter->port_info[0].status = ICOM_PORT_ACTIVE;
-
-                       if (subsystem_id ==
-                           PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM) {
-                               icom_adapter->port_info[0].imbed_modem = ICOM_IMBED_MODEM;
-                       } else {
-                               icom_adapter->port_info[0].imbed_modem = ICOM_RVX;
-                       }
-
                        icom_adapter->port_info[1].status = ICOM_PORT_OFF;
-
                        icom_adapter->port_info[2].port = 2;
                        icom_adapter->port_info[2].status = ICOM_PORT_ACTIVE;
-                       icom_adapter->port_info[2].imbed_modem = ICOM_RVX;
                        icom_adapter->port_info[3].status = ICOM_PORT_OFF;
                }
        }
@@ -1401,7 +1645,6 @@ static int icom_alloc_adapter(struct icom_adapter
        int adapter_count = 0;
        struct icom_adapter *icom_adapter;
        struct icom_adapter *cur_adapter_entry;
-       struct list_head *tmp;
 
        icom_adapter = kzalloc(sizeof(struct icom_adapter), GFP_KERNEL);
 
@@ -1409,10 +1652,8 @@ static int icom_alloc_adapter(struct icom_adapter
                return -ENOMEM;
        }
 
-       list_for_each(tmp, &icom_adapter_head) {
-               cur_adapter_entry =
-                   list_entry(tmp, struct icom_adapter,
-                              icom_adapter_entry);
+       list_for_each_entry(cur_adapter_entry, &icom_adapter_head,
+                       icom_adapter_entry) {
                if (cur_adapter_entry->index != adapter_count) {
                        break;
                }
@@ -1420,7 +1661,8 @@ static int icom_alloc_adapter(struct icom_adapter
        }
 
        icom_adapter->index = adapter_count;
-       list_add_tail(&icom_adapter->icom_adapter_entry, tmp);
+       list_add_tail(&icom_adapter->icom_adapter_entry,
+                       &cur_adapter_entry->icom_adapter_entry);
 
        *icom_adapter_ref = icom_adapter;
        return 0;
@@ -1432,8 +1674,10 @@ static void icom_free_adapter(struct icom_adapter *icom_adapter)
        kfree(icom_adapter);
 }
 
-static void icom_remove_adapter(struct icom_adapter *icom_adapter)
+static void icom_kref_release(struct kref *kref)
 {
+       struct icom_adapter *icom_adapter = container_of(kref,
+                       struct icom_adapter, kref);
        struct icom_port *icom_port;
        int index;
 
@@ -1466,14 +1710,6 @@ static void icom_remove_adapter(struct icom_adapter *icom_adapter)
        icom_free_adapter(icom_adapter);
 }
 
-static void icom_kref_release(struct kref *kref)
-{
-       struct icom_adapter *icom_adapter;
-
-       icom_adapter = to_icom_adapter(kref);
-       icom_remove_adapter(icom_adapter);
-}
-
 static int icom_probe(struct pci_dev *dev,
                                const struct pci_device_id *ent)
 {
@@ -1501,7 +1737,7 @@ static int icom_probe(struct pci_dev *dev,
        retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg);
        if (retval) {
                dev_err(&dev->dev, "PCI Config read FAILED\n");
-               return retval;
+               goto probe_exit0;
        }
 
        pci_write_config_dword(dev, PCI_COMMAND,
@@ -1589,11 +1825,9 @@ probe_exit0:
 static void icom_remove(struct pci_dev *dev)
 {
        struct icom_adapter *icom_adapter;
-       struct list_head *tmp;
 
-       list_for_each(tmp, &icom_adapter_head) {
-               icom_adapter = list_entry(tmp, struct icom_adapter,
-                                         icom_adapter_entry);
+       list_for_each_entry(icom_adapter, &icom_adapter_head,
+                       icom_adapter_entry) {
                if (icom_adapter->pci_dev == dev) {
                        kref_put(&icom_adapter->kref, icom_kref_release);
                        return;
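
The icom.c changes above replace the old ICOM_PORT macro with a to_icom_port() helper built on container_of(), recovering the driver-private structure from the embedded struct uart_port. A minimal userspace sketch of that accessor pattern, with invented structure and field names rather than the driver's own:

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() helper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct uart_port_stub {			/* plays the role of struct uart_port */
	int line;
};

struct demo_port {			/* plays the role of struct icom_port */
	int dram_value;
	struct uart_port_stub port;	/* embedded member, as in icom_port */
};

static struct demo_port *to_demo_port(struct uart_port_stub *port)
{
	return container_of(port, struct demo_port, port);
}

int main(void)
{
	struct demo_port dp = { .dram_value = 42, .port = { .line = 0 } };
	struct uart_port_stub *p = &dp.port;	/* what the serial core hands back */

	/* Recover the wrapping structure from its embedded member. */
	printf("dram_value = %d\n", to_demo_port(p)->dram_value);
	return 0;
}
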
diff --git a/drivers/tty/serial/icom.h b/drivers/tty/serial/icom.h
deleted file mode 100644 (file)
index 26e3aa7..0000000
+++ /dev/null
@@ -1,274 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * icom.h
- *
- * Copyright (C) 2001 Michael Anderson, IBM Corporation
- *
- * Serial device driver include file.
- */
-
-#include <linux/serial_core.h>
-
-#define BAUD_TABLE_LIMIT       ((sizeof(icom_acfg_baud)/sizeof(int)) - 1)
-static int icom_acfg_baud[] = {
-       300,
-       600,
-       900,
-       1200,
-       1800,
-       2400,
-       3600,
-       4800,
-       7200,
-       9600,
-       14400,
-       19200,
-       28800,
-       38400,
-       57600,
-       76800,
-       115200,
-       153600,
-       230400,
-       307200,
-       460800,
-};
-
-struct icom_regs {
-       u32 control;            /* Adapter Control Register     */
-       u32 interrupt;          /* Adapter Interrupt Register   */
-       u32 int_mask;           /* Adapter Interrupt Mask Reg   */
-       u32 int_pri;            /* Adapter Interrupt Priority r */
-       u32 int_reg_b;          /* Adapter non-masked Interrupt */
-       u32 resvd01;
-       u32 resvd02;
-       u32 resvd03;
-       u32 control_2;          /* Adapter Control Register 2   */
-       u32 interrupt_2;        /* Adapter Interrupt Register 2 */
-       u32 int_mask_2;         /* Adapter Interrupt Mask 2     */
-       u32 int_pri_2;          /* Adapter Interrupt Prior 2    */
-       u32 int_reg_2b;         /* Adapter non-masked 2         */
-};
-
-struct func_dram {
-       u32 reserved[108];      /* 0-1B0   reserved by personality code */
-       u32 RcvStatusAddr;      /* 1B0-1B3 Status Address for Next rcv */
-       u8 RcvStnAddr;          /* 1B4     Receive Station Addr */
-       u8 IdleState;           /* 1B5     Idle State */
-       u8 IdleMonitor;         /* 1B6     Idle Monitor */
-       u8 FlagFillIdleTimer;   /* 1B7     Flag Fill Idle Timer */
-       u32 XmitStatusAddr;     /* 1B8-1BB Transmit Status Address */
-       u8 StartXmitCmd;        /* 1BC     Start Xmit Command */
-       u8 HDLCConfigReg;       /* 1BD     Reserved */
-       u8 CauseCode;           /* 1BE     Cause code for fatal error */
-       u8 xchar;               /* 1BF     High priority send */
-       u32 reserved3;          /* 1C0-1C3 Reserved */
-       u8 PrevCmdReg;          /* 1C4     Reserved */
-       u8 CmdReg;              /* 1C5     Command Register */
-       u8 async_config2;       /* 1C6     Async Config Byte 2 */
-       u8 async_config3;       /* 1C7     Async Config Byte 3 */
-       u8 dce_resvd[20];       /* 1C8-1DB DCE Rsvd           */
-       u8 dce_resvd21;         /* 1DC     DCE Rsvd (21st byte */
-       u8 misc_flags;          /* 1DD     misc flags         */
-#define V2_HARDWARE     0x40
-#define ICOM_HDW_ACTIVE 0x01
-       u8 call_length;         /* 1DE     Phone #/CFI buff ln */
-       u8 call_length2;        /* 1DF     Upper byte (unused) */
-       u32 call_addr;          /* 1E0-1E3 Phn #/CFI buff addr */
-       u16 timer_value;        /* 1E4-1E5 general timer value */
-       u8 timer_command;       /* 1E6     general timer cmd  */
-       u8 dce_command;         /* 1E7     dce command reg    */
-       u8 dce_cmd_status;      /* 1E8     dce command stat   */
-       u8 x21_r1_ioff;         /* 1E9     dce ready counter  */
-       u8 x21_r0_ioff;         /* 1EA     dce not ready ctr  */
-       u8 x21_ralt_ioff;       /* 1EB     dce CNR counter    */
-       u8 x21_r1_ion;          /* 1EC     dce ready I on ctr */
-       u8 rsvd_ier;            /* 1ED     Rsvd for IER (if ne */
-       u8 ier;                 /* 1EE     Interrupt Enable   */
-       u8 isr;                 /* 1EF     Input Signal Reg   */
-       u8 osr;                 /* 1F0     Output Signal Reg  */
-       u8 reset;               /* 1F1     Reset/Reload Reg   */
-       u8 disable;             /* 1F2     Disable Reg        */
-       u8 sync;                /* 1F3     Sync Reg           */
-       u8 error_stat;          /* 1F4     Error Status       */
-       u8 cable_id;            /* 1F5     Cable ID           */
-       u8 cs_length;           /* 1F6     CS Load Length     */
-       u8 mac_length;          /* 1F7     Mac Load Length    */
-       u32 cs_load_addr;       /* 1F8-1FB Call Load PCI Addr */
-       u32 mac_load_addr;      /* 1FC-1FF Mac Load PCI Addr  */
-};
-
-/*
- * adapter defines and structures
- */
-#define ICOM_CONTROL_START_A         0x00000008
-#define ICOM_CONTROL_STOP_A          0x00000004
-#define ICOM_CONTROL_START_B         0x00000002
-#define ICOM_CONTROL_STOP_B          0x00000001
-#define ICOM_CONTROL_START_C         0x00000008
-#define ICOM_CONTROL_STOP_C          0x00000004
-#define ICOM_CONTROL_START_D         0x00000002
-#define ICOM_CONTROL_STOP_D          0x00000001
-#define ICOM_IRAM_OFFSET             0x1000
-#define ICOM_IRAM_SIZE               0x0C00
-#define ICOM_DCE_IRAM_OFFSET         0x0A00
-#define ICOM_CABLE_ID_VALID          0x01
-#define ICOM_CABLE_ID_MASK           0xF0
-#define ICOM_DISABLE                 0x80
-#define CMD_XMIT_RCV_ENABLE          0xC0
-#define CMD_XMIT_ENABLE              0x40
-#define CMD_RCV_DISABLE              0x00
-#define CMD_RCV_ENABLE               0x80
-#define CMD_RESTART                  0x01
-#define CMD_HOLD_XMIT                0x02
-#define CMD_SND_BREAK                0x04
-#define RS232_CABLE                  0x06
-#define V24_CABLE                    0x0E
-#define V35_CABLE                    0x0C
-#define V36_CABLE                    0x02
-#define NO_CABLE                     0x00
-#define START_DOWNLOAD               0x80
-#define ICOM_INT_MASK_PRC_A          0x00003FFF
-#define ICOM_INT_MASK_PRC_B          0x3FFF0000
-#define ICOM_INT_MASK_PRC_C          0x00003FFF
-#define ICOM_INT_MASK_PRC_D          0x3FFF0000
-#define INT_RCV_COMPLETED            0x1000
-#define INT_XMIT_COMPLETED           0x2000
-#define INT_IDLE_DETECT              0x0800
-#define INT_RCV_DISABLED             0x0400
-#define INT_XMIT_DISABLED            0x0200
-#define INT_RCV_XMIT_SHUTDOWN        0x0100
-#define INT_FATAL_ERROR              0x0080
-#define INT_CABLE_PULL               0x0020
-#define INT_SIGNAL_CHANGE            0x0010
-#define HDLC_PPP_PURE_ASYNC          0x02
-#define HDLC_FF_FILL                 0x00
-#define HDLC_HDW_FLOW                0x01
-#define START_XMIT                   0x80
-#define ICOM_ACFG_DRIVE1             0x20
-#define ICOM_ACFG_NO_PARITY          0x00
-#define ICOM_ACFG_PARITY_ENAB        0x02
-#define ICOM_ACFG_PARITY_ODD         0x01
-#define ICOM_ACFG_8BPC               0x00
-#define ICOM_ACFG_7BPC               0x04
-#define ICOM_ACFG_6BPC               0x08
-#define ICOM_ACFG_5BPC               0x0C
-#define ICOM_ACFG_1STOP_BIT          0x00
-#define ICOM_ACFG_2STOP_BIT          0x10
-#define ICOM_DTR                     0x80
-#define ICOM_RTS                     0x40
-#define ICOM_RI                      0x08
-#define ICOM_DSR                     0x80
-#define ICOM_DCD                     0x20
-#define ICOM_CTS                     0x40
-
-#define NUM_XBUFFS 1
-#define NUM_RBUFFS 2
-#define RCV_BUFF_SZ 0x0200
-#define XMIT_BUFF_SZ 0x1000
-struct statusArea {
-    /**********************************************/
-       /* Transmit Status Area                       */
-    /**********************************************/
-       struct xmit_status_area{
-               u32 leNext;     /* Next entry in Little Endian on Adapter */
-               u32 leNextASD;
-               u32 leBuffer;   /* Buffer for entry in LE for Adapter */
-               u16 leLengthASD;
-               u16 leOffsetASD;
-               u16 leLength;   /* Length of data in segment */
-               u16 flags;
-#define SA_FLAGS_DONE           0x0080 /* Done with Segment */
-#define SA_FLAGS_CONTINUED      0x8000 /* More Segments */
-#define SA_FLAGS_IDLE           0x4000 /* Mark IDLE after frm */
-#define SA_FLAGS_READY_TO_XMIT  0x0800
-#define SA_FLAGS_STAT_MASK      0x007F
-       } xmit[NUM_XBUFFS];
-
-    /**********************************************/
-       /* Receive Status Area                        */
-    /**********************************************/
-       struct {
-               u32 leNext;     /* Next entry in Little Endian on Adapter */
-               u32 leNextASD;
-               u32 leBuffer;   /* Buffer for entry in LE for Adapter */
-               u16 WorkingLength;      /* size of segment */
-               u16 reserv01;
-               u16 leLength;   /* Length of data in segment */
-               u16 flags;
-#define SA_FL_RCV_DONE           0x0010        /* Data ready */
-#define SA_FLAGS_OVERRUN         0x0040
-#define SA_FLAGS_PARITY_ERROR    0x0080
-#define SA_FLAGS_FRAME_ERROR     0x0001
-#define SA_FLAGS_FRAME_TRUNC     0x0002
-#define SA_FLAGS_BREAK_DET       0x0004        /* set conditionally by device driver, not hardware */
-#define SA_FLAGS_RCV_MASK        0xFFE6
-       } rcv[NUM_RBUFFS];
-};
-
-struct icom_adapter;
-
-
-#define ICOM_MAJOR       243
-#define ICOM_MINOR_START 0
-
-struct icom_port {
-       struct uart_port uart_port;
-       u8 imbed_modem;
-#define ICOM_UNKNOWN           1
-#define ICOM_RVX               2
-#define ICOM_IMBED_MODEM       3
-       unsigned char cable_id;
-       unsigned char read_status_mask;
-       unsigned char ignore_status_mask;
-       void __iomem * int_reg;
-       struct icom_regs __iomem *global_reg;
-       struct func_dram __iomem *dram;
-       int port;
-       struct statusArea *statStg;
-       dma_addr_t statStg_pci;
-       u32 *xmitRestart;
-       dma_addr_t xmitRestart_pci;
-       unsigned char *xmit_buf;
-       dma_addr_t xmit_buf_pci;
-       unsigned char *recv_buf;
-       dma_addr_t recv_buf_pci;
-       int next_rcv;
-       int put_length;
-       int status;
-#define ICOM_PORT_ACTIVE       1       /* Port exists. */
-#define ICOM_PORT_OFF          0       /* Port does not exist. */
-       int load_in_progress;
-       struct icom_adapter *adapter;
-};
-
-struct icom_adapter {
-       void __iomem * base_addr;
-       unsigned long base_addr_pci;
-       struct pci_dev *pci_dev;
-       struct icom_port port_info[4];
-       int index;
-       int version;
-#define ADAPTER_V1     0x0001
-#define ADAPTER_V2     0x0002
-       u32 subsystem_id;
-#define FOUR_PORT_MODEL                                0x0252
-#define V2_TWO_PORTS_RVX                       0x021A
-#define V2_ONE_PORT_RVX_ONE_PORT_IMBED_MDM     0x0251
-       int numb_ports;
-       struct list_head icom_adapter_entry;
-       struct kref kref;
-};
-
-/* prototype */
-extern void iCom_sercons_init(void);
-
-struct lookup_proc_table {
-       u32     __iomem *global_control_reg;
-       unsigned long   processor_id;
-};
-
-struct lookup_int_table {
-       u32     __iomem *global_int_mask;
-       unsigned long   processor_id;
-};
index b1639b1..30edb35 100644 (file)
@@ -1937,8 +1937,6 @@ static int imx_uart_rs485_config(struct uart_port *port,
            rs485conf->flags & SER_RS485_RX_DURING_TX)
                imx_uart_start_rx(port);
 
-       port->rs485 = *rs485conf;
-
        return 0;
 }
 
index 444f233..3fd57ac 100644 (file)
@@ -689,7 +689,7 @@ static void cls_param(struct jsm_channel *ch)
        /*
         * If baud rate is zero, flush queues, and set mval to drop DTR.
         */
-       if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+       if ((ch->ch_c_cflag & CBAUD) == B0) {
                ch->ch_r_head = 0;
                ch->ch_r_tail = 0;
                ch->ch_e_head = 0;
@@ -723,14 +723,8 @@ static void cls_param(struct jsm_channel *ch)
        if (!(ch->ch_c_cflag & PARODD))
                lcr |= UART_LCR_EPAR;
 
-       /*
-        * Not all platforms support mark/space parity,
-        * so this will hide behind an ifdef.
-        */
-#ifdef CMSPAR
        if (ch->ch_c_cflag & CMSPAR)
                lcr |= UART_LCR_SPAR;
-#endif
 
        if (ch->ch_c_cflag & CSTOPB)
                lcr |= UART_LCR_STOP;
index 110696c..0c78f66 100644 (file)
@@ -938,7 +938,7 @@ static void neo_param(struct jsm_channel *ch)
        /*
         * If baud rate is zero, flush queues, and set mval to drop DTR.
         */
-       if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+       if ((ch->ch_c_cflag & CBAUD) == B0) {
                ch->ch_r_head = ch->ch_r_tail = 0;
                ch->ch_e_head = ch->ch_e_tail = 0;
 
@@ -997,14 +997,8 @@ static void neo_param(struct jsm_channel *ch)
        if (!(ch->ch_c_cflag & PARODD))
                lcr |= UART_LCR_EPAR;
 
-       /*
-        * Not all platforms support mark/space parity,
-        * so this will hide behind an ifdef.
-        */
-#ifdef CMSPAR
        if (ch->ch_c_cflag & CMSPAR)
                lcr |= UART_LCR_SPAR;
-#endif
 
        if (ch->ch_c_cflag & CSTOPB)
                lcr |= UART_LCR_STOP;
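
Both jsm hunks above rewrite the zero-baud test as (ch_c_cflag & CBAUD) == B0. On Linux B0 is numerically zero, so behaviour is unchanged; the new form simply spells out the intent, since B0 is the "hang up" speed that asks the driver to drop DTR. A small userspace illustration of the same check, using the portable cfgetospeed() accessor rather than masking c_cflag directly:

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios tio;

	if (tcgetattr(STDIN_FILENO, &tio) != 0) {
		perror("tcgetattr");	/* stdin is not a terminal */
		return 1;
	}

	/* B0 asks for a hang-up (drop DTR), not a real transmission speed. */
	if (cfgetospeed(&tio) == B0)
		printf("zero baud requested: drop DTR and flush the queues\n");
	else
		printf("output speed code: %lu\n", (unsigned long)cfgetospeed(&tio));

	return 0;
}
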
index 3112b4a..a0b6ea5 100644 (file)
@@ -1037,7 +1037,6 @@ static int max310x_rs485_config(struct uart_port *port,
 
        rs485->flags &= SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX |
                        SER_RS485_ENABLED;
-       memset(rs485->padding, 0, sizeof(rs485->padding));
        port->rs485 = *rs485;
 
        schedule_work(&one->rs_work);
index 9acae5f..12117b5 100644 (file)
@@ -833,7 +833,6 @@ static int men_z135_probe(struct mcb_device *mdev,
        uart->port.iotype = UPIO_MEM;
        uart->port.ops = &men_z135_ops;
        uart->port.irq = mcb_get_irq(mdev);
-       uart->port.iotype = UPIO_MEM;
        uart->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
        uart->port.line = line++;
        uart->port.dev = dev;
index 2bf1c57..4869c00 100644 (file)
@@ -68,6 +68,7 @@
 #define AML_UART_BAUD_MASK             0x7fffff
 #define AML_UART_BAUD_USE              BIT(23)
 #define AML_UART_BAUD_XTAL             BIT(24)
+#define AML_UART_BAUD_XTAL_DIV2                BIT(27)
 
 #define AML_UART_PORT_NUM              12
 #define AML_UART_PORT_OFFSET           6
@@ -80,6 +81,10 @@ static struct uart_driver meson_uart_driver;
 
 static struct uart_port *meson_ports[AML_UART_PORT_NUM];
 
+struct meson_uart_data {
+       bool has_xtal_div2;
+};
+
 static void meson_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
 {
 }
@@ -253,6 +258,14 @@ static const char *meson_uart_type(struct uart_port *port)
        return (port->type == PORT_MESON) ? "meson_uart" : NULL;
 }
 
+/*
+ * This function is called only from probe() using a temporary io mapping
+ * in order to perform a reset before setting up the device. Since the
+ * temporarily mapped region was successfully requested, there can be no
+ * console on this port at this time. Hence it is not necessary for this
+ * function to acquire the port->lock. (Since there is no console on this
+ * port at this time, the port->lock is not initialized yet.)
+ */
 static void meson_uart_reset(struct uart_port *port)
 {
        u32 val;
@@ -267,9 +280,12 @@ static void meson_uart_reset(struct uart_port *port)
 
 static int meson_uart_startup(struct uart_port *port)
 {
+       unsigned long flags;
        u32 val;
        int ret = 0;
 
+       spin_lock_irqsave(&port->lock, flags);
+
        val = readl(port->membase + AML_UART_CONTROL);
        val |= AML_UART_CLEAR_ERR;
        writel(val, port->membase + AML_UART_CONTROL);
@@ -285,6 +301,8 @@ static int meson_uart_startup(struct uart_port *port)
        val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2));
        writel(val, port->membase + AML_UART_MISC);
 
+       spin_unlock_irqrestore(&port->lock, flags);
+
        ret = request_irq(port->irq, meson_uart_interrupt, 0,
                          port->name, port);
 
@@ -293,16 +311,23 @@ static int meson_uart_startup(struct uart_port *port)
 
 static void meson_uart_change_speed(struct uart_port *port, unsigned long baud)
 {
-       u32 val;
+       const struct meson_uart_data *private_data = port->private_data;
+       u32 val = 0;
 
        while (!meson_uart_tx_empty(port))
                cpu_relax();
 
        if (port->uartclk == 24000000) {
-               val = ((port->uartclk / 3) / baud) - 1;
+               unsigned int xtal_div = 3;
+
+               if (private_data && private_data->has_xtal_div2) {
+                       xtal_div = 2;
+                       val |= AML_UART_BAUD_XTAL_DIV2;
+               }
+               val |= DIV_ROUND_CLOSEST(port->uartclk / xtal_div, baud) - 1;
                val |= AML_UART_BAUD_XTAL;
        } else {
-               val = ((port->uartclk * 10 / (baud * 4) + 5) / 10) - 1;
+               val =  DIV_ROUND_CLOSEST(port->uartclk / 4, baud) - 1;
        }
        val |= AML_UART_BAUD_USE;
        writel(val, port->membase + AML_UART_REG5);
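
The meson_uart_change_speed() hunk above replaces two hand-rolled rounding expressions with DIV_ROUND_CLOSEST() and adds an optional divide-by-two crystal path selected by AML_UART_BAUD_XTAL_DIV2. A standalone sketch of the divisor arithmetic only (clock, divider and baud values are picked purely for illustration):

#include <stdio.h>

/* Userspace copy of the kernel's round-to-nearest unsigned division. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

static unsigned int baud_divisor(unsigned int clk_hz, unsigned int xtal_div,
				 unsigned int baud)
{
	/* The hardware counts the divisor down to zero, hence the "- 1". */
	return DIV_ROUND_CLOSEST(clk_hz / xtal_div, baud) - 1;
}

int main(void)
{
	/* 24 MHz crystal: legacy divide-by-3 path vs. the new divide-by-2 path. */
	printf("div3: %u\n", baud_divisor(24000000, 3, 115200));
	printf("div2: %u\n", baud_divisor(24000000, 2, 115200));
	return 0;
}
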
@@ -749,6 +774,7 @@ static int meson_uart_probe(struct platform_device *pdev)
        port->x_char = 0;
        port->ops = &meson_uart_ops;
        port->fifosize = fifosize;
+       port->private_data = (void *)device_get_match_data(&pdev->dev);
 
        meson_ports[pdev->id] = port;
        platform_set_drvdata(pdev, port);
@@ -777,11 +803,19 @@ static int meson_uart_remove(struct platform_device *pdev)
        return 0;
 }
 
+static struct meson_uart_data s4_uart_data = {
+       .has_xtal_div2 = true,
+};
+
 static const struct of_device_id meson_uart_dt_match[] = {
        { .compatible = "amlogic,meson6-uart" },
        { .compatible = "amlogic,meson8-uart" },
        { .compatible = "amlogic,meson8b-uart" },
        { .compatible = "amlogic,meson-gx-uart" },
+       {
+               .compatible = "amlogic,meson-s4-uart",
+               .data = (void *)&s4_uart_data,
+       },
        { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, meson_uart_dt_match);
index 3acc0f1..e50f069 100644 (file)
@@ -38,6 +38,8 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/clk.h>
 
@@ -754,9 +756,6 @@ static void mpc512x_psc_get_irq(struct uart_port *port, struct device_node *np)
        port->irqflags = IRQF_SHARED;
        port->irq = psc_fifoc_irq;
 }
-#endif
-
-#ifdef CONFIG_PPC_MPC512x
 
 #define PSC_5125(port) ((struct mpc5125_psc __iomem *)((port)->membase))
 #define FIFO_5125(port) ((struct mpc512x_psc_fifo __iomem *)(PSC_5125(port)+1))
index 23c94b9..e676ec7 100644 (file)
@@ -1599,6 +1599,7 @@ static inline struct uart_port *msm_get_port_from_line(unsigned int line)
 static void __msm_console_write(struct uart_port *port, const char *s,
                                unsigned int count, bool is_uartdm)
 {
+       unsigned long flags;
        int i;
        int num_newlines = 0;
        bool replaced = false;
@@ -1616,6 +1617,8 @@ static void __msm_console_write(struct uart_port *port, const char *s,
                        num_newlines++;
        count += num_newlines;
 
+       local_irq_save(flags);
+
        if (port->sysrq)
                locked = 0;
        else if (oops_in_progress)
@@ -1661,6 +1664,8 @@ static void __msm_console_write(struct uart_port *port, const char *s,
 
        if (locked)
                spin_unlock(&port->lock);
+
+       local_irq_restore(flags);
 }
 
 static void msm_console_write(struct console *co, const char *s,
index 8d5ffa1..46f4d4c 100644 (file)
@@ -1336,18 +1336,11 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
        up->ier = 0;
        serial_out(up, UART_IER, 0);
 
-       /* Clamp the delays to [0, 100ms] */
-       rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
-       rs485->delay_rts_after_send  = min(rs485->delay_rts_after_send, 100U);
-
-       /* store new config */
-       port->rs485 = *rs485;
-
        if (up->rts_gpiod) {
                /* enable / disable rts */
-               val = (port->rs485.flags & SER_RS485_ENABLED) ?
+               val = (rs485->flags & SER_RS485_ENABLED) ?
                        SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
-               val = (port->rs485.flags & val) ? 1 : 0;
+               val = (rs485->flags & val) ? 1 : 0;
                gpiod_set_value(up->rts_gpiod, val);
        }
 
@@ -1358,7 +1351,7 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
        /* If RS-485 is disabled, make sure the THR interrupt is fired when
         * TX FIFO is below the trigger level.
         */
-       if (!(port->rs485.flags & SER_RS485_ENABLED) &&
+       if (!(rs485->flags & SER_RS485_ENABLED) &&
            (up->scr & OMAP_UART_SCR_TX_EMPTY)) {
                up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
                serial_out(up, UART_OMAP_SCR, up->scr);
index 5250bd7..44d20e5 100644 (file)
@@ -184,9 +184,6 @@ static void owl_uart_send_chars(struct uart_port *port)
        struct circ_buf *xmit = &port->state->xmit;
        unsigned int ch;
 
-       if (uart_tx_stopped(port))
-               return;
-
        if (port->x_char) {
                while (!(owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TFFU))
                        cpu_relax();
@@ -195,6 +192,9 @@ static void owl_uart_send_chars(struct uart_port *port)
                port->x_char = 0;
        }
 
+       if (uart_tx_stopped(port))
+               return;
+
        while (!(owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TFFU)) {
                if (uart_circ_empty(xmit))
                        break;
@@ -731,6 +731,7 @@ static int owl_uart_probe(struct platform_device *pdev)
        owl_port->port.uartclk = clk_get_rate(owl_port->clk);
        if (owl_port->port.uartclk == 0) {
                dev_err(&pdev->dev, "clock rate is zero\n");
+               clk_disable_unprepare(owl_port->clk);
                return -EINVAL;
        }
        owl_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_LOW_LATENCY;
index affe71f..3b26524 100644 (file)
@@ -550,18 +550,6 @@ static u8 pch_uart_hal_get_modem(struct eg20t_port *priv)
        return (u8)msr;
 }
 
-static void pch_uart_hal_write(struct eg20t_port *priv,
-                             const unsigned char *buf, int tx_size)
-{
-       int i;
-       unsigned int thr;
-
-       for (i = 0; i < tx_size;) {
-               thr = buf[i++];
-               iowrite8(thr, priv->membase + PCH_UART_THR);
-       }
-}
-
 static int pch_uart_hal_read(struct eg20t_port *priv, unsigned char *buf,
                             int rx_size)
 {
@@ -624,22 +612,6 @@ static int push_rx(struct eg20t_port *priv, const unsigned char *buf,
        return 0;
 }
 
-static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
-{
-       int ret = 0;
-       struct uart_port *port = &priv->port;
-
-       if (port->x_char) {
-               dev_dbg(priv->port.dev, "%s:X character send %02x (%lu)\n",
-                       __func__, port->x_char, jiffies);
-               buf[0] = port->x_char;
-               port->x_char = 0;
-               ret = 1;
-       }
-
-       return ret;
-}
-
 static int dma_push_rx(struct eg20t_port *priv, int size)
 {
        int room;
@@ -785,31 +757,6 @@ static void pch_dma_tx_complete(void *arg)
        pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT);
 }
 
-static int pop_tx(struct eg20t_port *priv, int size)
-{
-       int count = 0;
-       struct uart_port *port = &priv->port;
-       struct circ_buf *xmit = &port->state->xmit;
-
-       if (uart_tx_stopped(port) || uart_circ_empty(xmit) || count >= size)
-               goto pop_tx_end;
-
-       do {
-               int cnt_to_end =
-                   CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
-               int sz = min(size - count, cnt_to_end);
-               pch_uart_hal_write(priv, &xmit->buf[xmit->tail], sz);
-               xmit->tail = (xmit->tail + sz) & (UART_XMIT_SIZE - 1);
-               count += sz;
-       } while (!uart_circ_empty(xmit) && count < size);
-
-pop_tx_end:
-       dev_dbg(priv->port.dev, "%d characters. Remained %d characters.(%lu)\n",
-                count, size - count, jiffies);
-
-       return count;
-}
-
 static int handle_rx_to(struct eg20t_port *priv)
 {
        struct pch_uart_buffer *buf;
@@ -875,8 +822,6 @@ static unsigned int handle_tx(struct eg20t_port *priv)
        struct uart_port *port = &priv->port;
        struct circ_buf *xmit = &port->state->xmit;
        int fifo_size;
-       int tx_size;
-       int size;
        int tx_empty;
 
        if (!priv->start_tx) {
@@ -889,19 +834,19 @@ static unsigned int handle_tx(struct eg20t_port *priv)
 
        fifo_size = max(priv->fifo_size, 1);
        tx_empty = 1;
-       if (pop_tx_x(priv, xmit->buf)) {
-               pch_uart_hal_write(priv, xmit->buf, 1);
+       if (port->x_char) {
+               iowrite8(port->x_char, priv->membase + PCH_UART_THR);
                port->icount.tx++;
+               port->x_char = 0;
                tx_empty = 0;
                fifo_size--;
        }
-       size = min(xmit->head - xmit->tail, fifo_size);
-       if (size < 0)
-               size = fifo_size;
 
-       tx_size = pop_tx(priv, size);
-       if (tx_size > 0) {
-               port->icount.tx += tx_size;
+       while (!uart_tx_stopped(port) && !uart_circ_empty(xmit) && fifo_size) {
+               iowrite8(xmit->buf[xmit->tail], priv->membase + PCH_UART_THR);
+               xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+               port->icount.tx++;
+               fifo_size--;
                tx_empty = 0;
        }
 
@@ -946,9 +891,11 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
        }
 
        fifo_size = max(priv->fifo_size, 1);
-       if (pop_tx_x(priv, xmit->buf)) {
-               pch_uart_hal_write(priv, xmit->buf, 1);
+
+       if (port->x_char) {
+               iowrite8(port->x_char, priv->membase + PCH_UART_THR);
                port->icount.tx++;
+               port->x_char = 0;
                fifo_size--;
        }
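
The pch_uart hunks above delete the pop_tx()/pop_tx_x() helpers and drain the transmit circ_buf inline, one byte per FIFO slot, advancing the tail with the usual power-of-two mask. A self-contained sketch of that drain loop (buffer size, FIFO depth and the message are made up for the example):

#include <stdio.h>

#define XMIT_SIZE 16				/* must be a power of two */

struct circ {
	unsigned char buf[XMIT_SIZE];
	unsigned int head;
	unsigned int tail;
};

static int circ_empty(const struct circ *c)
{
	return c->head == c->tail;
}

/* Move up to fifo_size bytes from the ring into a (simulated) TX FIFO. */
static void drain_tx(struct circ *c, unsigned int fifo_size)
{
	while (!circ_empty(c) && fifo_size--) {
		putchar(c->buf[c->tail]);		/* stands in for iowrite8() */
		c->tail = (c->tail + 1) & (XMIT_SIZE - 1);	/* wrap with a mask */
	}
}

int main(void)
{
	struct circ c = { .head = 0, .tail = 0 };
	const char *msg = "hello, uart\n";	/* 12 bytes, fits the ring */

	for (const char *p = msg; *p; p++) {
		c.buf[c.head] = (unsigned char)*p;
		c.head = (c.head + 1) & (XMIT_SIZE - 1);
	}

	drain_tx(&c, 8);	/* first FIFO load */
	drain_tx(&c, 8);	/* remainder */
	return 0;
}
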
 
index b7a3a1b..b399aac 100644 (file)
 #include <linux/delay.h>
 
 #include <asm/mach-pic32/pic32.h>
-#include "pic32_uart.h"
 
 /* UART name and device definitions */
 #define PIC32_DEV_NAME         "pic32-uart"
 #define PIC32_MAX_UARTS                6
 #define PIC32_SDEV_NAME                "ttyPIC"
 
-/* pic32_sport pointer for console use */
-static struct pic32_sport *pic32_sports[PIC32_MAX_UARTS];
+#define PIC32_UART_DFLT_BRATE          9600
+#define PIC32_UART_TX_FIFO_DEPTH       8
+#define PIC32_UART_RX_FIFO_DEPTH       8
+
+#define PIC32_UART_MODE                0x00
+#define PIC32_UART_STA         0x10
+#define PIC32_UART_TX          0x20
+#define PIC32_UART_RX          0x30
+#define PIC32_UART_BRG         0x40
+
+/* struct pic32_sport - pic32 serial port descriptor
+ * @port: uart port descriptor
+ * @idx: port index
+ * @irq_fault: virtual fault interrupt number
+ * @irq_fault_name: irq fault name
+ * @irq_rx: virtual rx interrupt number
+ * @irq_rx_name: irq rx name
+ * @irq_tx: virtual tx interrupt number
+ * @irq_tx_name: irq tx name
+ * @cts_gpio: clear to send gpio
+ * @dev: device descriptor
+ **/
+struct pic32_sport {
+       struct uart_port port;
+       int idx;
+
+       int irq_fault;
+       const char *irq_fault_name;
+       int irq_rx;
+       const char *irq_rx_name;
+       int irq_tx;
+       const char *irq_tx_name;
+       bool enable_tx_irq;
+
+       bool hw_flow_ctrl;
+       int cts_gpio;
+
+       struct clk *clk;
+
+       struct device *dev;
+};
 
-static inline void pic32_wait_deplete_txbuf(struct pic32_sport *sport)
+static inline struct pic32_sport *to_pic32_sport(struct uart_port *port)
 {
-       /* wait for tx empty, otherwise chars will be lost or corrupted */
-       while (!(pic32_uart_readl(sport, PIC32_UART_STA) & PIC32_UART_STA_TRMT))
-               udelay(1);
+       return container_of(port, struct pic32_sport, port);
 }
 
-static inline int pic32_enable_clock(struct pic32_sport *sport)
+static inline void pic32_uart_writel(struct pic32_sport *sport,
+                                       u32 reg, u32 val)
 {
-       int ret = clk_prepare_enable(sport->clk);
-
-       if (ret)
-               return ret;
+       __raw_writel(val, sport->port.membase + reg);
+}
 
-       sport->ref_clk++;
-       return 0;
+static inline u32 pic32_uart_readl(struct pic32_sport *sport, u32 reg)
+{
+       return  __raw_readl(sport->port.membase + reg);
 }
 
-static inline void pic32_disable_clock(struct pic32_sport *sport)
+/* pic32 uart mode register bits */
+#define PIC32_UART_MODE_ON        BIT(15)
+#define PIC32_UART_MODE_FRZ       BIT(14)
+#define PIC32_UART_MODE_SIDL      BIT(13)
+#define PIC32_UART_MODE_IREN      BIT(12)
+#define PIC32_UART_MODE_RTSMD     BIT(11)
+#define PIC32_UART_MODE_RESV1     BIT(10)
+#define PIC32_UART_MODE_UEN1      BIT(9)
+#define PIC32_UART_MODE_UEN0      BIT(8)
+#define PIC32_UART_MODE_WAKE      BIT(7)
+#define PIC32_UART_MODE_LPBK      BIT(6)
+#define PIC32_UART_MODE_ABAUD     BIT(5)
+#define PIC32_UART_MODE_RXINV     BIT(4)
+#define PIC32_UART_MODE_BRGH      BIT(3)
+#define PIC32_UART_MODE_PDSEL1    BIT(2)
+#define PIC32_UART_MODE_PDSEL0    BIT(1)
+#define PIC32_UART_MODE_STSEL     BIT(0)
+
+/* pic32 uart status register bits */
+#define PIC32_UART_STA_UTXISEL1   BIT(15)
+#define PIC32_UART_STA_UTXISEL0   BIT(14)
+#define PIC32_UART_STA_UTXINV     BIT(13)
+#define PIC32_UART_STA_URXEN      BIT(12)
+#define PIC32_UART_STA_UTXBRK     BIT(11)
+#define PIC32_UART_STA_UTXEN      BIT(10)
+#define PIC32_UART_STA_UTXBF      BIT(9)
+#define PIC32_UART_STA_TRMT       BIT(8)
+#define PIC32_UART_STA_URXISEL1   BIT(7)
+#define PIC32_UART_STA_URXISEL0   BIT(6)
+#define PIC32_UART_STA_ADDEN      BIT(5)
+#define PIC32_UART_STA_RIDLE      BIT(4)
+#define PIC32_UART_STA_PERR       BIT(3)
+#define PIC32_UART_STA_FERR       BIT(2)
+#define PIC32_UART_STA_OERR       BIT(1)
+#define PIC32_UART_STA_URXDA      BIT(0)
+
+/* pic32_sport pointer for console use */
+static struct pic32_sport *pic32_sports[PIC32_MAX_UARTS];
+
+static inline void pic32_wait_deplete_txbuf(struct pic32_sport *sport)
 {
-       sport->ref_clk--;
-       clk_disable_unprepare(sport->clk);
+       /* wait for tx empty, otherwise chars will be lost or corrupted */
+       while (!(pic32_uart_readl(sport, PIC32_UART_STA) & PIC32_UART_STA_TRMT))
+               udelay(1);
 }
 
 /* serial core request to check if uart tx buffer is empty */
@@ -117,16 +193,16 @@ static unsigned int pic32_uart_get_mctrl(struct uart_port *port)
  */
 static inline void pic32_uart_irqtxen(struct pic32_sport *sport, u8 en)
 {
-       if (en && !tx_irq_enabled(sport)) {
+       if (en && !sport->enable_tx_irq) {
                enable_irq(sport->irq_tx);
-               tx_irq_enabled(sport) = 1;
-       } else if (!en && tx_irq_enabled(sport)) {
+               sport->enable_tx_irq = true;
+       } else if (!en && sport->enable_tx_irq) {
                /* use disable_irq_nosync() and not disable_irq() to avoid self
                 * imposed deadlock by not waiting for irq handler to end,
                 * since this callback is called from interrupt context.
                 */
                disable_irq_nosync(sport->irq_tx);
-               tx_irq_enabled(sport) = 0;
+               sport->enable_tx_irq = false;
        }
 }
 
@@ -395,7 +471,7 @@ static int pic32_uart_startup(struct uart_port *port)
 
        local_irq_save(flags);
 
-       ret = pic32_enable_clock(sport);
+       ret = clk_prepare_enable(sport->clk);
        if (ret) {
                local_irq_restore(flags);
                goto out_done;
@@ -419,7 +495,7 @@ static int pic32_uart_startup(struct uart_port *port)
         * For each irq, request_irq() is called with the interrupt disabled,
         * and each irq is enabled as soon as we are ready to handle it.
         */
-       tx_irq_enabled(sport) = 0;
+       sport->enable_tx_irq = false;
 
        sport->irq_fault_name = kasprintf(GFP_KERNEL, "%s%d-fault",
                                          pic32_uart_type(port),
@@ -431,7 +507,7 @@ static int pic32_uart_startup(struct uart_port *port)
        }
        irq_set_status_flags(sport->irq_fault, IRQ_NOAUTOEN);
        ret = request_irq(sport->irq_fault, pic32_uart_fault_interrupt,
-                         sport->irqflags_fault, sport->irq_fault_name, port);
+                         IRQF_NO_THREAD, sport->irq_fault_name, port);
        if (ret) {
                dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n",
                        __func__, sport->irq_fault, ret,
@@ -449,7 +525,7 @@ static int pic32_uart_startup(struct uart_port *port)
        }
        irq_set_status_flags(sport->irq_rx, IRQ_NOAUTOEN);
        ret = request_irq(sport->irq_rx, pic32_uart_rx_interrupt,
-                         sport->irqflags_rx, sport->irq_rx_name, port);
+                         IRQF_NO_THREAD, sport->irq_rx_name, port);
        if (ret) {
                dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n",
                        __func__, sport->irq_rx, ret,
@@ -467,7 +543,7 @@ static int pic32_uart_startup(struct uart_port *port)
        }
        irq_set_status_flags(sport->irq_tx, IRQ_NOAUTOEN);
        ret = request_irq(sport->irq_tx, pic32_uart_tx_interrupt,
-                         sport->irqflags_tx, sport->irq_tx_name, port);
+                         IRQF_NO_THREAD, sport->irq_tx_name, port);
        if (ret) {
                dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n",
                        __func__, sport->irq_tx, ret,
@@ -488,19 +564,21 @@ static int pic32_uart_startup(struct uart_port *port)
        /* enable all interrupts and enable the UART */
        pic32_uart_en_and_unmask(port);
 
+       local_irq_restore(flags);
+
        enable_irq(sport->irq_rx);
 
        return 0;
 
 out_t:
-       kfree(sport->irq_tx_name);
        free_irq(sport->irq_tx, port);
+       kfree(sport->irq_tx_name);
 out_r:
-       kfree(sport->irq_rx_name);
        free_irq(sport->irq_rx, port);
+       kfree(sport->irq_rx_name);
 out_f:
-       kfree(sport->irq_fault_name);
        free_irq(sport->irq_fault, port);
+       kfree(sport->irq_fault_name);
 out_done:
        return ret;
 }
@@ -515,12 +593,15 @@ static void pic32_uart_shutdown(struct uart_port *port)
        spin_lock_irqsave(&port->lock, flags);
        pic32_uart_dsbl_and_mask(port);
        spin_unlock_irqrestore(&port->lock, flags);
-       pic32_disable_clock(sport);
+       clk_disable_unprepare(sport->clk);
 
        /* free all 3 interrupts for this UART */
        free_irq(sport->irq_fault, port);
+       kfree(sport->irq_fault_name);
        free_irq(sport->irq_tx, port);
+       kfree(sport->irq_tx_name);
        free_irq(sport->irq_rx, port);
+       kfree(sport->irq_rx_name);
 }
 
 /* serial core request to change current uart setting */
@@ -712,10 +793,9 @@ static void pic32_console_write(struct console *co, const char *s,
                                unsigned int count)
 {
        struct pic32_sport *sport = pic32_sports[co->index];
-       struct uart_port *port = pic32_get_port(sport);
 
        /* call uart helper to deal with \r\n */
-       uart_console_write(port, s, count, pic32_console_putchar);
+       uart_console_write(&sport->port, s, count, pic32_console_putchar);
 }
 
 /* console core request to setup given console, find matching uart
@@ -724,7 +804,6 @@ static void pic32_console_write(struct console *co, const char *s,
 static int pic32_console_setup(struct console *co, char *options)
 {
        struct pic32_sport *sport;
-       struct uart_port *port = NULL;
        int baud = 115200;
        int bits = 8;
        int parity = 'n';
@@ -737,16 +816,15 @@ static int pic32_console_setup(struct console *co, char *options)
        sport = pic32_sports[co->index];
        if (!sport)
                return -ENODEV;
-       port = pic32_get_port(sport);
 
-       ret = pic32_enable_clock(sport);
+       ret = clk_prepare_enable(sport->clk);
        if (ret)
                return ret;
 
        if (options)
                uart_parse_options(options, &baud, &parity, &bits, &flow);
 
-       return uart_set_options(port, co, baud, parity, bits, flow);
+       return uart_set_options(&sport->port, co, baud, parity, bits, flow);
 }
 
 static struct uart_driver pic32_uart_driver;
@@ -816,13 +894,9 @@ static int pic32_uart_probe(struct platform_device *pdev)
 
        sport->idx              = uart_idx;
        sport->irq_fault        = irq_of_parse_and_map(np, 0);
-       sport->irqflags_fault   = IRQF_NO_THREAD;
        sport->irq_rx           = irq_of_parse_and_map(np, 1);
-       sport->irqflags_rx      = IRQF_NO_THREAD;
        sport->irq_tx           = irq_of_parse_and_map(np, 2);
-       sport->irqflags_tx      = IRQF_NO_THREAD;
        sport->clk              = devm_clk_get(&pdev->dev, NULL);
-       sport->cts_gpio         = -EINVAL;
        sport->dev              = &pdev->dev;
 
        /* Hardware flow control: gpios
@@ -850,7 +924,6 @@ static int pic32_uart_probe(struct platform_device *pdev)
 
        pic32_sports[uart_idx] = sport;
        port = &sport->port;
-       memset(port, 0, sizeof(*port));
        port->iotype    = UPIO_MEM;
        port->mapbase   = res_mem->start;
        port->ops       = &pic32_uart_ops;
@@ -872,7 +945,7 @@ static int pic32_uart_probe(struct platform_device *pdev)
                /* The peripheral clock has been enabled by console_setup,
                 * so disable it till the port is used.
                 */
-               pic32_disable_clock(sport);
+               clk_disable_unprepare(sport->clk);
        }
 #endif
 
@@ -893,7 +966,7 @@ static int pic32_uart_remove(struct platform_device *pdev)
        struct pic32_sport *sport = to_pic32_sport(port);
 
        uart_remove_one_port(&pic32_uart_driver, port);
-       pic32_disable_clock(sport);
+       clk_disable_unprepare(sport->clk);
        platform_set_drvdata(pdev, NULL);
        pic32_sports[sport->idx] = NULL;
 
diff --git a/drivers/tty/serial/pic32_uart.h b/drivers/tty/serial/pic32_uart.h
deleted file mode 100644 (file)
index b15639c..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * PIC32 Integrated Serial Driver.
- *
- * Copyright (C) 2015 Microchip Technology, Inc.
- *
- * Authors:
- *   Sorin-Andrei Pistirica <andrei.pistirica@microchip.com>
- */
-#ifndef __DT_PIC32_UART_H__
-#define __DT_PIC32_UART_H__
-
-#define PIC32_UART_DFLT_BRATE          (9600)
-#define PIC32_UART_TX_FIFO_DEPTH       (8)
-#define PIC32_UART_RX_FIFO_DEPTH       (8)
-
-#define PIC32_UART_MODE                0x00
-#define PIC32_UART_STA         0x10
-#define PIC32_UART_TX          0x20
-#define PIC32_UART_RX          0x30
-#define PIC32_UART_BRG         0x40
-
-struct pic32_console_opt {
-       int baud;
-       int parity;
-       int bits;
-       int flow;
-};
-
-/* struct pic32_sport - pic32 serial port descriptor
- * @port: uart port descriptor
- * @idx: port index
- * @irq_fault: virtual fault interrupt number
- * @irqflags_fault: flags related to fault irq
- * @irq_fault_name: irq fault name
- * @irq_rx: virtual rx interrupt number
- * @irqflags_rx: flags related to rx irq
- * @irq_rx_name: irq rx name
- * @irq_tx: virtual tx interrupt number
- * @irqflags_tx: : flags related to tx irq
- * @irq_tx_name: irq tx name
- * @cts_gpio: clear to send gpio
- * @dev: device descriptor
- **/
-struct pic32_sport {
-       struct uart_port port;
-       struct pic32_console_opt opt;
-       int idx;
-
-       int irq_fault;
-       int irqflags_fault;
-       const char *irq_fault_name;
-       int irq_rx;
-       int irqflags_rx;
-       const char *irq_rx_name;
-       int irq_tx;
-       int irqflags_tx;
-       const char *irq_tx_name;
-       u8 enable_tx_irq;
-
-       bool hw_flow_ctrl;
-       int cts_gpio;
-
-       int ref_clk;
-       struct clk *clk;
-
-       struct device *dev;
-};
-#define to_pic32_sport(c) container_of(c, struct pic32_sport, port)
-#define pic32_get_port(sport) (&sport->port)
-#define pic32_get_opt(sport) (&sport->opt)
-#define tx_irq_enabled(sport) (sport->enable_tx_irq)
-
-static inline void pic32_uart_writel(struct pic32_sport *sport,
-                                       u32 reg, u32 val)
-{
-       struct uart_port *port = pic32_get_port(sport);
-
-       __raw_writel(val, port->membase + reg);
-}
-
-static inline u32 pic32_uart_readl(struct pic32_sport *sport, u32 reg)
-{
-       struct uart_port *port = pic32_get_port(sport);
-
-       return  __raw_readl(port->membase + reg);
-}
-
-/* pic32 uart mode register bits */
-#define PIC32_UART_MODE_ON        BIT(15)
-#define PIC32_UART_MODE_FRZ       BIT(14)
-#define PIC32_UART_MODE_SIDL      BIT(13)
-#define PIC32_UART_MODE_IREN      BIT(12)
-#define PIC32_UART_MODE_RTSMD     BIT(11)
-#define PIC32_UART_MODE_RESV1     BIT(10)
-#define PIC32_UART_MODE_UEN1      BIT(9)
-#define PIC32_UART_MODE_UEN0      BIT(8)
-#define PIC32_UART_MODE_WAKE      BIT(7)
-#define PIC32_UART_MODE_LPBK      BIT(6)
-#define PIC32_UART_MODE_ABAUD     BIT(5)
-#define PIC32_UART_MODE_RXINV     BIT(4)
-#define PIC32_UART_MODE_BRGH      BIT(3)
-#define PIC32_UART_MODE_PDSEL1    BIT(2)
-#define PIC32_UART_MODE_PDSEL0    BIT(1)
-#define PIC32_UART_MODE_STSEL     BIT(0)
-
-/* pic32 uart status register bits */
-#define PIC32_UART_STA_UTXISEL1   BIT(15)
-#define PIC32_UART_STA_UTXISEL0   BIT(14)
-#define PIC32_UART_STA_UTXINV     BIT(13)
-#define PIC32_UART_STA_URXEN      BIT(12)
-#define PIC32_UART_STA_UTXBRK     BIT(11)
-#define PIC32_UART_STA_UTXEN      BIT(10)
-#define PIC32_UART_STA_UTXBF      BIT(9)
-#define PIC32_UART_STA_TRMT       BIT(8)
-#define PIC32_UART_STA_URXISEL1   BIT(7)
-#define PIC32_UART_STA_URXISEL0   BIT(6)
-#define PIC32_UART_STA_ADDEN      BIT(5)
-#define PIC32_UART_STA_RIDLE      BIT(4)
-#define PIC32_UART_STA_PERR       BIT(3)
-#define PIC32_UART_STA_FERR       BIT(2)
-#define PIC32_UART_STA_OERR       BIT(1)
-#define PIC32_UART_STA_URXDA      BIT(0)
-
-#endif /* __DT_PIC32_UART_H__ */
index 5d97c20..3133446 100644 (file)
@@ -24,7 +24,6 @@
  */
 
 #undef DEBUG
-#undef DEBUG_HARD
 #undef USE_CTRL_O_SYSRQ
 
 #include <linux/module.h>
@@ -51,7 +50,6 @@
 #include <asm/irq.h>
 
 #ifdef CONFIG_PPC_PMAC
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/dbdma.h>
 
 #include "pmac_zilog.h"
 
-/* Not yet implemented */
-#undef HAS_DBDMA
-
-static char version[] __initdata = "pmac_zilog: 0.6 (Benjamin Herrenschmidt <benh@kernel.crashing.org>)";
 MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
 MODULE_DESCRIPTION("Driver for the Mac and PowerMac serial ports.");
 MODULE_LICENSE("GPL");
@@ -446,9 +440,6 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
        spin_lock(&uap_a->port.lock);
        r3 = read_zsreg(uap_a, R3);
 
-#ifdef DEBUG_HARD
-       pmz_debug("irq, r3: %x\n", r3);
-#endif
        /* Channel A */
        push = false;
        if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
@@ -613,8 +604,6 @@ static void pmz_start_tx(struct uart_port *port)
        struct uart_pmac_port *uap = to_pmz(port);
        unsigned char status;
 
-       pmz_debug("pmz: start_tx()\n");
-
        uap->flags |= PMACZILOG_FLAG_TX_ACTIVE;
        uap->flags &= ~PMACZILOG_FLAG_TX_STOPPED;
 
@@ -636,7 +625,7 @@ static void pmz_start_tx(struct uart_port *port)
                struct circ_buf *xmit = &port->state->xmit;
 
                if (uart_circ_empty(xmit))
-                       goto out;
+                       return;
                write_zsdata(uap, xmit->buf[xmit->tail]);
                zssync(uap);
                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
@@ -645,8 +634,6 @@ static void pmz_start_tx(struct uart_port *port)
                if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                        uart_write_wakeup(&uap->port);
        }
- out:
-       pmz_debug("pmz: start_tx() done.\n");
 }
 
 /* 
@@ -659,13 +646,9 @@ static void pmz_stop_rx(struct uart_port *port)
 {
        struct uart_pmac_port *uap = to_pmz(port);
 
-       pmz_debug("pmz: stop_rx()()\n");
-
        /* Disable all RX interrupts.  */
        uap->curregs[R1] &= ~RxINT_MASK;
        pmz_maybe_update_regs(uap);
-
-       pmz_debug("pmz: stop_rx() done.\n");
 }
 
 /* 
@@ -910,8 +893,6 @@ static int pmz_startup(struct uart_port *port)
        unsigned long flags;
        int pwr_delay = 0;
 
-       pmz_debug("pmz: startup()\n");
-
        uap->flags |= PMACZILOG_FLAG_IS_OPEN;
 
        /* A console is never powered down. Else, power up and
@@ -947,8 +928,6 @@ static int pmz_startup(struct uart_port *port)
        pmz_interrupt_control(uap, 1);
        spin_unlock_irqrestore(&port->lock, flags);
 
-       pmz_debug("pmz: startup() done.\n");
-
        return 0;
 }
 
@@ -957,8 +936,6 @@ static void pmz_shutdown(struct uart_port *port)
        struct uart_pmac_port *uap = to_pmz(port);
        unsigned long flags;
 
-       pmz_debug("pmz: shutdown()\n");
-
        spin_lock_irqsave(&port->lock, flags);
 
        /* Disable interrupt requests for the channel */
@@ -987,8 +964,6 @@ static void pmz_shutdown(struct uart_port *port)
                pmz_set_scc_power(uap, 0);      /* Shut the chip down */
 
        spin_unlock_irqrestore(&port->lock, flags);
-
-       pmz_debug("pmz: shutdown() done.\n");
 }
 
 /* Shared by TTY driver and serial console setup.  The port lock is held
@@ -1233,10 +1208,6 @@ static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios,
        struct uart_pmac_port *uap = to_pmz(port);
        unsigned long baud;
 
-       pmz_debug("pmz: set_termios()\n");
-
-       memcpy(&uap->termios_cache, termios, sizeof(struct ktermios));
-
        /* XXX Check which revs of machines actually allow 1 and 4Mb speeds
         * on the IR dongle. Note that the IRTTY driver currently doesn't know
         * about the FIR mode and high speed modes. So these are unused. For
@@ -1270,8 +1241,6 @@ static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios,
                pmz_maybe_update_regs(uap);
        }
        uart_update_timeout(port, termios->c_cflag, baud);
-
-       pmz_debug("pmz: set_termios() done.\n");
 }
 
 /* The port lock is not held.  */
@@ -1400,7 +1369,7 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
                char    name[1];
        } *slots;
        int len;
-       struct resource r_ports, r_rxdma, r_txdma;
+       struct resource r_ports;
 
        /*
         * Request & map chip registers
@@ -1412,35 +1381,6 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
 
        uap->control_reg = uap->port.membase;
        uap->data_reg = uap->control_reg + 0x10;
-       
-       /*
-        * Request & map DBDMA registers
-        */
-#ifdef HAS_DBDMA
-       if (of_address_to_resource(np, 1, &r_txdma) == 0 &&
-           of_address_to_resource(np, 2, &r_rxdma) == 0)
-               uap->flags |= PMACZILOG_FLAG_HAS_DMA;
-#else
-       memset(&r_txdma, 0, sizeof(struct resource));
-       memset(&r_rxdma, 0, sizeof(struct resource));
-#endif 
-       if (ZS_HAS_DMA(uap)) {
-               uap->tx_dma_regs = ioremap(r_txdma.start, 0x100);
-               if (uap->tx_dma_regs == NULL) { 
-                       uap->flags &= ~PMACZILOG_FLAG_HAS_DMA;
-                       goto no_dma;
-               }
-               uap->rx_dma_regs = ioremap(r_rxdma.start, 0x100);
-               if (uap->rx_dma_regs == NULL) { 
-                       iounmap(uap->tx_dma_regs);
-                       uap->tx_dma_regs = NULL;
-                       uap->flags &= ~PMACZILOG_FLAG_HAS_DMA;
-                       goto no_dma;
-               }
-               uap->tx_dma_irq = irq_of_parse_and_map(np, 1);
-               uap->rx_dma_irq = irq_of_parse_and_map(np, 2);
-       }
-no_dma:
 
        /*
         * Detect port type
@@ -1506,8 +1446,6 @@ no_dma:
            of_device_is_compatible(np->parent->parent, "gatwick")) {
                /* IRQs on gatwick are offset by 64 */
                uap->port.irq = irq_create_mapping(NULL, 64 + 15);
-               uap->tx_dma_irq = irq_create_mapping(NULL, 64 + 4);
-               uap->rx_dma_irq = irq_create_mapping(NULL, 64 + 5);
        }
 
        /* Setup some valid baud rate information in the register
@@ -1527,8 +1465,6 @@ static void pmz_dispose_port(struct uart_pmac_port *uap)
        struct device_node *np;
 
        np = uap->node;
-       iounmap(uap->rx_dma_regs);
-       iounmap(uap->tx_dma_regs);
        iounmap(uap->control_reg);
        uap->node = NULL;
        of_node_put(np);
@@ -1875,7 +1811,6 @@ static struct platform_driver pmz_driver = {
 static int __init init_pmz(void)
 {
        int rc, i;
-       printk(KERN_INFO "%s\n", version);
 
        /* 
         * First, we need to do a direct OF-based probe pass. We
index fa85b0d..837b97c 100644 (file)
@@ -43,7 +43,6 @@ struct uart_pmac_port {
 #define PMACZILOG_FLAG_TX_ACTIVE       0x00000040
 #define PMACZILOG_FLAG_IS_IRDA         0x00000100
 #define PMACZILOG_FLAG_IS_INTMODEM     0x00000200
-#define PMACZILOG_FLAG_HAS_DMA         0x00000400
 #define PMACZILOG_FLAG_RSRC_REQUESTED  0x00000800
 #define PMACZILOG_FLAG_IS_OPEN         0x00002000
 #define PMACZILOG_FLAG_IS_EXTCLK       0x00008000
@@ -55,16 +54,7 @@ struct uart_pmac_port {
        volatile u8                     __iomem *control_reg;
        volatile u8                     __iomem *data_reg;
 
-#ifdef CONFIG_PPC_PMAC
-       unsigned int                    tx_dma_irq;
-       unsigned int                    rx_dma_irq;
-       volatile struct dbdma_regs      __iomem *tx_dma_regs;
-       volatile struct dbdma_regs      __iomem *rx_dma_regs;
-#endif
-
        unsigned char                   irq_name[8];
-
-       struct ktermios                 termios_cache;
 };
 
 #define to_pmz(p) ((struct uart_pmac_port *)(p))
@@ -377,7 +367,6 @@ static inline void zssync(struct uart_pmac_port *port)
 #define ZS_WANTS_MODEM_STATUS(UP)      ((UP)->flags & PMACZILOG_FLAG_MODEM_STATUS)
 #define ZS_IS_IRDA(UP)                 ((UP)->flags & PMACZILOG_FLAG_IS_IRDA)
 #define ZS_IS_INTMODEM(UP)             ((UP)->flags & PMACZILOG_FLAG_IS_INTMODEM)
-#define ZS_HAS_DMA(UP)                 ((UP)->flags & PMACZILOG_FLAG_HAS_DMA)
 #define ZS_IS_OPEN(UP)                 ((UP)->flags & PMACZILOG_FLAG_IS_OPEN)
 #define ZS_IS_EXTCLK(UP)               ((UP)->flags & PMACZILOG_FLAG_IS_EXTCLK)
 
index 1543a60..4733a23 100644 (file)
@@ -149,12 +149,6 @@ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
 static void qcom_geni_serial_stop_rx(struct uart_port *uport);
 static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop);
 
-static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
-                                       32000000, 48000000, 51200000, 64000000,
-                                       80000000, 96000000, 100000000,
-                                       102400000, 112000000, 120000000,
-                                       128000000};
-
 #define to_dev_port(ptr, member) \
                container_of(ptr, struct qcom_geni_serial_port, member)
 
@@ -507,7 +501,7 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
                 */
                qcom_geni_serial_poll_tx_done(uport);
 
-               if (uart_circ_chars_pending(&uport->state->xmit)) {
+               if (!uart_circ_empty(&uport->state->xmit)) {
                        irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
                        writel(irq_en | M_TX_FIFO_WATERMARK_EN,
                                        uport->membase + SE_GENI_M_IRQ_EN);
@@ -946,25 +940,43 @@ static int qcom_geni_serial_startup(struct uart_port *uport)
        return 0;
 }
 
-static unsigned long get_clk_cfg(unsigned long clk_freq)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(root_freq); i++) {
-               if (!(root_freq[i] % clk_freq))
-                       return root_freq[i];
-       }
-       return 0;
-}
-
-static unsigned long get_clk_div_rate(unsigned int baud,
+static unsigned long get_clk_div_rate(struct clk *clk, unsigned int baud,
                        unsigned int sampling_rate, unsigned int *clk_div)
 {
        unsigned long ser_clk;
        unsigned long desired_clk;
+       unsigned long freq, prev;
+       unsigned long div, maxdiv;
+       int64_t mult;
 
        desired_clk = baud * sampling_rate;
-       ser_clk = get_clk_cfg(desired_clk);
+       if (!desired_clk) {
+               pr_err("%s: Invalid frequency\n", __func__);
+               return 0;
+       }
+
+       maxdiv = CLK_DIV_MSK >> CLK_DIV_SHFT;
+       prev = 0;
+
+       for (div = 1; div <= maxdiv; div++) {
+               mult = div * desired_clk;
+               if (mult > ULONG_MAX)
+                       break;
+
+               freq = clk_round_rate(clk, (unsigned long)mult);
+               if (!(freq % desired_clk)) {
+                       ser_clk = freq;
+                       break;
+               }
+
+               if (!prev)
+                       ser_clk = freq;
+               else if (prev == freq)
+                       break;
+
+               prev = freq;
+       }
+
        if (!ser_clk) {
                pr_err("%s: Can't find matching DFS entry for baud %d\n",
                                                                __func__, baud);
@@ -972,6 +984,9 @@ static unsigned long get_clk_div_rate(unsigned int baud,
        }
 
        *clk_div = ser_clk / desired_clk;
+       if (!(*clk_div))
+               *clk_div = 1;
+
        return ser_clk;
 }
 
@@ -1003,7 +1018,8 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
        if (ver >= QUP_SE_VERSION_2_5)
                sampling_rate /= 2;
 
-       clk_rate = get_clk_div_rate(baud, sampling_rate, &clk_div);
+       clk_rate = get_clk_div_rate(port->se.clk, baud,
+               sampling_rate, &clk_div);
        if (!clk_rate)
                goto out_restart_rx;
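
    (Illustration only, not part of the patch.) The divider search that replaces the old static root_freq[] table boils down to the loop below. round_rate() is a stand-in for the kernel's clk_round_rate(), MAX_DIV stands in for CLK_DIV_MSK >> CLK_DIV_SHFT, and the 19.2 MHz stub clock and the numbers in main() are assumptions chosen just for the example:

	#include <stdio.h>
	#include <limits.h>

	#define MAX_DIV 1023	/* stand-in for CLK_DIV_MSK >> CLK_DIV_SHFT */

	/* Stand-in for clk_round_rate(): pretend the clock controller can
	 * only produce multiples of 19.2 MHz, rounded down. */
	static unsigned long round_rate(unsigned long requested)
	{
		const unsigned long step = 19200000UL;

		return requested < step ? step : (requested / step) * step;
	}

	static unsigned long get_div_rate(unsigned int baud, unsigned int sampling,
					  unsigned int *clk_div)
	{
		unsigned long desired = (unsigned long)baud * sampling;
		unsigned long ser_clk = 0, prev = 0, freq, div;

		for (div = 1; div <= MAX_DIV; div++) {
			unsigned long long mult = (unsigned long long)div * desired;

			/* guard against overflowing unsigned long (mirrors the
			 * int64_t check in the driver) */
			if (mult > ULONG_MAX)
				break;

			freq = round_rate((unsigned long)mult);
			if (!(freq % desired)) {	/* exact integer divider */
				ser_clk = freq;
				break;
			}
			if (!prev)			/* remember first candidate */
				ser_clk = freq;
			else if (prev == freq)		/* clock can't go higher */
				break;
			prev = freq;
		}

		*clk_div = ser_clk ? ser_clk / desired : 0;
		if (ser_clk && !*clk_div)
			*clk_div = 1;
		return ser_clk;
	}

	int main(void)
	{
		unsigned int div;
		unsigned long rate = get_div_rate(115200, 16, &div);

		printf("ser_clk=%lu div=%u\n", rate, div);
		return 0;
	}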
 
index e5f1fde..f556b49 100644 (file)
@@ -262,6 +262,8 @@ static void rda_uart_set_termios(struct uart_port *port,
                fallthrough;
        case CS7:
                ctrl &= ~RDA_UART_DBITS_8;
+               termios->c_cflag &= ~CSIZE;
+               termios->c_cflag |= CS7;
                break;
        default:
                ctrl |= RDA_UART_DBITS_8;
index 5fe6ccc..e64e42a 100644 (file)
@@ -446,6 +446,8 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
        baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); 
        quot = uart_get_divisor(port, baud);
 
+       del_timer_sync(&sport->timer);
+
        spin_lock_irqsave(&sport->port.lock, flags);
 
        sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
@@ -476,8 +478,6 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
                                UTSR1_TO_SM(UTSR1_ROR);
        }
 
-       del_timer_sync(&sport->timer);
-
        /*
         * Update the per-port timeout.
         */
index e1585fb..d5ca904 100644 (file)
@@ -2480,12 +2480,24 @@ s3c24xx_serial_console_write(struct console *co, const char *s,
                             unsigned int count)
 {
        unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
+       unsigned long flags;
+       bool locked = true;
 
        /* not possible to xmit on unconfigured port */
        if (!s3c24xx_port_configured(ucon))
                return;
 
+       if (cons_uart->sysrq)
+               locked = false;
+       else if (oops_in_progress)
+               locked = spin_trylock_irqsave(&cons_uart->lock, flags);
+       else
+               spin_lock_irqsave(&cons_uart->lock, flags);
+
        uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
+
+       if (locked)
+               spin_unlock_irqrestore(&cons_uart->lock, flags);
 }
 
 /* Shouldn't be __init, as it can be instantiated from other module */
@@ -2814,6 +2826,7 @@ static const struct s3c24xx_serial_drv_data s5l_serial_drv_data = {
                .num_clks       = 1,
                .clksel_mask    = 0,
                .clksel_shift   = 0,
+               .ucon_mask      = APPLE_S5L_UCON_MASK,
        },
        .def_cfg = {
                .ucon           = APPLE_S5L_UCON_DEFAULT,
index 5fb201c..8472bf7 100644 (file)
@@ -1134,16 +1134,6 @@ static int sc16is7xx_config_rs485(struct uart_port *port,
        struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
 
        if (rs485->flags & SER_RS485_ENABLED) {
-               bool rts_during_rx, rts_during_tx;
-
-               rts_during_rx = rs485->flags & SER_RS485_RTS_AFTER_SEND;
-               rts_during_tx = rs485->flags & SER_RS485_RTS_ON_SEND;
-
-               if (rts_during_rx == rts_during_tx)
-                       dev_err(port->dev,
-                               "unsupported RTS signalling on_send:%d after_send:%d - exactly one of RS485 RTS flags should be set\n",
-                               rts_during_tx, rts_during_rx);
-
                /*
                 * RTS signal is handled by HW, it's timing can't be influenced.
                 * However, it's sometimes useful to delay TX even without RTS
index 6a8963c..9a85b41 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/sysrq.h>
 #include <linux/delay.h>
 #include <linux/mutex.h>
+#include <linux/math64.h>
 #include <linux/security.h>
 
 #include <linux/irq.h>
@@ -42,6 +43,11 @@ static struct lock_class_key port_lock_key;
 
 #define HIGH_BITS_OFFSET       ((sizeof(long)-sizeof(int))*8)
 
+/*
+ * Max time with active RTS before/after data is sent.
+ */
+#define RS485_MAX_RTS_DELAY    100 /* msecs */
+
 static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
                                        struct ktermios *old_termios);
 static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
@@ -333,15 +339,18 @@ void
 uart_update_timeout(struct uart_port *port, unsigned int cflag,
                    unsigned int baud)
 {
-       unsigned int size;
+       unsigned int size = tty_get_frame_size(cflag);
+       u64 frame_time;
 
-       size = tty_get_frame_size(cflag) * port->fifosize;
+       frame_time = (u64)size * NSEC_PER_SEC;
+       size *= port->fifosize;
 
        /*
         * Figure the timeout to send the above number of bits.
         * Add .02 seconds of slop
         */
        port->timeout = (HZ * size) / baud + HZ/50;
+       port->frame_time = DIV64_U64_ROUND_UP(frame_time, baud);
 }
 EXPORT_SYMBOL(uart_update_timeout);
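
    As a worked example of the arithmetic above (a sketch, not kernel code): for an 8N1 frame, tty_get_frame_size() reports 10 bits (1 start + 8 data + 1 stop), so at 115200 baud frame_time rounds up to 86806 ns; with an assumed HZ of 250 and a 16-byte FIFO the drain timeout comes to 5 jiffies, i.e. the HZ/50 slop dominates.

	#include <stdio.h>
	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000ULL
	#define HZ 250U			/* assumed jiffy rate for the example */

	int main(void)
	{
		unsigned int bits = 10;		/* 8N1: 1 start + 8 data + 1 stop */
		unsigned int fifosize = 16;	/* assumed FIFO depth */
		unsigned int baud = 115200;

		/* Nanoseconds needed to shift one character out, rounded up
		 * (what DIV64_U64_ROUND_UP does in the hunk above). */
		uint64_t frame_time = (bits * NSEC_PER_SEC + baud - 1) / baud;

		/* Jiffies needed to drain a full FIFO, plus ~20 ms of slop. */
		unsigned int timeout = (HZ * bits * fifosize) / baud + HZ / 50;

		printf("frame_time = %llu ns, timeout = %u jiffies\n",
		       (unsigned long long)frame_time, timeout);
		return 0;
	}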
 
@@ -1296,8 +1305,36 @@ static int uart_set_rs485_config(struct uart_port *port,
        if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
                return -EFAULT;
 
+       /* pick sane settings if the user hasn't */
+       if (!(rs485.flags & SER_RS485_RTS_ON_SEND) ==
+           !(rs485.flags & SER_RS485_RTS_AFTER_SEND)) {
+               dev_warn_ratelimited(port->dev,
+                       "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
+                       port->name, port->line);
+               rs485.flags |= SER_RS485_RTS_ON_SEND;
+               rs485.flags &= ~SER_RS485_RTS_AFTER_SEND;
+       }
+
+       if (rs485.delay_rts_before_send > RS485_MAX_RTS_DELAY) {
+               rs485.delay_rts_before_send = RS485_MAX_RTS_DELAY;
+               dev_warn_ratelimited(port->dev,
+                       "%s (%d): RTS delay before sending clamped to %u ms\n",
+                       port->name, port->line, rs485.delay_rts_before_send);
+       }
+
+       if (rs485.delay_rts_after_send > RS485_MAX_RTS_DELAY) {
+               rs485.delay_rts_after_send = RS485_MAX_RTS_DELAY;
+               dev_warn_ratelimited(port->dev,
+                       "%s (%d): RTS delay after sending clamped to %u ms\n",
+                       port->name, port->line, rs485.delay_rts_after_send);
+       }
+       /* Return clean padding area to userspace */
+       memset(rs485.padding, 0, sizeof(rs485.padding));
+
        spin_lock_irqsave(&port->lock, flags);
        ret = port->rs485_config(port, &rs485);
+       if (!ret)
+               port->rs485 = rs485;
        spin_unlock_irqrestore(&port->lock, flags);
        if (ret)
                return ret;
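
    The !(flags & RTS_ON_SEND) == !(flags & RTS_AFTER_SEND) comparison above is the usual idiom for "exactly one of the two bits must be set". A minimal standalone sketch of the same sanitization follows; the flag bit values should match the uapi <linux/serial.h> definitions, but struct serial_rs485 is trimmed down for the example:

	#include <stdio.h>

	#define SER_RS485_ENABLED		(1 << 0)
	#define SER_RS485_RTS_ON_SEND		(1 << 1)
	#define SER_RS485_RTS_AFTER_SEND	(1 << 2)
	#define RS485_MAX_RTS_DELAY		100	/* msecs */

	struct serial_rs485 {		/* trimmed stand-in for the uapi struct */
		unsigned int flags;
		unsigned int delay_rts_before_send;
		unsigned int delay_rts_after_send;
	};

	static void sanitize_rs485(struct serial_rs485 *rs485)
	{
		/* Exactly one of RTS_ON_SEND / RTS_AFTER_SEND must be set;
		 * both-set and both-clear fall back to RTS_ON_SEND. */
		if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
		    !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
			rs485->flags |= SER_RS485_RTS_ON_SEND;
			rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
		}

		/* Clamp the RTS delays to something a driver can honour. */
		if (rs485->delay_rts_before_send > RS485_MAX_RTS_DELAY)
			rs485->delay_rts_before_send = RS485_MAX_RTS_DELAY;
		if (rs485->delay_rts_after_send > RS485_MAX_RTS_DELAY)
			rs485->delay_rts_after_send = RS485_MAX_RTS_DELAY;
	}

	int main(void)
	{
		/* User left both RTS flags clear and asked for a 5 s delay. */
		struct serial_rs485 conf = { SER_RS485_ENABLED, 5000, 0 };

		sanitize_rs485(&conf);
		printf("flags=%#x before=%u after=%u\n", conf.flags,
		       conf.delay_rts_before_send, conf.delay_rts_after_send);
		return 0;
	}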
@@ -1610,24 +1647,24 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
         * Note: we have to use pretty tight timings here to satisfy
         * the NIST-PCTS.
         */
-       char_time = (port->timeout - HZ/50) / port->fifosize;
-       char_time = char_time / 5;
-       if (char_time == 0)
-               char_time = 1;
+       char_time = max(nsecs_to_jiffies(port->frame_time / 5), 1UL);
+
        if (timeout && timeout < char_time)
                char_time = timeout;
 
-       /*
-        * If the transmitter hasn't cleared in twice the approximate
-        * amount of time to send the entire FIFO, it probably won't
-        * ever clear.  This assumes the UART isn't doing flow
-        * control, which is currently the case.  Hence, if it ever
-        * takes longer than port->timeout, this is probably due to a
-        * UART bug of some kind.  So, we clamp the timeout parameter at
-        * 2*port->timeout.
-        */
-       if (timeout == 0 || timeout > 2 * port->timeout)
-               timeout = 2 * port->timeout;
+       if (!uart_cts_enabled(port)) {
+               /*
+                * If the transmitter hasn't cleared in twice the approximate
+                * amount of time to send the entire FIFO, it probably won't
+                * ever clear.  This assumes the UART isn't doing flow
+                * control, which is currently the case.  Hence, if it ever
+                * takes longer than port->timeout, this is probably due to a
+                * UART bug of some kind.  So, we clamp the timeout parameter at
+                * 2*port->timeout.
+                */
+               if (timeout == 0 || timeout > 2 * port->timeout)
+                       timeout = 2 * port->timeout;
+       }
 
        expire = jiffies + timeout;
 
@@ -1643,7 +1680,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
                msleep_interruptible(jiffies_to_msecs(char_time));
                if (signal_pending(current))
                        break;
-               if (time_after(jiffies, expire))
+               if (timeout && time_after(jiffies, expire))
                        break;
        }
        uart_port_deref(port);
@@ -2174,15 +2211,23 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
        }
        put_device(tty_dev);
 
-       /* Nothing to do if the console is not suspending */
-       if (!console_suspend_enabled && uart_console(uport))
+       /*
+        * Nothing to do if the console is not suspending
+        * except stop_rx to prevent any asynchronous data
+        * over RX line. Re-start_rx, when required, is
+        * done by set_termios in resume sequence
+        */
+       if (!console_suspend_enabled && uart_console(uport)) {
+               uport->ops->stop_rx(uport);
                goto unlock;
+       }
 
        uport->suspended = 1;
 
        if (tty_port_initialized(port)) {
                const struct uart_ops *ops = uport->ops;
                int tries;
+               unsigned int mctrl;
 
                tty_port_set_suspended(port, 1);
                tty_port_set_initialized(port, 0);
@@ -2190,6 +2235,9 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
                spin_lock_irq(&uport->lock);
                ops->stop_tx(uport);
                ops->set_mctrl(uport, 0);
+               /* save mctrl so it can be restored on resume */
+               mctrl = uport->mctrl;
+               uport->mctrl = 0;
                ops->stop_rx(uport);
                spin_unlock_irq(&uport->lock);
 
@@ -2203,6 +2251,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
                                uport->name);
 
                ops->shutdown(uport);
+               uport->mctrl = mctrl;
        }
 
        /*
index 2213e6b..228e380 100644 (file)
@@ -618,6 +618,8 @@ serial_txx9_set_termios(struct uart_port *up, struct ktermios *termios,
        case CS6:       /* not supported */
        case CS8:
                cval |= TXX9_SILCR_UMODE_8BIT;
+               termios->c_cflag &= ~CSIZE;
+               termios->c_cflag |= CS8;
                break;
        }
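
    Several hunks in this series apply the same fix: when the hardware cannot provide the requested word length, the driver writes the size it actually programmed back into termios->c_cflag so userspace sees the real line settings. A userspace sketch of just that bit manipulation, using the standard termios constants (the CS5 request in main() is an arbitrary example):

	#include <stdio.h>
	#include <string.h>
	#include <termios.h>

	/* Report an 8-bit word length back to the caller, whatever was requested. */
	static void force_cs8(struct termios *t)
	{
		t->c_cflag &= ~CSIZE;	/* clear the character-size field */
		t->c_cflag |= CS8;	/* ...and advertise 8 data bits */
	}

	int main(void)
	{
		struct termios t;

		memset(&t, 0, sizeof(t));
		t.c_cflag = CS5 | PARENB;	/* caller asked for 5 data bits */

		force_cs8(&t);
		printf("CSIZE is now CS8: %s, parity kept: %s\n",
		       (t.c_cflag & CSIZE) == CS8 ? "yes" : "no",
		       (t.c_cflag & PARENB) ? "yes" : "no");
		return 0;
	}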
 
index 0f9b8bd..0075a14 100644 (file)
@@ -2379,8 +2379,12 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
        int best_clk = -1;
        unsigned long flags;
 
-       if ((termios->c_cflag & CSIZE) == CS7)
+       if ((termios->c_cflag & CSIZE) == CS7) {
                smr_val |= SCSMR_CHR;
+       } else {
+               termios->c_cflag &= ~CSIZE;
+               termios->c_cflag |= CS8;
+       }
        if (termios->c_cflag & PARENB)
                smr_val |= SCSMR_PE;
        if (termios->c_cflag & PARODD)
index f5ac14c..c0869b0 100644 (file)
  * @port: struct uart_port embedded in this struct
  * @dev: struct device *
  * @ier: shadowed copy of the interrupt enable register
- * @clkin_rate: input clock to the UART IP block.
  * @baud_rate: UART serial line rate (e.g., 115200 baud)
  * @clk: reference to this device's clock
  * @clk_notifier: clock rate change notifier for upstream clock changes
@@ -159,7 +158,6 @@ struct sifive_serial_port {
        struct uart_port        port;
        struct device           *dev;
        unsigned char           ier;
-       unsigned long           clkin_rate;
        unsigned long           baud_rate;
        struct clk              *clk;
        struct notifier_block   clk_notifier;
@@ -463,7 +461,7 @@ static void __ssp_update_div(struct sifive_serial_port *ssp)
 {
        u16 div;
 
-       div = DIV_ROUND_UP(ssp->clkin_rate, ssp->baud_rate) - 1;
+       div = DIV_ROUND_UP(ssp->port.uartclk, ssp->baud_rate) - 1;
 
        __ssp_writel(div, SIFIVE_SERIAL_DIV_OFFS, ssp);
 }
@@ -648,8 +646,8 @@ static int sifive_serial_clk_notifier(struct notifier_block *nb,
                udelay(DIV_ROUND_UP(12 * 1000 * 1000, ssp->baud_rate));
        }
 
-       if (event == POST_RATE_CHANGE && ssp->clkin_rate != cnd->new_rate) {
-               ssp->clkin_rate = cnd->new_rate;
+       if (event == POST_RATE_CHANGE && ssp->port.uartclk != cnd->new_rate) {
+               ssp->port.uartclk = cnd->new_rate;
                __ssp_update_div(ssp);
        }
 
@@ -666,19 +664,24 @@ static void sifive_serial_set_termios(struct uart_port *port,
        int rate;
        char nstop;
 
-       if ((termios->c_cflag & CSIZE) != CS8)
+       if ((termios->c_cflag & CSIZE) != CS8) {
                dev_err_once(ssp->port.dev, "only 8-bit words supported\n");
+               termios->c_cflag &= ~CSIZE;
+               termios->c_cflag |= CS8;
+       }
        if (termios->c_iflag & (INPCK | PARMRK))
                dev_err_once(ssp->port.dev, "parity checking not supported\n");
        if (termios->c_iflag & BRKINT)
                dev_err_once(ssp->port.dev, "BREAK detection not supported\n");
+       termios->c_iflag &= ~(INPCK|PARMRK|BRKINT);
 
        /* Set number of stop bits */
        nstop = (termios->c_cflag & CSTOPB) ? 2 : 1;
        __ssp_set_stop_bits(ssp, nstop);
 
        /* Set line rate */
-       rate = uart_get_baud_rate(port, termios, old, 0, ssp->clkin_rate / 16);
+       rate = uart_get_baud_rate(port, termios, old, 0,
+                                 ssp->port.uartclk / 16);
        __ssp_update_baud_rate(ssp, rate);
 
        spin_lock_irqsave(&ssp->port.lock, flags);
@@ -996,9 +999,8 @@ static int sifive_serial_probe(struct platform_device *pdev)
        }
 
        /* Set up clock divider */
-       ssp->clkin_rate = clk_get_rate(ssp->clk);
+       ssp->port.uartclk = clk_get_rate(ssp->clk);
        ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE;
-       ssp->port.uartclk = ssp->baud_rate * 16;
        __ssp_update_div(ssp);
 
        platform_set_drvdata(pdev, ssp);
index d7fd692..1b0da60 100644 (file)
@@ -535,10 +535,14 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
        /* set character length */
        if ((cflag & CSIZE) == CS7) {
                ctrl_val |= ASC_CTL_MODE_7BIT_PAR;
+               cflag |= PARENB;
        } else {
                ctrl_val |= (cflag & PARENB) ?  ASC_CTL_MODE_8BIT_PAR :
                                                ASC_CTL_MODE_8BIT;
+               cflag &= ~CSIZE;
+               cflag |= CS8;
        }
+       termios->c_cflag = cflag;
 
        /* set stop bit */
        ctrl_val |= (cflag & CSTOPB) ? ASC_CTL_STOP_2BIT : ASC_CTL_STOP_1BIT;
index 87b5cd4..b7b44f4 100644 (file)
@@ -37,6 +37,7 @@
 
 static void stm32_usart_stop_tx(struct uart_port *port);
 static void stm32_usart_transmit_chars(struct uart_port *port);
+static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
 
 static inline struct stm32_port *to_stm32_port(struct uart_port *port)
 {
@@ -107,8 +108,6 @@ static int stm32_usart_config_rs485(struct uart_port *port,
 
        stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
 
-       port->rs485 = *rs485conf;
-
        rs485conf->flags |= SER_RS485_RX_DURING_TX;
 
        if (rs485conf->flags & SER_RS485_ENABLED) {
@@ -128,13 +127,10 @@ static int stm32_usart_config_rs485(struct uart_port *port,
                                             rs485conf->delay_rts_after_send,
                                             baud);
 
-               if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+               if (rs485conf->flags & SER_RS485_RTS_ON_SEND)
                        cr3 &= ~USART_CR3_DEP;
-                       rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
-               } else {
+               else
                        cr3 |= USART_CR3_DEP;
-                       rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
-               }
 
                writel_relaxed(cr3, port->membase + ofs->cr3);
                writel_relaxed(cr1, port->membase + ofs->cr1);
@@ -421,6 +417,14 @@ static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
                stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
 }
 
+static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
+{
+       struct stm32_port *stm32_port = to_stm32_port(port);
+       const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+       stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
+}
+
 static void stm32_usart_rx_dma_complete(void *arg)
 {
        struct uart_port *port = arg;
@@ -446,6 +450,50 @@ static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
                stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
 }
 
+static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
+{
+       struct stm32_port *stm32_port = to_stm32_port(port);
+       const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+       stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
+}
+
+static void stm32_usart_rs485_rts_enable(struct uart_port *port)
+{
+       struct stm32_port *stm32_port = to_stm32_port(port);
+       struct serial_rs485 *rs485conf = &port->rs485;
+
+       if (stm32_port->hw_flow_control ||
+           !(rs485conf->flags & SER_RS485_ENABLED))
+               return;
+
+       if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+               mctrl_gpio_set(stm32_port->gpios,
+                              stm32_port->port.mctrl | TIOCM_RTS);
+       } else {
+               mctrl_gpio_set(stm32_port->gpios,
+                              stm32_port->port.mctrl & ~TIOCM_RTS);
+       }
+}
+
+static void stm32_usart_rs485_rts_disable(struct uart_port *port)
+{
+       struct stm32_port *stm32_port = to_stm32_port(port);
+       struct serial_rs485 *rs485conf = &port->rs485;
+
+       if (stm32_port->hw_flow_control ||
+           !(rs485conf->flags & SER_RS485_ENABLED))
+               return;
+
+       if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+               mctrl_gpio_set(stm32_port->gpios,
+                              stm32_port->port.mctrl & ~TIOCM_RTS);
+       } else {
+               mctrl_gpio_set(stm32_port->gpios,
+                              stm32_port->port.mctrl | TIOCM_RTS);
+       }
+}
+
 static void stm32_usart_transmit_chars_pio(struct uart_port *port)
 {
        struct stm32_port *stm32_port = to_stm32_port(port);
@@ -553,6 +601,13 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
        u32 isr;
        int ret;
 
+       if (!stm32_port->hw_flow_control &&
+           port->rs485.flags & SER_RS485_ENABLED) {
+               stm32_port->txdone = false;
+               stm32_usart_tc_interrupt_disable(port);
+               stm32_usart_rs485_rts_enable(port);
+       }
+
        if (port->x_char) {
                if (stm32_usart_tx_dma_started(stm32_port) &&
                    stm32_usart_tx_dma_enabled(stm32_port))
@@ -593,8 +648,14 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
 
-       if (uart_circ_empty(xmit))
+       if (uart_circ_empty(xmit)) {
                stm32_usart_tx_interrupt_disable(port);
+               if (!stm32_port->hw_flow_control &&
+                   port->rs485.flags & SER_RS485_ENABLED) {
+                       stm32_port->txdone = true;
+                       stm32_usart_tc_interrupt_enable(port);
+               }
+       }
 }
 
 static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
@@ -608,6 +669,13 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
 
        sr = readl_relaxed(port->membase + ofs->isr);
 
+       if (!stm32_port->hw_flow_control &&
+           port->rs485.flags & SER_RS485_ENABLED &&
+           (sr & USART_SR_TC)) {
+               stm32_usart_tc_interrupt_disable(port);
+               stm32_usart_rs485_rts_disable(port);
+       }
+
        if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
                writel_relaxed(USART_ICR_RTOCF,
                               port->membase + ofs->icr);
@@ -717,44 +785,27 @@ static void stm32_usart_disable_ms(struct uart_port *port)
 static void stm32_usart_stop_tx(struct uart_port *port)
 {
        struct stm32_port *stm32_port = to_stm32_port(port);
-       struct serial_rs485 *rs485conf = &port->rs485;
        const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
 
        stm32_usart_tx_interrupt_disable(port);
        if (stm32_usart_tx_dma_started(stm32_port) && stm32_usart_tx_dma_enabled(stm32_port))
                stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
 
-       if (rs485conf->flags & SER_RS485_ENABLED) {
-               if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
-                       mctrl_gpio_set(stm32_port->gpios,
-                                       stm32_port->port.mctrl & ~TIOCM_RTS);
-               } else {
-                       mctrl_gpio_set(stm32_port->gpios,
-                                       stm32_port->port.mctrl | TIOCM_RTS);
-               }
-       }
+       stm32_usart_rs485_rts_disable(port);
 }
 
 /* There are probably characters waiting to be transmitted. */
 static void stm32_usart_start_tx(struct uart_port *port)
 {
-       struct stm32_port *stm32_port = to_stm32_port(port);
-       struct serial_rs485 *rs485conf = &port->rs485;
        struct circ_buf *xmit = &port->state->xmit;
 
-       if (uart_circ_empty(xmit) && !port->x_char)
+       if (uart_circ_empty(xmit) && !port->x_char) {
+               stm32_usart_rs485_rts_disable(port);
                return;
-
-       if (rs485conf->flags & SER_RS485_ENABLED) {
-               if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
-                       mctrl_gpio_set(stm32_port->gpios,
-                                       stm32_port->port.mctrl | TIOCM_RTS);
-               } else {
-                       mctrl_gpio_set(stm32_port->gpios,
-                                       stm32_port->port.mctrl & ~TIOCM_RTS);
-               }
        }
 
+       stm32_usart_rs485_rts_enable(port);
+
        stm32_usart_transmit_chars(port);
 }
 
@@ -1037,13 +1088,22 @@ static void stm32_usart_set_termios(struct uart_port *port,
         * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
         * M0 and M1 already cleared by cr1 initialization.
         */
-       if (bits == 9)
+       if (bits == 9) {
                cr1 |= USART_CR1_M0;
-       else if ((bits == 7) && cfg->has_7bits_data)
+       } else if ((bits == 7) && cfg->has_7bits_data) {
                cr1 |= USART_CR1_M1;
-       else if (bits != 8)
+       } else if (bits != 8) {
                dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
                        , bits);
+               cflag &= ~CSIZE;
+               cflag |= CS8;
+               termios->c_cflag = cflag;
+               bits = 8;
+               if (cflag & PARENB) {
+                       bits++;
+                       cr1 |= USART_CR1_M0;
+               }
+       }
 
        if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
                                       (stm32_port->fifoen &&
@@ -1222,6 +1282,33 @@ static void stm32_usart_pm(struct uart_port *port, unsigned int state,
        }
 }
 
+#if defined(CONFIG_CONSOLE_POLL)
+
+ /* Callbacks for characters polling in debug context (i.e. KGDB). */
+static int stm32_usart_poll_init(struct uart_port *port)
+{
+       struct stm32_port *stm32_port = to_stm32_port(port);
+
+       return clk_prepare_enable(stm32_port->clk);
+}
+
+static int stm32_usart_poll_get_char(struct uart_port *port)
+{
+       struct stm32_port *stm32_port = to_stm32_port(port);
+       const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+       if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE))
+               return NO_POLL_CHAR;
+
+       return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask;
+}
+
+static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch)
+{
+       stm32_usart_console_putchar(port, ch);
+}
+#endif /* CONFIG_CONSOLE_POLL */
+
 static const struct uart_ops stm32_uart_ops = {
        .tx_empty       = stm32_usart_tx_empty,
        .set_mctrl      = stm32_usart_set_mctrl,
@@ -1243,6 +1330,11 @@ static const struct uart_ops stm32_uart_ops = {
        .request_port   = stm32_usart_request_port,
        .config_port    = stm32_usart_config_port,
        .verify_port    = stm32_usart_verify_port,
+#if defined(CONFIG_CONSOLE_POLL)
+       .poll_init      = stm32_usart_poll_init,
+       .poll_get_char  = stm32_usart_poll_get_char,
+       .poll_put_char  = stm32_usart_poll_put_char,
+#endif /* CONFIG_CONSOLE_POLL */
 };
 
 /*
@@ -1640,18 +1732,24 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_SERIAL_STM32_CONSOLE
-static void stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
+static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
 {
        struct stm32_port *stm32_port = to_stm32_port(port);
        const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+       u32 isr;
+       int ret;
 
-       while (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
-               cpu_relax();
-
+       ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr,
+                                               (isr & USART_SR_TXE), 100,
+                                               STM32_USART_TIMEOUT_USEC);
+       if (ret != 0) {
+               dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret);
+               return;
+       }
        writel_relaxed(ch, port->membase + ofs->tdr);
 }
 
+#ifdef CONFIG_SERIAL_STM32_CONSOLE
 static void stm32_usart_console_write(struct console *co, const char *s,
                                      unsigned int cnt)
 {
@@ -1727,6 +1825,57 @@ static struct console stm32_console = {
 #define STM32_SERIAL_CONSOLE NULL
 #endif /* CONFIG_SERIAL_STM32_CONSOLE */
 
+#ifdef CONFIG_SERIAL_EARLYCON
+static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
+{
+       struct stm32_usart_info *info = port->private_data;
+
+       while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
+               cpu_relax();
+
+       writel_relaxed(ch, port->membase + info->ofs.tdr);
+}
+
+static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
+{
+       struct earlycon_device *device = console->data;
+       struct uart_port *port = &device->port;
+
+       uart_console_write(port, s, count, early_stm32_usart_console_putchar);
+}
+
+static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
+{
+       if (!(device->port.membase || device->port.iobase))
+               return -ENODEV;
+       device->port.private_data = &stm32h7_info;
+       device->con->write = early_stm32_serial_write;
+       return 0;
+}
+
+static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
+{
+       if (!(device->port.membase || device->port.iobase))
+               return -ENODEV;
+       device->port.private_data = &stm32f7_info;
+       device->con->write = early_stm32_serial_write;
+       return 0;
+}
+
+static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
+{
+       if (!(device->port.membase || device->port.iobase))
+               return -ENODEV;
+       device->port.private_data = &stm32f4_info;
+       device->con->write = early_stm32_serial_write;
+       return 0;
+}
+
+OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
+OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
+OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
+#endif /* CONFIG_SERIAL_EARLYCON */
+
 static struct uart_driver stm32_usart_driver = {
        .driver_name    = DRIVER_NAME,
        .dev_name       = STM32_SERIAL_NAME,
index feab952..ee69c20 100644 (file)
@@ -251,6 +251,8 @@ struct stm32_usart_info stm32h7_info = {
 #define RX_BUF_P (RX_BUF_L / 2)         /* dma rx buffer period     */
 #define TX_BUF_L RX_BUF_L       /* dma tx buffer length     */
 
+#define STM32_USART_TIMEOUT_USEC USEC_PER_SEC /* 1s timeout in µs */
+
 struct stm32_port {
        struct uart_port port;
        struct clk *clk;
@@ -269,6 +271,7 @@ struct stm32_port {
        bool hw_flow_control;
        bool swap;               /* swap RX & TX pins */
        bool fifoen;
+       bool txdone;
        int rxftcfg;            /* RX FIFO threshold CFG      */
        int txftcfg;            /* TX FIFO threshold CFG      */
        bool wakeup_src;
index 9f15922..60c7366 100644 (file)
@@ -498,7 +498,7 @@ static const struct uart_ops sunplus_uart_ops = {
 };
 
 #ifdef CONFIG_SERIAL_SUNPLUS_CONSOLE
-struct sunplus_uart_port *sunplus_console_ports[SUP_UART_NR];
+static struct sunplus_uart_port *sunplus_console_ports[SUP_UART_NR];
 
 static void sunplus_uart_console_putchar(struct uart_port *port,
                                         unsigned char ch)
index c313891..fff50b5 100644 (file)
@@ -798,10 +798,8 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
                cval |= UART_LCR_PARITY;
        if (!(cflag & PARODD))
                cval |= UART_LCR_EPAR;
-#ifdef CMSPAR
        if (cflag & CMSPAR)
                cval |= UART_LCR_SPAR;
-#endif
 
        /*
         * Work around a bug in the Oxford Semiconductor 952 rev B
index 007db67..880e2af 100644 (file)
@@ -321,7 +321,8 @@ static void ulite_set_termios(struct uart_port *port, struct ktermios *termios,
        struct uartlite_data *pdata = port->private_data;
 
        /* Set termios to what the hardware supports */
-       termios->c_cflag &= ~(BRKINT | CSTOPB | PARENB | PARODD | CSIZE);
+       termios->c_iflag &= ~BRKINT;
+       termios->c_cflag &= ~(CSTOPB | PARENB | PARODD | CSIZE);
        termios->c_cflag |= pdata->cflags & (PARENB | PARODD | CSIZE);
        tty_termios_encode_baud_rate(termios, pdata->baud, pdata->baud);
 
index 250a1d8..9e01fe6 100644 (file)
@@ -313,41 +313,27 @@ static void cdns_uart_handle_rx(void *dev_id, unsigned int isrstatus)
 static void cdns_uart_handle_tx(void *dev_id)
 {
        struct uart_port *port = (struct uart_port *)dev_id;
+       struct circ_buf *xmit = &port->state->xmit;
        unsigned int numbytes;
 
-       if (uart_circ_empty(&port->state->xmit)) {
+       if (uart_circ_empty(xmit)) {
                writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IDR);
-       } else {
-               numbytes = port->fifosize;
-               while (numbytes && !uart_circ_empty(&port->state->xmit) &&
-                      !(readl(port->membase + CDNS_UART_SR) &
-                                               CDNS_UART_SR_TXFULL)) {
-                       /*
-                        * Get the data from the UART circular buffer
-                        * and write it to the cdns_uart's TX_FIFO
-                        * register.
-                        */
-                       writel(
-                               port->state->xmit.buf[port->state->xmit.tail],
-                                       port->membase + CDNS_UART_FIFO);
-
-                       port->icount.tx++;
-
-                       /*
-                        * Adjust the tail of the UART buffer and wrap
-                        * the buffer if it reaches limit.
-                        */
-                       port->state->xmit.tail =
-                               (port->state->xmit.tail + 1) &
-                                       (UART_XMIT_SIZE - 1);
-
-                       numbytes--;
-               }
+               return;
+       }
 
-               if (uart_circ_chars_pending(
-                               &port->state->xmit) < WAKEUP_CHARS)
-                       uart_write_wakeup(port);
+       numbytes = port->fifosize;
+       while (numbytes && !uart_circ_empty(xmit) &&
+              !(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)) {
+
+               writel(xmit->buf[xmit->tail], port->membase + CDNS_UART_FIFO);
+
+               port->icount.tx++;
+               xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+               numbytes--;
        }
+
+       if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+               uart_write_wakeup(port);
 }
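
    The rewrite above is a bounded drain of the power-of-two circular xmit buffer: advance the tail with a mask rather than a modulo and stop when the FIFO budget or the buffer runs out (the TXFULL register poll is left out here). A standalone sketch of the ring arithmetic; the UART_XMIT_SIZE value, the local struct circ_buf and the tx_byte() sink are assumptions for the example:

	#include <stdio.h>
	#include <string.h>

	#define UART_XMIT_SIZE 4096	/* must be a power of two for the mask trick */

	struct circ_buf {
		char *buf;
		int head;
		int tail;
	};

	#define uart_circ_empty(c)	((c)->head == (c)->tail)

	/* Stand-in for writing one byte to the TX FIFO register. */
	static void tx_byte(char c)
	{
		putchar(c);
	}

	/* Push up to 'fifosize' pending bytes, advancing the masked tail. */
	static unsigned int drain_xmit(struct circ_buf *xmit, unsigned int fifosize)
	{
		unsigned int sent = 0;

		while (fifosize && !uart_circ_empty(xmit)) {
			tx_byte(xmit->buf[xmit->tail]);
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
			fifosize--;
			sent++;
		}
		return sent;
	}

	int main(void)
	{
		static char buf[UART_XMIT_SIZE];
		struct circ_buf xmit = { .buf = buf, .head = 0, .tail = 0 };

		memcpy(buf, "hello, uart\n", 12);
		xmit.head = 12;			/* 12 bytes queued by the writer */

		printf("sent %u bytes\n", drain_xmit(&xmit, 64));
		return 0;
	}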
 
 /**
index 70969bf..5bc5859 100644 (file)
@@ -981,7 +981,7 @@ static const char *zs_type(struct uart_port *uport)
 static void zs_release_port(struct uart_port *uport)
 {
        iounmap(uport->membase);
-       uport->membase = 0;
+       uport->membase = NULL;
        release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE);
 }
 
index 25c558e..9bc2a92 100644 (file)
@@ -1746,6 +1746,8 @@ static int hdlcdev_init(struct slgt_info *info)
  */
 static void hdlcdev_exit(struct slgt_info *info)
 {
+       if (!info->netdev)
+               return;
        unregister_hdlc_device(info->netdev);
        free_netdev(info->netdev);
        info->netdev = NULL;
index 2884cd6..18e6233 100644 (file)
@@ -232,8 +232,10 @@ static void showacpu(void *dummy)
        unsigned long flags;
 
        /* Idle CPUs have no interesting backtrace. */
-       if (idle_cpu(smp_processor_id()))
+       if (idle_cpu(smp_processor_id())) {
+               pr_info("CPU%d: backtrace skipped as idling\n", smp_processor_id());
                return;
+       }
 
        raw_spin_lock_irqsave(&show_lock, flags);
        pr_info("CPU%d:\n", smp_processor_id());
@@ -260,10 +262,13 @@ static void sysrq_handle_showallcpus(int key)
 
                if (in_hardirq())
                        regs = get_irq_regs();
-               if (regs) {
-                       pr_info("CPU%d:\n", smp_processor_id());
+
+               pr_info("CPU%d:\n", smp_processor_id());
+               if (regs)
                        show_regs(regs);
-               }
+               else
+                       show_stack(NULL, NULL, KERN_INFO);
+
                schedule_work(&sysrq_showallcpus);
        }
 }
@@ -274,6 +279,8 @@ static const struct sysrq_key_op sysrq_showallcpus_op = {
        .action_msg     = "Show backtrace of all active CPUs",
        .enable_mask    = SYSRQ_ENABLE_DUMP,
 };
+#else
+#define sysrq_showallcpus_op (*(const struct sysrq_key_op *)NULL)
 #endif
 
 static void sysrq_handle_showregs(int key)
@@ -405,6 +412,7 @@ static const struct sysrq_key_op sysrq_moom_op = {
        .enable_mask    = SYSRQ_ENABLE_SIGNAL,
 };
 
+#ifdef CONFIG_BLOCK
 static void sysrq_handle_thaw(int key)
 {
        emergency_thaw_all();
@@ -415,6 +423,9 @@ static const struct sysrq_key_op sysrq_thaw_op = {
        .action_msg     = "Emergency Thaw of all frozen filesystems",
        .enable_mask    = SYSRQ_ENABLE_SIGNAL,
 };
+#else
+#define sysrq_thaw_op (*(const struct sysrq_key_op *)NULL)
+#endif
 
 static void sysrq_handle_kill(int key)
 {
@@ -468,17 +479,9 @@ static const struct sysrq_key_op *sysrq_key_table[62] = {
        NULL,                           /* g */
        NULL,                           /* h - reserved for help */
        &sysrq_kill_op,                 /* i */
-#ifdef CONFIG_BLOCK
        &sysrq_thaw_op,                 /* j */
-#else
-       NULL,                           /* j */
-#endif
        &sysrq_SAK_op,                  /* k */
-#ifdef CONFIG_SMP
        &sysrq_showallcpus_op,          /* l */
-#else
-       NULL,                           /* l */
-#endif
        &sysrq_showmem_op,              /* m */
        &sysrq_unrt_op,                 /* n */
        /* o: This will often be registered as 'Off' at init time */
index d903e11..3cd99ed 100644 (file)
@@ -61,11 +61,10 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
 
        cbaud = termios->c_cflag & CBAUD;
 
-#ifdef BOTHER
        /* Magic token for arbitrary speed via c_ispeed/c_ospeed */
        if (cbaud == BOTHER)
                return termios->c_ospeed;
-#endif
+
        if (cbaud & CBAUDEX) {
                cbaud &= ~CBAUDEX;
 
@@ -92,16 +91,15 @@ EXPORT_SYMBOL(tty_termios_baud_rate);
 
 speed_t tty_termios_input_baud_rate(struct ktermios *termios)
 {
-#ifdef IBSHIFT
        unsigned int cbaud = (termios->c_cflag >> IBSHIFT) & CBAUD;
 
        if (cbaud == B0)
                return tty_termios_baud_rate(termios);
-#ifdef BOTHER
+
        /* Magic token for arbitrary speed via c_ispeed*/
        if (cbaud == BOTHER)
                return termios->c_ispeed;
-#endif
+
        if (cbaud & CBAUDEX) {
                cbaud &= ~CBAUDEX;
 
@@ -111,9 +109,6 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
                        cbaud += 15;
        }
        return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
-#else  /* IBSHIFT */
-       return tty_termios_baud_rate(termios);
-#endif /* IBSHIFT */
 }
 EXPORT_SYMBOL(tty_termios_input_baud_rate);
 
@@ -153,11 +148,9 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
        termios->c_ispeed = ibaud;
        termios->c_ospeed = obaud;
 
-#ifdef IBSHIFT
        if (((termios->c_cflag >> IBSHIFT) & CBAUD) != B0)
                ibinput = 1;    /* An input speed was specified */
-#endif
-#ifdef BOTHER
+
        /* If the user asked for a precise weird speed give a precise weird
         * answer. If they asked for a Bfoo speed they may have problems
         * digesting non-exact replies so fuzz a bit.
@@ -170,11 +163,9 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
        }
        if (((termios->c_cflag >> IBSHIFT) & CBAUD) == BOTHER)
                iclose = 0;
-#endif
+
        termios->c_cflag &= ~CBAUD;
-#ifdef IBSHIFT
        termios->c_cflag &= ~(CBAUD << IBSHIFT);
-#endif
 
        /*
         *      Our goal is to find a close match to the standard baud rate
@@ -194,22 +185,16 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
                        /* For the case input == output don't set IBAUD bits
                         * if the user didn't do so.
                         */
-                       if (ofound == i && !ibinput)
+                       if (ofound == i && !ibinput) {
                                ifound  = i;
-#ifdef IBSHIFT
-                       else {
+                       } else {
                                ifound = i;
                                termios->c_cflag |= (baud_bits[i] << IBSHIFT);
                        }
-#endif
                }
        } while (++i < n_baud_table);
 
-       /*
-        *      If we found no match then use BOTHER if provided or warn
-        *      the user their platform maintainer needs to wake up if not.
-        */
-#ifdef BOTHER
+       /* If we found no match then use BOTHER. */
        if (ofound == -1)
                termios->c_cflag |= BOTHER;
        /* Set exact input bits only if the input and output differ or the
@@ -217,10 +202,6 @@ void tty_termios_encode_baud_rate(struct ktermios *termios,
         */
        if (ifound == -1 && (ibaud != obaud || ibinput))
                termios->c_cflag |= (BOTHER << IBSHIFT);
-#else
-       if (ifound == -1 || ofound == -1)
-               pr_warn_once("tty: Unable to return correct speed data as your architecture needs updating.\n");
-#endif
 }
 EXPORT_SYMBOL_GPL(tty_termios_encode_baud_rate);
 
index 6318192..adae687 100644 (file)
@@ -562,10 +562,8 @@ static int set_sgttyb(struct tty_struct *tty, struct sgttyb __user *sgttyb)
        termios.c_cc[VKILL] = tmp.sg_kill;
        set_sgflags(&termios, tmp.sg_flags);
        /* Try and encode into Bfoo format */
-#ifdef BOTHER
        tty_termios_encode_baud_rate(&termios, termios.c_ispeed,
                                                termios.c_ospeed);
-#endif
        up_write(&tty->termios_rwsem);
        tty_set_termios(tty, &termios);
        return 0;
index 80b86a7..0d04287 100644 (file)
@@ -215,8 +215,8 @@ int tty_signal_session_leader(struct tty_struct *tty, int exit_session)
                                spin_unlock_irq(&p->sighand->siglock);
                                continue;
                        }
-                       __group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p);
-                       __group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p);
+                       send_signal_locked(SIGHUP, SEND_SIG_PRIV, p, PIDTYPE_TGID);
+                       send_signal_locked(SIGCONT, SEND_SIG_PRIV, p, PIDTYPE_TGID);
                        put_pid(p->signal->tty_old_pgrp);  /* A noop */
                        spin_lock(&tty->ctrl.lock);
                        tty_pgrp = get_pid(tty->ctrl.pgrp);
diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig
new file mode 100644 (file)
index 0000000..90226f7
--- /dev/null
+++ b/drivers/ufs/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# UFS subsystem configuration
+#
+
+menuconfig SCSI_UFSHCD
+       tristate "Universal Flash Storage Controller"
+       depends on SCSI && SCSI_DMA
+       select PM_DEVFREQ
+       select DEVFREQ_GOV_SIMPLE_ONDEMAND
+       select NLS
+       help
+         Enables support for UFS (Universal Flash Storage) host controllers.
+         A UFS host controller is an electronic component that is able to
+         communicate with a UFS card. UFS host controllers occur in
+         smartphones, laptops, digital cameras and also in cars.
+         The kernel module will be called ufshcd.
+
+         To compile this driver as a module, choose M here and read
+         <file:Documentation/scsi/ufs.rst>.
+         However, do not compile this as a module if your root file system
+         (the one containing the directory /) is located on a UFS device.
+
+if SCSI_UFSHCD
+
+source "drivers/ufs/core/Kconfig"
+
+source "drivers/ufs/host/Kconfig"
+
+endif
diff --git a/drivers/ufs/Makefile b/drivers/ufs/Makefile
new file mode 100644 (file)
index 0000000..5a199ef
--- /dev/null
+++ b/drivers/ufs/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# The link order is important here. ufshcd-core must initialize
+# before vendor drivers.
+obj-$(CONFIG_SCSI_UFSHCD)      += core/ host/
diff --git a/drivers/ufs/core/Kconfig b/drivers/ufs/core/Kconfig
new file mode 100644 (file)
index 0000000..e119781
--- /dev/null
+++ b/drivers/ufs/core/Kconfig
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Kernel configuration file for the UFS Host Controller core.
+#
+# Copyright (C) 2011-2013 Samsung India Software Operations
+#
+# Authors:
+#      Santosh Yaraganavi <santosh.sy@samsung.com>
+#      Vinayak Holikatti <h.vinayak@samsung.com>
+
+config SCSI_UFS_BSG
+       bool "Universal Flash Storage BSG device node"
+       select BLK_DEV_BSGLIB
+       help
+         Universal Flash Storage (UFS) is SCSI transport specification for
+         accessing flash storage on digital cameras, mobile phones and
+         consumer electronic devices.
+         A UFS controller communicates with a UFS device by exchanging
+         UFS Protocol Information Units (UPIUs).
+         UPIUs can not only be used as a transport layer for the SCSI protocol
+         but are also used by the UFS native command set.
+         This transport driver supports exchanging UFS protocol information units
+         with a UFS device. See also the ufshcd driver, which is a SCSI driver
+         that supports UFS devices.
+
+         Select this if you need a bsg device node for your UFS controller.
+         If unsure, say N.
+
+config SCSI_UFS_CRYPTO
+       bool "UFS Crypto Engine Support"
+       depends on BLK_INLINE_ENCRYPTION
+       help
+         Enable Crypto Engine Support in UFS.
+         Enabling this makes it possible for the kernel to use the crypto
+         capabilities of the UFS device (if present) to perform crypto
+         operations on data being transferred to/from the device.
+
+config SCSI_UFS_HPB
+       bool "Support UFS Host Performance Booster"
+       help
+         The UFS HPB feature improves random read performance. It caches
+         L2P (logical to physical) map of UFS to host DRAM. The driver uses HPB
+         read command by piggybacking physical page number for bypassing FTL (flash
+         translation layer)'s L2P address translation.
+
+config SCSI_UFS_FAULT_INJECTION
+       bool "UFS Fault Injection Support"
+       depends on FAULT_INJECTION
+       help
+         Enable fault injection support in the UFS driver. This makes it easier
+         to test the UFS error handler and abort handler.
+
+config SCSI_UFS_HWMON
+       bool "UFS Temperature Notification"
+       depends on SCSI_UFSHCD=m && HWMON || HWMON=y
+       help
+         This provides support for UFS hardware monitoring. If enabled,
+         a hardware monitoring device will be created for the UFS device.
+
+         If unsure, say N.
diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile
new file mode 100644 (file)
index 0000000..62f38c5
--- /dev/null
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SCSI_UFSHCD)              += ufshcd-core.o
+ufshcd-core-y                          += ufshcd.o ufs-sysfs.o
+ufshcd-core-$(CONFIG_DEBUG_FS)         += ufs-debugfs.o
+ufshcd-core-$(CONFIG_SCSI_UFS_BSG)     += ufs_bsg.o
+ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO)  += ufshcd-crypto.o
+ufshcd-core-$(CONFIG_SCSI_UFS_HPB)     += ufshpb.o
+ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o
+ufshcd-core-$(CONFIG_SCSI_UFS_HWMON)   += ufs-hwmon.o
similarity index 99%
rename from drivers/scsi/ufs/ufs-debugfs.c
rename to drivers/ufs/core/ufs-debugfs.c
index c10a8f0..e3baed6 100644 (file)
@@ -4,7 +4,7 @@
 #include <linux/debugfs.h>
 
 #include "ufs-debugfs.h"
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 #include "ufshcd-priv.h"
 
 static struct dentry *ufs_debugfs_root;
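The renames above and below all follow the same pattern: the public UFS headers leave drivers/scsi/ufs and are now picked up through the <ufs/...> namespace (resolved from include/ufs/), while driver-private headers keep their relative includes. A minimal, illustrative sketch of a host-side file's include block after the move, using only header names that appear in the hunks of this series:

    #include <ufs/ufshcd.h>      /* public host controller API, now under include/ufs/ */
    #include <ufs/unipro.h>      /* UniPro attribute definitions */
    #include "ufshcd-pltfrm.h"   /* driver-private header, still relative to drivers/ufs/host/ */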
similarity index 99%
rename from drivers/scsi/ufs/ufs-hwmon.c
rename to drivers/ufs/core/ufs-hwmon.c
index c38d9d9..4c6a872 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/hwmon.h>
 #include <linux/units.h>
 
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 #include "ufshcd-priv.h"
 
 struct ufs_hwmon_data {
similarity index 99%
rename from drivers/scsi/ufs/ufs-sysfs.c
rename to drivers/ufs/core/ufs-sysfs.c
index 8a3c644..0a088b4 100644 (file)
@@ -6,7 +6,7 @@
 #include <linux/bitfield.h>
 #include <asm/unaligned.h>
 
-#include "ufs.h"
+#include <ufs/ufs.h>
 #include "ufs-sysfs.h"
 #include "ufshcd-priv.h"
 
similarity index 99%
rename from drivers/scsi/ufs/ufs_bsg.c
rename to drivers/ufs/core/ufs_bsg.c
index 9e9b938..b99e3f3 100644 (file)
@@ -9,7 +9,7 @@
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 #include "ufs_bsg.h"
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 #include "ufshcd-priv.h"
 
 static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
similarity index 99%
rename from drivers/scsi/ufs/ufshcd-crypto.c
rename to drivers/ufs/core/ufshcd-crypto.c
index 67402ba..198360f 100644 (file)
@@ -3,7 +3,7 @@
  * Copyright 2019 Google LLC
  */
 
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 #include "ufshcd-crypto.h"
 
 /* Blk-crypto modes supported by UFS crypto */
similarity index 97%
rename from drivers/scsi/ufs/ufshcd-crypto.h
rename to drivers/ufs/core/ufshcd-crypto.h
index 9f98f18..504cc84 100644 (file)
@@ -7,9 +7,9 @@
 #define _UFSHCD_CRYPTO_H
 
 #include <scsi/scsi_cmnd.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 #include "ufshcd-priv.h"
-#include "ufshci.h"
+#include <ufs/ufshci.h>
 
 #ifdef CONFIG_SCSI_UFS_CRYPTO
 
similarity index 99%
rename from drivers/scsi/ufs/ufshcd-priv.h
rename to drivers/ufs/core/ufshcd-priv.h
index 38bc77d..ffb01fc 100644 (file)
@@ -4,7 +4,7 @@
 #define _UFSHCD_PRIV_H_
 
 #include <linux/pm_runtime.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 
 static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
 {
similarity index 99%
rename from drivers/scsi/ufs/ufshcd.c
rename to drivers/ufs/core/ufshcd.c
index 1fb3a8b..01fb4ba 100644 (file)
@@ -26,8 +26,8 @@
 #include <scsi/scsi_driver.h>
 #include <scsi/scsi_eh.h>
 #include "ufshcd-priv.h"
-#include "ufs_quirks.h"
-#include "unipro.h"
+#include <ufs/ufs_quirks.h>
+#include <ufs/unipro.h>
 #include "ufs-sysfs.h"
 #include "ufs-debugfs.h"
 #include "ufs-fault-injection.h"
@@ -8445,10 +8445,7 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
 {
        struct ufs_vreg_info *info = &hba->vreg_info;
 
-       if (info)
-               return ufshcd_get_vreg(hba->dev, info->vdd_hba);
-
-       return 0;
+       return ufshcd_get_vreg(hba->dev, info->vdd_hba);
 }
 
 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
similarity index 99%
rename from drivers/scsi/ufs/ufshpb.c
rename to drivers/ufs/core/ufshpb.c
index 8882b47..de2bb84 100644 (file)
@@ -17,7 +17,7 @@
 
 #include "ufshcd-priv.h"
 #include "ufshpb.h"
-#include "../sd.h"
+#include "../../scsi/sd.h"
 
 #define ACTIVATION_THRESHOLD 8 /* 8 IOs */
 #define READ_TO_MS 1000
@@ -671,11 +671,12 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
 
        req->timeout = 0;
        req->end_io_data = umap_req;
+       req->end_io = ufshpb_umap_req_compl_fn;
 
        ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
        scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
 
-       blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
+       blk_execute_rq_nowait(req, true);
 
        hpb->stats.umap_req_cnt++;
 }
@@ -707,6 +708,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
        blk_rq_append_bio(req, map_req->bio);
 
        req->end_io_data = map_req;
+       req->end_io = ufshpb_map_req_compl_fn;
 
        if (unlikely(last))
                mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
@@ -716,7 +718,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
                                map_req->rb.srgn_idx, mem_size);
        scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
 
-       blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
+       blk_execute_rq_nowait(req, true);
 
        hpb->stats.map_req_cnt++;
        return 0;
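The two ufshpb hunks above also adapt to the block layer change that dropped the completion-callback argument from blk_execute_rq_nowait(): the callback is now stored on the request before dispatch. A hedged before/after sketch of the pattern (my_end_io and my_data are illustrative names, not from the patch):

    /* before: completion callback passed as the third argument */
    req->end_io_data = my_data;
    blk_execute_rq_nowait(req, true, my_end_io);

    /* after: callback attached to the request, then dispatched at the head of the queue */
    req->end_io_data = my_data;
    req->end_io = my_end_io;
    blk_execute_rq_nowait(req, true);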
similarity index 56%
rename from drivers/scsi/ufs/Kconfig
rename to drivers/ufs/host/Kconfig
index 393b9a0..8259022 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0+
 #
-# Kernel configuration file for the UFS Host Controller
+# Kernel configuration file for the UFS host controller drivers.
 #
 # Copyright (C) 2011-2013 Samsung India Software Operations
 #
@@ -8,26 +8,6 @@
 #      Santosh Yaraganavi <santosh.sy@samsung.com>
 #      Vinayak Holikatti <h.vinayak@samsung.com>
 
-config SCSI_UFSHCD
-       tristate "Universal Flash Storage Controller Driver Core"
-       depends on SCSI && SCSI_DMA
-       select PM_DEVFREQ
-       select DEVFREQ_GOV_SIMPLE_ONDEMAND
-       select NLS
-       help
-         This selects the support for UFS devices in Linux, say Y and make
-         sure that you know the name of your UFS host adapter (the card
-         inside your computer that "speaks" the UFS protocol, also
-         called UFS Host Controller), because you will be asked for it.
-         The module will be called ufshcd.
-
-         To compile this driver as a module, choose M here and read
-         <file:Documentation/scsi/ufs.rst>.
-         However, do not compile this as a module if your root file system
-         (the one containing the directory /) is located on a UFS device.
-
-if SCSI_UFSHCD
-
 config SCSI_UFSHCD_PCI
        tristate "PCI bus based UFS Controller support"
        depends on PCI
@@ -122,24 +102,6 @@ config SCSI_UFS_TI_J721E
          Selects this if you have TI platform with UFS controller.
          If unsure, say N.
 
-config SCSI_UFS_BSG
-       bool "Universal Flash Storage BSG device node"
-       select BLK_DEV_BSGLIB
-       help
-         Universal Flash Storage (UFS) is a SCSI transport specification for
-         accessing flash storage on digital cameras, mobile phones and
-         consumer electronic devices.
-         A UFS controller communicates with a UFS device by exchanging
-         UFS Protocol Information Units (UPIUs).
-         UPIUs can not only be used as a transport layer for the SCSI protocol
-         but are also used by the UFS native command set.
-         This transport driver supports exchanging UFS protocol information units
-         with a UFS device. See also the ufshcd driver, which is a SCSI driver
-         that supports UFS devices.
-
-         Select this if you need a bsg device node for your UFS controller.
-         If unsure, say N.
-
 config SCSI_UFS_EXYNOS
        tristate "Exynos specific hooks to UFS controller platform driver"
        depends on SCSI_UFSHCD_PLATFORM && (ARCH_EXYNOS || COMPILE_TEST)
@@ -150,38 +112,3 @@ config SCSI_UFS_EXYNOS
 
          Select this if you have UFS host controller on Samsung Exynos SoC.
          If unsure, say N.
-
-config SCSI_UFS_CRYPTO
-       bool "UFS Crypto Engine Support"
-       depends on BLK_INLINE_ENCRYPTION
-       help
-         Enable Crypto Engine Support in UFS.
-         Enabling this makes it possible for the kernel to use the crypto
-         capabilities of the UFS device (if present) to perform crypto
-         operations on data being transferred to/from the device.
-
-config SCSI_UFS_HPB
-       bool "Support UFS Host Performance Booster"
-       help
-         The UFS HPB feature improves random read performance. It caches
-         L2P (logical to physical) map of UFS to host DRAM. The driver uses HPB
-         read command by piggybacking physical page number for bypassing FTL (flash
-         translation layer)'s L2P address translation.
-
-config SCSI_UFS_FAULT_INJECTION
-       bool "UFS Fault Injection Support"
-       depends on FAULT_INJECTION
-       help
-         Enable fault injection support in the UFS driver. This makes it easier
-         to test the UFS error handler and abort handler.
-
-config SCSI_UFS_HWMON
-       bool "UFS Temperature Notification"
-       depends on SCSI_UFSHCD=m && HWMON || HWMON=y
-       help
-         This provides support for UFS hardware monitoring. If enabled,
-         a hardware monitoring device will be created for the UFS device.
-
-         If unsure, say N.
-
-endif
similarity index 56%
rename from drivers/scsi/ufs/Makefile
rename to drivers/ufs/host/Makefile
index 9660488..e4be542 100644 (file)
@@ -1,16 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-# UFSHCD makefile
-
-# The link order is important here. ufshcd-core must initialize
-# before vendor drivers.
-obj-$(CONFIG_SCSI_UFSHCD)              += ufshcd-core.o
-ufshcd-core-y                          += ufshcd.o ufs-sysfs.o
-ufshcd-core-$(CONFIG_DEBUG_FS)         += ufs-debugfs.o
-ufshcd-core-$(CONFIG_SCSI_UFS_BSG)     += ufs_bsg.o
-ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO)  += ufshcd-crypto.o
-ufshcd-core-$(CONFIG_SCSI_UFS_HPB)     += ufshpb.o
-ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o
-ufshcd-core-$(CONFIG_SCSI_UFS_HWMON) += ufs-hwmon.o
 
 obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
 obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
similarity index 99%
rename from drivers/scsi/ufs/tc-dwc-g210-pci.c
rename to drivers/ufs/host/tc-dwc-g210-pci.c
index e635c21..92b8ad4 100644 (file)
@@ -7,7 +7,7 @@
  * Authors: Joao Pinto <jpinto@synopsys.com>
  */
 
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 #include "ufshcd-dwc.h"
 #include "tc-dwc-g210.h"
 
similarity index 99%
rename from drivers/scsi/ufs/tc-dwc-g210.c
rename to drivers/ufs/host/tc-dwc-g210.c
index 7ef67c9..deb93db 100644 (file)
@@ -9,8 +9,8 @@
 
 #include <linux/module.h>
 
-#include "ufshcd.h"
-#include "unipro.h"
+#include <ufs/ufshcd.h>
+#include <ufs/unipro.h>
 
 #include "ufshcd-dwc.h"
 #include "ufshci-dwc.h"
similarity index 99%
rename from drivers/scsi/ufs/ufs-exynos.c
rename to drivers/ufs/host/ufs-exynos.c
index ddb2d42..a81d8cb 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 #include "ufshcd-pltfrm.h"
-#include "ufshci.h"
-#include "unipro.h"
+#include <ufs/ufshci.h>
+#include <ufs/unipro.h>
 
 #include "ufs-exynos.h"
 
similarity index 99%
rename from drivers/scsi/ufs/ufs-hisi.c
rename to drivers/ufs/host/ufs-hisi.c
index 7046143..2eed13b 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 #include "ufshcd-pltfrm.h"
-#include "unipro.h"
+#include <ufs/unipro.h>
 #include "ufs-hisi.h"
-#include "ufshci.h"
-#include "ufs_quirks.h"
+#include <ufs/ufshci.h>
+#include <ufs/ufs_quirks.h>
 
 static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
 {
similarity index 93%
rename from drivers/scsi/ufs/ufs-mediatek-trace.h
rename to drivers/ufs/host/ufs-mediatek-trace.h
index 895e82e..7e01084 100644 (file)
@@ -31,6 +31,6 @@ TRACE_EVENT(ufs_mtk_event,
 
 #undef TRACE_INCLUDE_PATH
 #undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH ../../drivers/scsi/ufs/
+#define TRACE_INCLUDE_PATH ../../drivers/ufs/host
 #define TRACE_INCLUDE_FILE ufs-mediatek-trace
 #include <trace/define_trace.h>
similarity index 99%
rename from drivers/scsi/ufs/ufs-mediatek.c
rename to drivers/ufs/host/ufs-mediatek.c
index 083d6bd..beabc3c 100644 (file)
 #include <linux/sched/clock.h>
 #include <linux/soc/mediatek/mtk_sip_svc.h>
 
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 #include "ufshcd-pltfrm.h"
-#include "ufs_quirks.h"
-#include "unipro.h"
+#include <ufs/ufs_quirks.h>
+#include <ufs/unipro.h>
 #include "ufs-mediatek.h"
 
 #define CREATE_TRACE_POINTS
similarity index 99%
rename from drivers/scsi/ufs/ufs-qcom.c
rename to drivers/ufs/host/ufs-qcom.c
index 4dcb232..f10d466 100644 (file)
 #include <linux/reset-controller.h>
 #include <linux/devfreq.h>
 
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 #include "ufshcd-pltfrm.h"
-#include "unipro.h"
+#include <ufs/unipro.h>
 #include "ufs-qcom.h"
-#include "ufshci.h"
-#include "ufs_quirks.h"
+#include <ufs/ufshci.h>
+#include <ufs/ufs_quirks.h>
 
 #define UFS_QCOM_DEFAULT_DBG_PRINT_EN  \
        (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
similarity index 99%
rename from drivers/scsi/ufs/ufs-qcom.h
rename to drivers/ufs/host/ufs-qcom.h
index 771bc95..44466a3 100644 (file)
@@ -7,7 +7,7 @@
 
 #include <linux/reset-controller.h>
 #include <linux/reset.h>
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 
 #define MAX_UFS_QCOM_HOSTS     1
 #define MAX_U32                 (~(u32)0)
similarity index 98%
rename from drivers/scsi/ufs/ufshcd-dwc.c
rename to drivers/ufs/host/ufshcd-dwc.c
index a57973c..e28a67e 100644 (file)
@@ -9,8 +9,8 @@
 
 #include <linux/module.h>
 
-#include "ufshcd.h"
-#include "unipro.h"
+#include <ufs/ufshcd.h>
+#include <ufs/unipro.h>
 
 #include "ufshcd-dwc.h"
 #include "ufshci-dwc.h"
similarity index 95%
rename from drivers/scsi/ufs/ufshcd-dwc.h
rename to drivers/ufs/host/ufshcd-dwc.h
index 43b7079..ad91ea5 100644 (file)
@@ -10,7 +10,7 @@
 #ifndef _UFSHCD_DWC_H
 #define _UFSHCD_DWC_H
 
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 
 struct ufshcd_dme_attr_val {
        u32 attr_sel;
similarity index 99%
rename from drivers/scsi/ufs/ufshcd-pci.c
rename to drivers/ufs/host/ufshcd-pci.c
index 20af2fb..04166bd 100644 (file)
@@ -9,7 +9,7 @@
  *     Vinayak Holikatti <h.vinayak@samsung.com>
  */
 
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/pci.h>
similarity index 99%
rename from drivers/scsi/ufs/ufshcd-pltfrm.c
rename to drivers/ufs/host/ufshcd-pltfrm.c
index f5313f4..e7332cc 100644 (file)
@@ -13,9 +13,9 @@
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
 
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 #include "ufshcd-pltfrm.h"
-#include "unipro.h"
+#include <ufs/unipro.h>
 
 #define UFSHCD_DEFAULT_LANES_PER_DIRECTION             2
 
similarity index 98%
rename from drivers/scsi/ufs/ufshcd-pltfrm.h
rename to drivers/ufs/host/ufshcd-pltfrm.h
index c33e28a..43c2e41 100644 (file)
@@ -5,7 +5,7 @@
 #ifndef UFSHCD_PLTFRM_H_
 #define UFSHCD_PLTFRM_H_
 
-#include "ufshcd.h"
+#include <ufs/ufshcd.h>
 
 #define UFS_PWM_MODE 1
 #define UFS_HS_MODE  2
index 89c0fc7..8f39cc8 100644 (file)
@@ -45,9 +45,11 @@ static int uio_dfl_probe(struct dfl_device *ddev)
 }
 
 #define FME_FEATURE_ID_ETH_GROUP       0x10
+#define FME_FEATURE_ID_HSSI_SUBSYS     0x15
 
 static const struct dfl_device_id uio_dfl_ids[] = {
        { FME_ID, FME_FEATURE_ID_ETH_GROUP },
+       { FME_ID, FME_FEATURE_ID_HSSI_SUBSYS },
        { }
 };
 MODULE_DEVICE_TABLE(dfl, uio_dfl_ids);
index e3a49d8..3622171 100644 (file)
@@ -1091,7 +1091,7 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
                        snd_buf_bytes - (snd_buf_bytes % instance->tx_channel.stride));
 
        /* rx buffer size must be a positive multiple of the endpoint maxpacket */
-       maxpacket = usb_maxpacket(usb_dev, instance->rx_channel.endpoint, 0);
+       maxpacket = usb_maxpacket(usb_dev, instance->rx_channel.endpoint);
 
        if ((maxpacket < 1) || (maxpacket > UDSL_MAX_BUF_SIZE)) {
                dev_err(dev, "%s: invalid endpoint %02x!\n", __func__,
index 53838e7..6db5cb1 100644 (file)
@@ -189,14 +189,12 @@ static int c67x00_drv_remove(struct platform_device *pdev)
        c67x00_ll_release(c67x00);
 
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (res)
-               free_irq(res->start, c67x00);
+       free_irq(res->start, c67x00);
 
        iounmap(c67x00->hpi.base);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res)
-               release_mem_region(res->start, resource_size(res));
+       release_mem_region(res->start, resource_size(res));
 
        kfree(c67x00);
 
index c7d3e90..a09fa68 100644 (file)
@@ -655,7 +655,7 @@ static int c67x00_add_data_urb(struct c67x00_hcd *c67x00, struct urb *urb)
                               usb_pipeout(urb->pipe));
        remaining = urb->transfer_buffer_length - urb->actual_length;
 
-       maxps = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+       maxps = usb_maxpacket(urb->dev, urb->pipe);
 
        need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
            usb_pipeout(urb->pipe) && !(remaining % maxps);
@@ -866,7 +866,7 @@ static inline int c67x00_end_of_data(struct c67x00_td *td)
        if (unlikely(!act_bytes))
                return 1;       /* This was an empty packet */
 
-       maxps = usb_maxpacket(td_udev(td), td->pipe, usb_pipeout(td->pipe));
+       maxps = usb_maxpacket(td_udev(td), td->pipe);
 
        if (unlikely(act_bytes < maxps))
                return 1;       /* Smaller than full packet */
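These usb_maxpacket() updates (and the matching ones elsewhere in this merge) reflect the removal of the redundant direction argument: the direction is already encoded in the pipe, so callers no longer pass usb_pipeout(). A minimal before/after sketch, using the same variable names as the hunks above:

    /* before: direction passed explicitly */
    maxps = usb_maxpacket(udev, pipe, usb_pipeout(pipe));

    /* after: direction derived from the pipe itself */
    maxps = usb_maxpacket(udev, pipe);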
index d6d515d..5c15c48 100644 (file)
@@ -2038,7 +2038,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
        u8 mult = 0;
        int ret;
 
-       buffering = CDNS3_EP_BUF_SIZE - 1;
+       buffering = priv_dev->ep_buf_size - 1;
 
        cdns3_configure_dmult(priv_dev, priv_ep);
 
@@ -2057,7 +2057,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
                break;
        default:
                ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
-               mult = CDNS3_EP_ISO_HS_MULT - 1;
+               mult = priv_dev->ep_iso_burst - 1;
                buffering = mult + 1;
        }
 
@@ -2073,14 +2073,14 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
                mult = 0;
                max_packet_size = 1024;
                if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
-                       maxburst = CDNS3_EP_ISO_SS_BURST - 1;
+                       maxburst = priv_dev->ep_iso_burst - 1;
                        buffering = (mult + 1) *
                                    (maxburst + 1);
 
                        if (priv_ep->interval > 1)
                                buffering++;
                } else {
-                       maxburst = CDNS3_EP_BUF_SIZE - 1;
+                       maxburst = priv_dev->ep_buf_size - 1;
                }
                break;
        default:
@@ -2095,6 +2095,10 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
        else
                priv_ep->trb_burst_size = 16;
 
+       mult = min_t(u8, mult, EP_CFG_MULT_MAX);
+       buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
+       maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
+
        /* onchip buffer is only allocated before configuration */
        if (!priv_dev->hw_configured_flag) {
                ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
@@ -2961,6 +2965,40 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
        return 0;
 }
 
+/**
+ * cdns3_gadget_check_config - ensure cdns3 can support the USB configuration
+ * @gadget: pointer to the USB gadget
+ *
+ * Used to record the maximum number of endpoints being used in a USB composite
+ * device (across all configurations). This is to be used in the calculation
+ * of the TXFIFO sizes when resizing internal memory for individual endpoints.
+ * It will help ensure that the resizing logic reserves enough space for at
+ * least one max packet.
+ */
+static int cdns3_gadget_check_config(struct usb_gadget *gadget)
+{
+       struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+       struct usb_ep *ep;
+       int n_in = 0;
+       int total;
+
+       list_for_each_entry(ep, &gadget->ep_list, ep_list) {
+               if (ep->claimed && (ep->address & USB_DIR_IN))
+                       n_in++;
+       }
+
+       /* 2KB are reserved for EP0, 1KB for out */
+       total = 2 + n_in + 1;
+
+       if (total > priv_dev->onchip_buffers)
+               return -ENOMEM;
+
+       priv_dev->ep_buf_size = priv_dev->ep_iso_burst =
+                       (priv_dev->onchip_buffers - 2) / (n_in + 1);
+
+       return 0;
+}
+
 static const struct usb_gadget_ops cdns3_gadget_ops = {
        .get_frame = cdns3_gadget_get_frame,
        .wakeup = cdns3_gadget_wakeup,
@@ -2969,6 +3007,7 @@ static const struct usb_gadget_ops cdns3_gadget_ops = {
        .udc_start = cdns3_gadget_udc_start,
        .udc_stop = cdns3_gadget_udc_stop,
        .match_ep = cdns3_gadget_match_ep,
+       .check_config = cdns3_gadget_check_config,
 };
 
 static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
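A worked example of the accounting in cdns3_gadget_check_config() above, with illustrative numbers not taken from the patch: assume 16 KB of on-chip buffer and three claimed IN endpoints.

    /*
     * total = 2 (EP0) + 3 (IN endpoints) + 1 (OUT) = 6 KB, which fits in the
     * 16 KB of onchip_buffers, so the configuration is accepted and
     * ep_buf_size = ep_iso_burst = (16 - 2) / (3 + 1) = 3 KB per IN endpoint
     * (integer division).
     */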
index c5660f2..fbe4a8e 100644 (file)
@@ -562,15 +562,18 @@ struct cdns3_usb_regs {
 /* Max burst size (used only in SS mode). */
 #define EP_CFG_MAXBURST_MASK   GENMASK(11, 8)
 #define EP_CFG_MAXBURST(p)     (((p) << 8) & EP_CFG_MAXBURST_MASK)
+#define EP_CFG_MAXBURST_MAX    15
 /* ISO max burst. */
 #define EP_CFG_MULT_MASK       GENMASK(15, 14)
 #define EP_CFG_MULT(p)         (((p) << 14) & EP_CFG_MULT_MASK)
+#define EP_CFG_MULT_MAX                2
 /* ISO max burst. */
 #define EP_CFG_MAXPKTSIZE_MASK GENMASK(26, 16)
 #define EP_CFG_MAXPKTSIZE(p)   (((p) << 16) & EP_CFG_MAXPKTSIZE_MASK)
 /* Max number of buffered packets. */
 #define EP_CFG_BUFFERING_MASK  GENMASK(31, 27)
 #define EP_CFG_BUFFERING(p)    (((p) << 27) & EP_CFG_BUFFERING_MASK)
+#define EP_CFG_BUFFERING_MAX   15
 
 /* EP_CMD - bitmasks */
 /* Endpoint reset. */
@@ -1094,9 +1097,6 @@ struct cdns3_trb {
 #define CDNS3_ENDPOINTS_MAX_COUNT      32
 #define CDNS3_EP_ZLP_BUF_SIZE          1024
 
-#define CDNS3_EP_BUF_SIZE              4       /* KB */
-#define CDNS3_EP_ISO_HS_MULT           3
-#define CDNS3_EP_ISO_SS_BURST          3
 #define CDNS3_MAX_NUM_DESCMISS_BUF     32
 #define CDNS3_DESCMIS_BUF_SIZE         2048    /* Bytes */
 #define CDNS3_WA2_NUM_BUFFERS          128
@@ -1333,6 +1333,9 @@ struct cdns3_device {
        /*in KB */
        u16                             onchip_buffers;
        u16                             onchip_used_size;
+
+       u16                             ep_buf_size;
+       u16                             ep_iso_burst;
 };
 
 void cdns3_set_register_bit(void __iomem *ptr, u32 mask);
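The new *_MAX limits above pair with the clamping added to cdns3_ep_config() in the preceding cdns3-gadget.c hunks: computed values are capped before being packed into EP_CFG. A small illustrative sketch (computed_* and ep_cfg are placeholder names, not from the patch):

    u8 mult      = min_t(u8, computed_mult, EP_CFG_MULT_MAX);           /* capped at 2 */
    u8 buffering = min_t(u8, computed_buffering, EP_CFG_BUFFERING_MAX); /* capped at 15 */
    u8 maxburst  = min_t(u8, computed_maxburst, EP_CFG_MAXBURST_MAX);   /* capped at 15 */

    ep_cfg |= EP_CFG_MULT(mult) | EP_CFG_BUFFERING(buffering) | EP_CFG_MAXBURST(maxburst);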
index 3aa7f0a..d26ecd1 100644 (file)
@@ -8,14 +8,6 @@
  */
 
 /*
- * CMSPAR, some architectures can't have space and mark parity.
- */
-
-#ifndef CMSPAR
-#define CMSPAR                 0
-#endif
-
-/*
  * Major and minor numbers.
  */
 
index d8b0041..2c14a96 100644 (file)
@@ -228,8 +228,6 @@ static char *usb_dump_interface(int speed, char *start, char *end,
 
        start = usb_dump_interface_descriptor(start, end, intfc, iface, setno);
        for (i = 0; i < desc->desc.bNumEndpoints; i++) {
-               if (start > end)
-                       return start;
                start = usb_dump_endpoint_descriptor(speed,
                                start, end, &desc->endpoint[i].desc);
        }
@@ -302,8 +300,6 @@ static char *usb_dump_config(int speed, char *start, char *end,
                intfc = config->intf_cache[i];
                interface = config->interface[i];
                for (j = 0; j < intfc->num_altsetting; j++) {
-                       if (start > end)
-                               return start;
                        start = usb_dump_interface(speed,
                                start, end, intfc, interface, j);
                }
@@ -369,19 +365,11 @@ static char *usb_dump_desc(char *start, char *end, struct usb_device *dev)
 {
        int i;
 
-       if (start > end)
-               return start;
-
        start = usb_dump_device_descriptor(start, end, &dev->descriptor);
 
-       if (start > end)
-               return start;
-
        start = usb_dump_device_strings(start, end, dev);
 
        for (i = 0; i < dev->descriptor.bNumConfigurations; i++) {
-               if (start > end)
-                       return start;
                start = usb_dump_config(dev->speed,
                                start, end, dev->config + i,
                                /* active ? */
@@ -390,41 +378,6 @@ static char *usb_dump_desc(char *start, char *end, struct usb_device *dev)
        return start;
 }
 
-
-#ifdef PROC_EXTRA /* TBD: may want to add this code later */
-
-static char *usb_dump_hub_descriptor(char *start, char *end,
-                                    const struct usb_hub_descriptor *desc)
-{
-       int leng = USB_DT_HUB_NONVAR_SIZE;
-       unsigned char *ptr = (unsigned char *)desc;
-
-       if (start > end)
-               return start;
-       start += sprintf(start, "Interface:");
-       while (leng && start <= end) {
-               start += sprintf(start, " %02x", *ptr);
-               ptr++; leng--;
-       }
-       *start++ = '\n';
-       return start;
-}
-
-static char *usb_dump_string(char *start, char *end,
-                            const struct usb_device *dev, char *id, int index)
-{
-       if (start > end)
-               return start;
-       start += sprintf(start, "Interface:");
-       if (index <= dev->maxstring && dev->stringindex &&
-           dev->stringindex[index])
-               start += sprintf(start, "%s: %.100s ", id,
-                                dev->stringindex[index]);
-       return start;
-}
-
-#endif /* PROC_EXTRA */
-
 /*****************************************************************/
 
 /* This is a recursive function. Parameters:
index 355ed33..b87452e 100644 (file)
@@ -1533,22 +1533,23 @@ static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
 {
        int     w;
 
-       /* Remote wakeup is needed only when we actually go to sleep.
-        * For things like FREEZE and QUIESCE, if the device is already
-        * autosuspended then its current wakeup setting is okay.
+       /*
+        * For FREEZE/QUIESCE, disable remote wakeups so no interrupts get
+        * generated.
         */
        if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) {
-               if (udev->state != USB_STATE_SUSPENDED)
-                       udev->do_remote_wakeup = 0;
-               return;
-       }
+               w = 0;
 
-       /* Enable remote wakeup if it is allowed, even if no interface drivers
-        * actually want it.
-        */
-       w = device_may_wakeup(&udev->dev);
+       } else {
+               /*
+                * Enable remote wakeup if it is allowed, even if no interface
+                * drivers actually want it.
+                */
+               w = device_may_wakeup(&udev->dev);
+       }
 
-       /* If the device is autosuspended with the wrong wakeup setting,
+       /*
+        * If the device is autosuspended with the wrong wakeup setting,
         * autoresume now so the setting can be changed.
         */
        if (udev->state == USB_STATE_SUSPENDED && w != udev->do_remote_wakeup)
index 8176bc8..482dae7 100644 (file)
@@ -15,7 +15,6 @@
 #ifdef CONFIG_PPC_PMAC
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
-#include <asm/prom.h>
 #endif
 
 #include "usb.h"
@@ -616,10 +615,10 @@ const struct dev_pm_ops usb_hcd_pci_pm_ops = {
        .suspend_noirq  = hcd_pci_suspend_noirq,
        .resume_noirq   = hcd_pci_resume_noirq,
        .resume         = hcd_pci_resume,
-       .freeze         = check_root_hub_suspended,
+       .freeze         = hcd_pci_suspend,
        .freeze_noirq   = check_root_hub_suspended,
        .thaw_noirq     = NULL,
-       .thaw           = NULL,
+       .thaw           = hcd_pci_resume,
        .poweroff       = hcd_pci_suspend,
        .poweroff_noirq = hcd_pci_suspend_noirq,
        .restore_noirq  = hcd_pci_resume_noirq,
index d9712c2..06eea88 100644 (file)
@@ -2816,6 +2816,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
 {
        int retval;
        struct usb_device *rhdev;
+       struct usb_hcd *shared_hcd;
 
        if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
                hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
@@ -2976,13 +2977,26 @@ int usb_add_hcd(struct usb_hcd *hcd,
                goto err_hcd_driver_start;
        }
 
+       /* starting here, usbcore will pay attention to the shared HCD roothub */
+       shared_hcd = hcd->shared_hcd;
+       if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
+               retval = register_root_hub(shared_hcd);
+               if (retval != 0)
+                       goto err_register_root_hub;
+
+               if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
+                       usb_hcd_poll_rh_status(shared_hcd);
+       }
+
        /* starting here, usbcore will pay attention to this root hub */
-       retval = register_root_hub(hcd);
-       if (retval != 0)
-               goto err_register_root_hub;
+       if (!HCD_DEFER_RH_REGISTER(hcd)) {
+               retval = register_root_hub(hcd);
+               if (retval != 0)
+                       goto err_register_root_hub;
 
-       if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
-               usb_hcd_poll_rh_status(hcd);
+               if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
+                       usb_hcd_poll_rh_status(hcd);
+       }
 
        return retval;
 
@@ -3020,6 +3034,7 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
 void usb_remove_hcd(struct usb_hcd *hcd)
 {
        struct usb_device *rhdev = hcd->self.root_hub;
+       bool rh_registered;
 
        dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
 
@@ -3030,6 +3045,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
 
        dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
        spin_lock_irq (&hcd_root_hub_lock);
+       rh_registered = hcd->rh_registered;
        hcd->rh_registered = 0;
        spin_unlock_irq (&hcd_root_hub_lock);
 
@@ -3039,7 +3055,8 @@ void usb_remove_hcd(struct usb_hcd *hcd)
        cancel_work_sync(&hcd->died_work);
 
        mutex_lock(&usb_bus_idr_lock);
-       usb_disconnect(&rhdev);         /* Sets rhdev to NULL */
+       if (rh_registered)
+               usb_disconnect(&rhdev);         /* Sets rhdev to NULL */
        mutex_unlock(&usb_bus_idr_lock);
 
        /*
index 1460857..68e9121 100644 (file)
@@ -1635,7 +1635,7 @@ static int hub_configure(struct usb_hub *hub,
         * maxpktsize is defined in hcd.c's fake endpoint descriptors
         * to be big enough for at least USB_MAXCHILDREN ports. */
        pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
-       maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe));
+       maxp = usb_maxpacket(hdev, pipe);
 
        if (maxp > sizeof(*hub->buffer))
                maxp = sizeof(*hub->buffer);
@@ -5511,7 +5511,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
 /* Handle notifying userspace about hub over-current events */
 static void port_over_current_notify(struct usb_port *port_dev)
 {
-       char *envp[3];
+       char *envp[3] = { NULL, NULL, NULL };
        struct device *hub_dev;
        char *port_dev_path;
 
@@ -5528,20 +5528,18 @@ static void port_over_current_notify(struct usb_port *port_dev)
 
        envp[0] = kasprintf(GFP_KERNEL, "OVER_CURRENT_PORT=%s", port_dev_path);
        if (!envp[0])
-               goto exit_path;
+               goto exit;
 
        envp[1] = kasprintf(GFP_KERNEL, "OVER_CURRENT_COUNT=%u",
                        port_dev->over_current_count);
        if (!envp[1])
                goto exit;
 
-       envp[2] = NULL;
        kobject_uevent_env(&hub_dev->kobj, KOBJ_CHANGE, envp);
 
-       kfree(envp[1]);
 exit:
+       kfree(envp[1]);
        kfree(envp[0]);
-exit_path:
        kfree(port_dev_path);
 }
 
index 97b44a6..f99a65a 100644 (file)
@@ -510,6 +510,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* DJI CineSSD */
        { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
 
+       /* DELL USB GEN2 */
+       { USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
+
        /* VCOM device */
        { USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
 
index bb1da35..d4dcaef 100644 (file)
@@ -205,8 +205,11 @@ usb_acpi_find_companion_for_device(struct usb_device *udev)
        struct usb_hub *hub;
 
        if (!udev->parent) {
-               /* root hub is only child (_ADR=0) under its parent, the HC */
-               adev = ACPI_COMPANION(udev->dev.parent);
+               /*
+                * root hub is only child (_ADR=0) under its parent, the HC.
+                * sysdev pointer is the HC as seen from firmware.
+                */
+               adev = ACPI_COMPANION(udev->bus->sysdev);
                return acpi_find_child_device(adev, 0, false);
        }
 
index cf0bcd0..dc4fc72 100644 (file)
@@ -1153,6 +1153,7 @@ static void dwc2_set_turnaround_time(struct dwc2_hsotg *hsotg)
 int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
 {
        u32 usbcfg;
+       u32 otgctl;
        int retval = 0;
 
        if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
@@ -1187,6 +1188,14 @@ int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
                dwc2_writel(hsotg, usbcfg, GUSBCFG);
        }
 
+       if (!hsotg->params.activate_ingenic_overcurrent_detection) {
+               if (dwc2_is_host_mode(hsotg)) {
+                       otgctl = readl(hsotg->regs + GOTGCTL);
+                       otgctl |= GOTGCTL_VBVALOEN | GOTGCTL_VBVALOVAL;
+                       writel(otgctl, hsotg->regs + GOTGCTL);
+               }
+       }
+
        return retval;
 }
 
index 88c337b..0683852 100644 (file)
@@ -426,6 +426,10 @@ enum dwc2_ep0_state {
  *                     detection using GGPIO register.
  *                     0 - Deactivate the external level detection (default)
  *                     1 - Activate the external level detection
+ * @activate_ingenic_overcurrent_detection: Activate Ingenic overcurrent
+ *                     detection.
+ *                     0 - Deactivate the overcurrent detection
+ *                     1 - Activate the overcurrent detection (default)
  * @g_dma:              Enables gadget dma usage (default: autodetect).
  * @g_dma_desc:         Enables gadget descriptor DMA (default: autodetect).
  * @g_rx_fifo_size:    The periodic rx fifo size for the device, in
@@ -494,6 +498,7 @@ struct dwc2_core_params {
        u8 hird_threshold;
        bool activate_stm_fs_transceiver;
        bool activate_stm_id_vb_detection;
+       bool activate_ingenic_overcurrent_detection;
        bool ipg_isoc_en;
        u16 max_packet_count;
        u32 max_transfer_size;
index eee3504..fe2a58c 100644 (file)
@@ -4544,7 +4544,6 @@ static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
 
        WARN_ON(hsotg->driver);
 
-       driver->driver.bus = NULL;
        hsotg->driver = driver;
        hsotg->gadget.dev.of_node = hsotg->dev->of_node;
        hsotg->gadget.speed = USB_SPEED_UNKNOWN;
index 1306f4e..fdb8a42 100644 (file)
@@ -73,6 +73,47 @@ static void dwc2_set_his_params(struct dwc2_hsotg *hsotg)
        p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
 }
 
+static void dwc2_set_jz4775_params(struct dwc2_hsotg *hsotg)
+{
+       struct dwc2_core_params *p = &hsotg->params;
+
+       p->otg_caps.hnp_support = false;
+       p->speed = DWC2_SPEED_PARAM_HIGH;
+       p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+       p->phy_utmi_width = 16;
+       p->activate_ingenic_overcurrent_detection =
+               !device_property_read_bool(hsotg->dev, "disable-over-current");
+}
+
+static void dwc2_set_x1600_params(struct dwc2_hsotg *hsotg)
+{
+       struct dwc2_core_params *p = &hsotg->params;
+
+       p->otg_caps.hnp_support = false;
+       p->speed = DWC2_SPEED_PARAM_HIGH;
+       p->host_channels = 16;
+       p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+       p->phy_utmi_width = 16;
+       p->activate_ingenic_overcurrent_detection =
+               !device_property_read_bool(hsotg->dev, "disable-over-current");
+}
+
+static void dwc2_set_x2000_params(struct dwc2_hsotg *hsotg)
+{
+       struct dwc2_core_params *p = &hsotg->params;
+
+       p->otg_caps.hnp_support = false;
+       p->speed = DWC2_SPEED_PARAM_HIGH;
+       p->host_rx_fifo_size = 1024;
+       p->host_nperio_tx_fifo_size = 1024;
+       p->host_perio_tx_fifo_size = 1024;
+       p->host_channels = 16;
+       p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
+       p->phy_utmi_width = 16;
+       p->activate_ingenic_overcurrent_detection =
+               !device_property_read_bool(hsotg->dev, "disable-over-current");
+}
+
 static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg)
 {
        struct dwc2_core_params *p = &hsotg->params;
@@ -221,7 +262,14 @@ static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg)
 
 const struct of_device_id dwc2_of_match_table[] = {
        { .compatible = "brcm,bcm2835-usb", .data = dwc2_set_bcm_params },
-       { .compatible = "hisilicon,hi6220-usb", .data = dwc2_set_his_params  },
+       { .compatible = "hisilicon,hi6220-usb", .data = dwc2_set_his_params },
+       { .compatible = "ingenic,jz4775-otg", .data = dwc2_set_jz4775_params },
+       { .compatible = "ingenic,jz4780-otg", .data = dwc2_set_jz4775_params },
+       { .compatible = "ingenic,x1000-otg", .data = dwc2_set_jz4775_params },
+       { .compatible = "ingenic,x1600-otg", .data = dwc2_set_x1600_params },
+       { .compatible = "ingenic,x1700-otg", .data = dwc2_set_x1600_params },
+       { .compatible = "ingenic,x1830-otg", .data = dwc2_set_x1600_params },
+       { .compatible = "ingenic,x2000-otg", .data = dwc2_set_x2000_params },
        { .compatible = "rockchip,rk3066-usb", .data = dwc2_set_rk_params },
        { .compatible = "lantiq,arx100-usb", .data = dwc2_set_ltq_params },
        { .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params },
index c483f28..cd9a734 100644 (file)
@@ -159,4 +159,13 @@ config USB_DWC3_XILINX
          This driver handles both ZynqMP and Versal SoC operations.
          Say 'Y' or 'M' if you have one such device.
 
+config USB_DWC3_AM62
+       tristate "Texas Instruments AM62 Platforms"
+       depends on ARCH_K3 || COMPILE_TEST
+       default USB_DWC3
+       help
+         Support TI's AM62 platforms with DesignWare Core USB3 IP.
+         The DesignWare Core USB3 IP is programmed to operate in
+         USB 2.0 mode only.
+         Say 'Y' or 'M' here if you have one such device.
 endif
index 2d499de..9f66bd8 100644 (file)
@@ -42,6 +42,7 @@ endif
 # and allyesconfig builds.
 ##
 
+obj-$(CONFIG_USB_DWC3_AM62)            += dwc3-am62.o
 obj-$(CONFIG_USB_DWC3_OMAP)            += dwc3-omap.o
 obj-$(CONFIG_USB_DWC3_EXYNOS)          += dwc3-exynos.o
 obj-$(CONFIG_USB_DWC3_PCI)             += dwc3-pci.o
index d28cd1a..e027c04 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/of.h>
+#include <linux/of_graph.h>
 #include <linux/acpi.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/reset.h>
@@ -85,7 +86,7 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
                 * mode. If the controller supports DRD but the dr_mode is not
                 * specified or set to OTG, then set the mode to peripheral.
                 */
-               if (mode == USB_DR_MODE_OTG &&
+               if (mode == USB_DR_MODE_OTG && !dwc->edev &&
                    (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
                     !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
                    !DWC3_VER_IS_PRIOR(DWC3, 330A))
@@ -297,6 +298,7 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
                        udelay(1);
        } while (--retries);
 
+       dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n");
        return -ETIMEDOUT;
 
 done:
@@ -342,7 +344,6 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
  *             from the default, this will set clock period in DWC3_GUCTL
  *             register.
  * @dwc: Pointer to our controller context structure
- * @ref_clk_per: reference clock period in ns
  */
 static void dwc3_ref_clk_period(struct dwc3 *dwc)
 {
@@ -964,10 +965,8 @@ static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
                return;
 
        vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
-       if (!vals) {
-               dev_err(dev, "Error to get memory\n");
+       if (!vals)
                return;
-       }
 
        /* Get INCR burst type, and parse it */
        ret = device_property_read_u32_array(dev,
@@ -1268,40 +1267,36 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
 
        if (IS_ERR(dwc->usb2_phy)) {
                ret = PTR_ERR(dwc->usb2_phy);
-               if (ret == -ENXIO || ret == -ENODEV) {
+               if (ret == -ENXIO || ret == -ENODEV)
                        dwc->usb2_phy = NULL;
-               } else {
+               else
                        return dev_err_probe(dev, ret, "no usb2 phy configured\n");
-               }
        }
 
        if (IS_ERR(dwc->usb3_phy)) {
                ret = PTR_ERR(dwc->usb3_phy);
-               if (ret == -ENXIO || ret == -ENODEV) {
+               if (ret == -ENXIO || ret == -ENODEV)
                        dwc->usb3_phy = NULL;
-               } else {
+               else
                        return dev_err_probe(dev, ret, "no usb3 phy configured\n");
-               }
        }
 
        dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
        if (IS_ERR(dwc->usb2_generic_phy)) {
                ret = PTR_ERR(dwc->usb2_generic_phy);
-               if (ret == -ENOSYS || ret == -ENODEV) {
+               if (ret == -ENOSYS || ret == -ENODEV)
                        dwc->usb2_generic_phy = NULL;
-               } else {
+               else
                        return dev_err_probe(dev, ret, "no usb2 phy configured\n");
-               }
        }
 
        dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
        if (IS_ERR(dwc->usb3_generic_phy)) {
                ret = PTR_ERR(dwc->usb3_generic_phy);
-               if (ret == -ENOSYS || ret == -ENODEV) {
+               if (ret == -ENOSYS || ret == -ENODEV)
                        dwc->usb3_generic_phy = NULL;
-               } else {
+               else
                        return dev_err_probe(dev, ret, "no usb3 phy configured\n");
-               }
        }
 
        return 0;
@@ -1633,6 +1628,51 @@ static void dwc3_check_params(struct dwc3 *dwc)
        }
 }
 
+static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
+{
+       struct device *dev = dwc->dev;
+       struct device_node *np_phy;
+       struct extcon_dev *edev = NULL;
+       const char *name;
+
+       if (device_property_read_bool(dev, "extcon"))
+               return extcon_get_edev_by_phandle(dev, 0);
+
+       /*
+        * Device tree platforms should get extcon via phandle.
+        * On ACPI platforms, we get the name from a device property.
+        * This device property is for kernel internal use only and
+        * is expected to be set by the glue code.
+        */
+       if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
+               edev = extcon_get_extcon_dev(name);
+               if (!edev)
+                       return ERR_PTR(-EPROBE_DEFER);
+
+               return edev;
+       }
+
+       /*
+        * Try to get an extcon device from the USB PHY controller's "port"
+        * node. Check if it has the "port" node first, to avoid printing the
+        * error message from underlying code, as it's a valid case: extcon
+        * device (and "port" node) may be missing in case of "usb-role-switch"
+        * or OTG mode.
+        */
+       np_phy = of_parse_phandle(dev->of_node, "phys", 0);
+       if (of_graph_is_present(np_phy)) {
+               struct device_node *np_conn;
+
+               np_conn = of_graph_get_remote_node(np_phy, -1, -1);
+               if (np_conn)
+                       edev = extcon_find_edev_by_node(np_conn);
+               of_node_put(np_conn);
+       }
+       of_node_put(np_phy);
+
+       return edev;
+}
+
 static int dwc3_probe(struct platform_device *pdev)
 {
        struct device           *dev = &pdev->dev;
@@ -1768,6 +1808,13 @@ static int dwc3_probe(struct platform_device *pdev)
                goto err2;
        }
 
+       dwc->edev = dwc3_get_extcon(dwc);
+       if (IS_ERR(dwc->edev)) {
+               ret = PTR_ERR(dwc->edev);
+               dev_err_probe(dwc->dev, ret, "failed to get extcon\n");
+               goto err3;
+       }
+
        ret = dwc3_get_dr_mode(dwc);
        if (ret)
                goto err3;
index 5c9d467..81c486b 100644 (file)
@@ -1046,6 +1046,7 @@ struct dwc3_scratchpad_array {
  * @tx_thr_num_pkt_prd: periodic ESS transmit packet count
  * @tx_max_burst_prd: max periodic ESS transmit burst size
  * @tx_fifo_resize_max_num: max number of fifos allocated during txfifo resize
+ * @clear_stall_protocol: endpoint number that requires a delayed status phase
  * @hsphy_interface: "utmi" or "ulpi"
  * @connected: true when we're connected to a host, false otherwise
  * @softconnect: true when gadget connect is called, false when disconnect runs
@@ -1266,6 +1267,7 @@ struct dwc3 {
        u8                      tx_thr_num_pkt_prd;
        u8                      tx_max_burst_prd;
        u8                      tx_fifo_resize_max_num;
+       u8                      clear_stall_protocol;
 
        const char              *hsphy_interface;
 
index 8cad9e7..039bf24 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/extcon.h>
-#include <linux/of_graph.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/property.h>
@@ -439,51 +438,6 @@ static int dwc3_drd_notifier(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
-static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
-{
-       struct device *dev = dwc->dev;
-       struct device_node *np_phy;
-       struct extcon_dev *edev = NULL;
-       const char *name;
-
-       if (device_property_read_bool(dev, "extcon"))
-               return extcon_get_edev_by_phandle(dev, 0);
-
-       /*
-        * Device tree platforms should get extcon via phandle.
-        * On ACPI platforms, we get the name from a device property.
-        * This device property is for kernel internal use only and
-        * is expected to be set by the glue code.
-        */
-       if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
-               edev = extcon_get_extcon_dev(name);
-               if (!edev)
-                       return ERR_PTR(-EPROBE_DEFER);
-
-               return edev;
-       }
-
-       /*
-        * Try to get an extcon device from the USB PHY controller's "port"
-        * node. Check if it has the "port" node first, to avoid printing the
-        * error message from underlying code, as it's a valid case: extcon
-        * device (and "port" node) may be missing in case of "usb-role-switch"
-        * or OTG mode.
-        */
-       np_phy = of_parse_phandle(dev->of_node, "phys", 0);
-       if (of_graph_is_present(np_phy)) {
-               struct device_node *np_conn;
-
-               np_conn = of_graph_get_remote_node(np_phy, -1, -1);
-               if (np_conn)
-                       edev = extcon_find_edev_by_node(np_conn);
-               of_node_put(np_conn);
-       }
-       of_node_put(np_phy);
-
-       return edev;
-}
-
 #if IS_ENABLED(CONFIG_USB_ROLE_SWITCH)
 #define ROLE_SWITCH 1
 static int dwc3_usb_role_switch_set(struct usb_role_switch *sw,
@@ -588,10 +542,6 @@ int dwc3_drd_init(struct dwc3 *dwc)
            device_property_read_bool(dwc->dev, "usb-role-switch"))
                return dwc3_setup_role_switch(dwc);
 
-       dwc->edev = dwc3_get_extcon(dwc);
-       if (IS_ERR(dwc->edev))
-               return PTR_ERR(dwc->edev);
-
        if (dwc->edev) {
                dwc->edev_nb.notifier_call = dwc3_drd_notifier;
                ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
new file mode 100644 (file)
index 0000000..fea7aca
--- /dev/null
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwc3-am62.c - TI specific Glue layer for AM62 DWC3 USB Controller
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/pinctrl/consumer.h>
+
+/* USB WRAPPER register offsets */
+#define USBSS_PID                      0x0
+#define USBSS_OVERCURRENT_CTRL         0x4
+#define USBSS_PHY_CONFIG               0x8
+#define USBSS_PHY_TEST                 0xc
+#define USBSS_CORE_STAT                        0x14
+#define USBSS_HOST_VBUS_CTRL           0x18
+#define USBSS_MODE_CONTROL             0x1c
+#define USBSS_WAKEUP_CONFIG            0x30
+#define USBSS_WAKEUP_STAT              0x34
+#define USBSS_OVERRIDE_CONFIG          0x38
+#define USBSS_IRQ_MISC_STATUS_RAW      0x430
+#define USBSS_IRQ_MISC_STATUS          0x434
+#define USBSS_IRQ_MISC_ENABLE_SET      0x438
+#define USBSS_IRQ_MISC_ENABLE_CLR      0x43c
+#define USBSS_IRQ_MISC_EOI             0x440
+#define USBSS_INTR_TEST                        0x490
+#define USBSS_VBUS_FILTER              0x614
+#define USBSS_VBUS_STAT                        0x618
+#define USBSS_DEBUG_CFG                        0x708
+#define USBSS_DEBUG_DATA               0x70c
+#define USBSS_HOST_HUB_CTRL            0x714
+
+/* PHY CONFIG register bits */
+#define USBSS_PHY_VBUS_SEL_MASK                GENMASK(2, 1)
+#define USBSS_PHY_VBUS_SEL_SHIFT       1
+#define USBSS_PHY_LANE_REVERSE         BIT(0)
+
+/* MODE CONTROL register bits */
+#define USBSS_MODE_VALID       BIT(0)
+
+/* WAKEUP CONFIG register bits */
+#define USBSS_WAKEUP_CFG_OVERCURRENT_EN        BIT(3)
+#define USBSS_WAKEUP_CFG_LINESTATE_EN  BIT(2)
+#define USBSS_WAKEUP_CFG_SESSVALID_EN  BIT(1)
+#define USBSS_WAKEUP_CFG_VBUSVALID_EN  BIT(0)
+
+/* WAKEUP STAT register bits */
+#define USBSS_WAKEUP_STAT_OVERCURRENT  BIT(4)
+#define USBSS_WAKEUP_STAT_LINESTATE    BIT(3)
+#define USBSS_WAKEUP_STAT_SESSVALID    BIT(2)
+#define USBSS_WAKEUP_STAT_VBUSVALID    BIT(1)
+#define USBSS_WAKEUP_STAT_CLR          BIT(0)
+
+/* IRQ_MISC_STATUS_RAW register bits */
+#define USBSS_IRQ_MISC_RAW_VBUSVALID   BIT(22)
+#define USBSS_IRQ_MISC_RAW_SESSVALID   BIT(20)
+
+/* IRQ_MISC_STATUS register bits */
+#define USBSS_IRQ_MISC_VBUSVALID       BIT(22)
+#define USBSS_IRQ_MISC_SESSVALID       BIT(20)
+
+/* IRQ_MISC_ENABLE_SET register bits */
+#define USBSS_IRQ_MISC_ENABLE_SET_VBUSVALID    BIT(22)
+#define USBSS_IRQ_MISC_ENABLE_SET_SESSVALID    BIT(20)
+
+/* IRQ_MISC_ENABLE_CLR register bits */
+#define USBSS_IRQ_MISC_ENABLE_CLR_VBUSVALID    BIT(22)
+#define USBSS_IRQ_MISC_ENABLE_CLR_SESSVALID    BIT(20)
+
+/* IRQ_MISC_EOI register bits */
+#define USBSS_IRQ_MISC_EOI_VECTOR      BIT(0)
+
+/* VBUS_STAT register bits */
+#define USBSS_VBUS_STAT_SESSVALID      BIT(2)
+#define USBSS_VBUS_STAT_VBUSVALID      BIT(0)
+
+/* Mask for PHY PLL REFCLK */
+#define PHY_PLL_REFCLK_MASK    GENMASK(3, 0)
+
+#define DWC3_AM62_AUTOSUSPEND_DELAY    100
+
+struct dwc3_data {
+       struct device *dev;
+       void __iomem *usbss;
+       struct clk *usb2_refclk;
+       int rate_code;
+       struct regmap *syscon;
+       unsigned int offset;
+       unsigned int vbus_divider;
+};
+
+static const int dwc3_ti_rate_table[] = {      /* in KHZ */
+       9600,
+       10000,
+       12000,
+       19200,
+       20000,
+       24000,
+       25000,
+       26000,
+       38400,
+       40000,
+       58000,
+       50000,
+       52000,
+};
+
+static inline u32 dwc3_ti_readl(struct dwc3_data *data, u32 offset)
+{
+       return readl((data->usbss) + offset);
+}
+
+static inline void dwc3_ti_writel(struct dwc3_data *data, u32 offset, u32 value)
+{
+       writel(value, (data->usbss) + offset);
+}
+
+static int phy_syscon_pll_refclk(struct dwc3_data *data)
+{
+       struct device *dev = data->dev;
+       struct device_node *node = dev->of_node;
+       struct of_phandle_args args;
+       struct regmap *syscon;
+       int ret;
+
+       syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-phy-pll-refclk");
+       if (IS_ERR(syscon)) {
+               dev_err(dev, "unable to get ti,syscon-phy-pll-refclk regmap\n");
+               return PTR_ERR(syscon);
+       }
+
+       data->syscon = syscon;
+
+       ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-phy-pll-refclk", 1,
+                                              0, &args);
+       if (ret)
+               return ret;
+
+       data->offset = args.args[0];
+
+       ret = regmap_update_bits(data->syscon, data->offset, PHY_PLL_REFCLK_MASK, data->rate_code);
+       if (ret) {
+               dev_err(dev, "failed to set phy pll reference clock rate\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int dwc3_ti_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *node = pdev->dev.of_node;
+       struct dwc3_data *data;
+       int i, ret;
+       unsigned long rate;
+       u32 reg;
+
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->dev = dev;
+       platform_set_drvdata(pdev, data);
+
+       data->usbss = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(data->usbss)) {
+               dev_err(dev, "can't map IOMEM resource\n");
+               return PTR_ERR(data->usbss);
+       }
+
+       data->usb2_refclk = devm_clk_get(dev, "ref");
+       if (IS_ERR(data->usb2_refclk)) {
+               dev_err(dev, "can't get usb2_refclk\n");
+               return PTR_ERR(data->usb2_refclk);
+       }
+
+       /* Calculate the rate code */
+       rate = clk_get_rate(data->usb2_refclk);
+       rate /= 1000;   // To KHz
+       for (i = 0; i < ARRAY_SIZE(dwc3_ti_rate_table); i++) {
+               if (dwc3_ti_rate_table[i] == rate)
+                       break;
+       }
+
+       if (i == ARRAY_SIZE(dwc3_ti_rate_table)) {
+               dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate);
+               ret = -EINVAL;
+               goto err_clk_disable;
+       }
+
+       data->rate_code = i;
+
+       /* Read the syscon property and set the rate code */
+       ret = phy_syscon_pll_refclk(data);
+       if (ret)
+               goto err_clk_disable;
+
+       /* VBUS divider select */
+       data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
+       reg = dwc3_ti_readl(data, USBSS_PHY_CONFIG);
+       if (data->vbus_divider)
+               reg |= 1 << USBSS_PHY_VBUS_SEL_SHIFT;
+
+       dwc3_ti_writel(data, USBSS_PHY_CONFIG, reg);
+
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+       /*
+        * Don't ignore its dependencies with its children
+        */
+       pm_suspend_ignore_children(dev, false);
+       clk_prepare_enable(data->usb2_refclk);
+       pm_runtime_get_noresume(dev);
+
+       ret = of_platform_populate(node, NULL, NULL, dev);
+       if (ret) {
+               dev_err(dev, "failed to create dwc3 core: %d\n", ret);
+               goto err_pm_disable;
+       }
+
+       /* Set mode valid bit to indicate role is valid */
+       reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL);
+       reg |= USBSS_MODE_VALID;
+       dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg);
+
+       /* Setting up autosuspend */
+       pm_runtime_set_autosuspend_delay(dev, DWC3_AM62_AUTOSUSPEND_DELAY);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+       return 0;
+
+err_pm_disable:
+       clk_disable_unprepare(data->usb2_refclk);
+       pm_runtime_disable(dev);
+       pm_runtime_set_suspended(dev);
+err_clk_disable:
+       clk_put(data->usb2_refclk);
+       return ret;
+}
+
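+/* Unregister the dwc3 core child device populated in probe */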
+static int dwc3_ti_remove_core(struct device *dev, void *c)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+
+       platform_device_unregister(pdev);
+       return 0;
+}
+
+static int dwc3_ti_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct dwc3_data *data = platform_get_drvdata(pdev);
+       u32 reg;
+
+       device_for_each_child(dev, NULL, dwc3_ti_remove_core);
+
+       /* Clear mode valid bit */
+       reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL);
+       reg &= ~USBSS_MODE_VALID;
+       dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg);
+
+       pm_runtime_put_sync(dev);
+       clk_disable_unprepare(data->usb2_refclk);
+       pm_runtime_disable(dev);
+       pm_runtime_set_suspended(dev);
+
+       clk_put(data->usb2_refclk);
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+#ifdef CONFIG_PM
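+/*
+ * Only the reference clock needs to be managed across suspend/resume:
+ * gate it on suspend and re-enable it on resume. UNIVERSAL_DEV_PM_OPS
+ * uses the same callbacks for both system sleep and runtime PM.
+ */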
+static int dwc3_ti_suspend_common(struct device *dev)
+{
+       struct dwc3_data *data = dev_get_drvdata(dev);
+
+       clk_disable_unprepare(data->usb2_refclk);
+
+       return 0;
+}
+
+static int dwc3_ti_resume_common(struct device *dev)
+{
+       struct dwc3_data *data = dev_get_drvdata(dev);
+
+       clk_prepare_enable(data->usb2_refclk);
+
+       return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(dwc3_ti_pm_ops, dwc3_ti_suspend_common,
+                           dwc3_ti_resume_common, NULL);
+
+#define DEV_PM_OPS     (&dwc3_ti_pm_ops)
+#else
+#define DEV_PM_OPS     NULL
+#endif /* CONFIG_PM */
+
+static const struct of_device_id dwc3_ti_of_match[] = {
+       { .compatible = "ti,am62-usb"},
+       {},
+};
+MODULE_DEVICE_TABLE(of, dwc3_ti_of_match);
+
+static struct platform_driver dwc3_ti_driver = {
+       .probe          = dwc3_ti_probe,
+       .remove         = dwc3_ti_remove,
+       .driver         = {
+               .name   = "dwc3-am62",
+               .pm     = DEV_PM_OPS,
+               .of_match_table = dwc3_ti_of_match,
+       },
+};
+
+module_platform_driver(dwc3_ti_driver);
+
+MODULE_ALIAS("platform:dwc3-am62");
+MODULE_AUTHOR("Aswath Govindraju <a-govindraju@ti.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DesignWare USB3 TI Glue Layer");
index 2e19e0e..ba51de7 100644 (file)
@@ -288,7 +288,7 @@ static void dwc3_pci_resume_work(struct work_struct *work)
        int ret;
 
        ret = pm_runtime_get_sync(&dwc3->dev);
-       if (ret) {
+       if (ret < 0) {
                pm_runtime_put_sync_autosuspend(&dwc3->dev);
                return;
        }
index a6f3a9b..67b237c 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/of_gpio.h>
 #include <linux/of_platform.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
@@ -98,6 +99,7 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
 {
        struct device           *dev = priv_data->dev;
        struct reset_control    *crst, *hibrst, *apbrst;
+       struct gpio_desc        *reset_gpio;
        struct phy              *usb3_phy;
        int                     ret = 0;
        u32                     reg;
@@ -201,6 +203,21 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
        }
 
 skip_usb3_phy:
+       /* ulpi reset via gpio-modepin or gpio-framework driver */
+       reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(reset_gpio)) {
+               return dev_err_probe(dev, PTR_ERR(reset_gpio),
+                                    "Failed to request reset GPIO\n");
+       }
+
+       if (reset_gpio) {
+               /* Toggle the reset GPIO to reset the ULPI PHY. */
+               gpiod_set_value_cansleep(reset_gpio, 1);
+               usleep_range(5000, 10000);
+               gpiod_set_value_cansleep(reset_gpio, 0);
+               usleep_range(5000, 10000);
+       }
+
        /*
         * This routes the USB DMA traffic to go through FPD path instead
         * of reaching DDR directly. This traffic routing is needed to
index 1064be5..5d64266 100644 (file)
@@ -218,7 +218,7 @@ out:
        return ret;
 }
 
-static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
+void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
 {
        struct dwc3_ep          *dep;
 
@@ -813,7 +813,7 @@ static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
        int ret = -EINVAL;
        u32 len;
 
-       if (!dwc->gadget_driver)
+       if (!dwc->gadget_driver || !dwc->connected)
                goto out;
 
        trace_dwc3_ctrl_req(ctrl);
@@ -1080,6 +1080,7 @@ void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
        unsigned int direction = !dwc->ep0_expect_in;
 
        dwc->delayed_status = false;
+       dwc->clear_stall_protocol = 0;
 
        if (dwc->ep0state != EP0_STATUS_PHASE)
                return;
@@ -1087,13 +1088,18 @@ void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
        __dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
 }
 
-static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
+void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
 {
        struct dwc3_gadget_ep_cmd_params params;
        u32                     cmd;
        int                     ret;
 
-       if (!dep->resource_index)
+       /*
+        * For the status/DATA OUT stage, the TRB is queued on the ep0 OUT
+        * endpoint, whose resource index is zero. Hence allow queuing the
+        * ENDXFER command for the ep0 OUT endpoint.
+        */
+       if (!dep->resource_index && dep->number)
                return;
 
        cmd = DWC3_DEPCMD_ENDTRANSFER;
index 0b9c249..00427d1 100644 (file)
@@ -657,7 +657,6 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
 /**
  * dwc3_gadget_calc_tx_fifo_size - calculates the txfifo size value
  * @dwc: pointer to the DWC3 context
- * @nfifos: number of fifos to calculate for
  *
  * Calculates the size value based on the equation below:
  *
@@ -690,7 +689,7 @@ static int dwc3_gadget_calc_tx_fifo_size(struct dwc3 *dwc, int mult)
 }
 
 /**
- * dwc3_gadget_clear_tx_fifo_size - Clears txfifo allocation
+ * dwc3_gadget_clear_tx_fifos - Clears txfifo allocation
  * @dwc: pointer to the DWC3 context
  *
  * Iterates through all the endpoint registers and clears the previous txfifo
@@ -783,7 +782,8 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
                num_fifos = 3;
 
        if (dep->endpoint.maxburst > 6 &&
-           usb_endpoint_xfer_bulk(dep->endpoint.desc) && DWC3_IP_IS(DWC31))
+           (usb_endpoint_xfer_bulk(dep->endpoint.desc) ||
+            usb_endpoint_xfer_isoc(dep->endpoint.desc)) && DWC3_IP_IS(DWC31))
                num_fifos = dwc->tx_fifo_resize_max_num;
 
        /* FIFO size for a single buffer */
@@ -882,12 +882,13 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
                reg |= DWC3_DALEPENA_EP(dep->number);
                dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
 
+               dep->trb_dequeue = 0;
+               dep->trb_enqueue = 0;
+
                if (usb_endpoint_xfer_control(desc))
                        goto out;
 
                /* Initialize the TRB ring */
-               dep->trb_dequeue = 0;
-               dep->trb_enqueue = 0;
                memset(dep->trb_pool, 0,
                       sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
 
@@ -2001,10 +2002,10 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r
 static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
 {
        struct dwc3_request             *req;
-       struct dwc3_request             *tmp;
        struct dwc3                     *dwc = dep->dwc;
 
-       list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) {
+       while (!list_empty(&dep->cancelled_list)) {
+               req = next_request(&dep->cancelled_list);
                dwc3_gadget_ep_skip_trbs(dep, req);
                switch (req->status) {
                case DWC3_REQUEST_STATUS_DISCONNECTED:
@@ -2021,6 +2022,12 @@ static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
                        dwc3_gadget_giveback(dep, req, -ECONNRESET);
                        break;
                }
+               /*
+                * If the endpoint has been disabled, let dwc3_remove_requests()
+                * handle the cleanup.
+                */
+               if (!dep->endpoint.desc)
+                       break;
        }
 }
 
@@ -2056,16 +2063,6 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
                if (r == req) {
                        struct dwc3_request *t;
 
-                       /*
-                        * If a Setup packet is received but yet to DMA out, the controller will
-                        * not process the End Transfer command of any endpoint. Polling of its
-                        * DEPCMD.CmdAct may block setting up TRB for Setup packet, causing a
-                        * timeout. Delay issuing the End Transfer command until the Setup TRB is
-                        * prepared.
-                        */
-                       if (dwc->ep0state != EP0_SETUP_PHASE && !dwc->delayed_status)
-                               dep->flags |= DWC3_EP_DELAY_STOP;
-
                        /* wait until it is processed */
                        dwc3_stop_active_transfer(dep, true, true);
 
@@ -2152,6 +2149,9 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
                if (dep->flags & DWC3_EP_END_TRANSFER_PENDING ||
                    (dep->flags & DWC3_EP_DELAY_STOP)) {
                        dep->flags |= DWC3_EP_PENDING_CLEAR_STALL;
+                       if (protocol)
+                               dwc->clear_stall_protocol = dep->number;
+
                        return 0;
                }
 
@@ -2498,28 +2498,64 @@ static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
 static void __dwc3_gadget_stop(struct dwc3 *dwc);
 static int __dwc3_gadget_start(struct dwc3 *dwc);
 
-static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
 {
-       struct dwc3             *dwc = gadget_to_dwc(g);
-       unsigned long           flags;
-       int                     ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dwc->lock, flags);
+       dwc->connected = false;
 
-       is_on = !!is_on;
-       dwc->softconnect = is_on;
        /*
         * Per databook, when we want to stop the gadget, if a control transfer
         * is still in process, complete it and get the core into setup phase.
         */
-       if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
+       if (dwc->ep0state != EP0_SETUP_PHASE) {
+               int ret;
+
                reinit_completion(&dwc->ep0_in_setup);
 
+               spin_unlock_irqrestore(&dwc->lock, flags);
                ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
                                msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
+               spin_lock_irqsave(&dwc->lock, flags);
                if (ret == 0)
                        dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
        }
 
        /*
+        * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
+        * Section 4.1.8 Table 4-7, it states that for a device-initiated
+        * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
+        * command for any active transfers" before clearing the RunStop
+        * bit.
+        */
+       dwc3_stop_active_transfers(dwc);
+       __dwc3_gadget_stop(dwc);
+       spin_unlock_irqrestore(&dwc->lock, flags);
+
+       /*
+        * Note: if the GEVNTCOUNT indicates events in the event buffer, the
+        * driver needs to acknowledge them before the controller can halt.
+        * Simply let the interrupt handler acknowledge and handle the
+        * remaining events generated by the controller while polling for
+        * DSTS.DEVCTLHLT.
+        */
+       return dwc3_gadget_run_stop(dwc, false, false);
+}
+
+static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+{
+       struct dwc3             *dwc = gadget_to_dwc(g);
+       int                     ret;
+
+       is_on = !!is_on;
+
+       if (dwc->pullups_connected == is_on)
+               return 0;
+
+       dwc->softconnect = is_on;
+
+       /*
         * Avoid issuing a runtime resume if the device is already in the
         * suspended state during gadget disconnect.  DWC3 gadget was already
         * halted/stopped during runtime suspend.
@@ -2541,42 +2577,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
                return 0;
        }
 
-       /*
-        * Synchronize and disable any further event handling while controller
-        * is being enabled/disabled.
-        */
-       disable_irq(dwc->irq_gadget);
-
-       spin_lock_irqsave(&dwc->lock, flags);
-
        if (!is_on) {
-               u32 count;
-
-               dwc->connected = false;
-               /*
-                * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
-                * Section 4.1.8 Table 4-7, it states that for a device-initiated
-                * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
-                * command for any active transfers" before clearing the RunStop
-                * bit.
-                */
-               dwc3_stop_active_transfers(dwc);
-               __dwc3_gadget_stop(dwc);
-
-               /*
-                * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
-                * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the
-                * "software needs to acknowledge the events that are generated
-                * (by writing to GEVNTCOUNTn) while it is waiting for this bit
-                * to be set to '1'."
-                */
-               count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
-               count &= DWC3_GEVNTCOUNT_MASK;
-               if (count > 0) {
-                       dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
-                       dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
-                                               dwc->ev_buf->length;
-               }
+               ret = dwc3_gadget_soft_disconnect(dwc);
        } else {
                /*
                 * In the Synopsys DWC_usb31 1.90a programming guide section
@@ -2584,18 +2586,13 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
                 * device-initiated disconnect requires a core soft reset
                 * (DCTL.CSftRst) before enabling the run/stop bit.
                 */
-               spin_unlock_irqrestore(&dwc->lock, flags);
                dwc3_core_soft_reset(dwc);
-               spin_lock_irqsave(&dwc->lock, flags);
 
                dwc3_event_buffers_setup(dwc);
                __dwc3_gadget_start(dwc);
+               ret = dwc3_gadget_run_stop(dwc, true, false);
        }
 
-       ret = dwc3_gadget_run_stop(dwc, is_on, false);
-       spin_unlock_irqrestore(&dwc->lock, flags);
-       enable_irq(dwc->irq_gadget);
-
        pm_runtime_put(dwc->dev);
 
        return ret;
@@ -2745,6 +2742,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
 
        /* begin to receive SETUP packets */
        dwc->ep0state = EP0_SETUP_PHASE;
+       dwc->ep0_bounced = false;
        dwc->link_state = DWC3_LINK_STATE_SS_DIS;
        dwc->delayed_status = false;
        dwc3_ep0_out_start(dwc);
@@ -3333,15 +3331,21 @@ static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
                const struct dwc3_event_depevt *event, int status)
 {
        struct dwc3_request     *req;
-       struct dwc3_request     *tmp;
 
-       list_for_each_entry_safe(req, tmp, &dep->started_list, list) {
+       while (!list_empty(&dep->started_list)) {
                int ret;
 
+               req = next_request(&dep->started_list);
                ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
                                req, status);
                if (ret)
                        break;
+               /*
+                * If the endpoint has been disabled, let dwc3_remove_requests()
+                * handle the cleanup.
+                */
+               if (!dep->endpoint.desc)
+                       break;
        }
 }
 
@@ -3380,14 +3384,14 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
        struct dwc3             *dwc = dep->dwc;
        bool                    no_started_trb = true;
 
-       if (!dep->endpoint.desc)
-               return no_started_trb;
-
        dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
 
        if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
                goto out;
 
+       if (!dep->endpoint.desc)
+               return no_started_trb;
+
        if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
                list_empty(&dep->started_list) &&
                (list_empty(&dep->pending_list) || status == -EXDEV))
@@ -3512,7 +3516,7 @@ static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
                }
 
                dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
-               if (dwc->delayed_status)
+               if (dwc->clear_stall_protocol == dep->number)
                        dwc3_ep0_send_delayed_status(dwc);
        }
 
@@ -3673,12 +3677,35 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
 void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
        bool interrupt)
 {
+       struct dwc3 *dwc = dep->dwc;
+
+       /*
+        * Only issue End Transfer command to the control endpoint of a started
+        * Data Phase. Typically we should only do so in error cases such as
+        * invalid/unexpected direction as described in the control transfer
+        * flow of the programming guide.
+        */
+       if (dep->number <= 1 && dwc->ep0state != EP0_DATA_PHASE)
+               return;
+
        if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
            (dep->flags & DWC3_EP_DELAY_STOP) ||
            (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
                return;
 
        /*
+        * If a Setup packet is received but yet to DMA out, the controller will
+        * not process the End Transfer command of any endpoint. Polling of its
+        * DEPCMD.CmdAct may block setting up TRB for Setup packet, causing a
+        * timeout. Delay issuing the End Transfer command until the Setup TRB is
+        * prepared.
+        */
+       if (dwc->ep0state != EP0_SETUP_PHASE && !dwc->delayed_status) {
+               dep->flags |= DWC3_EP_DELAY_STOP;
+               return;
+       }
+
+       /*
         * NOTICE: We are violating what the Databook says about the
         * EndTransfer command. Ideally we would _always_ wait for the
         * EndTransfer Command Completion IRQ, but that's causing too
@@ -3795,6 +3822,27 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
        }
 
        dwc3_reset_gadget(dwc);
+
+       /*
+        * Per SNPS databook section 8.1.2, EP0 should be in the setup
+        * phase here. If it is not, end the active control transfer and
+        * issue a stall and restart to force EP0 back into the setup phase.
+        */
+       if (dwc->ep0state != EP0_SETUP_PHASE) {
+               unsigned int    dir;
+
+               dir = !!dwc->ep0_expect_in;
+               if (dwc->ep0state == EP0_DATA_PHASE)
+                       dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
+               else
+                       dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
+
+               dwc->eps[0]->trb_enqueue = 0;
+               dwc->eps[1]->trb_enqueue = 0;
+
+               dwc3_ep0_stall_and_restart(dwc);
+       }
+
        /*
         * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
         * Section 4.1.2 Table 4-2, it states that during a USB reset, the SW
index f763380..55a56cf 100644 (file)
@@ -110,6 +110,8 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
 void dwc3_ep0_interrupt(struct dwc3 *dwc,
                const struct dwc3_event_depevt *event);
 void dwc3_ep0_out_start(struct dwc3 *dwc);
+void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep);
+void dwc3_ep0_stall_and_restart(struct dwc3 *dwc);
 int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
 int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
 int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
index eda8719..f56c30c 100644 (file)
@@ -7,7 +7,6 @@
  * Authors: Felipe Balbi <balbi@ti.com>,
  */
 
-#include <linux/acpi.h>
 #include <linux/irq.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
@@ -83,7 +82,6 @@ int dwc3_host_init(struct dwc3 *dwc)
        }
 
        xhci->dev.parent        = dwc->dev;
-       ACPI_COMPANION_SET(&xhci->dev, ACPI_COMPANION(dwc->dev));
 
        dwc->xhci = xhci;
 
index 2eaeaae..403563c 100644 (file)
@@ -2505,7 +2505,7 @@ int usb_composite_probe(struct usb_composite_driver *driver)
        gadget_driver->driver.name = driver->name;
        gadget_driver->max_speed = driver->max_speed;
 
-       return usb_gadget_probe_driver(gadget_driver);
+       return usb_gadget_register_driver(gadget_driver);
 }
 EXPORT_SYMBOL_GPL(usb_composite_probe);
 
index 84b73cb..3a6b492 100644 (file)
@@ -284,7 +284,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
                        goto err;
                }
                gi->composite.gadget_driver.udc_name = name;
-               ret = usb_gadget_probe_driver(&gi->composite.gadget_driver);
+               ret = usb_gadget_register_driver(&gi->composite.gadget_driver);
                if (ret) {
                        gi->composite.gadget_driver.udc_name = NULL;
                        goto err;
index 349945e..411eb48 100644 (file)
@@ -333,6 +333,8 @@ static void acm_complete_set_line_coding(struct usb_ep *ep,
        }
 }
 
+static int acm_send_break(struct gserial *port, int duration);
+
 static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
 {
        struct f_acm            *acm = func_to_acm(f);
@@ -391,6 +393,14 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
                acm->port_handshake_bits = w_value;
                break;
 
+       case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+                       | USB_CDC_REQ_SEND_BREAK:
+               if (w_index != acm->ctrl_id)
+                       goto invalid;
+
+               acm_send_break(&acm->port, w_value);
+               break;
+
        default:
 invalid:
                dev_vdbg(&cdev->gadget->dev,
index d379658..d3feeeb 100644 (file)
@@ -24,7 +24,6 @@
 #include <media/v4l2-dev.h>
 #include <media/v4l2-event.h>
 
-#include "u_uvc.h"
 #include "uvc.h"
 #include "uvc_configfs.h"
 #include "uvc_v4l2.h"
@@ -44,7 +43,7 @@ MODULE_PARM_DESC(trace, "Trace level bitmask");
 #define UVC_STRING_STREAMING_IDX               1
 
 static struct usb_string uvc_en_us_strings[] = {
-       [UVC_STRING_CONTROL_IDX].s = "UVC Camera",
+       /* [UVC_STRING_CONTROL_IDX].s = DYNAMIC, */
        [UVC_STRING_STREAMING_IDX].s = "Video Streaming",
        {  }
 };
@@ -676,6 +675,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
        uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
        uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address;
 
+       uvc_en_us_strings[UVC_STRING_CONTROL_IDX].s = opts->function_name;
        us = usb_gstrings_attach(cdev, uvc_function_strings,
                                 ARRAY_SIZE(uvc_en_us_strings));
        if (IS_ERR(us)) {
@@ -866,6 +866,7 @@ static struct usb_function_instance *uvc_alloc_inst(void)
 
        opts->streaming_interval = 1;
        opts->streaming_maxpacket = 1024;
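+       /* Default UVC function name; can be overridden via configfs */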
+       snprintf(opts->function_name, sizeof(opts->function_name), "UVC Camera");
 
        ret = uvcg_attach_configfs(opts);
        if (ret < 0) {
index 2bb5698..c1f62e9 100644 (file)
@@ -1179,8 +1179,8 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
        if (c_chmask) {
                struct uac_rtd_params *prm = &uac->c_prm;
 
-    spin_lock_init(&prm->lock);
-    uac->c_prm.uac = uac;
+               spin_lock_init(&prm->lock);
+               uac->c_prm.uac = uac;
                prm->max_psize = g_audio->out_ep_maxpsize;
                prm->srate = params->c_srates[0];
 
index 9a01a7d..24b8681 100644 (file)
@@ -27,6 +27,7 @@ struct f_uvc_opts {
 
        unsigned int                                    control_interface;
        unsigned int                                    streaming_interface;
+       char                                            function_name[32];
 
        /*
         * Control descriptors array pointers for full-/high-speed and
index 886103a..58e383a 100644 (file)
@@ -80,6 +80,7 @@ struct uvc_request {
        struct uvc_video *video;
        struct sg_table sgt;
        u8 header[UVCG_REQUEST_HEADER_LEN];
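+       /*
+        * Buffer handed off by the isoc SG encode path; completed from
+        * the request completion handler.
+        */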
+       struct uvc_buffer *last_buf;
 };
 
 struct uvc_video {
index 77d6403..e5a6b6e 100644 (file)
  * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
  */
 
-#include <linux/sort.h>
-
-#include "u_uvc.h"
 #include "uvc_configfs.h"
 
+#include <linux/sort.h>
+
 /* -----------------------------------------------------------------------------
  * Global Utility Structures and Macros
  */
 
-#define UVCG_STREAMING_CONTROL_SIZE    1
-
 #define UVC_ATTR(prefix, cname, aname) \
 static struct configfs_attribute prefix##attr_##cname = { \
        .ca_name        = __stringify(aname),                           \
@@ -49,12 +46,6 @@ static int uvcg_config_compare_u32(const void *l, const void *r)
        return li < ri ? -1 : li == ri ? 0 : 1;
 }
 
-static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item)
-{
-       return container_of(to_config_group(item), struct f_uvc_opts,
-                           func_inst.group);
-}
-
 struct uvcg_config_group_type {
        struct config_item_type type;
        const char *name;
@@ -125,19 +116,6 @@ static void uvcg_config_remove_children(struct config_group *group)
  * control/header
  */
 
-DECLARE_UVC_HEADER_DESCRIPTOR(1);
-
-struct uvcg_control_header {
-       struct config_item              item;
-       struct UVC_HEADER_DESCRIPTOR(1) desc;
-       unsigned                        linked;
-};
-
-static struct uvcg_control_header *to_uvcg_control_header(struct config_item *item)
-{
-       return container_of(item, struct uvcg_control_header, item);
-}
-
 #define UVCG_CTRL_HDR_ATTR(cname, aname, bits, limit)                  \
 static ssize_t uvcg_control_header_##cname##_show(                     \
        struct config_item *item, char *page)                           \
@@ -769,24 +747,6 @@ static const char * const uvcg_format_names[] = {
        "mjpeg",
 };
 
-enum uvcg_format_type {
-       UVCG_UNCOMPRESSED = 0,
-       UVCG_MJPEG,
-};
-
-struct uvcg_format {
-       struct config_group     group;
-       enum uvcg_format_type   type;
-       unsigned                linked;
-       unsigned                num_frames;
-       __u8                    bmaControls[UVCG_STREAMING_CONTROL_SIZE];
-};
-
-static struct uvcg_format *to_uvcg_format(struct config_item *item)
-{
-       return container_of(to_config_group(item), struct uvcg_format, group);
-}
-
 static ssize_t uvcg_format_bma_controls_show(struct uvcg_format *f, char *page)
 {
        struct f_uvc_opts *opts;
@@ -845,29 +805,11 @@ end:
        return ret;
 }
 
-struct uvcg_format_ptr {
-       struct uvcg_format      *fmt;
-       struct list_head        entry;
-};
-
 /* -----------------------------------------------------------------------------
  * streaming/header/<NAME>
  * streaming/header
  */
 
-struct uvcg_streaming_header {
-       struct config_item                              item;
-       struct uvc_input_header_descriptor              desc;
-       unsigned                                        linked;
-       struct list_head                                formats;
-       unsigned                                        num_fmt;
-};
-
-static struct uvcg_streaming_header *to_uvcg_streaming_header(struct config_item *item)
-{
-       return container_of(item, struct uvcg_streaming_header, item);
-}
-
 static void uvcg_format_set_indices(struct config_group *fmt);
 
 static int uvcg_streaming_header_allow_link(struct config_item *src,
@@ -1059,31 +1001,6 @@ static const struct uvcg_config_group_type uvcg_streaming_header_grp_type = {
  * streaming/<mode>/<format>/<NAME>
  */
 
-struct uvcg_frame {
-       struct config_item      item;
-       enum uvcg_format_type   fmt_type;
-       struct {
-               u8      b_length;
-               u8      b_descriptor_type;
-               u8      b_descriptor_subtype;
-               u8      b_frame_index;
-               u8      bm_capabilities;
-               u16     w_width;
-               u16     w_height;
-               u32     dw_min_bit_rate;
-               u32     dw_max_bit_rate;
-               u32     dw_max_video_frame_buffer_size;
-               u32     dw_default_frame_interval;
-               u8      b_frame_interval_type;
-       } __attribute__((packed)) frame;
-       u32 *dw_frame_interval;
-};
-
-static struct uvcg_frame *to_uvcg_frame(struct config_item *item)
-{
-       return container_of(item, struct uvcg_frame, item);
-}
-
 #define UVCG_FRAME_ATTR(cname, aname, bits) \
 static ssize_t uvcg_frame_##cname##_show(struct config_item *item, char *page)\
 {                                                                      \
@@ -1345,6 +1262,7 @@ static struct config_item *uvcg_frame_make(struct config_group *group,
        struct uvcg_format *fmt;
        struct f_uvc_opts *opts;
        struct config_item *opts_item;
+       struct uvcg_frame_ptr *frame_ptr;
 
        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
@@ -1375,6 +1293,16 @@ static struct config_item *uvcg_frame_make(struct config_group *group,
                kfree(h);
                return ERR_PTR(-EINVAL);
        }
+
+       frame_ptr = kzalloc(sizeof(*frame_ptr), GFP_KERNEL);
+       if (!frame_ptr) {
+               mutex_unlock(&opts->lock);
+               kfree(h);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       frame_ptr->frm = h;
+       list_add_tail(&frame_ptr->entry, &fmt->frames);
        ++fmt->num_frames;
        mutex_unlock(&opts->lock);
 
@@ -1388,13 +1316,23 @@ static void uvcg_frame_drop(struct config_group *group, struct config_item *item
        struct uvcg_format *fmt;
        struct f_uvc_opts *opts;
        struct config_item *opts_item;
+       struct uvcg_frame *target_frm = NULL;
+       struct uvcg_frame_ptr *frame_ptr, *tmp;
 
        opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
        opts = to_f_uvc_opts(opts_item);
 
        mutex_lock(&opts->lock);
+       target_frm = container_of(item, struct uvcg_frame, item);
        fmt = to_uvcg_format(&group->cg_item);
-       --fmt->num_frames;
+
+       list_for_each_entry_safe(frame_ptr, tmp, &fmt->frames, entry)
+               if (frame_ptr->frm == target_frm) {
+                       list_del(&frame_ptr->entry);
+                       kfree(frame_ptr);
+                       --fmt->num_frames;
+                       break;
+               }
        mutex_unlock(&opts->lock);
 
        config_item_put(item);
@@ -1420,18 +1358,6 @@ static void uvcg_format_set_indices(struct config_group *fmt)
  * streaming/uncompressed/<NAME>
  */
 
-struct uvcg_uncompressed {
-       struct uvcg_format              fmt;
-       struct uvc_format_uncompressed  desc;
-};
-
-static struct uvcg_uncompressed *to_uvcg_uncompressed(struct config_item *item)
-{
-       return container_of(
-               container_of(to_config_group(item), struct uvcg_format, group),
-               struct uvcg_uncompressed, fmt);
-}
-
 static struct configfs_group_operations uvcg_uncompressed_group_ops = {
        .make_item              = uvcg_frame_make,
        .drop_item              = uvcg_frame_drop,
@@ -1565,6 +1491,12 @@ uvcg_uncompressed_##cname##_store(struct config_item *item,              \
        if (ret)                                                        \
                goto end;                                               \
                                                                        \
+       /* index values in uvc are never 0 */                           \
+       if (!num) {                                                     \
+               ret = -EINVAL;                                          \
+               goto end;                                               \
+       }                                                               \
+                                                                       \
        u->desc.aname = num;                                            \
        ret = len;                                                      \
 end:                                                                   \
@@ -1645,6 +1577,7 @@ static struct config_group *uvcg_uncompressed_make(struct config_group *group,
        h->desc.bmInterfaceFlags        = 0;
        h->desc.bCopyProtect            = 0;
 
+       INIT_LIST_HEAD(&h->fmt.frames);
        h->fmt.type = UVCG_UNCOMPRESSED;
        config_group_init_type_name(&h->fmt.group, name,
                                    &uvcg_uncompressed_type);
@@ -1669,18 +1602,6 @@ static const struct uvcg_config_group_type uvcg_uncompressed_grp_type = {
  * streaming/mjpeg/<NAME>
  */
 
-struct uvcg_mjpeg {
-       struct uvcg_format              fmt;
-       struct uvc_format_mjpeg         desc;
-};
-
-static struct uvcg_mjpeg *to_uvcg_mjpeg(struct config_item *item)
-{
-       return container_of(
-               container_of(to_config_group(item), struct uvcg_format, group),
-               struct uvcg_mjpeg, fmt);
-}
-
 static struct configfs_group_operations uvcg_mjpeg_group_ops = {
        .make_item              = uvcg_frame_make,
        .drop_item              = uvcg_frame_drop,
@@ -1758,6 +1679,12 @@ uvcg_mjpeg_##cname##_store(struct config_item *item,                     \
        if (ret)                                                        \
                goto end;                                               \
                                                                        \
+       /* index values in uvc are never 0 */                           \
+       if (!num) {                                                     \
+               ret = -EINVAL;                                          \
+               goto end;                                               \
+       }                                                               \
+                                                                       \
        u->desc.aname = num;                                            \
        ret = len;                                                      \
 end:                                                                   \
@@ -1831,6 +1758,7 @@ static struct config_group *uvcg_mjpeg_make(struct config_group *group,
        h->desc.bmInterfaceFlags        = 0;
        h->desc.bCopyProtect            = 0;
 
+       INIT_LIST_HEAD(&h->fmt.frames);
        h->fmt.type = UVCG_MJPEG;
        config_group_init_type_name(&h->fmt.group, name,
                                    &uvcg_mjpeg_type);
@@ -2425,10 +2353,51 @@ UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, 15);
 
 #undef UVCG_OPTS_ATTR
 
+#define UVCG_OPTS_STRING_ATTR(cname, aname)                            \
+static ssize_t f_uvc_opts_string_##cname##_show(struct config_item *item,\
+                                        char *page)                    \
+{                                                                      \
+       struct f_uvc_opts *opts = to_f_uvc_opts(item);                  \
+       int result;                                                     \
+                                                                       \
+       mutex_lock(&opts->lock);                                        \
+       result = snprintf(page, sizeof(opts->aname), "%s", opts->aname);\
+       mutex_unlock(&opts->lock);                                      \
+                                                                       \
+       return result;                                                  \
+}                                                                      \
+                                                                       \
+static ssize_t f_uvc_opts_string_##cname##_store(struct config_item *item,\
+                                         const char *page, size_t len) \
+{                                                                      \
+       struct f_uvc_opts *opts = to_f_uvc_opts(item);                  \
+       int ret = 0;                                                    \
+                                                                       \
+       mutex_lock(&opts->lock);                                        \
+       if (opts->refcnt) {                                             \
+               ret = -EBUSY;                                           \
+               goto end;                                               \
+        * Don't ignore the PM dependencies between this device and its
+        * children
+                                                                       \
+       ret = snprintf(opts->aname, min(sizeof(opts->aname), len),      \
+                       "%s", page);                                    \
+                                                                       \
+end:                                                                   \
+       mutex_unlock(&opts->lock);                                      \
+       return ret;                                                     \
+}                                                                      \
+                                                                       \
+UVC_ATTR(f_uvc_opts_string_, cname, aname)
+
+UVCG_OPTS_STRING_ATTR(function_name, function_name);
+
+#undef UVCG_OPTS_STRING_ATTR
+
 static struct configfs_attribute *uvc_attrs[] = {
        &f_uvc_opts_attr_streaming_interval,
        &f_uvc_opts_attr_streaming_maxpacket,
        &f_uvc_opts_attr_streaming_maxburst,
+       &f_uvc_opts_string_attr_function_name,
        NULL,
 };
 
index 7e1d7ca..ad2ec8c 100644 (file)
 #ifndef UVC_CONFIGFS_H
 #define UVC_CONFIGFS_H
 
-struct f_uvc_opts;
+#include <linux/configfs.h>
+
+#include "u_uvc.h"
+
+static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item)
+{
+       return container_of(to_config_group(item), struct f_uvc_opts,
+                           func_inst.group);
+}
+
+#define UVCG_STREAMING_CONTROL_SIZE    1
+
+DECLARE_UVC_HEADER_DESCRIPTOR(1);
+
+struct uvcg_control_header {
+       struct config_item              item;
+       struct UVC_HEADER_DESCRIPTOR(1) desc;
+       unsigned                        linked;
+};
+
+static inline struct uvcg_control_header *to_uvcg_control_header(struct config_item *item)
+{
+       return container_of(item, struct uvcg_control_header, item);
+}
+
+enum uvcg_format_type {
+       UVCG_UNCOMPRESSED = 0,
+       UVCG_MJPEG,
+};
+
+struct uvcg_format {
+       struct config_group     group;
+       enum uvcg_format_type   type;
+       unsigned                linked;
+       struct list_head        frames;
+       unsigned                num_frames;
+       __u8                    bmaControls[UVCG_STREAMING_CONTROL_SIZE];
+};
+
+struct uvcg_format_ptr {
+       struct uvcg_format      *fmt;
+       struct list_head        entry;
+};
+
+static inline struct uvcg_format *to_uvcg_format(struct config_item *item)
+{
+       return container_of(to_config_group(item), struct uvcg_format, group);
+}
+
+struct uvcg_streaming_header {
+       struct config_item                              item;
+       struct uvc_input_header_descriptor              desc;
+       unsigned                                        linked;
+       struct list_head                                formats;
+       unsigned                                        num_fmt;
+};
+
+static inline struct uvcg_streaming_header *to_uvcg_streaming_header(struct config_item *item)
+{
+       return container_of(item, struct uvcg_streaming_header, item);
+}
+
+struct uvcg_frame_ptr {
+       struct uvcg_frame       *frm;
+       struct list_head        entry;
+};
+
+struct uvcg_frame {
+       struct config_item      item;
+       enum uvcg_format_type   fmt_type;
+       struct {
+               u8      b_length;
+               u8      b_descriptor_type;
+               u8      b_descriptor_subtype;
+               u8      b_frame_index;
+               u8      bm_capabilities;
+               u16     w_width;
+               u16     w_height;
+               u32     dw_min_bit_rate;
+               u32     dw_max_bit_rate;
+               u32     dw_max_video_frame_buffer_size;
+               u32     dw_default_frame_interval;
+               u8      b_frame_interval_type;
+       } __attribute__((packed)) frame;
+       u32 *dw_frame_interval;
+};
+
+static inline struct uvcg_frame *to_uvcg_frame(struct config_item *item)
+{
+       return container_of(item, struct uvcg_frame, item);
+}
+
+/* -----------------------------------------------------------------------------
+ * streaming/uncompressed/<NAME>
+ */
+
+struct uvcg_uncompressed {
+       struct uvcg_format              fmt;
+       struct uvc_format_uncompressed  desc;
+};
+
+static inline struct uvcg_uncompressed *to_uvcg_uncompressed(struct config_item *item)
+{
+       return container_of(to_uvcg_format(item), struct uvcg_uncompressed, fmt);
+}
+
+/* -----------------------------------------------------------------------------
+ * streaming/mjpeg/<NAME>
+ */
+
+struct uvcg_mjpeg {
+       struct uvcg_format              fmt;
+       struct uvc_format_mjpeg         desc;
+};
+
+static inline struct uvcg_mjpeg *to_uvcg_mjpeg(struct config_item *item)
+{
+       return container_of(to_uvcg_format(item), struct uvcg_mjpeg, fmt);
+}
 
 int uvcg_attach_configfs(struct f_uvc_opts *opts);
 
index 2cda982..d25edc3 100644 (file)
@@ -185,18 +185,7 @@ int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
 
 int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
 {
-       unsigned long flags;
-       int ret;
-
-       ret = vb2_qbuf(&queue->queue, NULL, buf);
-       if (ret < 0)
-               return ret;
-
-       spin_lock_irqsave(&queue->irqlock, flags);
-       ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
-       queue->flags &= ~UVC_QUEUE_PAUSED;
-       spin_unlock_irqrestore(&queue->irqlock, flags);
-       return ret;
+       return vb2_qbuf(&queue->queue, NULL, buf);
 }
 
 /*
@@ -328,33 +317,22 @@ int uvcg_queue_enable(struct uvc_video_queue *queue, int enable)
 }
 
 /* called with &queue_irqlock held.. */
-struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
+void uvcg_complete_buffer(struct uvc_video_queue *queue,
                                          struct uvc_buffer *buf)
 {
-       struct uvc_buffer *nextbuf;
-
        if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
             buf->length != buf->bytesused) {
                buf->state = UVC_BUF_STATE_QUEUED;
                vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
-               return buf;
+               return;
        }
 
-       list_del(&buf->queue);
-       if (!list_empty(&queue->irqqueue))
-               nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
-                                          queue);
-       else
-               nextbuf = NULL;
-
        buf->buf.field = V4L2_FIELD_NONE;
        buf->buf.sequence = queue->sequence++;
        buf->buf.vb2_buf.timestamp = ktime_get_ns();
 
        vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
        vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
-
-       return nextbuf;
 }
 
 struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue)
@@ -364,8 +342,6 @@ struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue)
        if (!list_empty(&queue->irqqueue))
                buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
                                       queue);
-       else
-               queue->flags |= UVC_QUEUE_PAUSED;
 
        return buf;
 }
index 05360a0..41f87b9 100644 (file)
@@ -43,7 +43,6 @@ struct uvc_buffer {
 
 #define UVC_QUEUE_DISCONNECTED         (1 << 0)
 #define UVC_QUEUE_DROP_INCOMPLETE      (1 << 1)
-#define UVC_QUEUE_PAUSED               (1 << 2)
 
 struct uvc_video_queue {
        struct vb2_queue queue;
@@ -93,7 +92,7 @@ void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect);
 
 int uvcg_queue_enable(struct uvc_video_queue *queue, int enable);
 
-struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
+void uvcg_complete_buffer(struct uvc_video_queue *queue,
                                          struct uvc_buffer *buf);
 
 struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue);
index 7f59a0c..a9bb455 100644 (file)
@@ -112,7 +112,8 @@ uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
        if (buf->bytesused == video->queue.buf_used) {
                video->queue.buf_used = 0;
                buf->state = UVC_BUF_STATE_DONE;
-               uvcg_queue_next_buffer(&video->queue, buf);
+               list_del(&buf->queue);
+               uvcg_complete_buffer(&video->queue, buf);
                video->fid ^= UVC_STREAM_FID;
 
                video->payload_size = 0;
@@ -154,7 +155,7 @@ uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
        sg = sg_next(sg);
 
        for_each_sg(sg, iter, ureq->sgt.nents - 1, i) {
-               if (!len || !buf->sg)
+               if (!len || !buf->sg || !sg_dma_len(buf->sg))
                        break;
 
                sg_left = sg_dma_len(buf->sg) - buf->offset;
@@ -183,8 +184,9 @@ uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
                video->queue.buf_used = 0;
                buf->state = UVC_BUF_STATE_DONE;
                buf->offset = 0;
-               uvcg_queue_next_buffer(&video->queue, buf);
+               list_del(&buf->queue);
                video->fid ^= UVC_STREAM_FID;
+               ureq->last_buf = buf;
        }
 }
 
@@ -210,7 +212,8 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
        if (buf->bytesused == video->queue.buf_used) {
                video->queue.buf_used = 0;
                buf->state = UVC_BUF_STATE_DONE;
-               uvcg_queue_next_buffer(&video->queue, buf);
+               list_del(&buf->queue);
+               uvcg_complete_buffer(&video->queue, buf);
                video->fid ^= UVC_STREAM_FID;
        }
 }
@@ -264,6 +267,11 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
                uvcg_queue_cancel(queue, 0);
        }
 
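+       /* Complete the buffer deferred by the isoc SG encode path */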
+       if (ureq->last_buf) {
+               uvcg_complete_buffer(&video->queue, ureq->last_buf);
+               ureq->last_buf = NULL;
+       }
+
        spin_lock_irqsave(&video->req_lock, flags);
        list_add_tail(&req->list, &video->req_free);
        spin_unlock_irqrestore(&video->req_lock, flags);
@@ -332,6 +340,7 @@ uvc_video_alloc_requests(struct uvc_video *video)
                video->ureq[i].req->complete = uvc_video_complete;
                video->ureq[i].req->context = &video->ureq[i];
                video->ureq[i].video = video;
+               video->ureq[i].last_buf = NULL;
 
                list_add_tail(&video->ureq[i].req->list, &video->req_free);
                /* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
index 6bcbad3..b62e452 100644 (file)
@@ -422,7 +422,7 @@ static struct usb_gadget_driver dbgp_driver = {
 
 static int __init dbgp_init(void)
 {
-       return usb_gadget_probe_driver(&dbgp_driver);
+       return usb_gadget_register_driver(&dbgp_driver);
 }
 
 static void __exit dbgp_exit(void)
index 0c01e74..7999059 100644 (file)
@@ -1873,7 +1873,7 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
        else
                gadgetfs_driver.max_speed = USB_SPEED_FULL;
 
-       value = usb_gadget_probe_driver(&gadgetfs_driver);
+       value = usb_gadget_register_driver(&gadgetfs_driver);
        if (value != 0) {
                spin_lock_irq(&dev->lock);
                goto fail;
index e9440f7..2417400 100644 (file)
@@ -512,12 +512,12 @@ static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
        dev->state = STATE_DEV_REGISTERING;
        spin_unlock_irqrestore(&dev->lock, flags);
 
-       ret = usb_gadget_probe_driver(&dev->driver);
+       ret = usb_gadget_register_driver(&dev->driver);
 
        spin_lock_irqsave(&dev->lock, flags);
        if (ret) {
                dev_err(dev->dev,
-                       "fail, usb_gadget_probe_driver returned %d\n", ret);
+                       "fail, usb_gadget_register_driver returned %d\n", ret);
                dev->state = STATE_DEV_FAILED;
                goto out_unlock;
        }
index 85b1940..7886497 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/list.h>
+#include <linux/idr.h>
 #include <linux/err.h>
 #include <linux/dma-mapping.h>
 #include <linux/sched/task_stack.h>
 
 #include "trace.h"
 
+static DEFINE_IDA(gadget_id_numbers);
+
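+/* Matches gadget drivers to UDC devices and binds/unbinds them */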
+static struct bus_type gadget_bus_type;
+
 /**
  * struct usb_udc - describes one usb device controller
  * @driver: the gadget driver pointer. For use by the class code
@@ -47,11 +52,9 @@ struct usb_udc {
 
 static struct class *udc_class;
 static LIST_HEAD(udc_list);
-static LIST_HEAD(gadget_driver_pending_list);
-static DEFINE_MUTEX(udc_lock);
 
-static int udc_bind_to_driver(struct usb_udc *udc,
-               struct usb_gadget_driver *driver);
+/* Protects udc_list, udc->driver, driver->is_bound, and related calls */
+static DEFINE_MUTEX(udc_lock);
 
 /* ------------------------------------------------------------------------- */
 
@@ -1238,38 +1241,16 @@ static void usb_udc_nop_release(struct device *dev)
        dev_vdbg(dev, "%s\n", __func__);
 }
 
-/* should be called with udc_lock held */
-static int check_pending_gadget_drivers(struct usb_udc *udc)
-{
-       struct usb_gadget_driver *driver;
-       int ret = 0;
-
-       list_for_each_entry(driver, &gadget_driver_pending_list, pending)
-               if (!driver->udc_name || strcmp(driver->udc_name,
-                                               dev_name(&udc->dev)) == 0) {
-                       ret = udc_bind_to_driver(udc, driver);
-                       if (ret != -EPROBE_DEFER)
-                               list_del_init(&driver->pending);
-                       break;
-               }
-
-       return ret;
-}
-
 /**
  * usb_initialize_gadget - initialize a gadget and its embedded struct device
  * @parent: the parent device to this udc. Usually the controller driver's
  * device.
  * @gadget: the gadget to be initialized.
  * @release: a gadget release function.
- *
- * Returns zero on success, negative errno otherwise.
- * Calls the gadget release function in the latter case.
  */
 void usb_initialize_gadget(struct device *parent, struct usb_gadget *gadget,
                void (*release)(struct device *dev))
 {
-       dev_set_name(&gadget->dev, "gadget");
        INIT_WORK(&gadget->work, usb_gadget_state_work);
        gadget->dev.parent = parent;
 
@@ -1279,6 +1260,7 @@ void usb_initialize_gadget(struct device *parent, struct usb_gadget *gadget,
                gadget->dev.release = usb_udc_nop_release;
 
        device_initialize(&gadget->dev);
+       gadget->dev.bus = &gadget_bus_type;
 }
 EXPORT_SYMBOL_GPL(usb_initialize_gadget);
 
@@ -1308,10 +1290,6 @@ int usb_add_gadget(struct usb_gadget *gadget)
        if (ret)
                goto err_put_udc;
 
-       ret = device_add(&gadget->dev);
-       if (ret)
-               goto err_put_udc;
-
        udc->gadget = gadget;
        gadget->udc = udc;
 
@@ -1319,6 +1297,7 @@ int usb_add_gadget(struct usb_gadget *gadget)
 
        mutex_lock(&udc_lock);
        list_add_tail(&udc->list, &udc_list);
+       mutex_unlock(&udc_lock);
 
        ret = device_add(&udc->dev);
        if (ret)
@@ -1327,25 +1306,30 @@ int usb_add_gadget(struct usb_gadget *gadget)
        usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
        udc->vbus = true;
 
-       /* pick up one of pending gadget drivers */
-       ret = check_pending_gadget_drivers(udc);
-       if (ret)
+       ret = ida_alloc(&gadget_id_numbers, GFP_KERNEL);
+       if (ret < 0)
                goto err_del_udc;
+       gadget->id_number = ret;
+       dev_set_name(&gadget->dev, "gadget.%d", ret);
 
-       mutex_unlock(&udc_lock);
+       ret = device_add(&gadget->dev);
+       if (ret)
+               goto err_free_id;
 
        return 0;
 
+ err_free_id:
+       ida_free(&gadget_id_numbers, gadget->id_number);
+
  err_del_udc:
        flush_work(&gadget->work);
        device_del(&udc->dev);
 
  err_unlist_udc:
+       mutex_lock(&udc_lock);
        list_del(&udc->list);
        mutex_unlock(&udc_lock);
 
-       device_del(&gadget->dev);
-
  err_put_udc:
        put_device(&udc->dev);
 
@@ -1421,30 +1405,11 @@ int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget)
 }
 EXPORT_SYMBOL_GPL(usb_add_gadget_udc);
 
-static void usb_gadget_remove_driver(struct usb_udc *udc)
-{
-       dev_dbg(&udc->dev, "unregistering UDC driver [%s]\n",
-                       udc->driver->function);
-
-       kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
-
-       usb_gadget_disconnect(udc->gadget);
-       usb_gadget_disable_async_callbacks(udc);
-       if (udc->gadget->irq)
-               synchronize_irq(udc->gadget->irq);
-       udc->driver->unbind(udc->gadget);
-       usb_gadget_udc_stop(udc);
-
-       udc->driver = NULL;
-       udc->gadget->dev.driver = NULL;
-}
-
 /**
- * usb_del_gadget - deletes @udc from udc_list
- * @gadget: the gadget to be removed.
+ * usb_del_gadget - deletes a gadget and unregisters its udc
+ * @gadget: the gadget to be deleted.
  *
- * This will call usb_gadget_unregister_driver() if
- * the @udc is still busy.
+ * This will unbind @gadget, if it is bound.
  * It will not do a final usb_put_gadget().
  */
 void usb_del_gadget(struct usb_gadget *gadget)
@@ -1458,25 +1423,19 @@ void usb_del_gadget(struct usb_gadget *gadget)
 
        mutex_lock(&udc_lock);
        list_del(&udc->list);
-
-       if (udc->driver) {
-               struct usb_gadget_driver *driver = udc->driver;
-
-               usb_gadget_remove_driver(udc);
-               list_add(&driver->pending, &gadget_driver_pending_list);
-       }
        mutex_unlock(&udc_lock);
 
        kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
        flush_work(&gadget->work);
-       device_unregister(&udc->dev);
        device_del(&gadget->dev);
+       ida_free(&gadget_id_numbers, gadget->id_number);
+       device_unregister(&udc->dev);
 }
 EXPORT_SYMBOL_GPL(usb_del_gadget);
 
 /**
- * usb_del_gadget_udc - deletes @udc from udc_list
- * @gadget: the gadget to be removed.
+ * usb_del_gadget_udc - unregisters a gadget
+ * @gadget: the gadget to be unregistered.
  *
  * Calls usb_del_gadget() and does a final usb_put_gadget().
  */
@@ -1489,123 +1448,147 @@ EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
 
 /* ------------------------------------------------------------------------- */
 
-static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *driver)
+static int gadget_match_driver(struct device *dev, struct device_driver *drv)
 {
-       int ret;
+       struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+       struct usb_udc *udc = gadget->udc;
+       struct usb_gadget_driver *driver = container_of(drv,
+                       struct usb_gadget_driver, driver);
 
-       dev_dbg(&udc->dev, "registering UDC driver [%s]\n",
-                       driver->function);
+       /* If the driver specifies a udc_name, it must match the UDC's name */
+       if (driver->udc_name &&
+                       strcmp(driver->udc_name, dev_name(&udc->dev)) != 0)
+               return 0;
+
+       /* If the driver is already bound to a gadget, it doesn't match */
+       if (driver->is_bound)
+               return 0;
+
+       /* Otherwise any gadget driver matches any UDC */
+       return 1;
+}
 
+static int gadget_bind_driver(struct device *dev)
+{
+       struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+       struct usb_udc *udc = gadget->udc;
+       struct usb_gadget_driver *driver = container_of(dev->driver,
+                       struct usb_gadget_driver, driver);
+       int ret = 0;
+
+       mutex_lock(&udc_lock);
+       if (driver->is_bound) {
+               mutex_unlock(&udc_lock);
+               return -ENXIO;          /* Driver binds to only one gadget */
+       }
+       driver->is_bound = true;
        udc->driver = driver;
-       udc->gadget->dev.driver = &driver->driver;
+       mutex_unlock(&udc_lock);
+
+       dev_dbg(&udc->dev, "binding gadget driver [%s]\n", driver->function);
 
        usb_gadget_udc_set_speed(udc, driver->max_speed);
 
+       mutex_lock(&udc_lock);
        ret = driver->bind(udc->gadget, driver);
        if (ret)
-               goto err1;
+               goto err_bind;
+
        ret = usb_gadget_udc_start(udc);
-       if (ret) {
-               driver->unbind(udc->gadget);
-               goto err1;
-       }
+       if (ret)
+               goto err_start;
        usb_gadget_enable_async_callbacks(udc);
        usb_udc_connect_control(udc);
+       mutex_unlock(&udc_lock);
 
        kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
        return 0;
-err1:
+
+ err_start:
+       driver->unbind(udc->gadget);
+
+ err_bind:
        if (ret != -EISNAM)
                dev_err(&udc->dev, "failed to start %s: %d\n",
-                       udc->driver->function, ret);
+                       driver->function, ret);
+
        udc->driver = NULL;
-       udc->gadget->dev.driver = NULL;
+       driver->is_bound = false;
+       mutex_unlock(&udc_lock);
+
        return ret;
 }
 
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
+static void gadget_unbind_driver(struct device *dev)
 {
-       struct usb_udc          *udc = NULL, *iter;
-       int                     ret = -ENODEV;
+       struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+       struct usb_udc *udc = gadget->udc;
+       struct usb_gadget_driver *driver = udc->driver;
+
+       dev_dbg(&udc->dev, "unbinding gadget driver [%s]\n", driver->function);
+
+       kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+
+       mutex_lock(&udc_lock);
+       usb_gadget_disconnect(gadget);
+       usb_gadget_disable_async_callbacks(udc);
+       if (gadget->irq)
+               synchronize_irq(gadget->irq);
+       udc->driver->unbind(gadget);
+       usb_gadget_udc_stop(udc);
+
+       driver->is_bound = false;
+       udc->driver = NULL;
+       mutex_unlock(&udc_lock);
+}
+
+/* ------------------------------------------------------------------------- */
+
+int usb_gadget_register_driver_owner(struct usb_gadget_driver *driver,
+               struct module *owner, const char *mod_name)
+{
+       int ret;
 
        if (!driver || !driver->bind || !driver->setup)
                return -EINVAL;
 
+       driver->driver.bus = &gadget_bus_type;
+       driver->driver.owner = owner;
+       driver->driver.mod_name = mod_name;
+       ret = driver_register(&driver->driver);
+       if (ret) {
+               pr_warn("%s: driver registration failed: %d\n",
+                               driver->function, ret);
+               return ret;
+       }
+
        mutex_lock(&udc_lock);
-       if (driver->udc_name) {
-               list_for_each_entry(iter, &udc_list, list) {
-                       ret = strcmp(driver->udc_name, dev_name(&iter->dev));
-                       if (ret)
-                               continue;
-                       udc = iter;
-                       break;
-               }
-               if (ret)
-                       ret = -ENODEV;
-               else if (udc->driver)
+       if (!driver->is_bound) {
+               if (driver->match_existing_only) {
+                       pr_warn("%s: couldn't find an available UDC or it's busy\n",
+                                       driver->function);
                        ret = -EBUSY;
-               else
-                       goto found;
-       } else {
-               list_for_each_entry(iter, &udc_list, list) {
-                       /* For now we take the first one */
-                       if (iter->driver)
-                               continue;
-                       udc = iter;
-                       goto found;
+               } else {
+                       pr_info("%s: couldn't find an available UDC\n",
+                                       driver->function);
+                       ret = 0;
                }
        }
-
-       if (!driver->match_existing_only) {
-               list_add_tail(&driver->pending, &gadget_driver_pending_list);
-               pr_info("couldn't find an available UDC - added [%s] to list of pending drivers\n",
-                       driver->function);
-               ret = 0;
-       }
-
        mutex_unlock(&udc_lock);
+
        if (ret)
-               pr_warn("couldn't find an available UDC or it's busy: %d\n", ret);
-       return ret;
-found:
-       ret = udc_bind_to_driver(udc, driver);
-       mutex_unlock(&udc_lock);
+               driver_unregister(&driver->driver);
        return ret;
 }
-EXPORT_SYMBOL_GPL(usb_gadget_probe_driver);
+EXPORT_SYMBOL_GPL(usb_gadget_register_driver_owner);
 
 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
 {
-       struct usb_udc          *udc = NULL;
-       int                     ret = -ENODEV;
-
        if (!driver || !driver->unbind)
                return -EINVAL;
 
-       mutex_lock(&udc_lock);
-       list_for_each_entry(udc, &udc_list, list) {
-               if (udc->driver == driver) {
-                       usb_gadget_remove_driver(udc);
-                       usb_gadget_set_state(udc->gadget,
-                                            USB_STATE_NOTATTACHED);
-
-                       /* Maybe there is someone waiting for this UDC? */
-                       check_pending_gadget_drivers(udc);
-                       /*
-                        * For now we ignore bind errors as probably it's
-                        * not a valid reason to fail other's gadget unbind
-                        */
-                       ret = 0;
-                       break;
-               }
-       }
-
-       if (ret) {
-               list_del(&driver->pending);
-               ret = 0;
-       }
-       mutex_unlock(&udc_lock);
-       return ret;
+       driver_unregister(&driver->driver);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
 
@@ -1757,8 +1740,17 @@ static int usb_udc_uevent(struct device *dev, struct kobj_uevent_env *env)
        return 0;
 }
 
+static struct bus_type gadget_bus_type = {
+       .name = "gadget",
+       .probe = gadget_bind_driver,
+       .remove = gadget_unbind_driver,
+       .match = gadget_match_driver,
+};
+
 static int __init usb_udc_init(void)
 {
+       int rc;
+
        udc_class = class_create(THIS_MODULE, "udc");
        if (IS_ERR(udc_class)) {
                pr_err("failed to create udc class --> %ld\n",
@@ -1767,12 +1759,17 @@ static int __init usb_udc_init(void)
        }
 
        udc_class->dev_uevent = usb_udc_uevent;
-       return 0;
+
+       rc = bus_register(&gadget_bus_type);
+       if (rc)
+               class_destroy(udc_class);
+       return rc;
 }
 subsys_initcall(usb_udc_init);
 
 static void __exit usb_udc_exit(void)
 {
+       bus_unregister(&gadget_bus_type);
        class_destroy(udc_class);
 }
 module_exit(usb_udc_exit);
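
Taken together, the udc/core.c hunks above replace the private udc_list matching and the pending-driver list with a real driver-model bus: gadget devices sit on gadget_bus_type, and gadget_match_driver(), gadget_bind_driver() and gadget_unbind_driver() become the bus match/probe/remove callbacks. As a rough sketch of a client under the new scheme (illustrative only; all demo_* names are invented, and real function drivers normally sit on top of the composite framework rather than registering directly), a gadget driver fills in bind/unbind/setup and registers through usb_gadget_register_driver_owner():

        #include <linux/module.h>
        #include <linux/usb/gadget.h>

        static int demo_bind(struct usb_gadget *gadget,
                             struct usb_gadget_driver *driver)
        {
                /* allocate ep0 requests, claim endpoints, etc. */
                return 0;
        }

        static void demo_unbind(struct usb_gadget *gadget)
        {
                /* release whatever demo_bind() set up */
        }

        static int demo_setup(struct usb_gadget *gadget,
                              const struct usb_ctrlrequest *ctrl)
        {
                return -EOPNOTSUPP;     /* no control requests handled in this sketch */
        }

        static struct usb_gadget_driver demo_gadget_driver = {
                .function       = "demo",
                .max_speed      = USB_SPEED_HIGH,
                .bind           = demo_bind,
                .unbind         = demo_unbind,
                .setup          = demo_setup,
                /* .udc_name and .match_existing_only are optional; see
                 * gadget_match_driver() and the registration path above. */
        };

        static int __init demo_init(void)
        {
                /* Puts demo_gadget_driver.driver on gadget_bus_type; binding then
                 * happens through the bus probe path (gadget_bind_driver()). */
                return usb_gadget_register_driver_owner(&demo_gadget_driver,
                                                        THIS_MODULE, KBUILD_MODNAME);
        }
        module_init(demo_init);

        static void __exit demo_exit(void)
        {
                usb_gadget_unregister_driver(&demo_gadget_driver);
        }
        module_exit(demo_exit);

        MODULE_LICENSE("GPL");
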
index 6a88846..c97cd4b 100644 (file)
@@ -71,7 +71,7 @@ static ushort dma_ep = 1;
 module_param(dma_ep, ushort, 0644);
 
 /*
- * dma_mode: net2272 dma mode setting (see LOCCTL1 definiton):
+ * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
  *     mode 0 == Slow DREQ mode
  *     mode 1 == Fast DREQ mode
  *     mode 2 == Burst mode
@@ -97,7 +97,7 @@ module_param(fifo_mode, ushort, 0644);
 /*
  * enable_suspend: When enabled, the driver will respond to
  * USB suspend requests by powering down the NET2272.  Otherwise,
- * USB suspend requests will be ignored.  This is acceptible for
+ * USB suspend requests will be ignored.  This is acceptable for
  * self-powered devices.  For bus powered devices set this to 1.
  */
 static ushort enable_suspend = 0;
@@ -288,7 +288,7 @@ static void net2272_ep_reset(struct net2272_ep *ep)
                          | (1 << LOCAL_OUT_ZLP)
                          | (1 << BUFFER_FLUSH));
 
-       /* fifo size is handled seperately */
+       /* fifo size is handled separately */
 }
 
 static int net2272_disable(struct usb_ep *_ep)
index 051d024..d6a6863 100644 (file)
@@ -932,19 +932,11 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
 static inline void
 queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
 {
-       struct net2280_dma      *end;
-       dma_addr_t              tmp;
-
        /* swap new dummy for old, link; fill and maybe activate */
-       end = ep->dummy;
-       ep->dummy = req->td;
-       req->td = end;
-
-       tmp = ep->td_dma;
-       ep->td_dma = req->td_dma;
-       req->td_dma = tmp;
+       swap(ep->dummy, req->td);
+       swap(ep->td_dma, req->td_dma);
 
-       end->dmadesc = cpu_to_le32 (ep->td_dma);
+       req->td->dmadesc = cpu_to_le32 (ep->td_dma);
 
        fill_dma_desc(ep, req, valid);
 }
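
The net2280 hunk above replaces the hand-rolled three-step exchanges with the generic kernel swap() macro, which is essentially (paraphrased from the generic headers):

        #define swap(a, b) \
                do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

After the two swap() calls, req->td holds the descriptor that used to be ep->dummy, which is why the final write becomes req->td->dmadesc instead of end->dmadesc; the behaviour is unchanged.
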
index 5096d24..61cabb9 100644 (file)
@@ -1470,7 +1470,7 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
                        if (!udc->ep0_in) {
                                stat = 0;
                                /* read next OUT packet of request, maybe
-                                * reactiviting the fifo; stall on errors.
+                                * reactivating the fifo; stall on errors.
                                 */
                                stat = read_fifo(ep0, req);
                                if (!req || stat < 0) {
@@ -2609,6 +2609,8 @@ static void omap_udc_release(struct device *dev)
        if (udc->dc_clk) {
                if (udc->clk_requested)
                        omap_udc_enable_clock(0);
+               clk_unprepare(udc->hhc_clk);
+               clk_unprepare(udc->dc_clk);
                clk_put(udc->hhc_clk);
                clk_put(udc->dc_clk);
        }
@@ -2773,8 +2775,8 @@ static int omap_udc_probe(struct platform_device *pdev)
                hhc_clk = clk_get(&pdev->dev, "usb_hhc_ck");
                BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
                /* can't use omap_udc_enable_clock yet */
-               clk_enable(dc_clk);
-               clk_enable(hhc_clk);
+               clk_prepare_enable(dc_clk);
+               clk_prepare_enable(hhc_clk);
                udelay(100);
        }
 
@@ -2783,8 +2785,8 @@ static int omap_udc_probe(struct platform_device *pdev)
                hhc_clk = clk_get(&pdev->dev, "l3_ocpi_ck");
                BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
                /* can't use omap_udc_enable_clock yet */
-               clk_enable(dc_clk);
-               clk_enable(hhc_clk);
+               clk_prepare_enable(dc_clk);
+               clk_prepare_enable(hhc_clk);
                udelay(100);
        }
 
@@ -2932,8 +2934,8 @@ cleanup0:
                usb_put_phy(xceiv);
 
        if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
-               clk_disable(hhc_clk);
-               clk_disable(dc_clk);
+               clk_disable_unprepare(hhc_clk);
+               clk_disable_unprepare(dc_clk);
                clk_put(hhc_clk);
                clk_put(dc_clk);
        }
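
The omap_udc hunks above move from clk_enable()/clk_disable() to the prepare-aware helpers and add clk_unprepare() on the release path: on common-clock platforms a clock must be prepared (a possibly sleeping step) before it can be enabled (an atomic step). A minimal sketch of that pairing; only the "usb_hhc_ck" name comes from the hunk, the rest is invented for illustration:

        #include <linux/clk.h>
        #include <linux/device.h>
        #include <linux/err.h>

        static int demo_clock_on(struct device *dev, struct clk **out)
        {
                struct clk *ck = clk_get(dev, "usb_hhc_ck");
                int ret;

                if (IS_ERR(ck))
                        return PTR_ERR(ck);

                /* Prepare (may sleep) and enable (atomic) in one call. */
                ret = clk_prepare_enable(ck);
                if (ret) {
                        clk_put(ck);
                        return ret;
                }

                *out = ck;
                return 0;
        }

        static void demo_clock_off(struct clk *ck)
        {
                /* Undo both steps in reverse order, then drop the reference. */
                clk_disable_unprepare(ck);
                clk_put(ck);
        }
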
index 6c414c9..c593fc3 100644 (file)
 #include <linux/usb/gadget.h>
 #include <linux/usb/otg.h>
 
-#ifdef CONFIG_ARCH_LUBBOCK
-#include <mach/lubbock.h>
-#endif
-
 #define UDCCR   0x0000 /* UDC Control Register */
 #define UDC_RES1 0x0004 /* UDC Undocumented - Reserved1 */
 #define UDC_RES2 0x0008 /* UDC Undocumented - Reserved2 */
@@ -1578,18 +1574,15 @@ lubbock_vbus_irq(int irq, void *_dev)
        int                     vbus;
 
        dev->stats.irqs++;
-       switch (irq) {
-       case LUBBOCK_USB_IRQ:
+       if (irq == dev->usb_irq) {
                vbus = 1;
-               disable_irq(LUBBOCK_USB_IRQ);
-               enable_irq(LUBBOCK_USB_DISC_IRQ);
-               break;
-       case LUBBOCK_USB_DISC_IRQ:
+               disable_irq(dev->usb_irq);
+               enable_irq(dev->usb_disc_irq);
+       } else if (irq == dev->usb_disc_irq) {
                vbus = 0;
-               disable_irq(LUBBOCK_USB_DISC_IRQ);
-               enable_irq(LUBBOCK_USB_IRQ);
-               break;
-       default:
+               disable_irq(dev->usb_disc_irq);
+               enable_irq(dev->usb_irq);
+       } else {
                return IRQ_NONE;
        }
 
@@ -2422,20 +2415,28 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
 
 #ifdef CONFIG_ARCH_LUBBOCK
        if (machine_is_lubbock()) {
-               retval = devm_request_irq(&pdev->dev, LUBBOCK_USB_DISC_IRQ,
+               dev->usb_irq = platform_get_irq(pdev, 1);
+               if (dev->usb_irq < 0)
+                       return dev->usb_irq;
+
+               dev->usb_disc_irq = platform_get_irq(pdev, 2);
+               if (dev->usb_disc_irq < 0)
+                       return dev->usb_disc_irq;
+
+               retval = devm_request_irq(&pdev->dev, dev->usb_disc_irq,
                                          lubbock_vbus_irq, 0, driver_name,
                                          dev);
                if (retval != 0) {
                        pr_err("%s: can't get irq %i, err %d\n",
-                               driver_name, LUBBOCK_USB_DISC_IRQ, retval);
+                               driver_name, dev->usb_disc_irq, retval);
                        goto err;
                }
-               retval = devm_request_irq(&pdev->dev, LUBBOCK_USB_IRQ,
+               retval = devm_request_irq(&pdev->dev, dev->usb_irq,
                                          lubbock_vbus_irq, 0, driver_name,
                                          dev);
                if (retval != 0) {
                        pr_err("%s: can't get irq %i, err %d\n",
-                               driver_name, LUBBOCK_USB_IRQ, retval);
+                               driver_name, dev->usb_irq, retval);
                        goto err;
                }
        } else
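
With the <mach/lubbock.h> include gone, the driver can no longer use the LUBBOCK_USB_IRQ/LUBBOCK_USB_DISC_IRQ constants directly; the hunk above reads the VBUS connect and disconnect interrupts back as platform IRQ resources at indices 1 and 2 instead. A purely hypothetical board-side counterpart (the resource layout, address and IRQ numbers below are placeholders, not taken from the real Lubbock board file):

        #include <linux/ioport.h>
        #include <linux/kernel.h>
        #include <linux/platform_device.h>

        static struct resource demo_udc_resources[] = {
                DEFINE_RES_MEM(0x40600000, 0x10000),    /* UDC registers (placeholder)  */
                DEFINE_RES_IRQ(11),                     /* IRQ index 0: UDC interrupt   */
                DEFINE_RES_IRQ(151),                    /* IRQ index 1: VBUS connect    */
                DEFINE_RES_IRQ(152),                    /* IRQ index 2: VBUS disconnect */
        };

        static struct platform_device demo_udc_device = {
                .name           = "pxa25x-udc",         /* assumed driver name */
                .id             = -1,
                .resource       = demo_udc_resources,
                .num_resources  = ARRAY_SIZE(demo_udc_resources),
        };
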
index aa4b68f..6ab6047 100644 (file)
@@ -117,16 +117,13 @@ struct pxa25x_udc {
        u64                                     dma_mask;
        struct pxa25x_ep                        ep [PXA_UDC_NUM_ENDPOINTS];
        void __iomem                            *regs;
+       int                                     usb_irq;
+       int                                     usb_disc_irq;
 };
 #define to_pxa25x(g)   (container_of((g), struct pxa25x_udc, gadget))
 
 /*-------------------------------------------------------------------------*/
 
-#ifdef CONFIG_ARCH_LUBBOCK
-#include <mach/lubbock.h>
-/* lubbock can also report usb connect/disconnect irqs */
-#endif
-
 static struct pxa25x_udc *the_controller;
 
 /*-------------------------------------------------------------------------*/
index 0a6bc18..31bf79c 100644 (file)
@@ -326,7 +326,7 @@ struct udc_usb_ep {
  * @addr: usb endpoint number
  * @config: configuration in which this endpoint is active
  * @interface: interface in which this endpoint is active
- * @alternate: altsetting in which this endpoitn is active
+ * @alternate: altsetting in which this endpoint is active
  * @fifo_size: max packet size in the endpoint fifo
  * @type: endpoint type (bulk, iso, int, ...)
  * @udccsr_value: save register of UDCCSR0 for suspend/resume
index bf803e0..4b7eb77 100644 (file)
@@ -126,7 +126,7 @@ struct s3c_hsudc_req {
 /**
  * struct s3c_hsudc - Driver's abstraction of the device controller.
  * @gadget: Instance of usb_gadget which is referenced by gadget driver.
- * @driver: Reference to currenty active gadget driver.
+ * @driver: Reference to currently active gadget driver.
  * @dev: The device reference used by probe function.
  * @lock: Lock to synchronize the usage of Endpoints (EP's are indexed).
  * @regs: Remapped base address of controller's register space.
@@ -633,7 +633,7 @@ static void s3c_hsudc_process_setup(struct s3c_hsudc *hsudc)
 }
 
 /** s3c_hsudc_handle_ep0_intr - Handle endpoint 0 interrupt.
- * @hsudc: Device controller on which endpoint 0 interrupt has occured.
+ * @hsudc: Device controller on which endpoint 0 interrupt has occurred.
  *
  * Handle endpoint 0 interrupt when it occurs. EP0 interrupt could occur
  * when a stall handshake is sent to host or data is sent/received on
index d9c406b..6d31ccf 100644 (file)
@@ -1434,7 +1434,7 @@ __tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
                return 0;
        }
 
-       /* Halt DMA for this endpiont. */
+       /* Halt DMA for this endpoint. */
        if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
                ep_pause(xudc, ep->index);
                ep_wait_for_inactive(xudc, ep->index);
@@ -3423,7 +3423,7 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
        }
 
        /*
-        * Compliacne suite appears to be violating polling LFPS tBurst max
+        * Compliance suite appears to be violating polling LFPS tBurst max
         * of 1.4us.  Send 1.45us instead.
         */
        val = xudc_readl(xudc, SSPX_CORE_CNT32);
index 428c755..4827e3c 100644 (file)
@@ -632,7 +632,7 @@ top:
                dev_dbg(udc->dev, "read %s, %d bytes%s req %p %d/%d\n",
                        ep->ep_usb.name, count, is_short ? "/S" : "", req,
                        req->usb_req.actual, req->usb_req.length);
-               bufferspace -= count;
+
                /* Completion */
                if ((req->usb_req.actual == req->usb_req.length) || is_short) {
                        if (udc->dma_enabled && req->usb_req.length)
index 7f4a03e..8c45bc1 100644 (file)
@@ -61,11 +61,6 @@ static inline void ehci_write(void __iomem *base, u32 reg, u32 val)
        __raw_writel(val, base + reg);
 }
 
-static inline u32 ehci_read(void __iomem *base, u32 reg)
-{
-       return __raw_readl(base + reg);
-}
-
 /* configure so an HC device and id are always provided */
 /* always called with process context; sleeping is OK */
 
index 1115431..f343967 100644 (file)
@@ -518,6 +518,7 @@ static struct platform_driver ehci_platform_driver = {
                .pm     = pm_ptr(&ehci_platform_pm_ops),
                .of_match_table = vt8500_ehci_ids,
                .acpi_match_table = ACPI_PTR(ehci_acpi_match),
+               .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        }
 };
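
Here (ehci-platform) and in the matching ohci-platform hunk further down, the driver opts in to asynchronous probing via .probe_type, so a slow probe no longer serialises device registration. A minimal illustrative pattern on an invented driver (demo_* names are not part of this merge):

        #include <linux/module.h>
        #include <linux/platform_device.h>

        static int demo_probe(struct platform_device *pdev)
        {
                return 0;       /* resource setup would go here */
        }

        static int demo_remove(struct platform_device *pdev)
        {
                return 0;
        }

        static struct platform_driver demo_driver = {
                .probe  = demo_probe,
                .remove = demo_remove,
                .driver = {
                        .name           = "demo",
                        /* Probe from a worker instead of blocking the
                         * registering thread. */
                        .probe_type     = PROBE_PREFER_ASYNCHRONOUS,
                },
        };
        module_platform_driver(demo_driver);

        MODULE_LICENSE("GPL");
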
 
index a2a5c29..1163af6 100644 (file)
@@ -645,7 +645,7 @@ qh_urb_transaction (
                token |= (1 /* "in" */ << 8);
        /* else it's already initted to "out" pid (0 << 8) */
 
-       maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+       maxpacket = usb_maxpacket(urb->dev, urb->pipe);
 
        /*
         * buffer gets wrapped in one or more qtds;
@@ -1218,7 +1218,7 @@ static int ehci_submit_single_step_set_feature(
 
        token |= (1 /* "in" */ << 8);  /*This is IN stage*/
 
-       maxpacket = usb_maxpacket(urb->dev, urb->pipe, 0);
+       maxpacket = usb_maxpacket(urb->dev, urb->pipe);
 
        qtd_fill(ehci, qtd, buf, len, token, maxpacket);
 
index 67a6ee8..3d78937 100644 (file)
@@ -32,6 +32,8 @@
  * There are cases when the host controller fails to enable the port due to,
  * for example, insufficient power that can be supplied to the device from
  * the USB bus. In those cases, the messages printed here are not helpful.
+ *
+ * Return: Always return 0
  */
 static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
 {
@@ -46,11 +48,9 @@ static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
                dev_warn(hcd->self.controller,
                        "Maybe your device is not a high speed device?\n");
                dev_warn(hcd->self.controller,
-                       "The USB host controller does not support full speed "
-                       "nor low speed devices\n");
+                       "The USB host controller does not support full speed nor low speed devices\n");
                dev_warn(hcd->self.controller,
-                       "You can reconfigure the host controller to have "
-                       "full speed support\n");
+                       "You can reconfigure the host controller to have full speed support\n");
        }
 
        return 0;
@@ -112,6 +112,8 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
  * host controller. Because the Xilinx USB host controller can be configured
  * as HS only or HS/FS only, it checks the configuration in the device tree
  * entry, and sets an appropriate value for hcd->has_tt.
+ *
+ * Return: zero on success, negative error code otherwise
  */
 static int ehci_hcd_xilinx_of_probe(struct platform_device *op)
 {
@@ -196,6 +198,8 @@ err_irq:
  *
  * Remove the hcd structure, and release resources that has been requested
  * during probe.
+ *
+ * Return: Always return 0
  */
 static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
 {
index a8e1048..2ba09c3 100644 (file)
@@ -408,8 +408,7 @@ static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
                        size++;
                else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
                         && (urb->transfer_buffer_length
-                            % usb_maxpacket(urb->dev, pipe,
-                                            usb_pipeout(pipe))) != 0)
+                            % usb_maxpacket(urb->dev, pipe)) != 0)
                        size++;
                break;
        case PIPE_ISOCHRONOUS:
index c3fd375..f8c111e 100644 (file)
@@ -2596,7 +2596,7 @@ static struct list_head *qh_urb_transaction(struct fotg210_hcd *fotg210,
                token |= (1 /* "in" */ << 8);
        /* else it's already initted to "out" pid (0 << 8) */
 
-       maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+       maxpacket = usb_maxpacket(urb->dev, urb->pipe);
 
        /*
         * buffer gets wrapped in one or more qtds;
index 8835f6b..4f564d7 100644 (file)
@@ -726,7 +726,7 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
                INIT_LIST_HEAD(&ep->schedule);
                ep->udev = udev;
                ep->epnum = epnum;
-               ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
+               ep->maxpacket = usb_maxpacket(udev, urb->pipe);
                usb_settoggle(udev, epnum, is_out, 0);
 
                if (type == PIPE_CONTROL) {
@@ -757,8 +757,7 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
                        ep->load = usb_calc_bus_time(udev->speed,
                                                     !is_out,
                                                     (type == PIPE_ISOCHRONOUS),
-                                                    usb_maxpacket(udev, pipe,
-                                                                  is_out)) /
+                                                    usb_maxpacket(udev, pipe)) /
                            1000;
                }
                hep->hcpriv = ep;
@@ -1541,10 +1540,12 @@ static int isp116x_remove(struct platform_device *pdev)
 
        iounmap(isp116x->data_reg);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       release_mem_region(res->start, 2);
+       if (res)
+               release_mem_region(res->start, 2);
        iounmap(isp116x->addr_reg);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(res->start, 2);
+       if (res)
+               release_mem_region(res->start, 2);
 
        usb_put_hcd(hcd);
        return 0;
index d8610ce..0e14d1d 100644 (file)
@@ -1279,7 +1279,7 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd,
                ep->udev = usb_get_dev(udev);
                ep->hep = hep;
                ep->epnum = epnum;
-               ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
+               ep->maxpacket = usb_maxpacket(udev, urb->pipe);
                ep->ptd_offset = -EINVAL;
                ep->ptd_index = -EINVAL;
                usb_settoggle(udev, epnum, is_out, 0);
@@ -1299,8 +1299,8 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd,
                        ep->interval = urb->interval;
                        ep->branch = PERIODIC_SIZE;
                        ep->load = usb_calc_bus_time(udev->speed, !is_out,
-                                                    (type == PIPE_ISOCHRONOUS),
-                                                    usb_maxpacket(udev, pipe, is_out)) / 1000;
+                                                    type == PIPE_ISOCHRONOUS,
+                                                    usb_maxpacket(udev, pipe)) / 1000;
                        break;
                }
                hep->hcpriv = ep;
index 99a5523..502a3ac 100644 (file)
@@ -546,7 +546,7 @@ max3421_transfer_out(struct usb_hcd *hcd, struct urb *urb, int fast_retransmit)
                return MAX3421_HXFR_BULK_OUT(epnum);
        }
 
-       max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
+       max_packet = usb_maxpacket(urb->dev, urb->pipe);
 
        if (max_packet > MAX3421_FIFO_SIZE) {
                /*
@@ -952,7 +952,7 @@ max3421_transfer_in_done(struct usb_hcd *hcd, struct urb *urb)
         * USB 2.0 Section 5.3.2 Pipes: packets must be full size
         * except for last one.
         */
-       max_packet = usb_maxpacket(urb->dev, urb->pipe, 0);
+       max_packet = usb_maxpacket(urb->dev, urb->pipe);
        if (max_packet > MAX3421_FIFO_SIZE) {
                /*
                 * We do not support isochronous transfers at this
@@ -998,7 +998,7 @@ max3421_transfer_out_done(struct usb_hcd *hcd, struct urb *urb)
                 * max_packet as an indicator that the end of the
                 * packet has been reached).
                 */
-               u32 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
+               u32 max_packet = usb_maxpacket(urb->dev, urb->pipe);
 
                if (max3421_hcd->curr_len == max_packet)
                        return 0;
index 666b1c6..c4c821c 100644 (file)
@@ -181,8 +181,7 @@ static int ohci_urb_enqueue (
                                size++;
                        else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
                                && (urb->transfer_buffer_length
-                                       % usb_maxpacket (urb->dev, pipe,
-                                               usb_pipeout (pipe))) == 0)
+                                       % usb_maxpacket(urb->dev, pipe)) == 0)
                                size++;
                        break;
                case PIPE_ISOCHRONOUS: /* number of packets from URB */
index 069791d..f5bc9c8 100644 (file)
@@ -259,6 +259,10 @@ static int ohci_hcd_omap_probe(struct platform_device *pdev)
                goto err_put_hcd;
        }
 
+       retval = clk_prepare(priv->usb_host_ck);
+       if (retval)
+               goto err_put_host_ck;
+
        if (!cpu_is_omap15xx())
                priv->usb_dc_ck = clk_get(&pdev->dev, "usb_dc_ck");
        else
@@ -266,13 +270,17 @@ static int ohci_hcd_omap_probe(struct platform_device *pdev)
 
        if (IS_ERR(priv->usb_dc_ck)) {
                retval = PTR_ERR(priv->usb_dc_ck);
-               goto err_put_host_ck;
+               goto err_unprepare_host_ck;
        }
 
+       retval = clk_prepare(priv->usb_dc_ck);
+       if (retval)
+               goto err_put_dc_ck;
+
        if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
                dev_dbg(&pdev->dev, "request_mem_region failed\n");
                retval = -EBUSY;
-               goto err_put_dc_ck;
+               goto err_unprepare_dc_ck;
        }
 
        hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
@@ -297,8 +305,12 @@ err3:
        iounmap(hcd->regs);
 err2:
        release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err_unprepare_dc_ck:
+       clk_unprepare(priv->usb_dc_ck);
 err_put_dc_ck:
        clk_put(priv->usb_dc_ck);
+err_unprepare_host_ck:
+       clk_unprepare(priv->usb_host_ck);
 err_put_host_ck:
        clk_put(priv->usb_host_ck);
 err_put_hcd:
@@ -333,7 +345,9 @@ static int ohci_hcd_omap_remove(struct platform_device *pdev)
        }
        iounmap(hcd->regs);
        release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+       clk_unprepare(priv->usb_dc_ck);
        clk_put(priv->usb_dc_ck);
+       clk_unprepare(priv->usb_host_ck);
        clk_put(priv->usb_host_ck);
        usb_put_hcd(hcd);
        return 0;
index 4a8456f..47dfbfe 100644 (file)
@@ -334,6 +334,7 @@ static struct platform_driver ohci_platform_driver = {
                .name   = "ohci-platform",
                .pm     = &ohci_platform_pm_ops,
                .of_match_table = ohci_platform_ids,
+               .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        }
 };
 
index 45f7cce..1960b8d 100644 (file)
@@ -19,9 +19,6 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 
-#include <asm/prom.h>
-
-
 static int
 ohci_ppc_of_start(struct usb_hcd *hcd)
 {
index 54aa5c7..ab4f610 100644 (file)
@@ -36,8 +36,7 @@
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
 #include <linux/usb/otg.h>
-
-#include <mach/hardware.h>
+#include <linux/soc/pxa/cpu.h>
 
 #include "ohci.h"
 
index b741670..3a44131 100644 (file)
@@ -1685,7 +1685,7 @@ static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
                token |= (1 /* "in" */ << 8);
        /* else it's already initted to "out" pid (0 << 8) */
 
-       maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+       maxpacket = usb_maxpacket(urb->dev, urb->pipe);
 
        /*
         * buffer gets wrapped in one or more qtds;
@@ -1796,7 +1796,7 @@ static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
 
        is_input = usb_pipein(urb->pipe);
        type = usb_pipetype(urb->pipe);
-       maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+       maxp = usb_maxpacket(urb->dev, urb->pipe);
 
        /* Compute interrupt scheduling parameters just once, and save.
         * - allowing for high bandwidth, how many nsec/uframe are used?
@@ -3909,8 +3909,10 @@ static int oxu_bus_suspend(struct usb_hcd *hcd)
                }
        }
 
+       spin_unlock_irq(&oxu->lock);
        /* turn off now-idle HC */
        del_timer_sync(&oxu->watchdog);
+       spin_lock_irq(&oxu->lock);
        ehci_halt(oxu);
        hcd->state = HC_STATE_SUSPENDED;
 
@@ -4223,13 +4225,9 @@ static int oxu_drv_probe(struct platform_device *pdev)
        /*
         * Get the platform resources
         */
-       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!res) {
-               dev_err(&pdev->dev,
-                       "no IRQ! Check %s setup!\n", dev_name(&pdev->dev));
-               return -ENODEV;
-       }
-       irq = res->start;
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
        dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index 63719cd..abb88dd 100644 (file)
@@ -1867,8 +1867,7 @@ static struct r8a66597_td *r8a66597_make_td(struct r8a66597 *r8a66597,
        td->pipe = hep->hcpriv;
        td->urb = urb;
        td->address = get_urb_to_r8a66597_addr(r8a66597, urb);
-       td->maxpacket = usb_maxpacket(urb->dev, urb->pipe,
-                                     !usb_pipein(urb->pipe));
+       td->maxpacket = usb_maxpacket(urb->dev, urb->pipe);
        if (usb_pipecontrol(urb->pipe))
                td->type = USB_PID_SETUP;
        else if (usb_pipein(urb->pipe))
index 8562373..d206bd9 100644 (file)
@@ -842,7 +842,7 @@ static int sl811h_urb_enqueue(
                INIT_LIST_HEAD(&ep->schedule);
                ep->udev = udev;
                ep->epnum = epnum;
-               ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
+               ep->maxpacket = usb_maxpacket(udev, urb->pipe);
                ep->defctrl = SL11H_HCTLMASK_ARM | SL11H_HCTLMASK_ENABLE;
                usb_settoggle(udev, epnum, is_out, 0);
 
@@ -878,8 +878,8 @@ static int sl811h_urb_enqueue(
                        if (type == PIPE_ISOCHRONOUS)
                                ep->defctrl |= SL11H_HCTLMASK_ISOCH;
                        ep->load = usb_calc_bus_time(udev->speed, !is_out,
-                               (type == PIPE_ISOCHRONOUS),
-                               usb_maxpacket(udev, pipe, is_out))
+                                                    type == PIPE_ISOCHRONOUS,
+                                                    usb_maxpacket(udev, pipe))
                                        / 1000;
                        break;
                }
index f65f1ba..c54f2bc 100644 (file)
@@ -707,6 +707,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
                                u16 test_mode, u16 wIndex, unsigned long *flags)
        __must_hold(&xhci->lock)
 {
+       struct usb_hcd *usb3_hcd = xhci_get_usb3_hcd(xhci);
        int i, retval;
 
        /* Disable all Device Slots */
@@ -727,7 +728,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
        xhci_dbg(xhci, "Disable all port (PP = 0)\n");
        /* Power off USB3 ports*/
        for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
-               xhci_set_port_power(xhci, xhci->shared_hcd, i, false, flags);
+               xhci_set_port_power(xhci, usb3_hcd, i, false, flags);
        /* Power off USB2 ports*/
        for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
                xhci_set_port_power(xhci, xhci->main_hcd, i, false, flags);
index bbb27ee..8c19e15 100644 (file)
@@ -782,14 +782,6 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
 
 /***************** Device context manipulation *************************/
 
-static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
-               struct xhci_virt_ep *ep)
-{
-       timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
-                   0);
-       ep->xhci = xhci;
-}
-
 static void xhci_free_tt_info(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                int slot_id)
@@ -994,11 +986,11 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
        xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
                        (unsigned long long)dev->in_ctx->dma);
 
-       /* Initialize the cancellation list and watchdog timers for each ep */
+       /* Initialize the cancellation and bandwidth list for each ep */
        for (i = 0; i < 31; i++) {
                dev->eps[i].ep_index = i;
                dev->eps[i].vdev = dev;
-               xhci_init_endpoint_timer(xhci, &dev->eps[i]);
+               dev->eps[i].xhci = xhci;
                INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
                INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
        }
@@ -1072,7 +1064,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
        struct usb_hcd *hcd;
 
        if (udev->speed >= USB_SPEED_SUPER)
-               hcd = xhci->shared_hcd;
+               hcd = xhci_get_usb3_hcd(xhci);
        else
                hcd = xhci->main_hcd;
 
@@ -2362,10 +2354,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
                xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
        }
 
-       /*
-        * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
-        * Not sure how the USB core will handle a hub with no ports...
-        */
+       if (!xhci->usb2_rhub.num_ports)
+               xhci_info(xhci, "USB2 root hub has no ports\n");
+
+       if (!xhci->usb3_rhub.num_ports)
+               xhci_info(xhci, "USB3 root hub has no ports\n");
 
        xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
        xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);
index d7e0e6e..fac9492 100644 (file)
@@ -59,6 +59,7 @@
 #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI            0x9a13
 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI           0x1138
 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI            0x461e
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI          0x464e
 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI        0x51ed
 
 #define PCI_DEVICE_ID_AMD_RENOIR_XHCI                  0x1639
@@ -129,8 +130,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                                pdev->revision == 0x0) {
                        xhci->quirks |= XHCI_RESET_EP_QUIRK;
                        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
-                               "QUIRK: Fresco Logic xHC needs configure"
-                               " endpoint cmd after reset endpoint");
+                               "XHCI_RESET_EP_QUIRK for this evaluation HW is deprecated");
                }
                if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
                                pdev->revision == 0x4) {
@@ -268,6 +268,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
             pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI))
                xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
 
index 649ffd8..0448558 100644 (file)
@@ -180,7 +180,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
        struct device           *sysdev, *tmpdev;
        struct xhci_hcd         *xhci;
        struct resource         *res;
-       struct usb_hcd          *hcd;
+       struct usb_hcd          *hcd, *usb3_hcd;
        int                     ret;
        int                     irq;
        struct xhci_plat_priv   *priv = NULL;
@@ -245,6 +245,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
 
        xhci = hcd_to_xhci(hcd);
 
+       xhci->allow_single_roothub = 1;
+
        /*
         * Not all platforms have clks so it is not an error if the
         * clock do not exist.
@@ -283,12 +285,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
        device_set_wakeup_capable(&pdev->dev, true);
 
        xhci->main_hcd = hcd;
-       xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
-                       dev_name(&pdev->dev), hcd);
-       if (!xhci->shared_hcd) {
-               ret = -ENOMEM;
-               goto disable_clk;
-       }
 
        /* imod_interval is the interrupt moderation value in nanoseconds. */
        xhci->imod_interval = 40000;
@@ -313,16 +309,16 @@ static int xhci_plat_probe(struct platform_device *pdev)
        if (IS_ERR(hcd->usb_phy)) {
                ret = PTR_ERR(hcd->usb_phy);
                if (ret == -EPROBE_DEFER)
-                       goto put_usb3_hcd;
+                       goto disable_clk;
                hcd->usb_phy = NULL;
        } else {
                ret = usb_phy_init(hcd->usb_phy);
                if (ret)
-                       goto put_usb3_hcd;
+                       goto disable_clk;
        }
 
        hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
-       xhci->shared_hcd->tpl_support = hcd->tpl_support;
+
        if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
                hcd->skip_phy_initialization = 1;
 
@@ -333,12 +329,26 @@ static int xhci_plat_probe(struct platform_device *pdev)
        if (ret)
                goto disable_usb_phy;
 
-       if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
-               xhci->shared_hcd->can_do_streams = 1;
+       if (!xhci_has_one_roothub(xhci)) {
+               xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
+                                                   dev_name(&pdev->dev), hcd);
+               if (!xhci->shared_hcd) {
+                       ret = -ENOMEM;
+                       goto dealloc_usb2_hcd;
+               }
 
-       ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
-       if (ret)
-               goto dealloc_usb2_hcd;
+               xhci->shared_hcd->tpl_support = hcd->tpl_support;
+       }
+
+       usb3_hcd = xhci_get_usb3_hcd(xhci);
+       if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4)
+               usb3_hcd->can_do_streams = 1;
+
+       if (xhci->shared_hcd) {
+               ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
+               if (ret)
+                       goto put_usb3_hcd;
+       }
 
        device_enable_async_suspend(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
@@ -352,15 +362,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
        return 0;
 
 
+put_usb3_hcd:
+       usb_put_hcd(xhci->shared_hcd);
+
 dealloc_usb2_hcd:
        usb_remove_hcd(hcd);
 
 disable_usb_phy:
        usb_phy_shutdown(hcd->usb_phy);
 
-put_usb3_hcd:
-       usb_put_hcd(xhci->shared_hcd);
-
 disable_clk:
        clk_disable_unprepare(xhci->clk);
 
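
The xhci-plat hunks above set xhci->allow_single_roothub and only create the shared USB3 hcd when the controller really has both USB2 and USB3 ports, leaning on the xhci_get_usb3_hcd() and xhci_has_one_roothub() helpers that are not shown in this excerpt. One plausible shape for those helpers, consistent with how shared_hcd, main_hcd and the rhub port counts are used here (treat this as an assumption, not the actual xhci.h definitions):

        /* Assumed helper shapes; the real ones live next to struct xhci_hcd in
         * drivers/usb/host/xhci.h. */
        static inline struct usb_hcd *demo_xhci_get_usb3_hcd(struct xhci_hcd *xhci)
        {
                if (xhci->shared_hcd)                   /* classic split: USB3 is the shared hcd */
                        return xhci->shared_hcd;

                if (!xhci->usb2_rhub.num_ports)         /* single roothub that is USB3-only */
                        return xhci->main_hcd;

                return NULL;                            /* USB2-only controller: no USB3 hcd */
        }

        static inline bool demo_xhci_has_one_roothub(struct xhci_hcd *xhci)
        {
                return xhci->allow_single_roothub &&
                       (!xhci->usb2_rhub.num_ports || !xhci->usb3_rhub.num_ports);
        }
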
index f970799..46d0b9a 100644 (file)
@@ -740,14 +740,6 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
        }
 }
 
-static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
-               struct xhci_virt_ep *ep)
-{
-       ep->ep_state &= ~EP_STOP_CMD_PENDING;
-       /* Can't del_timer_sync in interrupt */
-       del_timer(&ep->stop_cmd_timer);
-}
-
 /*
  * Must be called with xhci->lock held in interrupt context,
  * releases and re-acquires xhci->lock
@@ -1122,18 +1114,17 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
                                                          reset_type);
                        if (err)
                                break;
-                       xhci_stop_watchdog_timer_in_irq(xhci, ep);
+                       ep->ep_state &= ~EP_STOP_CMD_PENDING;
                        return;
                case EP_STATE_RUNNING:
                        /* Race, HW handled stop ep cmd before ep was running */
                        xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
 
                        command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
-                       if (!command)
-                               xhci_stop_watchdog_timer_in_irq(xhci, ep);
-
-                       mod_timer(&ep->stop_cmd_timer,
-                                 jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ);
+                       if (!command) {
+                               ep->ep_state &= ~EP_STOP_CMD_PENDING;
+                               return;
+                       }
                        xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
                        xhci_ring_cmd_db(xhci);
 
@@ -1142,9 +1133,10 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
                        break;
                }
        }
+
        /* will queue a set TR deq if stopped on a cancelled, uncleared TD */
        xhci_invalidate_cancelled_tds(ep);
-       xhci_stop_watchdog_timer_in_irq(xhci, ep);
+       ep->ep_state &= ~EP_STOP_CMD_PENDING;
 
        /* Otherwise ring the doorbell(s) to restart queued transfers */
        xhci_giveback_invalidated_tds(ep);
@@ -1248,61 +1240,6 @@ void xhci_hc_died(struct xhci_hcd *xhci)
                usb_hc_died(xhci_to_hcd(xhci));
 }
 
-/* Watchdog timer function for when a stop endpoint command fails to complete.
- * In this case, we assume the host controller is broken or dying or dead.  The
- * host may still be completing some other events, so we have to be careful to
- * let the event ring handler and the URB dequeueing/enqueueing functions know
- * through xhci->state.
- *
- * The timer may also fire if the host takes a very long time to respond to the
- * command, and the stop endpoint command completion handler cannot delete the
- * timer before the timer function is called.  Another endpoint cancellation may
- * sneak in before the timer function can grab the lock, and that may queue
- * another stop endpoint command and add the timer back.  So we cannot use a
- * simple flag to say whether there is a pending stop endpoint command for a
- * particular endpoint.
- *
- * Instead we use a combination of that flag and checking if a new timer is
- * pending.
- */
-void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
-{
-       struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer);
-       struct xhci_hcd *xhci = ep->xhci;
-       unsigned long flags;
-       u32 usbsts;
-       char str[XHCI_MSG_MAX];
-
-       spin_lock_irqsave(&xhci->lock, flags);
-
-       /* bail out if cmd completed but raced with stop ep watchdog timer.*/
-       if (!(ep->ep_state & EP_STOP_CMD_PENDING) ||
-           timer_pending(&ep->stop_cmd_timer)) {
-               spin_unlock_irqrestore(&xhci->lock, flags);
-               xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit");
-               return;
-       }
-       usbsts = readl(&xhci->op_regs->status);
-
-       xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
-       xhci_warn(xhci, "USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));
-
-       ep->ep_state &= ~EP_STOP_CMD_PENDING;
-
-       xhci_halt(xhci);
-
-       /*
-        * handle a stop endpoint cmd timeout as if host died (-ENODEV).
-        * In the future we could distinguish between -ENODEV and -ETIMEDOUT
-        * and try to recover a -ETIMEDOUT with a host controller reset
-        */
-       xhci_hc_died(xhci);
-
-       spin_unlock_irqrestore(&xhci->lock, flags);
-       xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-                       "xHCI host controller is dead.");
-}
-
 static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
                struct xhci_virt_device *dev,
                struct xhci_ring *ep_ring,
@@ -1489,8 +1426,6 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
        /* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
        xhci_invalidate_cancelled_tds(ep);
 
-       if (xhci->quirks & XHCI_RESET_EP_QUIRK)
-               xhci_dbg(xhci, "Note: Removed workaround to queue config ep for this hw");
        /* Clear our internal halted state */
        ep->ep_state &= ~EP_HALTED;
 
@@ -1534,17 +1469,13 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        unsigned int ep_index;
-       unsigned int ep_state;
-       u32 add_flags, drop_flags;
+       u32 add_flags;
 
        /*
-        * Configure endpoint commands can come from the USB core
-        * configuration or alt setting changes, or because the HW
-        * needed an extra configure endpoint command after a reset
-        * endpoint command or streams were being configured.
-        * If the command was for a halted endpoint, the xHCI driver
-        * is not waiting on the configure endpoint command.
+        * Configure endpoint commands can come from the USB core configuration
+        * or alt setting changes, or when streams were being configured.
         */
+
        virt_dev = xhci->devs[slot_id];
        if (!virt_dev)
                return;
@@ -1555,34 +1486,13 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
        }
 
        add_flags = le32_to_cpu(ctrl_ctx->add_flags);
-       drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
+
        /* Input ctx add_flags are the endpoint index plus one */
        ep_index = xhci_last_valid_endpoint(add_flags) - 1;
 
        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
        trace_xhci_handle_cmd_config_ep(ep_ctx);
 
-       /* A usb_set_interface() call directly after clearing a halted
-        * condition may race on this quirky hardware.  Not worth
-        * worrying about, since this is prototype hardware.  Not sure
-        * if this will work for streams, but streams support was
-        * untested on this prototype.
-        */
-       if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
-                       ep_index != (unsigned int) -1 &&
-                       add_flags - SLOT_FLAG == drop_flags) {
-               ep_state = virt_dev->eps[ep_index].ep_state;
-               if (!(ep_state & EP_HALTED))
-                       return;
-               xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
-                               "Completed config ep cmd - "
-                               "last ep index = %d, state = %d",
-                               ep_index, ep_state);
-               /* Clear internal halted state and restart ring(s) */
-               virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
-               ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
-               return;
-       }
        return;
 }
 
@@ -1650,9 +1560,12 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
 
 void xhci_handle_command_timeout(struct work_struct *work)
 {
-       struct xhci_hcd *xhci;
-       unsigned long flags;
-       u64 hw_ring_state;
+       struct xhci_hcd *xhci;
+       unsigned long   flags;
+       char            str[XHCI_MSG_MAX];
+       u64             hw_ring_state;
+       u32             cmd_field3;
+       u32             usbsts;
 
        xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
 
@@ -1666,6 +1579,27 @@ void xhci_handle_command_timeout(struct work_struct *work)
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }
+
+       cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]);
+       usbsts = readl(&xhci->op_regs->status);
+       xhci_dbg(xhci, "Command timeout, USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));
+
+       /* Bail out and tear down xhci if a stop endpoint command failed */
+       if (TRB_FIELD_TO_TYPE(cmd_field3) == TRB_STOP_RING) {
+               struct xhci_virt_ep     *ep;
+
+               xhci_warn(xhci, "xHCI host not responding to stop endpoint command\n");
+
+               ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3),
+                                     TRB_TO_EP_INDEX(cmd_field3));
+               if (ep)
+                       ep->ep_state &= ~EP_STOP_CMD_PENDING;
+
+               xhci_halt(xhci);
+               xhci_hc_died(xhci);
+               goto time_out_completed;
+       }
+
        /* mark this command to be cancelled */
        xhci->current_cmd->status = COMP_COMMAND_ABORTED;
 
index 25b87e9..f0ab631 100644 (file)
@@ -486,6 +486,10 @@ static void compliance_mode_recovery(struct timer_list *t)
 
        xhci = from_timer(xhci, t, comp_mode_recovery_timer);
        rhub = &xhci->usb3_rhub;
+       hcd = rhub->hcd;
+
+       if (!hcd)
+               return;
 
        for (i = 0; i < rhub->num_ports; i++) {
                temp = readl(rhub->ports[i]->addr);
@@ -499,7 +503,6 @@ static void compliance_mode_recovery(struct timer_list *t)
                                        i + 1);
                        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                        "Attempting compliance mode recovery");
-                       hcd = xhci->shared_hcd;
 
                        if (hcd->state == HC_STATE_SUSPENDED)
                                usb_hcd_resume_root_hub(hcd);
@@ -612,14 +615,11 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
                xhci_halt(xhci);
                return -ENODEV;
        }
-       xhci->shared_hcd->state = HC_STATE_RUNNING;
        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
 
        if (xhci->quirks & XHCI_NEC_HOST)
                xhci_ring_cmd_db(xhci);
 
-       xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-                       "Finished xhci_run for USB3 roothub");
        return 0;
 }
 
@@ -694,12 +694,17 @@ int xhci_run(struct usb_hcd *hcd)
                        xhci_free_command(xhci, command);
        }
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-                       "Finished xhci_run for USB2 roothub");
+                       "Finished %s for main hcd", __func__);
 
        xhci_create_dbc_dev(xhci);
 
        xhci_debugfs_init(xhci);
 
+       if (xhci_has_one_roothub(xhci))
+               return xhci_run_finished(xhci);
+
+       set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(xhci_run);
@@ -992,7 +997,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
                return 0;
 
        if (hcd->state != HC_STATE_SUSPENDED ||
-                       xhci->shared_hcd->state != HC_STATE_SUSPENDED)
+           (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
                return -EINVAL;
 
        /* Clear root port wake on bits if wakeup not allowed. */
@@ -1009,15 +1014,18 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
                 __func__, hcd->self.busnum);
        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        del_timer_sync(&hcd->rh_timer);
-       clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
-       del_timer_sync(&xhci->shared_hcd->rh_timer);
+       if (xhci->shared_hcd) {
+               clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+               del_timer_sync(&xhci->shared_hcd->rh_timer);
+       }
 
        if (xhci->quirks & XHCI_SUSPEND_DELAY)
                usleep_range(1000, 1500);
 
        spin_lock_irq(&xhci->lock);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-       clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+       if (xhci->shared_hcd)
+               clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
        /* step 1: stop endpoint */
        /* skipped assuming that port suspend has done */
 
@@ -1117,7 +1125,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                msleep(100);
 
        set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-       set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+       if (xhci->shared_hcd)
+               set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
 
        spin_lock_irq(&xhci->lock);
 
@@ -1177,7 +1186,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 
                /* Let the USB core know _both_ roothubs lost power. */
                usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
-               usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
+               if (xhci->shared_hcd)
+                       usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
 
                xhci_dbg(xhci, "Stop HCD\n");
                xhci_halt(xhci);
@@ -1217,12 +1227,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 
                xhci_dbg(xhci, "Start the primary HCD\n");
                retval = xhci_run(hcd->primary_hcd);
-               if (!retval) {
+               if (!retval && secondary_hcd) {
                        xhci_dbg(xhci, "Start the secondary HCD\n");
                        retval = xhci_run(secondary_hcd);
                }
                hcd->state = HC_STATE_SUSPENDED;
-               xhci->shared_hcd->state = HC_STATE_SUSPENDED;
+               if (xhci->shared_hcd)
+                       xhci->shared_hcd->state = HC_STATE_SUSPENDED;
                goto done;
        }
 
@@ -1260,7 +1271,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                }
 
                if (pending_portevent) {
-                       usb_hcd_resume_root_hub(xhci->shared_hcd);
+                       if (xhci->shared_hcd)
+                               usb_hcd_resume_root_hub(xhci->shared_hcd);
                        usb_hcd_resume_root_hub(hcd);
                }
        }
@@ -1279,8 +1291,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
        /* Re-enable port polling. */
        xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
                 __func__, hcd->self.busnum);
-       set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
-       usb_hcd_poll_rh_status(xhci->shared_hcd);
+       if (xhci->shared_hcd) {
+               set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+               usb_hcd_poll_rh_status(xhci->shared_hcd);
+       }
        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        usb_hcd_poll_rh_status(hcd);
 
@@ -1860,9 +1874,6 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                        goto done;
                }
                ep->ep_state |= EP_STOP_CMD_PENDING;
-               ep->stop_cmd_timer.expires = jiffies +
-                       XHCI_STOP_EP_CMD_TIMEOUT * HZ;
-               add_timer(&ep->stop_cmd_timer);
                xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
                                         ep_index, 0);
                xhci_ring_cmd_db(xhci);
@@ -3972,10 +3983,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
        trace_xhci_free_dev(slot_ctx);
 
        /* Stop any wayward timer functions (which may grab the lock) */
-       for (i = 0; i < 31; i++) {
+       for (i = 0; i < 31; i++)
                virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
-               del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
-       }
        virt_dev->udev = NULL;
        xhci_disable_slot(xhci, udev->slot_id);
        xhci_free_virt_device(xhci, udev->slot_id);
@@ -4879,9 +4888,6 @@ static int xhci_check_intel_tier_policy(struct usb_device *udev,
        struct usb_device *parent;
        unsigned int num_hubs;
 
-       if (state == USB3_LPM_U2)
-               return 0;
-
        /* Don't enable U1 if the device is on a 2nd tier hub or lower. */
        for (parent = udev->parent, num_hubs = 0; parent->parent;
                        parent = parent->parent)
@@ -4890,7 +4896,7 @@ static int xhci_check_intel_tier_policy(struct usb_device *udev,
        if (num_hubs < 2)
                return 0;
 
-       dev_dbg(&udev->dev, "Disabling U1 link state for device"
+       dev_dbg(&udev->dev, "Disabling U1/U2 link state for device"
                        " below second-tier hub.\n");
        dev_dbg(&udev->dev, "Plug device into first-tier hub "
                        "to decrease power consumption.\n");
@@ -4931,9 +4937,6 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
                return timeout;
        }
 
-       if (xhci_check_tier_policy(xhci, udev, state) < 0)
-               return timeout;
-
        /* Gather some information about the currently installed configuration
         * and alternate interface settings.
         */
@@ -5040,6 +5043,9 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
                        !xhci->devs[udev->slot_id])
                return USB3_LPM_DISABLED;
 
+       if (xhci_check_tier_policy(xhci, udev, state) < 0)
+               return USB3_LPM_DISABLED;
+
        hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
        mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
        if (mel < 0) {
@@ -5207,6 +5213,57 @@ static int xhci_get_frame(struct usb_hcd *hcd)
        return readl(&xhci->run_regs->microframe_index) >> 3;
 }
 
+static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
+{
+       xhci->usb2_rhub.hcd = hcd;
+       hcd->speed = HCD_USB2;
+       hcd->self.root_hub->speed = USB_SPEED_HIGH;
+       /*
+        * USB 2.0 roothub under xHCI has an integrated TT
+        * (rate matching hub), as opposed to having an OHCI/UHCI
+        * companion controller.
+        */
+       hcd->has_tt = 1;
+}
+
+static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
+{
+       unsigned int minor_rev;
+
+       /*
+        * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
+        * should return 0x31 for sbrn, or that the minor revision
+        * is a two-digit BCD containing minor and sub-minor numbers.
+        * This was later clarified in xHCI 1.2.
+        *
+        * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
+        * minor revision set to 0x1 instead of 0x10.
+        */
+       if (xhci->usb3_rhub.min_rev == 0x1)
+               minor_rev = 1;
+       else
+               minor_rev = xhci->usb3_rhub.min_rev / 0x10;
+
+       switch (minor_rev) {
+       case 2:
+               hcd->speed = HCD_USB32;
+               hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
+               hcd->self.root_hub->rx_lanes = 2;
+               hcd->self.root_hub->tx_lanes = 2;
+               hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
+               break;
+       case 1:
+               hcd->speed = HCD_USB31;
+               hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
+               hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
+               break;
+       }
+       xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
+                 minor_rev, minor_rev ? "Enhanced " : "");
+
+       xhci->usb3_rhub.hcd = hcd;
+}
+
 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 {
        struct xhci_hcd         *xhci;
@@ -5215,7 +5272,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
         * quirks
         */
        struct device           *dev = hcd->self.sysdev;
-       unsigned int            minor_rev;
        int                     retval;
 
        /* Accept arbitrarily long scatter-gather lists */
@@ -5229,61 +5285,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 
        xhci = hcd_to_xhci(hcd);
 
-       if (usb_hcd_is_primary_hcd(hcd)) {
-               xhci->main_hcd = hcd;
-               xhci->usb2_rhub.hcd = hcd;
-               /* Mark the first roothub as being USB 2.0.
-                * The xHCI driver will register the USB 3.0 roothub.
-                */
-               hcd->speed = HCD_USB2;
-               hcd->self.root_hub->speed = USB_SPEED_HIGH;
-               /*
-                * USB 2.0 roothub under xHCI has an integrated TT,
-                * (rate matching hub) as opposed to having an OHCI/UHCI
-                * companion controller.
-                */
-               hcd->has_tt = 1;
-       } else {
-               /*
-                * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
-                * should return 0x31 for sbrn, or that the minor revision
-                * is a two digit BCD containig minor and sub-minor numbers.
-                * This was later clarified in xHCI 1.2.
-                *
-                * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
-                * minor revision set to 0x1 instead of 0x10.
-                */
-               if (xhci->usb3_rhub.min_rev == 0x1)
-                       minor_rev = 1;
-               else
-                       minor_rev = xhci->usb3_rhub.min_rev / 0x10;
-
-               switch (minor_rev) {
-               case 2:
-                       hcd->speed = HCD_USB32;
-                       hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
-                       hcd->self.root_hub->rx_lanes = 2;
-                       hcd->self.root_hub->tx_lanes = 2;
-                       hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
-                       break;
-               case 1:
-                       hcd->speed = HCD_USB31;
-                       hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
-                       hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
-                       break;
-               }
-               xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
-                         minor_rev,
-                         minor_rev ? "Enhanced " : "");
-
-               xhci->usb3_rhub.hcd = hcd;
-               /* xHCI private pointer was set in xhci_pci_probe for the second
-                * registered roothub.
-                */
+       if (!usb_hcd_is_primary_hcd(hcd)) {
+               xhci_hcd_init_usb3_data(xhci, hcd);
                return 0;
        }
 
        mutex_init(&xhci->mutex);
+       xhci->main_hcd = hcd;
        xhci->cap_regs = hcd->regs;
        xhci->op_regs = hcd->regs +
                HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
@@ -5358,6 +5366,11 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
                return retval;
        xhci_dbg(xhci, "Called HCD init\n");
 
+       if (xhci_hcd_is_usb3(hcd))
+               xhci_hcd_init_usb3_data(xhci, hcd);
+       else
+               xhci_hcd_init_usb2_data(xhci, hcd);
+
        xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
                  xhci->hcc_params, xhci->hci_version, xhci->quirks);
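
For reference, the BCD handling in xhci_hcd_init_usb3_data() maps the reported minor revision onto a host speed as follows; this is a worked example of the comment's reasoning, assuming the xHCI 1.2 encoding, and is not part of the patch itself:

	/*
	 * usb3_rhub.min_rev = 0x00                 -> minor_rev 0 -> "USB 3.0 SuperSpeed"
	 * usb3_rhub.min_rev = 0x10 (or quirky 0x1) -> minor_rev 1 -> HCD_USB31, SSP Gen 2x1
	 * usb3_rhub.min_rev = 0x20                 -> minor_rev 2 -> HCD_USB32, SSP Gen 2x2
	 */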
 
index 473a33c..0bd76c9 100644 (file)
@@ -948,8 +948,6 @@ struct xhci_virt_ep {
 #define EP_CLEARING_TT         (1 << 8)
        /* ----  Related to URB cancellation ---- */
        struct list_head        cancelled_td_list;
-       /* Watchdog timer for stop endpoint command to cancel URBs */
-       struct timer_list       stop_cmd_timer;
        struct xhci_hcd         *xhci;
        /* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue
         * command.  We'll need to update the ring's dequeue segment and dequeue
@@ -1848,7 +1846,7 @@ struct xhci_hcd {
 #define XHCI_STATE_REMOVING    (1 << 2)
        unsigned long long      quirks;
 #define        XHCI_LINK_TRB_QUIRK     BIT_ULL(0)
-#define XHCI_RESET_EP_QUIRK    BIT_ULL(1)
+#define XHCI_RESET_EP_QUIRK    BIT_ULL(1) /* Deprecated */
 #define XHCI_NEC_HOST          BIT_ULL(2)
 #define XHCI_AMD_PLL_FIX       BIT_ULL(3)
 #define XHCI_SPURIOUS_SUCCESS  BIT_ULL(4)
@@ -1911,6 +1909,8 @@ struct xhci_hcd {
        unsigned                hw_lpm_support:1;
        /* Broken Suspend flag for SNPS Suspend resume issue */
        unsigned                broken_suspend:1;
+       /* Indicates that omitting hcd is supported if root hub has no ports */
+       unsigned                allow_single_roothub:1;
        /* cached usb2 extended protocol capabilities */
        u32                     *ext_caps;
        unsigned int            num_ext_caps;
@@ -1966,6 +1966,30 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
        return xhci->main_hcd;
 }
 
+static inline struct usb_hcd *xhci_get_usb3_hcd(struct xhci_hcd *xhci)
+{
+       if (xhci->shared_hcd)
+               return xhci->shared_hcd;
+
+       if (!xhci->usb2_rhub.num_ports)
+               return xhci->main_hcd;
+
+       return NULL;
+}
+
+static inline bool xhci_hcd_is_usb3(struct usb_hcd *hcd)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+       return hcd == xhci_get_usb3_hcd(xhci);
+}
+
+static inline bool xhci_has_one_roothub(struct xhci_hcd *xhci)
+{
+       return xhci->allow_single_roothub &&
+              (!xhci->usb2_rhub.num_ports || !xhci->usb3_rhub.num_ports);
+}
+
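
A short sketch of how these helpers are meant to be used by platform glue; the fragment is illustrative only (not taken from this patch) and assumes the glue driver sets xhci->allow_single_roothub before port counts are parsed, with driver, dev and hcd being the usual locals:

	if (xhci_has_one_roothub(xhci)) {
		/* Only one roothub has ports: no shared hcd is created, and
		 * xhci_get_usb3_hcd() returns either the main hcd (USB 3-only
		 * controller) or NULL (USB 2-only controller). */
	} else {
		xhci->shared_hcd = usb_create_shared_hcd(driver, dev,
							 dev_name(dev), hcd);
	}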
 #define xhci_dbg(xhci, fmt, args...) \
        dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
 #define xhci_err(xhci, fmt, args...) \
index d1d9a7d..af88f4f 100644 (file)
@@ -251,6 +251,8 @@ static const struct reg_field isp1760_hc_reg_fields[] = {
        [HW_DM_PULLDOWN]        = REG_FIELD(ISP176x_HC_OTG_CTRL, 2, 2),
        [HW_DP_PULLDOWN]        = REG_FIELD(ISP176x_HC_OTG_CTRL, 1, 1),
        [HW_DP_PULLUP]          = REG_FIELD(ISP176x_HC_OTG_CTRL, 0, 0),
+       /* Make sure the array is sized properly during compilation */
+       [HC_FIELD_MAX]          = {},
 };
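
The sentinel entries added here rely on a designated-initializer property worth spelling out: initializing an unsized array at the highest enum value forces the compiler to size it as FIELD_MAX + 1 elements, so lookups by any enum member stay in bounds even if some fields are never listed. A minimal stand-alone sketch with illustrative names:

	enum example_field { FIELD_A, FIELD_B, FIELD_MAX };

	static const int example_table[] = {
		[FIELD_A]   = 1,
		[FIELD_B]   = 2,
		/* make sure the array is sized properly during compilation */
		[FIELD_MAX] = 0,
	};

	/* ARRAY_SIZE(example_table) == FIELD_MAX + 1 */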
 
 static const struct reg_field isp1763_hc_reg_fields[] = {
@@ -321,6 +323,8 @@ static const struct reg_field isp1763_hc_reg_fields[] = {
        [HW_DM_PULLDOWN_CLEAR]  = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 2, 2),
        [HW_DP_PULLDOWN_CLEAR]  = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 1, 1),
        [HW_DP_PULLUP_CLEAR]    = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 0, 0),
+       /* Make sure the array is sized properly during compilation */
+       [HC_FIELD_MAX]          = {},
 };
 
 static const struct regmap_range isp1763_hc_volatile_ranges[] = {
@@ -405,6 +409,8 @@ static const struct reg_field isp1761_dc_reg_fields[] = {
        [DC_CHIP_ID_HIGH]       = REG_FIELD(ISP176x_DC_CHIPID, 16, 31),
        [DC_CHIP_ID_LOW]        = REG_FIELD(ISP176x_DC_CHIPID, 0, 15),
        [DC_SCRATCH]            = REG_FIELD(ISP176x_DC_SCRATCH, 0, 15),
+       /* Make sure the array is sized properly during compilation */
+       [DC_FIELD_MAX]          = {},
 };
 
 static const struct regmap_range isp1763_dc_volatile_ranges[] = {
@@ -458,6 +464,8 @@ static const struct reg_field isp1763_dc_reg_fields[] = {
        [DC_CHIP_ID_HIGH]       = REG_FIELD(ISP1763_DC_CHIPID_HIGH, 0, 15),
        [DC_CHIP_ID_LOW]        = REG_FIELD(ISP1763_DC_CHIPID_LOW, 0, 15),
        [DC_SCRATCH]            = REG_FIELD(ISP1763_DC_SCRATCH, 0, 15),
+       /* Make sure the array is sized properly during compilation */
+       [DC_FIELD_MAX]          = {},
 };
 
 static const struct regmap_config isp1763_dc_regmap_conf = {
index 893becb..76862ba 100644 (file)
@@ -825,8 +825,7 @@ static void create_ptd_atl(struct isp1760_qh *qh,
        memset(ptd, 0, sizeof(*ptd));
 
        /* according to 3.6.2, max packet len can not be > 0x400 */
-       maxpacket = usb_maxpacket(qtd->urb->dev, qtd->urb->pipe,
-                                               usb_pipeout(qtd->urb->pipe));
+       maxpacket = usb_maxpacket(qtd->urb->dev, qtd->urb->pipe);
        multi =  1 + ((maxpacket >> 11) & 0x3);
        maxpacket &= 0x7ff;
 
@@ -1808,8 +1807,7 @@ static void packetize_urb(struct usb_hcd *hcd,
                        packet_type = IN_PID;
        }
 
-       maxpacketsize = usb_maxpacket(urb->dev, urb->pipe,
-                                     usb_pipeout(urb->pipe));
+       maxpacketsize = usb_maxpacket(urb->dev, urb->pipe);
 
        /*
         * buffer gets wrapped in one or more qtds;
index 6c38c62..b2f9804 100644 (file)
@@ -1449,8 +1449,7 @@ wait:if (ftdi->disconnected > 0) {
                        command->length = 0x8007;
                        command->address = (toggle_bits << 6) | (ep_number << 2)
                                | (address << 0);
-                       command->width = usb_maxpacket(urb->dev, urb->pipe,
-                                                      usb_pipeout(urb->pipe));
+                       command->width = usb_maxpacket(urb->dev, urb->pipe);
                        command->follows = 8;
                        command->value = 0;
                        command->buffer = urb->setup_packet;
@@ -1514,8 +1513,7 @@ wait:if (ftdi->disconnected > 0) {
                                                            1);
                        command->address = (toggle_bits << 6) | (ep_number << 2)
                                | (address << 0);
-                       command->width = usb_maxpacket(urb->dev, urb->pipe,
-                                                      usb_pipeout(urb->pipe));
+                       command->width = usb_maxpacket(urb->dev, urb->pipe);
                        command->follows = 0;
                        command->value = 0;
                        command->buffer = NULL;
@@ -1571,8 +1569,7 @@ wait:if (ftdi->disconnected > 0) {
                        command->length = 0x0000;
                        command->address = (toggle_bits << 6) | (ep_number << 2)
                                | (address << 0);
-                       command->width = usb_maxpacket(urb->dev, urb->pipe,
-                                                      usb_pipeout(urb->pipe));
+                       command->width = usb_maxpacket(urb->dev, urb->pipe);
                        command->follows = 0;
                        command->value = 0;
                        command->buffer = NULL;
@@ -1634,8 +1631,7 @@ wait:if (ftdi->disconnected > 0) {
                        command->header = 0x81 | (ed << 5);
                        command->address = (toggle_bits << 6) | (ep_number << 2)
                                | (address << 0);
-                       command->width = usb_maxpacket(urb->dev, urb->pipe,
-                                                      usb_pipeout(urb->pipe));
+                       command->width = usb_maxpacket(urb->dev, urb->pipe);
                        command->follows = min_t(u32, 1024,
                                                 urb->transfer_buffer_length -
                                                 urb->actual_length);
@@ -1715,8 +1711,7 @@ wait:if (ftdi->disconnected > 0) {
                                                            1);
                        command->address = (toggle_bits << 6) | (ep_number << 2)
                                | (address << 0);
-                       command->width = usb_maxpacket(urb->dev, urb->pipe,
-                                                      usb_pipeout(urb->pipe));
+                       command->width = usb_maxpacket(urb->dev, urb->pipe);
                        command->follows = 0;
                        command->value = 0;
                        command->buffer = NULL;
index f868613..25ec566 100644 (file)
@@ -437,7 +437,7 @@ static int lvs_rh_probe(struct usb_interface *intf,
        INIT_WORK(&lvs->rh_work, lvs_rh_work);
 
        pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
-       maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe));
+       maxp = usb_maxpacket(hdev, pipe);
        usb_fill_int_urb(lvs->urb, hdev, pipe, &lvs->buffer[0], maxp,
                        lvs_rh_irq, lvs, endpoint->bInterval);
 
index 1aeb34d..cad9913 100644 (file)
@@ -36,6 +36,8 @@
 #define DMA_INTR_STATUS_MSK    GENMASK(7, 0)
 #define DMA_INTR_UNMASK_SET_MSK        GENMASK(31, 24)
 
+#define MTK_MUSB_CLKS_NUM      3
+
 struct mtk_glue {
        struct device *dev;
        struct musb *musb;
@@ -44,9 +46,7 @@ struct mtk_glue {
        struct phy *phy;
        struct usb_phy *xceiv;
        enum phy_mode phy_mode;
-       struct clk *main;
-       struct clk *mcu;
-       struct clk *univpll;
+       struct clk_bulk_data clks[MTK_MUSB_CLKS_NUM];
        enum usb_role role;
        struct usb_role_switch *role_sw;
 };
@@ -55,64 +55,11 @@ static int mtk_musb_clks_get(struct mtk_glue *glue)
 {
        struct device *dev = glue->dev;
 
-       glue->main = devm_clk_get(dev, "main");
-       if (IS_ERR(glue->main)) {
-               dev_err(dev, "fail to get main clock\n");
-               return PTR_ERR(glue->main);
-       }
-
-       glue->mcu = devm_clk_get(dev, "mcu");
-       if (IS_ERR(glue->mcu)) {
-               dev_err(dev, "fail to get mcu clock\n");
-               return PTR_ERR(glue->mcu);
-       }
-
-       glue->univpll = devm_clk_get(dev, "univpll");
-       if (IS_ERR(glue->univpll)) {
-               dev_err(dev, "fail to get univpll clock\n");
-               return PTR_ERR(glue->univpll);
-       }
-
-       return 0;
-}
+       glue->clks[0].id = "main";
+       glue->clks[1].id = "mcu";
+       glue->clks[2].id = "univpll";
 
-static int mtk_musb_clks_enable(struct mtk_glue *glue)
-{
-       int ret;
-
-       ret = clk_prepare_enable(glue->main);
-       if (ret) {
-               dev_err(glue->dev, "failed to enable main clock\n");
-               goto err_main_clk;
-       }
-
-       ret = clk_prepare_enable(glue->mcu);
-       if (ret) {
-               dev_err(glue->dev, "failed to enable mcu clock\n");
-               goto err_mcu_clk;
-       }
-
-       ret = clk_prepare_enable(glue->univpll);
-       if (ret) {
-               dev_err(glue->dev, "failed to enable univpll clock\n");
-               goto err_univpll_clk;
-       }
-
-       return 0;
-
-err_univpll_clk:
-       clk_disable_unprepare(glue->mcu);
-err_mcu_clk:
-       clk_disable_unprepare(glue->main);
-err_main_clk:
-       return ret;
-}
-
-static void mtk_musb_clks_disable(struct mtk_glue *glue)
-{
-       clk_disable_unprepare(glue->univpll);
-       clk_disable_unprepare(glue->mcu);
-       clk_disable_unprepare(glue->main);
+       return devm_clk_bulk_get(dev, MTK_MUSB_CLKS_NUM, glue->clks);
 }
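
This is a textbook clk_bulk migration: one bulk getter replaces the three devm_clk_get() calls, and the hand-rolled enable/unwind code collapses into the bulk helpers. A condensed sketch of the resulting pattern, using the same three clock IDs as this driver:

	struct clk_bulk_data clks[] = {
		{ .id = "main" }, { .id = "mcu" }, { .id = "univpll" },
	};
	int ret;

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(clks), clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(clks), clks);
	if (ret)
		return ret;

	/* ... use the hardware ... */

	clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks);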
 
 static int mtk_otg_switch_set(struct mtk_glue *glue, enum usb_role role)
@@ -390,7 +337,7 @@ static int mtk_musb_exit(struct musb *musb)
        mtk_otg_switch_exit(glue);
        phy_power_off(glue->phy);
        phy_exit(glue->phy);
-       mtk_musb_clks_disable(glue);
+       clk_bulk_disable_unprepare(MTK_MUSB_CLKS_NUM, glue->clks);
 
        pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
@@ -528,7 +475,7 @@ static int mtk_musb_probe(struct platform_device *pdev)
        pm_runtime_enable(dev);
        pm_runtime_get_sync(dev);
 
-       ret = mtk_musb_clks_enable(glue);
+       ret = clk_bulk_prepare_enable(MTK_MUSB_CLKS_NUM, glue->clks);
        if (ret)
                goto err_enable_clk;
 
@@ -551,7 +498,7 @@ static int mtk_musb_probe(struct platform_device *pdev)
        return 0;
 
 err_device_register:
-       mtk_musb_clks_disable(glue);
+       clk_bulk_disable_unprepare(MTK_MUSB_CLKS_NUM, glue->clks);
 err_enable_clk:
        pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
index d2b7e61..f571a65 100644 (file)
@@ -362,6 +362,7 @@ static int omap2430_probe(struct platform_device *pdev)
        control_node = of_parse_phandle(np, "ctrl-module", 0);
        if (control_node) {
                control_pdev = of_find_device_by_node(control_node);
+               of_node_put(control_node);
                if (!control_pdev) {
                        dev_err(&pdev->dev, "Failed to get control device\n");
                        ret = -EINVAL;
index ee0863c..6e6ef8c 100644 (file)
@@ -95,8 +95,8 @@ static int omap_otg_probe(struct platform_device *pdev)
                return -ENODEV;
 
        extcon = extcon_get_extcon_dev(config->extcon);
-       if (!extcon)
-               return -EPROBE_DEFER;
+       if (IS_ERR(extcon))
+               return PTR_ERR(extcon);
 
        otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL);
        if (!otg_dev)
index c0e4df8..39eaa7b 100644 (file)
@@ -208,10 +208,9 @@ static void ark3116_set_termios(struct tty_struct *tty,
                lcr |= UART_LCR_PARITY;
        if (!(cflag & PARODD))
                lcr |= UART_LCR_EPAR;
-#ifdef CMSPAR
        if (cflag & CMSPAR)
                lcr |= UART_LCR_SPAR;
-#endif
+
        /* handshake control */
        hcr = (cflag & CRTSCTS) ? 0x03 : 0x00;
 
index 49c08f0..b440d33 100644 (file)
@@ -1671,7 +1671,7 @@ static ssize_t latency_timer_show(struct device *dev,
        if (priv->flags & ASYNC_LOW_LATENCY)
                return sprintf(buf, "1\n");
        else
-               return sprintf(buf, "%i\n", priv->latency);
+               return sprintf(buf, "%u\n", priv->latency);
 }
 
 /* Write a new value of the latency timer, in units of milliseconds. */
index 152ad88..e60425b 100644 (file)
@@ -1137,6 +1137,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
+       { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */
+         .driver_info = RSVD(3) | ZLP },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
index 1d878d0..3506c47 100644 (file)
@@ -421,6 +421,9 @@ static int pl2303_detect_type(struct usb_serial *serial)
        bcdUSB = le16_to_cpu(desc->bcdUSB);
 
        switch (bcdUSB) {
+       case 0x101:
+               /* USB 1.0.1? Let's assume they meant 1.1... */
+               fallthrough;
        case 0x110:
                switch (bcdDevice) {
                case 0x300:
index 06aad0d..332fb92 100644 (file)
 #include <linux/usb/ezusb.h>
 #include "whiteheat.h"                 /* WhiteHEAT specific commands */
 
-#ifndef CMSPAR
-#define CMSPAR 0
-#endif
-
 /*
  * Version Information
  */
index 20b857e..747be69 100644 (file)
@@ -1104,7 +1104,7 @@ static int init_alauda(struct us_data *us)
 
        us->extra = kzalloc(sizeof(struct alauda_info), GFP_NOIO);
        if (!us->extra)
-               return USB_STOR_TRANSPORT_ERROR;
+               return -ENOMEM;
 
        info = (struct alauda_info *) us->extra;
        us->extra_destructor = alauda_info_destructor;
@@ -1113,7 +1113,7 @@ static int init_alauda(struct us_data *us)
                altsetting->endpoint[0].desc.bEndpointAddress
                & USB_ENDPOINT_NUMBER_MASK);
 
-       return USB_STOR_TRANSPORT_GOOD;
+       return 0;
 }
 
 static int alauda_transport(struct scsi_cmnd *srb, struct us_data *us)
index 05429f1..4e0eef1 100644 (file)
@@ -1449,7 +1449,7 @@ static void isd200_free_info_ptrs(void *info_)
  * Allocates (if necessary) and initializes the driver structure.
  *
  * RETURNS:
- *    ISD status code
+ *    error status code
  */
 static int isd200_init_info(struct us_data *us)
 {
@@ -1457,7 +1457,7 @@ static int isd200_init_info(struct us_data *us)
 
        info = kzalloc(sizeof(struct isd200_info), GFP_KERNEL);
        if (!info)
-               return ISD200_ERROR;
+               return -ENOMEM;
 
        info->id = kzalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
        info->RegsBuf = kmalloc(sizeof(info->ATARegs), GFP_KERNEL);
@@ -1466,13 +1466,13 @@ static int isd200_init_info(struct us_data *us)
        if (!info->id || !info->RegsBuf || !info->srb.sense_buffer) {
                isd200_free_info_ptrs(info);
                kfree(info);
-               return ISD200_ERROR;
+               return -ENOMEM;
        }
 
        us->extra = info;
        us->extra_destructor = isd200_free_info_ptrs;
 
-       return ISD200_GOOD;
+       return 0;
 }
 
 /**************************************************************************
index 05cec81..38ddfed 100644 (file)
@@ -174,24 +174,25 @@ static void rio_karma_destructor(void *extra)
 
 static int rio_karma_init(struct us_data *us)
 {
-       int ret = 0;
        struct karma_data *data = kzalloc(sizeof(struct karma_data), GFP_NOIO);
 
        if (!data)
-               goto out;
+               return -ENOMEM;
 
        data->recv = kmalloc(RIO_RECV_LEN, GFP_NOIO);
        if (!data->recv) {
                kfree(data);
-               goto out;
+               return -ENOMEM;
        }
 
        us->extra = data;
        us->extra_destructor = rio_karma_destructor;
-       ret = rio_karma_send_command(RIO_ENTER_STORAGE, us);
-       data->in_storage = (ret == 0);
-out:
-       return ret;
+       if (rio_karma_send_command(RIO_ENTER_STORAGE, us))
+               return -EIO;
+
+       data->in_storage = 1;
+
+       return 0;
 }
 
 static struct scsi_host_template karma_host_template;
index a989fe9..1db2eef 100644 (file)
@@ -180,7 +180,7 @@ static int onetouch_connect_input(struct us_data *ss)
                return -ENODEV;
 
        pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
-       maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+       maxp = usb_maxpacket(udev, pipe);
        maxp = min(maxp, ONETOUCH_PKT_LEN);
 
        onetouch = kzalloc(sizeof(struct usb_onetouch), GFP_KERNEL);
index 54aa139..f0d0ca3 100644 (file)
@@ -1456,7 +1456,7 @@ static int init_usbat(struct us_data *us, int devicetype)
 
        us->extra = kzalloc(sizeof(struct usbat_info), GFP_NOIO);
        if (!us->extra)
-               return 1;
+               return -ENOMEM;
 
        info = (struct usbat_info *) (us->extra);
 
@@ -1465,7 +1465,7 @@ static int init_usbat(struct us_data *us, int devicetype)
                                 USBAT_UIO_OE1 | USBAT_UIO_OE0,
                                 USBAT_UIO_EPAD | USBAT_UIO_1);
        if (rc != USB_STOR_XFER_GOOD)
-               return USB_STOR_TRANSPORT_ERROR;
+               return -EIO;
 
        usb_stor_dbg(us, "INIT 1\n");
 
@@ -1473,42 +1473,42 @@ static int init_usbat(struct us_data *us, int devicetype)
 
        rc = usbat_read_user_io(us, status);
        if (rc != USB_STOR_TRANSPORT_GOOD)
-               return rc;
+               return -EIO;
 
        usb_stor_dbg(us, "INIT 2\n");
 
        rc = usbat_read_user_io(us, status);
        if (rc != USB_STOR_XFER_GOOD)
-               return USB_STOR_TRANSPORT_ERROR;
+               return -EIO;
 
        rc = usbat_read_user_io(us, status);
        if (rc != USB_STOR_XFER_GOOD)
-               return USB_STOR_TRANSPORT_ERROR;
+               return -EIO;
 
        usb_stor_dbg(us, "INIT 3\n");
 
        rc = usbat_select_and_test_registers(us);
        if (rc != USB_STOR_TRANSPORT_GOOD)
-               return rc;
+               return -EIO;
 
        usb_stor_dbg(us, "INIT 4\n");
 
        rc = usbat_read_user_io(us, status);
        if (rc != USB_STOR_XFER_GOOD)
-               return USB_STOR_TRANSPORT_ERROR;
+               return -EIO;
 
        usb_stor_dbg(us, "INIT 5\n");
 
        /* Enable peripheral control signals and card detect */
        rc = usbat_device_enable_cdt(us);
        if (rc != USB_STOR_TRANSPORT_GOOD)
-               return rc;
+               return -EIO;
 
        usb_stor_dbg(us, "INIT 6\n");
 
        rc = usbat_read_user_io(us, status);
        if (rc != USB_STOR_XFER_GOOD)
-               return USB_STOR_TRANSPORT_ERROR;
+               return -EIO;
 
        usb_stor_dbg(us, "INIT 7\n");
 
@@ -1516,19 +1516,19 @@ static int init_usbat(struct us_data *us, int devicetype)
 
        rc = usbat_read_user_io(us, status);
        if (rc != USB_STOR_XFER_GOOD)
-               return USB_STOR_TRANSPORT_ERROR;
+               return -EIO;
 
        usb_stor_dbg(us, "INIT 8\n");
 
        rc = usbat_select_and_test_registers(us);
        if (rc != USB_STOR_TRANSPORT_GOOD)
-               return rc;
+               return -EIO;
 
        usb_stor_dbg(us, "INIT 9\n");
 
        /* At this point, we need to detect which device we are using */
        if (usbat_set_transport(us, info, devicetype))
-               return USB_STOR_TRANSPORT_ERROR;
+               return -EIO;
 
        usb_stor_dbg(us, "INIT 10\n");
 
@@ -1539,11 +1539,11 @@ static int init_usbat(struct us_data *us, int devicetype)
        rc = usbat_set_shuttle_features(us, (USBAT_FEAT_ETEN | USBAT_FEAT_ET2 | USBAT_FEAT_ET1),
                                                                        0x00, 0x88, 0x08, subcountH, subcountL);
        if (rc != USB_STOR_XFER_GOOD)
-               return USB_STOR_TRANSPORT_ERROR;
+               return -EIO;
 
        usb_stor_dbg(us, "INIT 11\n");
 
-       return USB_STOR_TRANSPORT_GOOD;
+       return 0;
 }
 
 /*
index 1928b39..64d96d2 100644 (file)
@@ -363,7 +363,7 @@ static int usb_stor_intr_transfer(struct us_data *us, void *buf,
        usb_stor_dbg(us, "xfer %u bytes\n", length);
 
        /* calculate the max packet size */
-       maxp = usb_maxpacket(us->pusb_dev, pipe, usb_pipeout(pipe));
+       maxp = usb_maxpacket(us->pusb_dev, pipe);
        if (maxp > length)
                maxp = length;
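
The usb_maxpacket() changes in this series all apply the same mechanical conversion: the helper now takes only the device and the pipe and derives the transfer direction internally, so the usb_pipeout() argument is dropped. Illustrative before/after:

	/* before: direction passed explicitly */
	maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));

	/* after: usb_maxpacket() infers the direction from the pipe */
	maxp = usb_maxpacket(udev, pipe);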
 
index 78e0e78..26ea2fd 100644 (file)
@@ -24,7 +24,7 @@ typec_altmode_set_mux(struct altmode *alt, unsigned long conf, void *data)
        state.mode = conf;
        state.data = data;
 
-       return alt->mux->set(alt->mux, &state);
+       return typec_mux_set(alt->mux, &state);
 }
 
 static int typec_altmode_set_state(struct typec_altmode *adev,
index c8340de..fd55c2c 100644 (file)
 #include "class.h"
 #include "mux.h"
 
+#define TYPEC_MUX_MAX_DEVS     3
+
+struct typec_switch {
+       struct typec_switch_dev *sw_devs[TYPEC_MUX_MAX_DEVS];
+       unsigned int num_sw_devs;
+};
+
 static int switch_fwnode_match(struct device *dev, const void *fwnode)
 {
-       if (!is_typec_switch(dev))
+       if (!is_typec_switch_dev(dev))
                return 0;
 
        return dev_fwnode(dev) == fwnode;
@@ -49,7 +56,7 @@ static void *typec_switch_match(struct fwnode_handle *fwnode, const char *id,
        dev = class_find_device(&typec_mux_class, NULL, fwnode,
                                switch_fwnode_match);
 
-       return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+       return dev ? to_typec_switch_dev(dev) : ERR_PTR(-EPROBE_DEFER);
 }
 
 /**
@@ -63,14 +70,50 @@ static void *typec_switch_match(struct fwnode_handle *fwnode, const char *id,
  */
 struct typec_switch *fwnode_typec_switch_get(struct fwnode_handle *fwnode)
 {
+       struct typec_switch_dev *sw_devs[TYPEC_MUX_MAX_DEVS];
        struct typec_switch *sw;
+       int count;
+       int err;
+       int i;
+
+       sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+       if (!sw)
+               return ERR_PTR(-ENOMEM);
+
+       count = fwnode_connection_find_matches(fwnode, "orientation-switch", NULL,
+                                              typec_switch_match,
+                                              (void **)sw_devs,
+                                              ARRAY_SIZE(sw_devs));
+       if (count <= 0) {
+               kfree(sw);
+               return NULL;
+       }
 
-       sw = fwnode_connection_find_match(fwnode, "orientation-switch", NULL,
-                                         typec_switch_match);
-       if (!IS_ERR_OR_NULL(sw))
-               WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+       for (i = 0; i < count; i++) {
+               if (IS_ERR(sw_devs[i])) {
+                       err = PTR_ERR(sw_devs[i]);
+                       goto put_sw_devs;
+               }
+       }
+
+       for (i = 0; i < count; i++) {
+               WARN_ON(!try_module_get(sw_devs[i]->dev.parent->driver->owner));
+               sw->sw_devs[i] = sw_devs[i];
+       }
+
+       sw->num_sw_devs = count;
 
        return sw;
+
+put_sw_devs:
+       for (i = 0; i < count; i++) {
+               if (!IS_ERR(sw_devs[i]))
+                       put_device(&sw_devs[i]->dev);
+       }
+
+       kfree(sw);
+
+       return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(fwnode_typec_switch_get);
 
@@ -82,16 +125,25 @@ EXPORT_SYMBOL_GPL(fwnode_typec_switch_get);
  */
 void typec_switch_put(struct typec_switch *sw)
 {
-       if (!IS_ERR_OR_NULL(sw)) {
-               module_put(sw->dev.parent->driver->owner);
-               put_device(&sw->dev);
+       struct typec_switch_dev *sw_dev;
+       unsigned int i;
+
+       if (IS_ERR_OR_NULL(sw))
+               return;
+
+       for (i = 0; i < sw->num_sw_devs; i++) {
+               sw_dev = sw->sw_devs[i];
+
+               module_put(sw_dev->dev.parent->driver->owner);
+               put_device(&sw_dev->dev);
        }
+       kfree(sw);
 }
 EXPORT_SYMBOL_GPL(typec_switch_put);
 
 static void typec_switch_release(struct device *dev)
 {
-       kfree(to_typec_switch(dev));
+       kfree(to_typec_switch_dev(dev));
 }
 
 const struct device_type typec_switch_dev_type = {
@@ -109,82 +161,102 @@ const struct device_type typec_switch_dev_type = {
  * connector to the USB controllers. USB Type-C plugs can be inserted
  * right-side-up or upside-down.
  */
-struct typec_switch *
+struct typec_switch_dev *
 typec_switch_register(struct device *parent,
                      const struct typec_switch_desc *desc)
 {
-       struct typec_switch *sw;
+       struct typec_switch_dev *sw_dev;
        int ret;
 
        if (!desc || !desc->set)
                return ERR_PTR(-EINVAL);
 
-       sw = kzalloc(sizeof(*sw), GFP_KERNEL);
-       if (!sw)
+       sw_dev = kzalloc(sizeof(*sw_dev), GFP_KERNEL);
+       if (!sw_dev)
                return ERR_PTR(-ENOMEM);
 
-       sw->set = desc->set;
+       sw_dev->set = desc->set;
 
-       device_initialize(&sw->dev);
-       sw->dev.parent = parent;
-       sw->dev.fwnode = desc->fwnode;
-       sw->dev.class = &typec_mux_class;
-       sw->dev.type = &typec_switch_dev_type;
-       sw->dev.driver_data = desc->drvdata;
-       dev_set_name(&sw->dev, "%s-switch",
-                    desc->name ? desc->name : dev_name(parent));
+       device_initialize(&sw_dev->dev);
+       sw_dev->dev.parent = parent;
+       sw_dev->dev.fwnode = desc->fwnode;
+       sw_dev->dev.class = &typec_mux_class;
+       sw_dev->dev.type = &typec_switch_dev_type;
+       sw_dev->dev.driver_data = desc->drvdata;
+       ret = dev_set_name(&sw_dev->dev, "%s-switch", desc->name ? desc->name : dev_name(parent));
+       if (ret) {
+               put_device(&sw_dev->dev);
+               return ERR_PTR(ret);
+       }
 
-       ret = device_add(&sw->dev);
+       ret = device_add(&sw_dev->dev);
        if (ret) {
                dev_err(parent, "failed to register switch (%d)\n", ret);
-               put_device(&sw->dev);
+               put_device(&sw_dev->dev);
                return ERR_PTR(ret);
        }
 
-       return sw;
+       return sw_dev;
 }
 EXPORT_SYMBOL_GPL(typec_switch_register);
 
 int typec_switch_set(struct typec_switch *sw,
                     enum typec_orientation orientation)
 {
+       struct typec_switch_dev *sw_dev;
+       unsigned int i;
+       int ret;
+
        if (IS_ERR_OR_NULL(sw))
                return 0;
 
-       return sw->set(sw, orientation);
+       for (i = 0; i < sw->num_sw_devs; i++) {
+               sw_dev = sw->sw_devs[i];
+
+               ret = sw_dev->set(sw_dev, orientation);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(typec_switch_set);
 
 /**
  * typec_switch_unregister - Unregister USB Type-C orientation switch
- * @sw: USB Type-C orientation switch
+ * @sw_dev: USB Type-C orientation switch
  *
  * Unregister switch that was registered with typec_switch_register().
  */
-void typec_switch_unregister(struct typec_switch *sw)
+void typec_switch_unregister(struct typec_switch_dev *sw_dev)
 {
-       if (!IS_ERR_OR_NULL(sw))
-               device_unregister(&sw->dev);
+       if (!IS_ERR_OR_NULL(sw_dev))
+               device_unregister(&sw_dev->dev);
 }
 EXPORT_SYMBOL_GPL(typec_switch_unregister);
 
-void typec_switch_set_drvdata(struct typec_switch *sw, void *data)
+void typec_switch_set_drvdata(struct typec_switch_dev *sw_dev, void *data)
 {
-       dev_set_drvdata(&sw->dev, data);
+       dev_set_drvdata(&sw_dev->dev, data);
 }
 EXPORT_SYMBOL_GPL(typec_switch_set_drvdata);
 
-void *typec_switch_get_drvdata(struct typec_switch *sw)
+void *typec_switch_get_drvdata(struct typec_switch_dev *sw_dev)
 {
-       return dev_get_drvdata(&sw->dev);
+       return dev_get_drvdata(&sw_dev->dev);
 }
 EXPORT_SYMBOL_GPL(typec_switch_get_drvdata);
 
 /* ------------------------------------------------------------------------- */
 
+struct typec_mux {
+       struct typec_mux_dev *mux_devs[TYPEC_MUX_MAX_DEVS];
+       unsigned int num_mux_devs;
+};
+
 static int mux_fwnode_match(struct device *dev, const void *fwnode)
 {
-       if (!is_typec_mux(dev))
+       if (!is_typec_mux_dev(dev))
                return 0;
 
        return dev_fwnode(dev) == fwnode;
@@ -246,7 +318,7 @@ find_mux:
        dev = class_find_device(&typec_mux_class, NULL, fwnode,
                                mux_fwnode_match);
 
-       return dev ? to_typec_mux(dev) : ERR_PTR(-EPROBE_DEFER);
+       return dev ? to_typec_mux_dev(dev) : ERR_PTR(-EPROBE_DEFER);
 }
 
 /**
@@ -262,14 +334,50 @@ find_mux:
 struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode,
                                       const struct typec_altmode_desc *desc)
 {
+       struct typec_mux_dev *mux_devs[TYPEC_MUX_MAX_DEVS];
        struct typec_mux *mux;
+       int count;
+       int err;
+       int i;
+
+       mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+       if (!mux)
+               return ERR_PTR(-ENOMEM);
+
+       count = fwnode_connection_find_matches(fwnode, "mode-switch",
+                                              (void *)desc, typec_mux_match,
+                                              (void **)mux_devs,
+                                              ARRAY_SIZE(mux_devs));
+       if (count <= 0) {
+               kfree(mux);
+               return NULL;
+       }
 
-       mux = fwnode_connection_find_match(fwnode, "mode-switch", (void *)desc,
-                                          typec_mux_match);
-       if (!IS_ERR_OR_NULL(mux))
-               WARN_ON(!try_module_get(mux->dev.parent->driver->owner));
+       for (i = 0; i < count; i++) {
+               if (IS_ERR(mux_devs[i])) {
+                       err = PTR_ERR(mux_devs[i]);
+                       goto put_mux_devs;
+               }
+       }
+
+       for (i = 0; i < count; i++) {
+               WARN_ON(!try_module_get(mux_devs[i]->dev.parent->driver->owner));
+               mux->mux_devs[i] = mux_devs[i];
+       }
+
+       mux->num_mux_devs = count;
 
        return mux;
+
+put_mux_devs:
+       for (i = 0; i < count; i++) {
+               if (!IS_ERR(mux_devs[i]))
+                       put_device(&mux_devs[i]->dev);
+       }
+
+       kfree(mux);
+
+       return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(fwnode_typec_mux_get);
 
@@ -281,25 +389,45 @@ EXPORT_SYMBOL_GPL(fwnode_typec_mux_get);
  */
 void typec_mux_put(struct typec_mux *mux)
 {
-       if (!IS_ERR_OR_NULL(mux)) {
-               module_put(mux->dev.parent->driver->owner);
-               put_device(&mux->dev);
+       struct typec_mux_dev *mux_dev;
+       unsigned int i;
+
+       if (IS_ERR_OR_NULL(mux))
+               return;
+
+       for (i = 0; i < mux->num_mux_devs; i++) {
+               mux_dev = mux->mux_devs[i];
+               module_put(mux_dev->dev.parent->driver->owner);
+               put_device(&mux_dev->dev);
        }
+       kfree(mux);
 }
 EXPORT_SYMBOL_GPL(typec_mux_put);
 
 int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
 {
+       struct typec_mux_dev *mux_dev;
+       unsigned int i;
+       int ret;
+
        if (IS_ERR_OR_NULL(mux))
                return 0;
 
-       return mux->set(mux, state);
+       for (i = 0; i < mux->num_mux_devs; i++) {
+               mux_dev = mux->mux_devs[i];
+
+               ret = mux_dev->set(mux_dev, state);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(typec_mux_set);
 
 static void typec_mux_release(struct device *dev)
 {
-       kfree(to_typec_mux(dev));
+       kfree(to_typec_mux_dev(dev));
 }
 
 const struct device_type typec_mux_dev_type = {
@@ -317,63 +445,66 @@ const struct device_type typec_mux_dev_type = {
  * the pins on the connector need to be reconfigured. This function registers
  * multiplexer switches routing the pins on the connector.
  */
-struct typec_mux *
+struct typec_mux_dev *
 typec_mux_register(struct device *parent, const struct typec_mux_desc *desc)
 {
-       struct typec_mux *mux;
+       struct typec_mux_dev *mux_dev;
        int ret;
 
        if (!desc || !desc->set)
                return ERR_PTR(-EINVAL);
 
-       mux = kzalloc(sizeof(*mux), GFP_KERNEL);
-       if (!mux)
+       mux_dev = kzalloc(sizeof(*mux_dev), GFP_KERNEL);
+       if (!mux_dev)
                return ERR_PTR(-ENOMEM);
 
-       mux->set = desc->set;
+       mux_dev->set = desc->set;
 
-       device_initialize(&mux->dev);
-       mux->dev.parent = parent;
-       mux->dev.fwnode = desc->fwnode;
-       mux->dev.class = &typec_mux_class;
-       mux->dev.type = &typec_mux_dev_type;
-       mux->dev.driver_data = desc->drvdata;
-       dev_set_name(&mux->dev, "%s-mux",
-                    desc->name ? desc->name : dev_name(parent));
+       device_initialize(&mux_dev->dev);
+       mux_dev->dev.parent = parent;
+       mux_dev->dev.fwnode = desc->fwnode;
+       mux_dev->dev.class = &typec_mux_class;
+       mux_dev->dev.type = &typec_mux_dev_type;
+       mux_dev->dev.driver_data = desc->drvdata;
+       ret = dev_set_name(&mux_dev->dev, "%s-mux", desc->name ? desc->name : dev_name(parent));
+       if (ret) {
+               put_device(&mux_dev->dev);
+               return ERR_PTR(ret);
+       }
 
-       ret = device_add(&mux->dev);
+       ret = device_add(&mux_dev->dev);
        if (ret) {
                dev_err(parent, "failed to register mux (%d)\n", ret);
-               put_device(&mux->dev);
+               put_device(&mux_dev->dev);
                return ERR_PTR(ret);
        }
 
-       return mux;
+       return mux_dev;
 }
 EXPORT_SYMBOL_GPL(typec_mux_register);
 
 /**
  * typec_mux_unregister - Unregister Multiplexer Switch
- * @mux: USB Type-C Connector Multiplexer/DeMultiplexer
+ * @mux_dev: USB Type-C Connector Multiplexer/DeMultiplexer
  *
  * Unregister mux that was registered with typec_mux_register().
  */
-void typec_mux_unregister(struct typec_mux *mux)
+void typec_mux_unregister(struct typec_mux_dev *mux_dev)
 {
-       if (!IS_ERR_OR_NULL(mux))
-               device_unregister(&mux->dev);
+       if (!IS_ERR_OR_NULL(mux_dev))
+               device_unregister(&mux_dev->dev);
 }
 EXPORT_SYMBOL_GPL(typec_mux_unregister);
 
-void typec_mux_set_drvdata(struct typec_mux *mux, void *data)
+void typec_mux_set_drvdata(struct typec_mux_dev *mux_dev, void *data)
 {
-       dev_set_drvdata(&mux->dev, data);
+       dev_set_drvdata(&mux_dev->dev, data);
 }
 EXPORT_SYMBOL_GPL(typec_mux_set_drvdata);
 
-void *typec_mux_get_drvdata(struct typec_mux *mux)
+void *typec_mux_get_drvdata(struct typec_mux_dev *mux_dev)
 {
-       return dev_get_drvdata(&mux->dev);
+       return dev_get_drvdata(&mux_dev->dev);
 }
 EXPORT_SYMBOL_GPL(typec_mux_get_drvdata);
 
index b1d6e83..58f0f28 100644 (file)
@@ -5,23 +5,23 @@
 
 #include <linux/usb/typec_mux.h>
 
-struct typec_switch {
+struct typec_switch_dev {
        struct device dev;
        typec_switch_set_fn_t set;
 };
 
-struct typec_mux {
+struct typec_mux_dev {
        struct device dev;
        typec_mux_set_fn_t set;
 };
 
-#define to_typec_switch(_dev_) container_of(_dev_, struct typec_switch, dev)
-#define to_typec_mux(_dev_) container_of(_dev_, struct typec_mux, dev)
+#define to_typec_switch_dev(_dev_) container_of(_dev_, struct typec_switch_dev, dev)
+#define to_typec_mux_dev(_dev_) container_of(_dev_, struct typec_mux_dev, dev)
 
 extern const struct device_type typec_switch_dev_type;
 extern const struct device_type typec_mux_dev_type;
 
-#define is_typec_switch(dev) ((dev)->type == &typec_switch_dev_type)
-#define is_typec_mux(dev) ((dev)->type == &typec_mux_dev_type)
+#define is_typec_switch_dev(dev) ((dev)->type == &typec_switch_dev_type)
+#define is_typec_mux_dev(dev) ((dev)->type == &typec_mux_dev_type)
 
 #endif /* __USB_TYPEC_MUX__ */
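
The net effect of the rename: providers register a struct typec_switch_dev (or typec_mux_dev), while consumers keep the opaque struct typec_switch / typec_mux handle, which can now fan out to several devices found behind one fwnode. A provider-side sketch with illustrative names (example_priv and its reverse flag are hypothetical), using only the API shown above:

	static int example_sw_set(struct typec_switch_dev *sw_dev,
				  enum typec_orientation orientation)
	{
		struct example_priv *priv = typec_switch_get_drvdata(sw_dev);

		/* program the hardware behind priv for the requested orientation */
		priv->reverse = (orientation == TYPEC_ORIENTATION_REVERSE);
		return 0;
	}

	/* in probe(): */
	struct typec_switch_desc sw_desc = {
		.fwnode  = dev_fwnode(dev),
		.drvdata = priv,
		.set     = example_sw_set,
	};
	struct typec_switch_dev *sw_dev = typec_switch_register(dev, &sw_desc);

	if (IS_ERR(sw_dev))
		return PTR_ERR(sw_dev);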
index edead55..5eb2c17 100644 (file)
@@ -2,6 +2,16 @@
 
 menu "USB Type-C Multiplexer/DeMultiplexer Switch support"
 
+config TYPEC_MUX_FSA4480
+       tristate "ON Semi FSA4480 Analog Audio Switch driver"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         Driver for the ON Semiconductor FSA4480 Analog Audio Switch, which
+         provides support for muxing analog audio and sideband signals on a
+         common USB Type-C connector.
+         If compiled as a module, the module will be named fsa4480.
+
 config TYPEC_MUX_PI3USB30532
        tristate "Pericom PI3USB30532 Type-C cross switch driver"
        depends on I2C
index 280a6f5..e52a56c 100644 (file)
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 
+obj-$(CONFIG_TYPEC_MUX_FSA4480)                += fsa4480.o
 obj-$(CONFIG_TYPEC_MUX_PI3USB30532)    += pi3usb30532.o
 obj-$(CONFIG_TYPEC_MUX_INTEL_PMC)      += intel_pmc_mux.o
diff --git a/drivers/usb/typec/mux/fsa4480.c b/drivers/usb/typec/mux/fsa4480.c
new file mode 100644 (file)
index 0000000..6184f53
--- /dev/null
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021-2022 Linaro Ltd.
+ * Copyright (C) 2018-2020 The Linux Foundation
+ */
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+
+#define FSA4480_SWITCH_ENABLE  0x04
+#define FSA4480_SWITCH_SELECT  0x05
+#define FSA4480_SWITCH_STATUS1 0x07
+#define FSA4480_SLOW_L         0x08
+#define FSA4480_SLOW_R         0x09
+#define FSA4480_SLOW_MIC       0x0a
+#define FSA4480_SLOW_SENSE     0x0b
+#define FSA4480_SLOW_GND       0x0c
+#define FSA4480_DELAY_L_R      0x0d
+#define FSA4480_DELAY_L_MIC    0x0e
+#define FSA4480_DELAY_L_SENSE  0x0f
+#define FSA4480_DELAY_L_AGND   0x10
+#define FSA4480_RESET          0x1e
+#define FSA4480_MAX_REGISTER   0x1f
+
+#define FSA4480_ENABLE_DEVICE  BIT(7)
+#define FSA4480_ENABLE_SBU     GENMASK(6, 5)
+#define FSA4480_ENABLE_USB     GENMASK(4, 3)
+
+#define FSA4480_SEL_SBU_REVERSE        GENMASK(6, 5)
+#define FSA4480_SEL_USB                GENMASK(4, 3)
+
+struct fsa4480 {
+       struct i2c_client *client;
+
+       /* used to serialize concurrent change requests */
+       struct mutex lock;
+
+       struct typec_switch_dev *sw;
+       struct typec_mux_dev *mux;
+
+       struct regmap *regmap;
+
+       u8 cur_enable;
+       u8 cur_select;
+};
+
+static const struct regmap_config fsa4480_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = FSA4480_MAX_REGISTER,
+       /* Accesses only done under fsa4480->lock */
+       .disable_locking = true,
+};
+
+static int fsa4480_switch_set(struct typec_switch_dev *sw,
+                             enum typec_orientation orientation)
+{
+       struct fsa4480 *fsa = typec_switch_get_drvdata(sw);
+       u8 new_sel;
+
+       mutex_lock(&fsa->lock);
+       new_sel = FSA4480_SEL_USB;
+       if (orientation == TYPEC_ORIENTATION_REVERSE)
+               new_sel |= FSA4480_SEL_SBU_REVERSE;
+
+       if (new_sel == fsa->cur_select)
+               goto out_unlock;
+
+       if (fsa->cur_enable & FSA4480_ENABLE_SBU) {
+               /* Disable SBU output while re-configuring the switch */
+               regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE,
+                            fsa->cur_enable & ~FSA4480_ENABLE_SBU);
+
+               /* 35us to allow the SBU switch to turn off */
+               usleep_range(35, 1000);
+       }
+
+       regmap_write(fsa->regmap, FSA4480_SWITCH_SELECT, new_sel);
+       fsa->cur_select = new_sel;
+
+       if (fsa->cur_enable & FSA4480_ENABLE_SBU) {
+               regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, fsa->cur_enable);
+
+               /* 15us to allow the SBU switch to turn on again */
+               usleep_range(15, 1000);
+       }
+
+out_unlock:
+       mutex_unlock(&fsa->lock);
+
+       return 0;
+}
+
+static int fsa4480_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
+{
+       struct fsa4480 *fsa = typec_mux_get_drvdata(mux);
+       u8 new_enable;
+
+       mutex_lock(&fsa->lock);
+
+       new_enable = FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB;
+       if (state->mode >= TYPEC_DP_STATE_A)
+               new_enable |= FSA4480_ENABLE_SBU;
+
+       if (new_enable == fsa->cur_enable)
+               goto out_unlock;
+
+       regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, new_enable);
+       fsa->cur_enable = new_enable;
+
+       if (new_enable & FSA4480_ENABLE_SBU) {
+               /* 15us to allow the SBU switch to turn on */
+               usleep_range(15, 1000);
+       }
+
+out_unlock:
+       mutex_unlock(&fsa->lock);
+
+       return 0;
+}
+
+static int fsa4480_probe(struct i2c_client *client)
+{
+       struct device *dev = &client->dev;
+       struct typec_switch_desc sw_desc = { };
+       struct typec_mux_desc mux_desc = { };
+       struct fsa4480 *fsa;
+
+       fsa = devm_kzalloc(dev, sizeof(*fsa), GFP_KERNEL);
+       if (!fsa)
+               return -ENOMEM;
+
+       fsa->client = client;
+       mutex_init(&fsa->lock);
+
+       fsa->regmap = devm_regmap_init_i2c(client, &fsa4480_regmap_config);
+       if (IS_ERR(fsa->regmap))
+               return dev_err_probe(dev, PTR_ERR(fsa->regmap), "failed to initialize regmap\n");
+
+       fsa->cur_enable = FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB;
+       fsa->cur_select = FSA4480_SEL_USB;
+
+       /* set default settings */
+       regmap_write(fsa->regmap, FSA4480_SLOW_L, 0x00);
+       regmap_write(fsa->regmap, FSA4480_SLOW_R, 0x00);
+       regmap_write(fsa->regmap, FSA4480_SLOW_MIC, 0x00);
+       regmap_write(fsa->regmap, FSA4480_SLOW_SENSE, 0x00);
+       regmap_write(fsa->regmap, FSA4480_SLOW_GND, 0x00);
+       regmap_write(fsa->regmap, FSA4480_DELAY_L_R, 0x00);
+       regmap_write(fsa->regmap, FSA4480_DELAY_L_MIC, 0x00);
+       regmap_write(fsa->regmap, FSA4480_DELAY_L_SENSE, 0x00);
+       regmap_write(fsa->regmap, FSA4480_DELAY_L_AGND, 0x09);
+       regmap_write(fsa->regmap, FSA4480_SWITCH_SELECT, fsa->cur_select);
+       regmap_write(fsa->regmap, FSA4480_SWITCH_ENABLE, fsa->cur_enable);
+
+       sw_desc.drvdata = fsa;
+       sw_desc.fwnode = dev_fwnode(dev);
+       sw_desc.set = fsa4480_switch_set;
+
+       fsa->sw = typec_switch_register(dev, &sw_desc);
+       if (IS_ERR(fsa->sw))
+               return dev_err_probe(dev, PTR_ERR(fsa->sw), "failed to register typec switch\n");
+
+       mux_desc.drvdata = fsa;
+       mux_desc.fwnode = dev_fwnode(dev);
+       mux_desc.set = fsa4480_mux_set;
+
+       fsa->mux = typec_mux_register(dev, &mux_desc);
+       if (IS_ERR(fsa->mux)) {
+               typec_switch_unregister(fsa->sw);
+               return dev_err_probe(dev, PTR_ERR(fsa->mux), "failed to register typec mux\n");
+       }
+
+       i2c_set_clientdata(client, fsa);
+       return 0;
+}
+
+static int fsa4480_remove(struct i2c_client *client)
+{
+       struct fsa4480 *fsa = i2c_get_clientdata(client);
+
+       typec_mux_unregister(fsa->mux);
+       typec_switch_unregister(fsa->sw);
+
+       return 0;
+}
+
+static const struct i2c_device_id fsa4480_table[] = {
+       { "fsa4480" },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, fsa4480_table);
+
+static const struct of_device_id fsa4480_of_table[] = {
+       { .compatible = "fcs,fsa4480" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, fsa4480_of_table);
+
+static struct i2c_driver fsa4480_driver = {
+       .driver = {
+               .name = "fsa4480",
+               .of_match_table = fsa4480_of_table,
+       },
+       .probe_new      = fsa4480_probe,
+       .remove         = fsa4480_remove,
+       .id_table       = fsa4480_table,
+};
+module_i2c_driver(fsa4480_driver);
+
+MODULE_DESCRIPTION("ON Semiconductor FSA4480 driver");
+MODULE_LICENSE("GPL v2");
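
On the consumer side, a port driver does not need to know about the FSA4480 specifically; it resolves the switch through the connector fwnode and drives it via the class API. A hedged sketch, assuming connector_fwnode points at the connector node wired to this device and dev is the port driver's device:

	struct typec_switch *sw;
	int ret;

	sw = fwnode_typec_switch_get(connector_fwnode);
	if (IS_ERR(sw))
		return PTR_ERR(sw);

	/* whenever the plug orientation is (re)detected: */
	ret = typec_switch_set(sw, TYPEC_ORIENTATION_REVERSE);
	if (ret)
		dev_warn(dev, "failed to set orientation: %d\n", ret);

	/* on teardown: */
	typec_switch_put(sw);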
index 2cdd221..47b733f 100644 (file)
@@ -121,8 +121,8 @@ struct pmc_usb_port {
        int num;
        u32 iom_status;
        struct pmc_usb *pmc;
-       struct typec_mux *typec_mux;
-       struct typec_switch *typec_sw;
+       struct typec_mux_dev *typec_mux;
+       struct typec_switch_dev *typec_sw;
        struct usb_role_switch *usb_sw;
 
        enum typec_orientation orientation;
@@ -173,7 +173,7 @@ static int hsl_orientation(struct pmc_usb_port *port)
        return port->orientation - 1;
 }
 
-static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
+static int pmc_usb_send_command(struct intel_scu_ipc_dev *ipc, u8 *msg, u32 len)
 {
        u8 response[4];
        u8 status_res;
@@ -184,7 +184,7 @@ static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
         * Status can be checked from the response message if the
         * function intel_scu_ipc_dev_command succeeds.
         */
-       ret = intel_scu_ipc_dev_command(port->pmc->ipc, PMC_USBC_CMD, 0, msg,
+       ret = intel_scu_ipc_dev_command(ipc, PMC_USBC_CMD, 0, msg,
                                        len, response, sizeof(response));
 
        if (ret)
@@ -203,6 +203,23 @@ static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
        return 0;
 }
 
+static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
+{
+       int retry_count = 3;
+       int ret;
+
+       /*
+        * If the PMC reports busy, retry the command (up to three attempts)
+        */
+       while (retry_count--) {
+               ret = pmc_usb_send_command(port->pmc->ipc, msg, len);
+               if (ret != -EBUSY)
+                       break;
+       }
+
+       return ret;
+}
+
 static int
 pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp)
 {
@@ -416,7 +433,7 @@ static int pmc_usb_connect(struct pmc_usb_port *port, enum usb_role role)
 }
 
 static int
-pmc_usb_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
+pmc_usb_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
 {
        struct pmc_usb_port *port = typec_mux_get_drvdata(mux);
 
@@ -452,7 +469,7 @@ pmc_usb_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
        return -EOPNOTSUPP;
 }
 
-static int pmc_usb_set_orientation(struct typec_switch *sw,
+static int pmc_usb_set_orientation(struct typec_switch_dev *sw,
                                   enum typec_orientation orientation)
 {
        struct pmc_usb_port *port = typec_switch_get_drvdata(sw);
index 7afe275..6ce9f28 100644 (file)
@@ -23,8 +23,8 @@
 struct pi3usb30532 {
        struct i2c_client *client;
        struct mutex lock; /* protects the cached conf register */
-       struct typec_switch *sw;
-       struct typec_mux *mux;
+       struct typec_switch_dev *sw;
+       struct typec_mux_dev *mux;
        u8 conf;
 };
 
@@ -45,7 +45,7 @@ static int pi3usb30532_set_conf(struct pi3usb30532 *pi, u8 new_conf)
        return 0;
 }
 
-static int pi3usb30532_sw_set(struct typec_switch *sw,
+static int pi3usb30532_sw_set(struct typec_switch_dev *sw,
                              enum typec_orientation orientation)
 {
        struct pi3usb30532 *pi = typec_switch_get_drvdata(sw);
@@ -74,7 +74,7 @@ static int pi3usb30532_sw_set(struct typec_switch *sw,
 }
 
 static int
-pi3usb30532_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
+pi3usb30532_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
 {
        struct pi3usb30532 *pi = typec_mux_get_drvdata(mux);
        u8 new_conf;
index 72f9001..96c55ea 100644 (file)
@@ -1708,8 +1708,8 @@ static int fusb302_probe(struct i2c_client *client,
         */
        if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
                chip->extcon = extcon_get_extcon_dev(name);
-               if (!chip->extcon)
-                       return -EPROBE_DEFER;
+               if (IS_ERR(chip->extcon))
+                       return PTR_ERR(chip->extcon);
        }
 
        chip->vbus = devm_regulator_get(chip->dev, "vbus");
index 16b4560..dfbba5a 100644 (file)
@@ -93,6 +93,8 @@ struct tps6598x {
        struct power_supply *psy;
        struct power_supply_desc psy_desc;
        enum power_supply_usb_type usb_type;
+
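+       /* cached copy of TPS_REG_POWER_STATUS, updated on power status reads */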
+       u16 pwr_status;
 };
 
 static enum power_supply_property tps6598x_psy_props[] = {
@@ -230,17 +232,12 @@ static int tps6598x_connect(struct tps6598x *tps, u32 status)
 {
        struct typec_partner_desc desc;
        enum typec_pwr_opmode mode;
-       u16 pwr_status;
        int ret;
 
        if (tps->partner)
                return 0;
 
-       ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &pwr_status);
-       if (ret < 0)
-               return ret;
-
-       mode = TPS_POWER_STATUS_PWROPMODE(pwr_status);
+       mode = TPS_POWER_STATUS_PWROPMODE(tps->pwr_status);
 
        desc.usb_pd = mode == TYPEC_PWR_MODE_PD;
        desc.accessory = TYPEC_ACCESSORY_NONE; /* XXX: handle accessories */
@@ -455,6 +452,7 @@ static bool tps6598x_read_power_status(struct tps6598x *tps)
                dev_err(tps->dev, "failed to read power status: %d\n", ret);
                return false;
        }
+       tps->pwr_status = pwr_status;
        trace_tps6598x_power_status(pwr_status);
 
        return true;
@@ -601,15 +599,8 @@ static const struct regmap_config tps6598x_regmap_config = {
 static int tps6598x_psy_get_online(struct tps6598x *tps,
                                   union power_supply_propval *val)
 {
-       int ret;
-       u16 pwr_status;
-
-       ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &pwr_status);
-       if (ret < 0)
-               return ret;
-
-       if (TPS_POWER_STATUS_CONNECTION(pwr_status) &&
-           TPS_POWER_STATUS_SOURCESINK(pwr_status)) {
+       if (TPS_POWER_STATUS_CONNECTION(tps->pwr_status) &&
+           TPS_POWER_STATUS_SOURCESINK(tps->pwr_status)) {
                val->intval = 1;
        } else {
                val->intval = 0;
@@ -622,15 +613,11 @@ static int tps6598x_psy_get_prop(struct power_supply *psy,
                                 union power_supply_propval *val)
 {
        struct tps6598x *tps = power_supply_get_drvdata(psy);
-       u16 pwr_status;
        int ret = 0;
 
        switch (psp) {
        case POWER_SUPPLY_PROP_USB_TYPE:
-               ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &pwr_status);
-               if (ret < 0)
-                       return ret;
-               if (TPS_POWER_STATUS_PWROPMODE(pwr_status) == TYPEC_PWR_MODE_PD)
+               if (TPS_POWER_STATUS_PWROPMODE(tps->pwr_status) == TYPEC_PWR_MODE_PD)
                        val->intval = POWER_SUPPLY_USB_TYPE_PD;
                else
                        val->intval = POWER_SUPPLY_USB_TYPE_C;
@@ -837,6 +824,11 @@ static int tps6598x_probe(struct i2c_client *client)
        fwnode_handle_put(fwnode);
 
        if (status & TPS_STATUS_PLUG_PRESENT) {
+               ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &tps->pwr_status);
+               if (ret < 0) {
+                       dev_err(tps->dev, "failed to read power status: %d\n", ret);
+                       goto err_role_put;
+               }
                ret = tps6598x_connect(tps, status);
                if (ret)
                        dev_err(&client->dev, "failed to register partner\n");
index a6045ae..cbd862f 100644 (file)
@@ -1063,6 +1063,14 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
        con->num = index + 1;
        con->ucsi = ucsi;
 
+       cap->fwnode = ucsi_find_fwnode(con);
+       con->usb_role_sw = fwnode_usb_role_switch_get(cap->fwnode);
+       if (IS_ERR(con->usb_role_sw)) {
+               dev_err(ucsi->dev, "con%d: failed to get usb role switch\n",
+                       con->num);
+               return PTR_ERR(con->usb_role_sw);
+       }
+
        /* Delay other interactions with the con until registration is complete */
        mutex_lock(&con->lock);
 
@@ -1098,7 +1106,6 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
        if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DEBUG_ACCESSORY)
                *accessory = TYPEC_ACCESSORY_DEBUG;
 
-       cap->fwnode = ucsi_find_fwnode(con);
        cap->driver_data = con;
        cap->ops = &ucsi_ops;
 
@@ -1156,13 +1163,6 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
                ucsi_port_psy_changed(con);
        }
 
-       con->usb_role_sw = fwnode_usb_role_switch_get(cap->fwnode);
-       if (IS_ERR(con->usb_role_sw)) {
-               dev_err(ucsi->dev, "con%d: failed to get usb role switch\n",
-                       con->num);
-               con->usb_role_sw = NULL;
-       }
-
        /* Only notify USB controller if partner supports USB data */
        if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) & UCSI_CONSTAT_PARTNER_FLAG_USB))
                u_role = USB_ROLE_NONE;
@@ -1196,6 +1196,32 @@ out_unlock:
        return ret;
 }
 
+static void ucsi_unregister_connectors(struct ucsi *ucsi)
+{
+       struct ucsi_connector *con;
+       int i;
+
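+       /*
+        * The connector array may be only partially initialized; stop at the
+        * first connector whose workqueue was never created.
+        */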
+       if (!ucsi->connector)
+               return;
+
+       for (i = 0; i < ucsi->cap.num_connectors; i++) {
+               con = &ucsi->connector[i];
+
+               if (!con->wq)
+                       break;
+
+               cancel_work_sync(&con->work);
+               ucsi_unregister_partner(con);
+               ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
+               ucsi_unregister_port_psy(con);
+               destroy_workqueue(con->wq);
+               typec_unregister_port(con->port);
+       }
+
+       kfree(ucsi->connector);
+       ucsi->connector = NULL;
+}
+
 /**
  * ucsi_init - Initialize UCSI interface
  * @ucsi: UCSI to be initialized
@@ -1204,7 +1230,6 @@ out_unlock:
  */
 static int ucsi_init(struct ucsi *ucsi)
 {
-       struct ucsi_connector *con;
        u64 command;
        int ret;
        int i;
@@ -1235,7 +1260,7 @@ static int ucsi_init(struct ucsi *ucsi)
        }
 
        /* Allocate the connectors. Released in ucsi_unregister() */
-       ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
+       ucsi->connector = kcalloc(ucsi->cap.num_connectors,
                                  sizeof(*ucsi->connector), GFP_KERNEL);
        if (!ucsi->connector) {
                ret = -ENOMEM;
@@ -1259,15 +1284,7 @@ static int ucsi_init(struct ucsi *ucsi)
        return 0;
 
 err_unregister:
-       for (con = ucsi->connector; con->port; con++) {
-               ucsi_unregister_partner(con);
-               ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
-               ucsi_unregister_port_psy(con);
-               if (con->wq)
-                       destroy_workqueue(con->wq);
-               typec_unregister_port(con->port);
-               con->port = NULL;
-       }
+       ucsi_unregister_connectors(ucsi);
 
 err_reset:
        memset(&ucsi->cap, 0, sizeof(ucsi->cap));
@@ -1278,12 +1295,20 @@ err:
 
 static void ucsi_init_work(struct work_struct *work)
 {
-       struct ucsi *ucsi = container_of(work, struct ucsi, work);
+       struct ucsi *ucsi = container_of(work, struct ucsi, work.work);
        int ret;
 
        ret = ucsi_init(ucsi);
        if (ret)
                dev_err(ucsi->dev, "PPM init failed (%d)\n", ret);
+
+       if (ret == -EPROBE_DEFER) {
+               if (ucsi->work_count++ > UCSI_ROLE_SWITCH_WAIT_COUNT)
+                       return;
+
+               queue_delayed_work(system_long_wq, &ucsi->work,
+                                  UCSI_ROLE_SWITCH_INTERVAL);
+       }
 }
 
 /**
@@ -1323,7 +1348,7 @@ struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
        if (!ucsi)
                return ERR_PTR(-ENOMEM);
 
-       INIT_WORK(&ucsi->work, ucsi_init_work);
+       INIT_DELAYED_WORK(&ucsi->work, ucsi_init_work);
        mutex_init(&ucsi->ppm_lock);
        ucsi->dev = dev;
        ucsi->ops = ops;
@@ -1358,7 +1383,7 @@ int ucsi_register(struct ucsi *ucsi)
        if (!ucsi->version)
                return -ENODEV;
 
-       queue_work(system_long_wq, &ucsi->work);
+       queue_delayed_work(system_long_wq, &ucsi->work, 0);
 
        return 0;
 }
@@ -1373,26 +1398,14 @@ EXPORT_SYMBOL_GPL(ucsi_register);
 void ucsi_unregister(struct ucsi *ucsi)
 {
        u64 cmd = UCSI_SET_NOTIFICATION_ENABLE;
-       int i;
 
        /* Make sure that we are not in the middle of driver initialization */
-       cancel_work_sync(&ucsi->work);
+       cancel_delayed_work_sync(&ucsi->work);
 
        /* Disable notifications */
        ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
 
-       for (i = 0; i < ucsi->cap.num_connectors; i++) {
-               cancel_work_sync(&ucsi->connector[i].work);
-               ucsi_unregister_partner(&ucsi->connector[i]);
-               ucsi_unregister_altmodes(&ucsi->connector[i],
-                                        UCSI_RECIPIENT_CON);
-               ucsi_unregister_port_psy(&ucsi->connector[i]);
-               if (ucsi->connector[i].wq)
-                       destroy_workqueue(ucsi->connector[i].wq);
-               typec_unregister_port(ucsi->connector[i].port);
-       }
-
-       kfree(ucsi->connector);
+       ucsi_unregister_connectors(ucsi);
 }
 EXPORT_SYMBOL_GPL(ucsi_unregister);
 
index 280f1e1..8eb391e 100644 (file)
@@ -287,7 +287,11 @@ struct ucsi {
        struct ucsi_capability cap;
        struct ucsi_connector *connector;
 
-       struct work_struct work;
+       struct delayed_work work;
+       int work_count;
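+/*
+ * Retry ucsi_init() every 100 ms, for up to roughly 10 seconds, while it
+ * keeps deferring (e.g. the USB role switch is not available yet).
+ */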
+#define UCSI_ROLE_SWITCH_RETRY_PER_HZ  10
+#define UCSI_ROLE_SWITCH_INTERVAL      (HZ / UCSI_ROLE_SWITCH_RETRY_PER_HZ)
+#define UCSI_ROLE_SWITCH_WAIT_COUNT    (10 * UCSI_ROLE_SWITCH_RETRY_PER_HZ)
 
        /* PPM Communication lock */
        struct mutex ppm_lock;
index 6771f05..8873c16 100644 (file)
@@ -19,7 +19,7 @@
 struct ucsi_acpi {
        struct device *dev;
        struct ucsi *ucsi;
-       void __iomem *base;
+       void *base;
        struct completion complete;
        unsigned long flags;
        guid_t guid;
@@ -51,7 +51,7 @@ static int ucsi_acpi_read(struct ucsi *ucsi, unsigned int offset,
        if (ret)
                return ret;
 
-       memcpy(val, (const void __force *)(ua->base + offset), val_len);
+       memcpy(val, ua->base + offset, val_len);
 
        return 0;
 }
@@ -61,7 +61,7 @@ static int ucsi_acpi_async_write(struct ucsi *ucsi, unsigned int offset,
 {
        struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
 
-       memcpy((void __force *)(ua->base + offset), val, val_len);
+       memcpy(ua->base + offset, val, val_len);
 
        return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_WRITE);
 }
@@ -132,20 +132,9 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       /* This will make sure we can use ioremap() */
-       status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
-       if (ACPI_FAILURE(status))
-               return -ENOMEM;
-
-       /*
-        * NOTE: The memory region for the data structures is used also in an
-        * operation region, which means ACPI has already reserved it. Therefore
-        * it can not be requested here, and we can not use
-        * devm_ioremap_resource().
-        */
-       ua->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
-       if (!ua->base)
-               return -ENOMEM;
+       ua->base = devm_memremap(&pdev->dev, res->start, resource_size(res), MEMREMAP_WB);
+       if (IS_ERR(ua->base))
+               return PTR_ERR(ua->base);
 
        ret = guid_parse(UCSI_DSM_UUID, &ua->guid);
        if (ret)
index d8d3892..3c6d452 100644 (file)
@@ -393,7 +393,6 @@ static int stub_probe(struct usb_device *udev)
 
 err_port:
        dev_set_drvdata(&udev->dev, NULL);
-       usb_put_dev(udev);
 
        /* we already have busid_priv, just lock busid_lock */
        spin_lock(&busid_priv->busid_lock);
@@ -408,6 +407,7 @@ call_put_busid_priv:
        put_busid_priv(busid_priv);
 
 sdev_free:
+       usb_put_dev(udev);
        stub_device_free(sdev);
 
        return rc;
index 325c220..5dd41e8 100644 (file)
@@ -138,7 +138,9 @@ static int tweak_set_configuration_cmd(struct urb *urb)
        req = (struct usb_ctrlrequest *) urb->setup_packet;
        config = le16_to_cpu(req->wValue);
 
+       usb_lock_device(sdev->udev);
        err = usb_set_configuration(sdev->udev, config);
+       usb_unlock_device(sdev->udev);
        if (err && err != -ENODEV)
                dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
                        config, err);
index f480d54..5a09a09 100644 (file)
@@ -470,7 +470,7 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                return ret;
 
        eni_vdpa = vdpa_alloc_device(struct eni_vdpa, vdpa,
-                                    dev, &eni_vdpa_ops, NULL, false);
+                                    dev, &eni_vdpa_ops, 1, 1, NULL, false);
        if (IS_ERR(eni_vdpa)) {
                ENI_ERR(pdev, "failed to allocate vDPA structure\n");
                return PTR_ERR(eni_vdpa);
index 4366320..0a56707 100644 (file)
@@ -290,16 +290,16 @@ static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
        struct ifcvf_hw *vf = &adapter->vf;
        int config_vector, ret;
 
-       if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
-               return 0;
-
        if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
-               /* vector 0 ~ vf->nr_vring for vqs, num vf->nr_vring vector for config interrupt */
                config_vector = vf->nr_vring;
-
-       if (vf->msix_vector_status ==  MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
+       else if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
                /* vector 0 for vqs and 1 for config interrupt */
                config_vector = 1;
+       else if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
+               /* re-use the vqs vector */
+               return 0;
+       else
+               return -EINVAL;
 
        snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
                 pci_name(pdev));
@@ -626,6 +626,11 @@ static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
        return  vf->config_size;
 }
 
+static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+{
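+       /* ifcvf exposes a single virtqueue group; all VQs map to group 0 */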
+       return 0;
+}
+
 static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset,
                                  void *buf, unsigned int len)
@@ -704,6 +709,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
        .get_device_id  = ifcvf_vdpa_get_device_id,
        .get_vendor_id  = ifcvf_vdpa_get_vendor_id,
        .get_vq_align   = ifcvf_vdpa_get_vq_align,
+       .get_vq_group   = ifcvf_vdpa_get_vq_group,
        .get_config_size        = ifcvf_vdpa_get_config_size,
        .get_config     = ifcvf_vdpa_get_config,
        .set_config     = ifcvf_vdpa_set_config,
@@ -758,14 +764,13 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
        pdev = ifcvf_mgmt_dev->pdev;
        dev = &pdev->dev;
        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
-                                   dev, &ifc_vdpa_ops, name, false);
+                                   dev, &ifc_vdpa_ops, 1, 1, name, false);
        if (IS_ERR(adapter)) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
                return PTR_ERR(adapter);
        }
 
        ifcvf_mgmt_dev->adapter = adapter;
-       pci_set_drvdata(pdev, ifcvf_mgmt_dev);
 
        vf = &adapter->vf;
        vf->dev_type = get_dev_type(pdev);
@@ -880,6 +885,8 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto err;
        }
 
+       pci_set_drvdata(pdev, ifcvf_mgmt_dev);
+
        return 0;
 
 err:
index daaf7b5..4410409 100644 (file)
@@ -61,6 +61,8 @@ struct mlx5_control_vq {
        struct vringh_kiov riov;
        struct vringh_kiov wiov;
        unsigned short head;
+       unsigned int received_desc;
+       unsigned int completed_desc;
 };
 
 struct mlx5_vdpa_wq_ent {
index e0de440..b7a9554 100644 (file)
@@ -48,6 +48,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 
 #define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
 
+#define MLX5V_UNTAGGED 0x1000
+
 struct mlx5_vdpa_net_resources {
        u32 tisn;
        u32 tdn;
@@ -119,6 +121,7 @@ struct mlx5_vdpa_virtqueue {
        struct mlx5_vdpa_umem umem2;
        struct mlx5_vdpa_umem umem3;
 
+       u32 counter_set_id;
        bool initialized;
        int index;
        u32 virtq_id;
@@ -143,6 +146,8 @@ static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
        return idx <= mvdev->max_idx;
 }
 
+#define MLX5V_MACVLAN_SIZE 256
+
 struct mlx5_vdpa_net {
        struct mlx5_vdpa_dev mvdev;
        struct mlx5_vdpa_net_resources res;
@@ -154,17 +159,22 @@ struct mlx5_vdpa_net {
         * since memory map might change and we need to destroy and create
         * resources while driver in operational.
         */
-       struct mutex reslock;
+       struct rw_semaphore reslock;
        struct mlx5_flow_table *rxft;
-       struct mlx5_fc *rx_counter;
-       struct mlx5_flow_handle *rx_rule_ucast;
-       struct mlx5_flow_handle *rx_rule_mcast;
        bool setup;
        u32 cur_num_vqs;
        u32 rqt_size;
        struct notifier_block nb;
        struct vdpa_callback config_cb;
        struct mlx5_vdpa_wq_ent cvq_ent;
+       struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
+};
+
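+/*
+ * One entry per MAC/VLAN combination, tracking the unicast and multicast
+ * steering rules installed for it.
+ */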
+struct macvlan_node {
+       struct hlist_node hlist;
+       struct mlx5_flow_handle *ucast_rule;
+       struct mlx5_flow_handle *mcast_rule;
+       u64 macvlan;
 };
 
 static void free_resources(struct mlx5_vdpa_net *ndev);
@@ -818,6 +828,12 @@ static u16 get_features_12_3(u64 features)
               (!!(features & BIT_ULL(VIRTIO_NET_F_GUEST_CSUM)) << 6);
 }
 
+static bool counters_supported(const struct mlx5_vdpa_dev *mvdev)
+{
+       return MLX5_CAP_GEN_64(mvdev->mdev, general_obj_types) &
+              BIT_ULL(MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+}
+
 static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
 {
        int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
@@ -872,6 +888,8 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
        MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
        MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
        MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
+       if (counters_supported(&ndev->mvdev))
+               MLX5_SET(virtio_q, vq_ctx, counter_set_id, mvq->counter_set_id);
 
        err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
        if (err)
@@ -1135,6 +1153,47 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
        return err;
 }
 
+static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
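+       /*
+        * Create an optional virtio_q counters object for this VQ; it is used
+        * later to report received/completed descriptor counts.
+        */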
+       u32 in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {};
+       u32 out[MLX5_ST_SZ_DW(create_virtio_q_counters_out)] = {};
+       void *cmd_hdr;
+       int err;
+
+       if (!counters_supported(&ndev->mvdev))
+               return 0;
+
+       cmd_hdr = MLX5_ADDR_OF(create_virtio_q_counters_in, in, hdr);
+
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+
+       err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out));
+       if (err)
+               return err;
+
+       mvq->counter_set_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+       return 0;
+}
+
+static void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_virtio_q_counters_in)] = {};
+       u32 out[MLX5_ST_SZ_DW(destroy_virtio_q_counters_out)] = {};
+
+       if (!counters_supported(&ndev->mvdev))
+               return;
+
+       MLX5_SET(destroy_virtio_q_counters_in, in, hdr.opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+       MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_id, mvq->counter_set_id);
+       MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid);
+       MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+       if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
+               mlx5_vdpa_warn(&ndev->mvdev, "failed to destroy counter set 0x%x\n",
+                              mvq->counter_set_id);
+}
+
 static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
 {
        u16 idx = mvq->index;
@@ -1162,6 +1221,10 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
        if (err)
                goto err_connect;
 
+       err = counter_set_alloc(ndev, mvq);
+       if (err)
+               goto err_counter;
+
        err = create_virtqueue(ndev, mvq);
        if (err)
                goto err_connect;
@@ -1179,6 +1242,8 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
        return 0;
 
 err_connect:
+       counter_set_dealloc(ndev, mvq);
+err_counter:
        qp_destroy(ndev, &mvq->vqqp);
 err_vqqp:
        qp_destroy(ndev, &mvq->fwqp);
@@ -1223,6 +1288,7 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
 
        suspend_vq(ndev, mvq);
        destroy_virtqueue(ndev, mvq);
+       counter_set_dealloc(ndev, mvq);
        qp_destroy(ndev, &mvq->vqqp);
        qp_destroy(ndev, &mvq->fwqp);
        cq_destroy(ndev, mvq->index);
@@ -1347,12 +1413,17 @@ static void destroy_tir(struct mlx5_vdpa_net *ndev)
        mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn);
 }
 
-static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
+#define MAX_STEERING_ENT 0x8000
+#define MAX_STEERING_GROUPS 2
+
+static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
+                                       u16 vid, bool tagged,
+                                       struct mlx5_flow_handle **ucast,
+                                       struct mlx5_flow_handle **mcast)
 {
-       struct mlx5_flow_destination dest[2] = {};
-       struct mlx5_flow_table_attr ft_attr = {};
+       struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {};
-       struct mlx5_flow_namespace *ns;
+       struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        void *headers_c;
        void *headers_v;
@@ -1365,85 +1436,178 @@ static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
                return -ENOMEM;
 
        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       ft_attr.max_fte = 2;
-       ft_attr.autogroup.max_num_groups = 2;
+       headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
+       headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+       dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
+       dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
+       memset(dmac_c, 0xff, ETH_ALEN);
+       ether_addr_copy(dmac_v, mac);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+       if (tagged) {
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, vid);
+       }
+       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+       dest.tir_num = ndev->res.tirn;
+       rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1);
+       if (IS_ERR(rule))
+               return PTR_ERR(rule);
 
-       ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
-       if (!ns) {
-               mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
-               err = -EOPNOTSUPP;
-               goto err_ns;
+       *ucast = rule;
+
+       memset(dmac_c, 0, ETH_ALEN);
+       memset(dmac_v, 0, ETH_ALEN);
+       dmac_c[0] = 1;
+       dmac_v[0] = 1;
+       rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1);
+       kvfree(spec);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               goto err_mcast;
        }
 
-       ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
-       if (IS_ERR(ndev->rxft)) {
-               err = PTR_ERR(ndev->rxft);
-               goto err_ns;
+       *mcast = rule;
+       return 0;
+
+err_mcast:
+       mlx5_del_flow_rules(*ucast);
+       return err;
+}
+
+static void mlx5_vdpa_del_mac_vlan_rules(struct mlx5_vdpa_net *ndev,
+                                        struct mlx5_flow_handle *ucast,
+                                        struct mlx5_flow_handle *mcast)
+{
+       mlx5_del_flow_rules(ucast);
+       mlx5_del_flow_rules(mcast);
+}
+
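+/*
+ * Pack the VLAN id (MLX5V_UNTAGGED for untagged traffic) and the MAC address
+ * into the 64-bit key used by the macvlan hash table.
+ */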
+static u64 search_val(u8 *mac, u16 vlan, bool tagged)
+{
+       u64 val;
+
+       if (!tagged)
+               vlan = MLX5V_UNTAGGED;
+
+       val = (u64)vlan << 48 |
+             (u64)mac[0] << 40 |
+             (u64)mac[1] << 32 |
+             (u64)mac[2] << 24 |
+             (u64)mac[3] << 16 |
+             (u64)mac[4] << 8 |
+             (u64)mac[5];
+
+       return val;
+}
+
+static struct macvlan_node *mac_vlan_lookup(struct mlx5_vdpa_net *ndev, u64 value)
+{
+       struct macvlan_node *pos;
+       u32 idx;
+
+       idx = hash_64(value, 8); /* 8 bits: MLX5V_MACVLAN_SIZE buckets */
+       hlist_for_each_entry(pos, &ndev->macvlan_hash[idx], hlist) {
+               if (pos->macvlan == value)
+                       return pos;
        }
+       return NULL;
+}
+
+static int mac_vlan_add(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged)
+{
+       struct macvlan_node *ptr;
+       u64 val;
+       u32 idx;
+       int err;
+
+       val = search_val(mac, vlan, tagged);
+       if (mac_vlan_lookup(ndev, val))
+               return -EEXIST;
+
+       ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+
+       err = mlx5_vdpa_add_mac_vlan_rules(ndev, mac, vlan, tagged,
+                                          &ptr->ucast_rule, &ptr->mcast_rule);
+       if (err)
+               goto err_add;
+
+       ptr->macvlan = val;
+       idx = hash_64(val, 8);
+       hlist_add_head(&ptr->hlist, &ndev->macvlan_hash[idx]);
+       return 0;
+
+err_add:
+       kfree(ptr);
+       return err;
+}
+
+static void mac_vlan_del(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged)
+{
+       struct macvlan_node *ptr;
+
+       ptr = mac_vlan_lookup(ndev, search_val(mac, vlan, tagged));
+       if (!ptr)
+               return;
+
+       hlist_del(&ptr->hlist);
+       mlx5_vdpa_del_mac_vlan_rules(ndev, ptr->ucast_rule, ptr->mcast_rule);
+       kfree(ptr);
+}
+
+static void clear_mac_vlan_table(struct mlx5_vdpa_net *ndev)
+{
+       struct macvlan_node *pos;
+       struct hlist_node *n;
+       int i;
 
-       ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false);
-       if (IS_ERR(ndev->rx_counter)) {
-               err = PTR_ERR(ndev->rx_counter);
-               goto err_fc;
+       for (i = 0; i < MLX5V_MACVLAN_SIZE; i++) {
+               hlist_for_each_entry_safe(pos, n, &ndev->macvlan_hash[i], hlist) {
+                       hlist_del(&pos->hlist);
+                       mlx5_vdpa_del_mac_vlan_rules(ndev, pos->ucast_rule, pos->mcast_rule);
+                       kfree(pos);
+               }
        }
+}
 
-       headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
-       dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
-       memset(dmac_c, 0xff, ETH_ALEN);
-       headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
-       dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
-       ether_addr_copy(dmac_v, ndev->config.mac);
+static int setup_steering(struct mlx5_vdpa_net *ndev)
+{
+       struct mlx5_flow_table_attr ft_attr = {};
+       struct mlx5_flow_namespace *ns;
+       int err;
 
-       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT;
-       dest[0].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-       dest[0].tir_num = ndev->res.tirn;
-       dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
-       dest[1].counter_id = mlx5_fc_id(ndev->rx_counter);
-       ndev->rx_rule_ucast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dest, 2);
+       ft_attr.max_fte = MAX_STEERING_ENT;
+       ft_attr.autogroup.max_num_groups = MAX_STEERING_GROUPS;
 
-       if (IS_ERR(ndev->rx_rule_ucast)) {
-               err = PTR_ERR(ndev->rx_rule_ucast);
-               ndev->rx_rule_ucast = NULL;
-               goto err_rule_ucast;
+       ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
+       if (!ns) {
+               mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
+               return -EOPNOTSUPP;
        }
 
-       memset(dmac_c, 0, ETH_ALEN);
-       memset(dmac_v, 0, ETH_ALEN);
-       dmac_c[0] = 1;
-       dmac_v[0] = 1;
-       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-       ndev->rx_rule_mcast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dest, 1);
-       if (IS_ERR(ndev->rx_rule_mcast)) {
-               err = PTR_ERR(ndev->rx_rule_mcast);
-               ndev->rx_rule_mcast = NULL;
-               goto err_rule_mcast;
+       ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+       if (IS_ERR(ndev->rxft)) {
+               mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n");
+               return PTR_ERR(ndev->rxft);
        }
 
-       kvfree(spec);
+       err = mac_vlan_add(ndev, ndev->config.mac, 0, false);
+       if (err)
+               goto err_add;
+
        return 0;
 
-err_rule_mcast:
-       mlx5_del_flow_rules(ndev->rx_rule_ucast);
-       ndev->rx_rule_ucast = NULL;
-err_rule_ucast:
-       mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
-err_fc:
+err_add:
        mlx5_destroy_flow_table(ndev->rxft);
-err_ns:
-       kvfree(spec);
        return err;
 }
 
-static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
+static void teardown_steering(struct mlx5_vdpa_net *ndev)
 {
-       if (!ndev->rx_rule_ucast)
-               return;
-
-       mlx5_del_flow_rules(ndev->rx_rule_mcast);
-       ndev->rx_rule_mcast = NULL;
-       mlx5_del_flow_rules(ndev->rx_rule_ucast);
-       ndev->rx_rule_ucast = NULL;
-       mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
+       clear_mac_vlan_table(ndev);
        mlx5_destroy_flow_table(ndev->rxft);
 }
 
@@ -1494,9 +1658,9 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 
                /* Need recreate the flow table entry, so that the packet could forward back
                 */
-               remove_fwd_to_tir(ndev);
+               mac_vlan_del(ndev, ndev->config.mac, 0, false);
 
-               if (add_fwd_to_tir(ndev)) {
+               if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) {
                        mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
 
                        /* Although it hardly run here, we still need double check */
@@ -1520,7 +1684,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 
                        memcpy(ndev->config.mac, mac_back, ETH_ALEN);
 
-                       if (add_fwd_to_tir(ndev))
+                       if (mac_vlan_add(ndev, ndev->config.mac, 0, false))
                                mlx5_vdpa_warn(mvdev, "restore forward rules failed: insert forward rules failed\n");
 
                        break;
@@ -1622,6 +1786,42 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
        return status;
 }
 
+static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
+{
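+       /*
+        * VIRTIO_NET_CTRL_VLAN: add or remove the steering rules for the
+        * given VLAN id on the current device MAC.
+        */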
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+       struct mlx5_control_vq *cvq = &mvdev->cvq;
+       __virtio16 vlan;
+       size_t read;
+       u16 id;
+
+       switch (cmd) {
+       case VIRTIO_NET_CTRL_VLAN_ADD:
+               read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
+               if (read != sizeof(vlan))
+                       break;
+
+               id = mlx5vdpa16_to_cpu(mvdev, vlan);
+               if (mac_vlan_add(ndev, ndev->config.mac, id, true))
+                       break;
+
+               status = VIRTIO_NET_OK;
+               break;
+       case VIRTIO_NET_CTRL_VLAN_DEL:
+               read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
+               if (read != sizeof(vlan))
+                       break;
+
+               id = mlx5vdpa16_to_cpu(mvdev, vlan);
+               mac_vlan_del(ndev, ndev->config.mac, id, true);
+               break;
+       default:
+               break;
+       }
+
+       return status;
+}
+
 static void mlx5_cvq_kick_handler(struct work_struct *work)
 {
        virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
@@ -1638,7 +1838,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
        ndev = to_mlx5_vdpa_ndev(mvdev);
        cvq = &mvdev->cvq;
 
-       mutex_lock(&ndev->reslock);
+       down_write(&ndev->reslock);
 
        if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
                goto out;
@@ -1659,6 +1859,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
                if (read != sizeof(ctrl))
                        break;
 
+               cvq->received_desc++;
                switch (ctrl.class) {
                case VIRTIO_NET_CTRL_MAC:
                        status = handle_ctrl_mac(mvdev, ctrl.cmd);
@@ -1666,7 +1867,9 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
                case VIRTIO_NET_CTRL_MQ:
                        status = handle_ctrl_mq(mvdev, ctrl.cmd);
                        break;
-
+               case VIRTIO_NET_CTRL_VLAN:
+                       status = handle_ctrl_vlan(mvdev, ctrl.cmd);
+                       break;
                default:
                        break;
                }
@@ -1682,12 +1885,13 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
                if (vringh_need_notify_iotlb(&cvq->vring))
                        vringh_notify(&cvq->vring);
 
+               cvq->completed_desc++;
                queue_work(mvdev->wq, &wqent->work);
                break;
        }
 
 out:
-       mutex_unlock(&ndev->reslock);
+       up_write(&ndev->reslock);
 }
 
 static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
@@ -1888,6 +2092,11 @@ static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
        return PAGE_SIZE;
 }
 
+static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+{
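+       /* a single virtqueue group is exposed; all VQs map to group 0 */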
+       return 0;
+}
+
 enum { MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
        MLX5_VIRTIO_NET_F_CSUM = 1 << 10,
        MLX5_VIRTIO_NET_F_HOST_TSO6 = 1 << 11,
@@ -1925,6 +2134,7 @@ static u64 get_supported_features(struct mlx5_core_dev *mdev)
        mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MQ);
        mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
        mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MTU);
+       mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VLAN);
 
        return mlx_vdpa_features;
 }
@@ -2185,7 +2395,7 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        int err;
 
-       WARN_ON(!mutex_is_locked(&ndev->reslock));
+       WARN_ON(!rwsem_is_locked(&ndev->reslock));
 
        if (ndev->setup) {
                mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
@@ -2210,9 +2420,9 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
                goto err_tir;
        }
 
-       err = add_fwd_to_tir(ndev);
+       err = setup_steering(ndev);
        if (err) {
-               mlx5_vdpa_warn(mvdev, "add_fwd_to_tir\n");
+               mlx5_vdpa_warn(mvdev, "setup_steering failed\n");
                goto err_fwd;
        }
        ndev->setup = true;
@@ -2233,12 +2443,12 @@ out:
 static void teardown_driver(struct mlx5_vdpa_net *ndev)
 {
 
-       WARN_ON(!mutex_is_locked(&ndev->reslock));
+       WARN_ON(!rwsem_is_locked(&ndev->reslock));
 
        if (!ndev->setup)
                return;
 
-       remove_fwd_to_tir(ndev);
+       teardown_steering(ndev);
        destroy_tir(ndev);
        destroy_rqt(ndev);
        teardown_virtqueues(ndev);
@@ -2263,7 +2473,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 
        print_status(mvdev, status, true);
 
-       mutex_lock(&ndev->reslock);
+       down_write(&ndev->reslock);
 
        if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
@@ -2279,14 +2489,14 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
        }
 
        ndev->mvdev.status = status;
-       mutex_unlock(&ndev->reslock);
+       up_write(&ndev->reslock);
        return;
 
 err_setup:
        mlx5_vdpa_destroy_mr(&ndev->mvdev);
        ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
 err_clear:
-       mutex_unlock(&ndev->reslock);
+       up_write(&ndev->reslock);
 }
 
 static int mlx5_vdpa_reset(struct vdpa_device *vdev)
@@ -2297,12 +2507,14 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
        print_status(mvdev, 0, true);
        mlx5_vdpa_info(mvdev, "performing device reset\n");
 
-       mutex_lock(&ndev->reslock);
+       down_write(&ndev->reslock);
        teardown_driver(ndev);
        clear_vqs_ready(ndev);
        mlx5_vdpa_destroy_mr(&ndev->mvdev);
        ndev->mvdev.status = 0;
        ndev->cur_num_vqs = 0;
+       ndev->mvdev.cvq.received_desc = 0;
+       ndev->mvdev.cvq.completed_desc = 0;
        memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
        ndev->mvdev.actual_features = 0;
        ++mvdev->generation;
@@ -2310,7 +2522,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
                if (mlx5_vdpa_create_mr(mvdev, NULL))
                        mlx5_vdpa_warn(mvdev, "create MR failed\n");
        }
-       mutex_unlock(&ndev->reslock);
+       up_write(&ndev->reslock);
 
        return 0;
 }
@@ -2343,14 +2555,15 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
        return mvdev->generation;
 }
 
-static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
+static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
+                            struct vhost_iotlb *iotlb)
 {
        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        bool change_map;
        int err;
 
-       mutex_lock(&ndev->reslock);
+       down_write(&ndev->reslock);
 
        err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
        if (err) {
@@ -2362,7 +2575,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
                err = mlx5_vdpa_change_map(mvdev, iotlb);
 
 err:
-       mutex_unlock(&ndev->reslock);
+       up_write(&ndev->reslock);
        return err;
 }
 
@@ -2381,7 +2594,6 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
                mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
        }
        mlx5_vdpa_free_resources(&ndev->mvdev);
-       mutex_destroy(&ndev->reslock);
        kfree(ndev->event_cbs);
        kfree(ndev->vqs);
 }
@@ -2422,6 +2634,93 @@ static u64 mlx5_vdpa_get_driver_features(struct vdpa_device *vdev)
        return mvdev->actual_features;
 }
 
+static int counter_set_query(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
+                            u64 *received_desc, u64 *completed_desc)
+{
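+       /*
+        * Read the received/completed descriptor counters from the VQ's
+        * hardware counter object; only valid while the VQ is ready.
+        */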
+       u32 in[MLX5_ST_SZ_DW(query_virtio_q_counters_in)] = {};
+       u32 out[MLX5_ST_SZ_DW(query_virtio_q_counters_out)] = {};
+       void *cmd_hdr;
+       void *ctx;
+       int err;
+
+       if (!counters_supported(&ndev->mvdev))
+               return -EOPNOTSUPP;
+
+       if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
+               return -EAGAIN;
+
+       cmd_hdr = MLX5_ADDR_OF(query_virtio_q_counters_in, in, hdr);
+
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->counter_set_id);
+
+       err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out));
+       if (err)
+               return err;
+
+       ctx = MLX5_ADDR_OF(query_virtio_q_counters_out, out, counters);
+       *received_desc = MLX5_GET64(virtio_q_counters, ctx, received_desc);
+       *completed_desc = MLX5_GET64(virtio_q_counters, ctx, completed_desc);
+       return 0;
+}
+
+static int mlx5_vdpa_get_vendor_vq_stats(struct vdpa_device *vdev, u16 idx,
+                                        struct sk_buff *msg,
+                                        struct netlink_ext_ack *extack)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       struct mlx5_vdpa_virtqueue *mvq;
+       struct mlx5_control_vq *cvq;
+       u64 received_desc;
+       u64 completed_desc;
+       int err = 0;
+
+       down_read(&ndev->reslock);
+       if (!is_index_valid(mvdev, idx)) {
+               NL_SET_ERR_MSG_MOD(extack, "virtqueue index is not valid");
+               err = -EINVAL;
+               goto out_err;
+       }
+
+       if (idx == ctrl_vq_idx(mvdev)) {
+               cvq = &mvdev->cvq;
+               received_desc = cvq->received_desc;
+               completed_desc = cvq->completed_desc;
+               goto out;
+       }
+
+       mvq = &ndev->vqs[idx];
+       err = counter_set_query(ndev, mvq, &received_desc, &completed_desc);
+       if (err) {
+               NL_SET_ERR_MSG_MOD(extack, "failed to query hardware");
+               goto out_err;
+       }
+
+out:
+       err = -EMSGSIZE;
+       if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "received_desc"))
+               goto out_err;
+
+       if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, received_desc,
+                             VDPA_ATTR_PAD))
+               goto out_err;
+
+       if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "completed_desc"))
+               goto out_err;
+
+       if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, completed_desc,
+                             VDPA_ATTR_PAD))
+               goto out_err;
+
+       err = 0;
+out_err:
+       up_read(&ndev->reslock);
+       return err;
+}
+
 static const struct vdpa_config_ops mlx5_vdpa_ops = {
        .set_vq_address = mlx5_vdpa_set_vq_address,
        .set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2431,9 +2730,11 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
        .get_vq_ready = mlx5_vdpa_get_vq_ready,
        .set_vq_state = mlx5_vdpa_set_vq_state,
        .get_vq_state = mlx5_vdpa_get_vq_state,
+       .get_vendor_vq_stats = mlx5_vdpa_get_vendor_vq_stats,
        .get_vq_notification = mlx5_get_vq_notification,
        .get_vq_irq = mlx5_get_vq_irq,
        .get_vq_align = mlx5_vdpa_get_vq_align,
+       .get_vq_group = mlx5_vdpa_get_vq_group,
        .get_device_features = mlx5_vdpa_get_device_features,
        .set_driver_features = mlx5_vdpa_set_driver_features,
        .get_driver_features = mlx5_vdpa_get_driver_features,
@@ -2669,7 +2970,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
        }
 
        ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
-                                name, false);
+                                1, 1, name, false);
        if (IS_ERR(ndev))
                return PTR_ERR(ndev);
 
@@ -2686,18 +2987,18 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
        }
 
        init_mvqs(ndev);
-       mutex_init(&ndev->reslock);
+       init_rwsem(&ndev->reslock);
        config = &ndev->config;
 
        if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)) {
                err = config_func_mtu(mdev, add_config->net.mtu);
                if (err)
-                       goto err_mtu;
+                       goto err_alloc;
        }
 
        err = query_mtu(mdev, &mtu);
        if (err)
-               goto err_mtu;
+               goto err_alloc;
 
        ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu);
 
@@ -2711,14 +3012,14 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
        } else {
                err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
                if (err)
-                       goto err_mtu;
+                       goto err_alloc;
        }
 
        if (!is_zero_ether_addr(config->mac)) {
                pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
                err = mlx5_mpfs_add_mac(pfmdev, config->mac);
                if (err)
-                       goto err_mtu;
+                       goto err_alloc;
 
                ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC);
        }
@@ -2768,8 +3069,6 @@ err_res:
 err_mpfs:
        if (!is_zero_ether_addr(config->mac))
                mlx5_mpfs_del_mac(pfmdev, config->mac);
-err_mtu:
-       mutex_destroy(&ndev->reslock);
 err_alloc:
        put_device(&mvdev->vdev.dev);
        return err;
index 2b75c00..ebf2f36 100644 (file)
 
 static LIST_HEAD(mdev_head);
 /* A global mutex that protects vdpa management device and device level operations. */
-static DEFINE_MUTEX(vdpa_dev_mutex);
+static DECLARE_RWSEM(vdpa_dev_lock);
 static DEFINE_IDA(vdpa_index_ida);
 
 void vdpa_set_status(struct vdpa_device *vdev, u8 status)
 {
-       mutex_lock(&vdev->cf_mutex);
+       down_write(&vdev->cf_lock);
        vdev->config->set_status(vdev, status);
-       mutex_unlock(&vdev->cf_mutex);
+       up_write(&vdev->cf_lock);
 }
 EXPORT_SYMBOL(vdpa_set_status);
 
@@ -77,32 +77,11 @@ static ssize_t driver_override_store(struct device *dev,
                                     const char *buf, size_t count)
 {
        struct vdpa_device *vdev = dev_to_vdpa(dev);
-       const char *driver_override, *old;
-       char *cp;
+       int ret;
 
-       /* We need to keep extra room for a newline */
-       if (count >= (PAGE_SIZE - 1))
-               return -EINVAL;
-
-       driver_override = kstrndup(buf, count, GFP_KERNEL);
-       if (!driver_override)
-               return -ENOMEM;
-
-       cp = strchr(driver_override, '\n');
-       if (cp)
-               *cp = '\0';
-
-       device_lock(dev);
-       old = vdev->driver_override;
-       if (strlen(driver_override)) {
-               vdev->driver_override = driver_override;
-       } else {
-               kfree(driver_override);
-               vdev->driver_override = NULL;
-       }
-       device_unlock(dev);
-
-       kfree(old);
+       ret = driver_set_override(dev, &vdev->driver_override, buf, count);
+       if (ret)
+               return ret;
 
        return count;
 }
@@ -148,7 +127,6 @@ static void vdpa_release_dev(struct device *d)
                ops->free(vdev);
 
        ida_simple_remove(&vdpa_index_ida, vdev->index);
-       mutex_destroy(&vdev->cf_mutex);
        kfree(vdev->driver_override);
        kfree(vdev);
 }
@@ -159,6 +137,8 @@ static void vdpa_release_dev(struct device *d)
  * initialized but before registered.
  * @parent: the parent device
  * @config: the bus operations that is supported by this device
+ * @ngroups: number of groups supported by this device
+ * @nas: number of address spaces supported by this device
  * @size: size of the parent structure that contains private data
  * @name: name of the vdpa device; optional.
  * @use_va: indicate whether virtual address must be used by this device
@@ -171,6 +151,7 @@ static void vdpa_release_dev(struct device *d)
  */
 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
                                        const struct vdpa_config_ops *config,
+                                       unsigned int ngroups, unsigned int nas,
                                        size_t size, const char *name,
                                        bool use_va)
 {
@@ -203,6 +184,8 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
        vdev->config = config;
        vdev->features_valid = false;
        vdev->use_va = use_va;
+       vdev->ngroups = ngroups;
+       vdev->nas = nas;
 
        if (name)
                err = dev_set_name(&vdev->dev, "%s", name);
@@ -211,7 +194,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
        if (err)
                goto err_name;
 
-       mutex_init(&vdev->cf_mutex);
+       init_rwsem(&vdev->cf_lock);
        device_initialize(&vdev->dev);
 
        return vdev;
@@ -238,7 +221,7 @@ static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
 
        vdev->nvqs = nvqs;
 
-       lockdep_assert_held(&vdpa_dev_mutex);
+       lockdep_assert_held(&vdpa_dev_lock);
        dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
        if (dev) {
                put_device(dev);
@@ -278,9 +261,9 @@ int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
 {
        int err;
 
-       mutex_lock(&vdpa_dev_mutex);
+       down_write(&vdpa_dev_lock);
        err = __vdpa_register_device(vdev, nvqs);
-       mutex_unlock(&vdpa_dev_mutex);
+       up_write(&vdpa_dev_lock);
        return err;
 }
 EXPORT_SYMBOL_GPL(vdpa_register_device);
@@ -293,7 +276,7 @@ EXPORT_SYMBOL_GPL(vdpa_register_device);
  */
 void _vdpa_unregister_device(struct vdpa_device *vdev)
 {
-       lockdep_assert_held(&vdpa_dev_mutex);
+       lockdep_assert_held(&vdpa_dev_lock);
        WARN_ON(!vdev->mdev);
        device_unregister(&vdev->dev);
 }
@@ -305,9 +288,9 @@ EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
  */
 void vdpa_unregister_device(struct vdpa_device *vdev)
 {
-       mutex_lock(&vdpa_dev_mutex);
+       down_write(&vdpa_dev_lock);
        device_unregister(&vdev->dev);
-       mutex_unlock(&vdpa_dev_mutex);
+       up_write(&vdpa_dev_lock);
 }
 EXPORT_SYMBOL_GPL(vdpa_unregister_device);
 
@@ -352,9 +335,9 @@ int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
                return -EINVAL;
 
        INIT_LIST_HEAD(&mdev->list);
-       mutex_lock(&vdpa_dev_mutex);
+       down_write(&vdpa_dev_lock);
        list_add_tail(&mdev->list, &mdev_head);
-       mutex_unlock(&vdpa_dev_mutex);
+       up_write(&vdpa_dev_lock);
        return 0;
 }
 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
@@ -371,14 +354,14 @@ static int vdpa_match_remove(struct device *dev, void *data)
 
 void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
 {
-       mutex_lock(&vdpa_dev_mutex);
+       down_write(&vdpa_dev_lock);
 
        list_del(&mdev->list);
 
        /* Filter out all the entries belong to this management device and delete it. */
        bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);
 
-       mutex_unlock(&vdpa_dev_mutex);
+       up_write(&vdpa_dev_lock);
 }
 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
 
@@ -407,9 +390,9 @@ static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
 void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
                     void *buf, unsigned int len)
 {
-       mutex_lock(&vdev->cf_mutex);
+       down_read(&vdev->cf_lock);
        vdpa_get_config_unlocked(vdev, offset, buf, len);
-       mutex_unlock(&vdev->cf_mutex);
+       up_read(&vdev->cf_lock);
 }
 EXPORT_SYMBOL_GPL(vdpa_get_config);
 
@@ -423,9 +406,9 @@ EXPORT_SYMBOL_GPL(vdpa_get_config);
 void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
                     const void *buf, unsigned int length)
 {
-       mutex_lock(&vdev->cf_mutex);
+       down_write(&vdev->cf_lock);
        vdev->config->set_config(vdev, offset, buf, length);
-       mutex_unlock(&vdev->cf_mutex);
+       up_write(&vdev->cf_lock);
 }
 EXPORT_SYMBOL_GPL(vdpa_set_config);
 
@@ -532,17 +515,17 @@ static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *i
        if (!msg)
                return -ENOMEM;
 
-       mutex_lock(&vdpa_dev_mutex);
+       down_read(&vdpa_dev_lock);
        mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
        if (IS_ERR(mdev)) {
-               mutex_unlock(&vdpa_dev_mutex);
+               up_read(&vdpa_dev_lock);
                NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
                err = PTR_ERR(mdev);
                goto out;
        }
 
        err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
-       mutex_unlock(&vdpa_dev_mutex);
+       up_read(&vdpa_dev_lock);
        if (err)
                goto out;
        err = genlmsg_reply(msg, info);
@@ -561,7 +544,7 @@ vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
        int idx = 0;
        int err;
 
-       mutex_lock(&vdpa_dev_mutex);
+       down_read(&vdpa_dev_lock);
        list_for_each_entry(mdev, &mdev_head, list) {
                if (idx < start) {
                        idx++;
@@ -574,7 +557,7 @@ vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
                idx++;
        }
 out:
-       mutex_unlock(&vdpa_dev_mutex);
+       up_read(&vdpa_dev_lock);
        cb->args[0] = idx;
        return msg->len;
 }
@@ -627,7 +610,7 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
            !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
-       mutex_lock(&vdpa_dev_mutex);
+       down_write(&vdpa_dev_lock);
        mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
        if (IS_ERR(mdev)) {
                NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
@@ -643,7 +626,7 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
 
        err = mdev->ops->dev_add(mdev, name, &config);
 err:
-       mutex_unlock(&vdpa_dev_mutex);
+       up_write(&vdpa_dev_lock);
        return err;
 }
 
@@ -659,7 +642,7 @@ static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *i
                return -EINVAL;
        name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
 
-       mutex_lock(&vdpa_dev_mutex);
+       down_write(&vdpa_dev_lock);
        dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
        if (!dev) {
                NL_SET_ERR_MSG_MOD(info->extack, "device not found");
@@ -677,7 +660,7 @@ static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *i
 mdev_err:
        put_device(dev);
 dev_err:
-       mutex_unlock(&vdpa_dev_mutex);
+       up_write(&vdpa_dev_lock);
        return err;
 }
 
@@ -743,7 +726,7 @@ static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       mutex_lock(&vdpa_dev_mutex);
+       down_read(&vdpa_dev_lock);
        dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
        if (!dev) {
                NL_SET_ERR_MSG_MOD(info->extack, "device not found");
@@ -756,14 +739,19 @@ static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
                goto mdev_err;
        }
        err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
-       if (!err)
-               err = genlmsg_reply(msg, info);
+       if (err)
+               goto mdev_err;
+
+       err = genlmsg_reply(msg, info);
+       put_device(dev);
+       up_read(&vdpa_dev_lock);
+       return err;
+
 mdev_err:
        put_device(dev);
 err:
-       mutex_unlock(&vdpa_dev_mutex);
-       if (err)
-               nlmsg_free(msg);
+       up_read(&vdpa_dev_lock);
+       nlmsg_free(msg);
        return err;
 }
 
@@ -804,9 +792,9 @@ static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callba
        info.start_idx = cb->args[0];
        info.idx = 0;
 
-       mutex_lock(&vdpa_dev_mutex);
+       down_read(&vdpa_dev_lock);
        bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
-       mutex_unlock(&vdpa_dev_mutex);
+       up_read(&vdpa_dev_lock);
        cb->args[0] = info.idx;
        return msg->len;
 }
@@ -861,7 +849,7 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
        u8 status;
        int err;
 
-       mutex_lock(&vdev->cf_mutex);
+       down_read(&vdev->cf_lock);
        status = vdev->config->get_status(vdev);
        if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
                NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
@@ -898,14 +886,116 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
        if (err)
                goto msg_err;
 
-       mutex_unlock(&vdev->cf_mutex);
+       up_read(&vdev->cf_lock);
        genlmsg_end(msg, hdr);
        return 0;
 
 msg_err:
        genlmsg_cancel(msg, hdr);
 out:
-       mutex_unlock(&vdev->cf_mutex);
+       up_read(&vdev->cf_lock);
+       return err;
+}
+
+static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
+                              struct genl_info *info, u32 index)
+{
+       struct virtio_net_config config = {};
+       u64 features;
+       u16 max_vqp;
+       u8 status;
+       int err;
+
+       status = vdev->config->get_status(vdev);
+       if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
+               NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
+               return -EAGAIN;
+       }
+       vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
+
+       max_vqp = le16_to_cpu(config.max_virtqueue_pairs);
+       if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
+               return -EMSGSIZE;
+
+       features = vdev->config->get_driver_features(vdev);
+       if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
+                             features, VDPA_ATTR_PAD))
+               return -EMSGSIZE;
+
+       if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
+               return -EMSGSIZE;
+
+       err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
+                            struct genl_info *info, u32 index)
+{
+       int err;
+
+       down_read(&vdev->cf_lock);
+       if (!vdev->config->get_vendor_vq_stats) {
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       err = vdpa_fill_stats_rec(vdev, msg, info, index);
+out:
+       up_read(&vdev->cf_lock);
+       return err;
+}
+
+static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
+                                     struct sk_buff *msg,
+                                     struct genl_info *info, u32 index)
+{
+       u32 device_id;
+       void *hdr;
+       int err;
+       u32 portid = info->snd_portid;
+       u32 seq = info->snd_seq;
+       u32 flags = 0;
+
+       hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
+                         VDPA_CMD_DEV_VSTATS_GET);
+       if (!hdr)
+               return -EMSGSIZE;
+
+       if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
+               err = -EMSGSIZE;
+               goto undo_msg;
+       }
+
+       device_id = vdev->config->get_device_id(vdev);
+       if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
+               err = -EMSGSIZE;
+               goto undo_msg;
+       }
+
+       switch (device_id) {
+       case VIRTIO_ID_NET:
+               if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
+                       NL_SET_ERR_MSG_MOD(info->extack, "queue index exceeds max value");
+                       err = -ERANGE;
+                       break;
+               }
+
+               err = vendor_stats_fill(vdev, msg, info, index);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+       genlmsg_end(msg, hdr);
+
+       return err;
+
+undo_msg:
+       genlmsg_cancel(msg, hdr);
        return err;
 }
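
[Editor's note] vdpa_dev_vendor_stats_fill() above follows the standard generic-netlink framing idiom: genlmsg_put() opens the message, nla_put_*() appends attributes, genlmsg_end() commits it, and genlmsg_cancel() unwinds a half-built message on error. A reduced sketch of that idiom with hypothetical command/attribute names:

#include <net/genetlink.h>

enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_DEV_NAME, DEMO_ATTR_QUEUE_INDEX };
#define DEMO_CMD_VSTATS_GET 1

static int demo_fill_vstats(struct sk_buff *msg,
			    const struct genl_family *family,
			    u32 portid, u32 seq, u32 queue_index)
{
	void *hdr;

	/* open the generic netlink header */
	hdr = genlmsg_put(msg, portid, seq, family, 0, DEMO_CMD_VSTATS_GET);
	if (!hdr)
		return -EMSGSIZE;

	/* append attributes; any failure means the skb ran out of room */
	if (nla_put_string(msg, DEMO_ATTR_DEV_NAME, "demo0") ||
	    nla_put_u32(msg, DEMO_ATTR_QUEUE_INDEX, queue_index))
		goto cancel;

	genlmsg_end(msg, hdr);		/* commit the message */
	return 0;

cancel:
	genlmsg_cancel(msg, hdr);	/* roll the partial message back */
	return -EMSGSIZE;
}
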
 
@@ -924,7 +1014,7 @@ static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info
        if (!msg)
                return -ENOMEM;
 
-       mutex_lock(&vdpa_dev_mutex);
+       down_read(&vdpa_dev_lock);
        dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
        if (!dev) {
                NL_SET_ERR_MSG_MOD(info->extack, "device not found");
@@ -945,7 +1035,7 @@ static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info
 mdev_err:
        put_device(dev);
 dev_err:
-       mutex_unlock(&vdpa_dev_mutex);
+       up_read(&vdpa_dev_lock);
        if (err)
                nlmsg_free(msg);
        return err;
@@ -983,13 +1073,67 @@ vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *
        info.start_idx = cb->args[0];
        info.idx = 0;
 
-       mutex_lock(&vdpa_dev_mutex);
+       down_read(&vdpa_dev_lock);
        bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
-       mutex_unlock(&vdpa_dev_mutex);
+       up_read(&vdpa_dev_lock);
        cb->args[0] = info.idx;
        return msg->len;
 }
 
+static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
+                                         struct genl_info *info)
+{
+       struct vdpa_device *vdev;
+       struct sk_buff *msg;
+       const char *devname;
+       struct device *dev;
+       u32 index;
+       int err;
+
+       if (!info->attrs[VDPA_ATTR_DEV_NAME])
+               return -EINVAL;
+
+       if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
+               return -EINVAL;
+
+       devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
+       down_read(&vdpa_dev_lock);
+       dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
+       if (!dev) {
+               NL_SET_ERR_MSG_MOD(info->extack, "device not found");
+               err = -ENODEV;
+               goto dev_err;
+       }
+       vdev = container_of(dev, struct vdpa_device, dev);
+       if (!vdev->mdev) {
+               NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
+               err = -EINVAL;
+               goto mdev_err;
+       }
+       err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
+       if (err)
+               goto mdev_err;
+
+       err = genlmsg_reply(msg, info);
+
+       put_device(dev);
+       up_read(&vdpa_dev_lock);
+
+       return err;
+
+mdev_err:
+       put_device(dev);
+dev_err:
+       nlmsg_free(msg);
+       up_read(&vdpa_dev_lock);
+       return err;
+}
+
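
[Editor's note] The new VSTATS doit handler above repeats the lookup discipline used by the other handlers: resolve the device by name on the vdpa bus under vdpa_dev_lock, convert the embedded struct device back to its vdpa_device with container_of(), and drop the reference that bus_find_device() took on every exit path. A sketch of just the lookup/refcount part (hypothetical match helper; locking and error reporting trimmed):

#include <linux/device.h>
#include <linux/string.h>
#include <linux/vdpa.h>

static int demo_name_match(struct device *dev, const void *data)
{
	return strcmp(dev_name(dev), data) == 0;
}

/* returns a referenced device; caller must put_device(&vdev->dev) when done */
static struct vdpa_device *demo_lookup(struct bus_type *bus, const char *name)
{
	struct device *dev;

	dev = bus_find_device(bus, NULL, name, demo_name_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct vdpa_device, dev);
}
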
 static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
        [VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
        [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
@@ -1030,6 +1174,12 @@ static const struct genl_ops vdpa_nl_ops[] = {
                .doit = vdpa_nl_cmd_dev_config_get_doit,
                .dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
        },
+       {
+               .cmd = VDPA_CMD_DEV_VSTATS_GET,
+               .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+               .doit = vdpa_nl_cmd_dev_stats_get_doit,
+               .flags = GENL_ADMIN_PERM,
+       },
 };
 
 static struct genl_family vdpa_nl_family __ro_after_init = {
index ddbe142..0f28658 100644 (file)
@@ -96,11 +96,17 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim)
 {
        int i;
 
-       for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
+       spin_lock(&vdpasim->iommu_lock);
+
+       for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
                vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
+               vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
+                                &vdpasim->iommu_lock);
+       }
+
+       for (i = 0; i < vdpasim->dev_attr.nas; i++)
+               vhost_iotlb_reset(&vdpasim->iommu[i]);
 
-       spin_lock(&vdpasim->iommu_lock);
-       vhost_iotlb_reset(vdpasim->iommu);
        spin_unlock(&vdpasim->iommu_lock);
 
        vdpasim->features = 0;
@@ -145,7 +151,7 @@ static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
        dma_addr = iova_dma_addr(&vdpasim->iova, iova);
 
        spin_lock(&vdpasim->iommu_lock);
-       ret = vhost_iotlb_add_range(vdpasim->iommu, (u64)dma_addr,
+       ret = vhost_iotlb_add_range(&vdpasim->iommu[0], (u64)dma_addr,
                                    (u64)dma_addr + size - 1, (u64)paddr, perm);
        spin_unlock(&vdpasim->iommu_lock);
 
@@ -161,7 +167,7 @@ static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
                                size_t size)
 {
        spin_lock(&vdpasim->iommu_lock);
-       vhost_iotlb_del_range(vdpasim->iommu, (u64)dma_addr,
+       vhost_iotlb_del_range(&vdpasim->iommu[0], (u64)dma_addr,
                              (u64)dma_addr + size - 1);
        spin_unlock(&vdpasim->iommu_lock);
 
@@ -251,6 +257,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
                ops = &vdpasim_config_ops;
 
        vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
+                                   dev_attr->ngroups, dev_attr->nas,
                                    dev_attr->name, false);
        if (IS_ERR(vdpasim)) {
                ret = PTR_ERR(vdpasim);
@@ -278,16 +285,20 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
        if (!vdpasim->vqs)
                goto err_iommu;
 
-       vdpasim->iommu = vhost_iotlb_alloc(max_iotlb_entries, 0);
+       vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
+                                      sizeof(*vdpasim->iommu), GFP_KERNEL);
        if (!vdpasim->iommu)
                goto err_iommu;
 
+       for (i = 0; i < vdpasim->dev_attr.nas; i++)
+               vhost_iotlb_init(&vdpasim->iommu[i], 0, 0);
+
        vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
        if (!vdpasim->buffer)
                goto err_iommu;
 
        for (i = 0; i < dev_attr->nvqs; i++)
-               vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu,
+               vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
                                 &vdpasim->iommu_lock);
 
        ret = iova_cache_get();
@@ -353,11 +364,14 @@ static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
 {
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+       bool old_ready;
 
        spin_lock(&vdpasim->lock);
+       old_ready = vq->ready;
        vq->ready = ready;
-       if (vq->ready)
+       if (vq->ready && !old_ready) {
                vdpasim_queue_ready(vdpasim, idx);
+       }
        spin_unlock(&vdpasim->lock);
 }
 
@@ -399,6 +413,15 @@ static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
        return VDPASIM_QUEUE_ALIGN;
 }
 
+static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+{
+       /* RX and TX belong to group 0, CVQ belongs to group 1 */
+       if (idx == 2)
+               return 1;
+       else
+               return 0;
+}
+
 static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
 {
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -534,20 +557,53 @@ static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
        return range;
 }
 
-static int vdpasim_set_map(struct vdpa_device *vdpa,
+static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
+                                 unsigned int asid)
+{
+       struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+       struct vhost_iotlb *iommu;
+       int i;
+
+       if (group > vdpasim->dev_attr.ngroups)
+               return -EINVAL;
+
+       if (asid >= vdpasim->dev_attr.nas)
+               return -EINVAL;
+
+       iommu = &vdpasim->iommu[asid];
+
+       spin_lock(&vdpasim->lock);
+
+       for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
+               if (vdpasim_get_vq_group(vdpa, i) == group)
+                       vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
+                                        &vdpasim->iommu_lock);
+
+       spin_unlock(&vdpasim->lock);
+
+       return 0;
+}
+
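
[Editor's note] vdpasim_set_group_asid() above is the core of the simulator's address-space support: each virtqueue belongs to a group (RX/TX in group 0, CVQ in group 1 per vdpasim_get_vq_group()), each group is bound to one address space ID, and each ASID owns its own IOTLB, so control-queue translations can come from a different mapping than the data path. A toy, standalone illustration of that lookup chain (all names hypothetical):

#define DEMO_NVQS	3
#define DEMO_NGROUPS	2
#define DEMO_NAS	2

struct demo_as { int id; };	/* stand-in for a per-ASID struct vhost_iotlb */

static const unsigned int vq_to_group[DEMO_NVQS] = { 0, 0, 1 }; /* rx, tx, cvq */
static unsigned int group_to_asid[DEMO_NGROUPS] = { 0, 0 };
static struct demo_as address_space[DEMO_NAS] = { { 0 }, { 1 } };

/* which IOTLB should translate buffers for virtqueue @vq? */
static struct demo_as *demo_iotlb_for_vq(unsigned int vq)
{
	return &address_space[group_to_asid[vq_to_group[vq]]];
}

/* equivalent of .set_group_asid: rebind a whole group to another AS */
static int demo_set_group_asid(unsigned int group, unsigned int asid)
{
	if (group >= DEMO_NGROUPS || asid >= DEMO_NAS)
		return -1;
	group_to_asid[group] = asid;
	return 0;
}
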
+static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
                           struct vhost_iotlb *iotlb)
 {
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vhost_iotlb_map *map;
+       struct vhost_iotlb *iommu;
        u64 start = 0ULL, last = 0ULL - 1;
        int ret;
 
+       if (asid >= vdpasim->dev_attr.nas)
+               return -EINVAL;
+
        spin_lock(&vdpasim->iommu_lock);
-       vhost_iotlb_reset(vdpasim->iommu);
+
+       iommu = &vdpasim->iommu[asid];
+       vhost_iotlb_reset(iommu);
 
        for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
             map = vhost_iotlb_itree_next(map, start, last)) {
-               ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
+               ret = vhost_iotlb_add_range(iommu, map->start,
                                            map->last, map->addr, map->perm);
                if (ret)
                        goto err;
@@ -556,31 +612,39 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
        return 0;
 
 err:
-       vhost_iotlb_reset(vdpasim->iommu);
+       vhost_iotlb_reset(iommu);
        spin_unlock(&vdpasim->iommu_lock);
        return ret;
 }
 
-static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
+static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
+                          u64 iova, u64 size,
                           u64 pa, u32 perm, void *opaque)
 {
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        int ret;
 
+       if (asid >= vdpasim->dev_attr.nas)
+               return -EINVAL;
+
        spin_lock(&vdpasim->iommu_lock);
-       ret = vhost_iotlb_add_range_ctx(vdpasim->iommu, iova, iova + size - 1,
-                                       pa, perm, opaque);
+       ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
+                                       iova + size - 1, pa, perm, opaque);
        spin_unlock(&vdpasim->iommu_lock);
 
        return ret;
 }
 
-static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
+static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
+                            u64 iova, u64 size)
 {
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
 
+       if (asid >= vdpasim->dev_attr.nas)
+               return -EINVAL;
+
        spin_lock(&vdpasim->iommu_lock);
-       vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
+       vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
        spin_unlock(&vdpasim->iommu_lock);
 
        return 0;
@@ -604,8 +668,7 @@ static void vdpasim_free(struct vdpa_device *vdpa)
        }
 
        kvfree(vdpasim->buffer);
-       if (vdpasim->iommu)
-               vhost_iotlb_free(vdpasim->iommu);
+       vhost_iotlb_free(vdpasim->iommu);
        kfree(vdpasim->vqs);
        kfree(vdpasim->config);
 }
@@ -620,6 +683,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
        .set_vq_state           = vdpasim_set_vq_state,
        .get_vq_state           = vdpasim_get_vq_state,
        .get_vq_align           = vdpasim_get_vq_align,
+       .get_vq_group           = vdpasim_get_vq_group,
        .get_device_features    = vdpasim_get_device_features,
        .set_driver_features    = vdpasim_set_driver_features,
        .get_driver_features    = vdpasim_get_driver_features,
@@ -635,6 +699,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
        .set_config             = vdpasim_set_config,
        .get_generation         = vdpasim_get_generation,
        .get_iova_range         = vdpasim_get_iova_range,
+       .set_group_asid         = vdpasim_set_group_asid,
        .dma_map                = vdpasim_dma_map,
        .dma_unmap              = vdpasim_dma_unmap,
        .free                   = vdpasim_free,
@@ -650,6 +715,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
        .set_vq_state           = vdpasim_set_vq_state,
        .get_vq_state           = vdpasim_get_vq_state,
        .get_vq_align           = vdpasim_get_vq_align,
+       .get_vq_group           = vdpasim_get_vq_group,
        .get_device_features    = vdpasim_get_device_features,
        .set_driver_features    = vdpasim_set_driver_features,
        .get_driver_features    = vdpasim_get_driver_features,
@@ -665,6 +731,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
        .set_config             = vdpasim_set_config,
        .get_generation         = vdpasim_get_generation,
        .get_iova_range         = vdpasim_get_iova_range,
+       .set_group_asid         = vdpasim_set_group_asid,
        .set_map                = vdpasim_set_map,
        .free                   = vdpasim_free,
 };
index cd58e88..622782e 100644 (file)
@@ -41,6 +41,8 @@ struct vdpasim_dev_attr {
        size_t buffer_size;
        int nvqs;
        u32 id;
+       u32 ngroups;
+       u32 nas;
 
        work_func_t work_fn;
        void (*get_config)(struct vdpasim *vdpasim, void *config);
@@ -63,6 +65,7 @@ struct vdpasim {
        u32 status;
        u32 generation;
        u64 features;
+       u32 groups;
        /* spinlock to synchronize iommu table */
        spinlock_t iommu_lock;
 };
index d5324f6..5125976 100644 (file)
 #define DRV_LICENSE  "GPL v2"
 
 #define VDPASIM_NET_FEATURES   (VDPASIM_FEATURES | \
-                                (1ULL << VIRTIO_NET_F_MAC))
+                                (1ULL << VIRTIO_NET_F_MAC) | \
+                                (1ULL << VIRTIO_NET_F_MTU) | \
+                                (1ULL << VIRTIO_NET_F_CTRL_VQ) | \
+                                (1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR))
 
-#define VDPASIM_NET_VQ_NUM     2
+/* 3 virtqueues, 2 address spaces, 2 virtqueue groups */
+#define VDPASIM_NET_VQ_NUM     3
+#define VDPASIM_NET_AS_NUM     2
+#define VDPASIM_NET_GROUP_NUM  2
+
+static void vdpasim_net_complete(struct vdpasim_virtqueue *vq, size_t len)
+{
+       /* Make sure data is written before advancing index */
+       smp_wmb();
+
+       vringh_complete_iotlb(&vq->vring, vq->head, len);
+
+       /* Make sure used is visible before raising the interrupt. */
+       smp_wmb();
+
+       local_bh_disable();
+       if (vringh_need_notify_iotlb(&vq->vring) > 0)
+               vringh_notify(&vq->vring);
+       local_bh_enable();
+}
+
+static bool receive_filter(struct vdpasim *vdpasim, size_t len)
+{
+       bool modern = vdpasim->features & (1ULL << VIRTIO_F_VERSION_1);
+       size_t hdr_len = modern ? sizeof(struct virtio_net_hdr_v1) :
+                                 sizeof(struct virtio_net_hdr);
+       struct virtio_net_config *vio_config = vdpasim->config;
+
+       if (len < ETH_ALEN + hdr_len)
+               return false;
+
+       if (!strncmp(vdpasim->buffer + hdr_len, vio_config->mac, ETH_ALEN))
+               return true;
+
+       return false;
+}
+
+static virtio_net_ctrl_ack vdpasim_handle_ctrl_mac(struct vdpasim *vdpasim,
+                                                  u8 cmd)
+{
+       struct virtio_net_config *vio_config = vdpasim->config;
+       struct vdpasim_virtqueue *cvq = &vdpasim->vqs[2];
+       virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+       size_t read;
+
+       switch (cmd) {
+       case VIRTIO_NET_CTRL_MAC_ADDR_SET:
+               read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov,
+                                            vio_config->mac, ETH_ALEN);
+               if (read == ETH_ALEN)
+                       status = VIRTIO_NET_OK;
+               break;
+       default:
+               break;
+       }
+
+       return status;
+}
+
+static void vdpasim_handle_cvq(struct vdpasim *vdpasim)
+{
+       struct vdpasim_virtqueue *cvq = &vdpasim->vqs[2];
+       virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+       struct virtio_net_ctrl_hdr ctrl;
+       size_t read, write;
+       int err;
+
+       if (!(vdpasim->features & (1ULL << VIRTIO_NET_F_CTRL_VQ)))
+               return;
+
+       if (!cvq->ready)
+               return;
+
+       while (true) {
+               err = vringh_getdesc_iotlb(&cvq->vring, &cvq->in_iov,
+                                          &cvq->out_iov,
+                                          &cvq->head, GFP_ATOMIC);
+               if (err <= 0)
+                       break;
+
+               read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov, &ctrl,
+                                            sizeof(ctrl));
+               if (read != sizeof(ctrl))
+                       break;
+
+               switch (ctrl.class) {
+               case VIRTIO_NET_CTRL_MAC:
+                       status = vdpasim_handle_ctrl_mac(vdpasim, ctrl.cmd);
+                       break;
+               default:
+                       break;
+               }
+
+               /* Make sure data is written before advancing index */
+               smp_wmb();
+
+               write = vringh_iov_push_iotlb(&cvq->vring, &cvq->out_iov,
+                                             &status, sizeof(status));
+               vringh_complete_iotlb(&cvq->vring, cvq->head, write);
+               vringh_kiov_cleanup(&cvq->in_iov);
+               vringh_kiov_cleanup(&cvq->out_iov);
+
+               /* Make sure used is visible before raising the interrupt. */
+               smp_wmb();
+
+               local_bh_disable();
+               if (cvq->cb)
+                       cvq->cb(cvq->private);
+               local_bh_enable();
+       }
+}
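
[Editor's note] vdpasim_handle_cvq() above implements the virtio-net control-virtqueue contract: the driver places a struct virtio_net_ctrl_hdr (class + command) followed by command-specific data in the descriptor chain, and the device writes back a single virtio_net_ctrl_ack byte (VIRTIO_NET_OK or VIRTIO_NET_ERR). A vring-free sketch of just the dispatch step, assuming the request has already been copied out of the ring (only the MAC_ADDR_SET command is handled, as in the hunk above):

#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/virtio_net.h>

/* @data points at the payload that follows the control header */
static virtio_net_ctrl_ack demo_handle_ctrl(struct virtio_net_config *cfg,
					    const struct virtio_net_ctrl_hdr *hdr,
					    const void *data, size_t len)
{
	if (hdr->class == VIRTIO_NET_CTRL_MAC &&
	    hdr->cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET &&
	    len >= ETH_ALEN) {
		memcpy(cfg->mac, data, ETH_ALEN);
		return VIRTIO_NET_OK;
	}

	return VIRTIO_NET_ERR;	/* unknown class/command */
}
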
 
 static void vdpasim_net_work(struct work_struct *work)
 {
@@ -36,7 +149,6 @@ static void vdpasim_net_work(struct work_struct *work)
        struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
        struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
        ssize_t read, write;
-       size_t total_write;
        int pkts = 0;
        int err;
 
@@ -45,53 +157,40 @@ static void vdpasim_net_work(struct work_struct *work)
        if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
                goto out;
 
+       vdpasim_handle_cvq(vdpasim);
+
        if (!txq->ready || !rxq->ready)
                goto out;
 
        while (true) {
-               total_write = 0;
                err = vringh_getdesc_iotlb(&txq->vring, &txq->out_iov, NULL,
                                           &txq->head, GFP_ATOMIC);
                if (err <= 0)
                        break;
 
+               read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
+                                            vdpasim->buffer,
+                                            PAGE_SIZE);
+
+               if (!receive_filter(vdpasim, read)) {
+                       vdpasim_net_complete(txq, 0);
+                       continue;
+               }
+
                err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->in_iov,
                                           &rxq->head, GFP_ATOMIC);
                if (err <= 0) {
-                       vringh_complete_iotlb(&txq->vring, txq->head, 0);
+                       vdpasim_net_complete(txq, 0);
                        break;
                }
 
-               while (true) {
-                       read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
-                                                    vdpasim->buffer,
-                                                    PAGE_SIZE);
-                       if (read <= 0)
-                               break;
-
-                       write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
-                                                     vdpasim->buffer, read);
-                       if (write <= 0)
-                               break;
-
-                       total_write += write;
-               }
-
-               /* Make sure data is wrote before advancing index */
-               smp_wmb();
-
-               vringh_complete_iotlb(&txq->vring, txq->head, 0);
-               vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);
-
-               /* Make sure used is visible before rasing the interrupt. */
-               smp_wmb();
+               write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
+                                             vdpasim->buffer, read);
+               if (write <= 0)
+                       break;
 
-               local_bh_disable();
-               if (vringh_need_notify_iotlb(&txq->vring) > 0)
-                       vringh_notify(&txq->vring);
-               if (vringh_need_notify_iotlb(&rxq->vring) > 0)
-                       vringh_notify(&rxq->vring);
-               local_bh_enable();
+               vdpasim_net_complete(txq, 0);
+               vdpasim_net_complete(rxq, write);
 
                if (++pkts > 4) {
                        schedule_work(&vdpasim->work);
@@ -145,6 +244,8 @@ static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
        dev_attr.id = VIRTIO_ID_NET;
        dev_attr.supported_features = VDPASIM_NET_FEATURES;
        dev_attr.nvqs = VDPASIM_NET_VQ_NUM;
+       dev_attr.ngroups = VDPASIM_NET_GROUP_NUM;
+       dev_attr.nas = VDPASIM_NET_AS_NUM;
        dev_attr.config_size = sizeof(struct virtio_net_config);
        dev_attr.get_config = vdpasim_net_get_config;
        dev_attr.work_fn = vdpasim_net_work;
index f85d1a0..d503848 100644 (file)
@@ -693,6 +693,7 @@ static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa)
 }
 
 static int vduse_vdpa_set_map(struct vdpa_device *vdpa,
+                               unsigned int asid,
                                struct vhost_iotlb *iotlb)
 {
        struct vduse_dev *dev = vdpa_to_vduse(vdpa);
@@ -1495,7 +1496,7 @@ static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
                return -EEXIST;
 
        vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
-                                &vduse_vdpa_config_ops, name, true);
+                                &vduse_vdpa_config_ops, 1, 1, name, true);
        if (IS_ERR(vdev))
                return PTR_ERR(vdev);
 
index cce101e..0452207 100644 (file)
@@ -32,7 +32,7 @@ struct vp_vring {
 
 struct vp_vdpa {
        struct vdpa_device vdpa;
-       struct virtio_pci_modern_device mdev;
+       struct virtio_pci_modern_device *mdev;
        struct vp_vring *vring;
        struct vdpa_callback config_cb;
        char msix_name[VP_VDPA_NAME_SIZE];
@@ -41,6 +41,12 @@ struct vp_vdpa {
        int vectors;
 };
 
+struct vp_vdpa_mgmtdev {
+       struct vdpa_mgmt_dev mgtdev;
+       struct virtio_pci_modern_device *mdev;
+       struct vp_vdpa *vp_vdpa;
+};
+
 static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
 {
        return container_of(vdpa, struct vp_vdpa, vdpa);
@@ -50,7 +56,12 @@ static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
 {
        struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
 
-       return &vp_vdpa->mdev;
+       return vp_vdpa->mdev;
+}
+
+static struct virtio_pci_modern_device *vp_vdpa_to_mdev(struct vp_vdpa *vp_vdpa)
+{
+       return vp_vdpa->mdev;
 }
 
 static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
@@ -96,7 +107,7 @@ static int vp_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
 
 static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
 {
-       struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+       struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
        struct pci_dev *pdev = mdev->pci_dev;
        int i;
 
@@ -143,7 +154,7 @@ static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
 
 static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
 {
-       struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+       struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
        struct pci_dev *pdev = mdev->pci_dev;
        int i, ret, irq;
        int queues = vp_vdpa->queues;
@@ -198,7 +209,7 @@ err:
 static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
 {
        struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
-       struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+       struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
        u8 s = vp_vdpa_get_status(vdpa);
 
        if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
@@ -212,7 +223,7 @@ static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
 static int vp_vdpa_reset(struct vdpa_device *vdpa)
 {
        struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
-       struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+       struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
        u8 s = vp_vdpa_get_status(vdpa);
 
        vp_modern_set_status(mdev, 0);
@@ -372,7 +383,7 @@ static void vp_vdpa_get_config(struct vdpa_device *vdpa,
                               void *buf, unsigned int len)
 {
        struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
-       struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+       struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
        u8 old, new;
        u8 *p;
        int i;
@@ -392,7 +403,7 @@ static void vp_vdpa_set_config(struct vdpa_device *vdpa,
                               unsigned int len)
 {
        struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
-       struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+       struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
        const u8 *p = buf;
        int i;
 
@@ -412,7 +423,7 @@ static struct vdpa_notification_area
 vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
 {
        struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
-       struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+       struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
        struct vdpa_notification_area notify;
 
        notify.addr = vp_vdpa->vring[qid].notify_pa;
@@ -454,38 +465,31 @@ static void vp_vdpa_free_irq_vectors(void *data)
        pci_free_irq_vectors(data);
 }
 
-static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+                          const struct vdpa_dev_set_config *add_config)
 {
-       struct virtio_pci_modern_device *mdev;
+       struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
+               container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);
+
+       struct virtio_pci_modern_device *mdev = vp_vdpa_mgtdev->mdev;
+       struct pci_dev *pdev = mdev->pci_dev;
        struct device *dev = &pdev->dev;
-       struct vp_vdpa *vp_vdpa;
+       struct vp_vdpa *vp_vdpa = NULL;
        int ret, i;
 
-       ret = pcim_enable_device(pdev);
-       if (ret)
-               return ret;
-
        vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
-                                   dev, &vp_vdpa_ops, NULL, false);
+                                   dev, &vp_vdpa_ops, 1, 1, name, false);
+
        if (IS_ERR(vp_vdpa)) {
                dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
                return PTR_ERR(vp_vdpa);
        }
 
-       mdev = &vp_vdpa->mdev;
-       mdev->pci_dev = pdev;
-
-       ret = vp_modern_probe(mdev);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
-               goto err;
-       }
-
-       pci_set_master(pdev);
-       pci_set_drvdata(pdev, vp_vdpa);
+       vp_vdpa_mgtdev->vp_vdpa = vp_vdpa;
 
        vp_vdpa->vdpa.dma_dev = &pdev->dev;
        vp_vdpa->queues = vp_modern_get_num_queues(mdev);
+       vp_vdpa->mdev = mdev;
 
        ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
        if (ret) {
@@ -516,7 +520,8 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
        vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
 
-       ret = vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
+       vp_vdpa->vdpa.mdev = &vp_vdpa_mgtdev->mgtdev;
+       ret = _vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
        if (ret) {
                dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
                goto err;
@@ -529,12 +534,104 @@ err:
        return ret;
 }
 
+static void vp_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev,
+                           struct vdpa_device *dev)
+{
+       struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
+               container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);
+
+       struct vp_vdpa *vp_vdpa = vp_vdpa_mgtdev->vp_vdpa;
+
+       _vdpa_unregister_device(&vp_vdpa->vdpa);
+       vp_vdpa_mgtdev->vp_vdpa = NULL;
+}
+
+static const struct vdpa_mgmtdev_ops vp_vdpa_mdev_ops = {
+       .dev_add = vp_vdpa_dev_add,
+       .dev_del = vp_vdpa_dev_del,
+};
+
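
[Editor's note] With the hunks above, vp_vdpa moves from "PCI probe registers the vdpa device directly" to the management-device model: probe only registers a struct vdpa_mgmt_dev, and the actual vdpa device is created and destroyed on demand through the dev_add/dev_del callbacks, typically driven from userspace with the iproute2 vdpa tool (roughly: vdpa dev add name vp0 mgmtdev pci/0000:05:00.0; exact syntax not taken from this diff). A stripped-down registration skeleton mirroring only the fields that appear in the diff (stubbed dev_add, hypothetical names):

#include <linux/mod_devicetable.h>
#include <linux/vdpa.h>

struct demo_mgmtdev {
	struct vdpa_mgmt_dev mgtdev;
	struct virtio_device_id id_table[2];	/* second entry left zeroed (assumed terminator) */
};

static int demo_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			const struct vdpa_dev_set_config *config)
{
	/* a real parent allocates its device here and hands ownership to the
	 * core with _vdpa_register_device(); stubbed out in this sketch */
	return -EOPNOTSUPP;
}

static void demo_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
	_vdpa_unregister_device(dev);
}

static const struct vdpa_mgmtdev_ops demo_mdev_ops = {
	.dev_add = demo_dev_add,
	.dev_del = demo_dev_del,
};

static int demo_register(struct demo_mgmtdev *m, struct device *parent,
			 u32 device, u32 vendor)
{
	m->id_table[0].device = device;
	m->id_table[0].vendor = vendor;

	m->mgtdev.ops = &demo_mdev_ops;
	m->mgtdev.device = parent;
	m->mgtdev.id_table = m->id_table;

	return vdpa_mgmtdev_register(&m->mgtdev);
}
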
+static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = NULL;
+       struct vdpa_mgmt_dev *mgtdev;
+       struct device *dev = &pdev->dev;
+       struct virtio_pci_modern_device *mdev = NULL;
+       struct virtio_device_id *mdev_id = NULL;
+       int err;
+
+       vp_vdpa_mgtdev = kzalloc(sizeof(*vp_vdpa_mgtdev), GFP_KERNEL);
+       if (!vp_vdpa_mgtdev)
+               return -ENOMEM;
+
+       mgtdev = &vp_vdpa_mgtdev->mgtdev;
+       mgtdev->ops = &vp_vdpa_mdev_ops;
+       mgtdev->device = dev;
+
+       mdev = kzalloc(sizeof(struct virtio_pci_modern_device), GFP_KERNEL);
+       if (!mdev) {
+               err = -ENOMEM;
+               goto mdev_err;
+       }
+
+       mdev_id = kzalloc(sizeof(struct virtio_device_id), GFP_KERNEL);
+       if (!mdev_id) {
+               err = -ENOMEM;
+               goto mdev_id_err;
+       }
+
+       vp_vdpa_mgtdev->mdev = mdev;
+       mdev->pci_dev = pdev;
+
+       err = pcim_enable_device(pdev);
+       if (err) {
+               goto probe_err;
+       }
+
+       err = vp_modern_probe(mdev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
+               goto probe_err;
+       }
+
+       mdev_id->device = mdev->id.device;
+       mdev_id->vendor = mdev->id.vendor;
+       mgtdev->id_table = mdev_id;
+       mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev);
+       mgtdev->supported_features = vp_modern_get_features(mdev);
+       pci_set_master(pdev);
+       pci_set_drvdata(pdev, vp_vdpa_mgtdev);
+
+       err = vdpa_mgmtdev_register(mgtdev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to register vdpa mgmtdev device\n");
+               goto register_err;
+       }
+
+       return 0;
+
+register_err:
+       vp_modern_remove(vp_vdpa_mgtdev->mdev);
+probe_err:
+       kfree(mdev_id);
+mdev_id_err:
+       kfree(mdev);
+mdev_err:
+       kfree(vp_vdpa_mgtdev);
+       return err;
+}
+
 static void vp_vdpa_remove(struct pci_dev *pdev)
 {
-       struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);
+       struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = pci_get_drvdata(pdev);
+       struct virtio_pci_modern_device *mdev = NULL;
 
-       vp_modern_remove(&vp_vdpa->mdev);
-       vdpa_unregister_device(&vp_vdpa->vdpa);
+       mdev = vp_vdpa_mgtdev->mdev;
+       vp_modern_remove(mdev);
+       vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
+       kfree(vp_vdpa_mgtdev->mgtdev.id_table);
+       kfree(mdev);
+       kfree(vp_vdpa_mgtdev);
 }
 
 static struct pci_driver vp_vdpa_driver = {
index 6e2e62c..3feff72 100644 (file)
@@ -588,6 +588,7 @@ static struct fsl_mc_driver vfio_fsl_mc_driver = {
                .name   = "vfio-fsl-mc",
                .owner  = THIS_MODULE,
        },
+       .driver_managed_dma = true,
 };
 
 static int __init vfio_fsl_mc_driver_init(void)
index 767b5d4..4def43f 100644 (file)
@@ -337,6 +337,14 @@ static int vf_qm_cache_wb(struct hisi_qm *qm)
        return 0;
 }
 
+static struct hisi_acc_vf_core_device *hssi_acc_drvdata(struct pci_dev *pdev)
+{
+       struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
+
+       return container_of(core_device, struct hisi_acc_vf_core_device,
+                           core_device);
+}
+
 static void vf_qm_fun_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev,
                            struct hisi_qm *qm)
 {
@@ -962,7 +970,7 @@ hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
 
 static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
 {
-       struct hisi_acc_vf_core_device *hisi_acc_vdev = dev_get_drvdata(&pdev->dev);
+       struct hisi_acc_vf_core_device *hisi_acc_vdev = hssi_acc_drvdata(pdev);
 
        if (hisi_acc_vdev->core_device.vdev.migration_flags !=
                                VFIO_MIGRATION_STOP_COPY)
@@ -1274,11 +1282,10 @@ static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device
                                          &hisi_acc_vfio_pci_ops);
        }
 
+       dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
        ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
        if (ret)
                goto out_free;
-
-       dev_set_drvdata(&pdev->dev, hisi_acc_vdev);
        return 0;
 
 out_free:
@@ -1289,7 +1296,7 @@ out_free:
 
 static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
 {
-       struct hisi_acc_vf_core_device *hisi_acc_vdev = dev_get_drvdata(&pdev->dev);
+       struct hisi_acc_vf_core_device *hisi_acc_vdev = hssi_acc_drvdata(pdev);
 
        vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
        vfio_pci_core_uninit_device(&hisi_acc_vdev->core_device);
@@ -1316,6 +1323,7 @@ static struct pci_driver hisi_acc_vfio_pci_driver = {
        .probe = hisi_acc_vfio_pci_probe,
        .remove = hisi_acc_vfio_pci_remove,
        .err_handler = &hisi_acc_vf_err_handlers,
+       .driver_managed_dma = true,
 };
 
 module_pci_driver(hisi_acc_vfio_pci_driver);
index 5c9f921..9b9f33c 100644 (file)
 
 #include "cmd.h"
 
-int mlx5vf_cmd_suspend_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod)
+static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
+                                 u16 *vhca_id);
+
+int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
 {
-       struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
        u32 out[MLX5_ST_SZ_DW(suspend_vhca_out)] = {};
        u32 in[MLX5_ST_SZ_DW(suspend_vhca_in)] = {};
-       int ret;
 
-       if (!mdev)
+       lockdep_assert_held(&mvdev->state_mutex);
+       if (mvdev->mdev_detach)
                return -ENOTCONN;
 
        MLX5_SET(suspend_vhca_in, in, opcode, MLX5_CMD_OP_SUSPEND_VHCA);
-       MLX5_SET(suspend_vhca_in, in, vhca_id, vhca_id);
+       MLX5_SET(suspend_vhca_in, in, vhca_id, mvdev->vhca_id);
        MLX5_SET(suspend_vhca_in, in, op_mod, op_mod);
 
-       ret = mlx5_cmd_exec_inout(mdev, suspend_vhca, in, out);
-       mlx5_vf_put_core_dev(mdev);
-       return ret;
+       return mlx5_cmd_exec_inout(mvdev->mdev, suspend_vhca, in, out);
 }
 
-int mlx5vf_cmd_resume_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod)
+int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
 {
-       struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
        u32 out[MLX5_ST_SZ_DW(resume_vhca_out)] = {};
        u32 in[MLX5_ST_SZ_DW(resume_vhca_in)] = {};
-       int ret;
 
-       if (!mdev)
+       lockdep_assert_held(&mvdev->state_mutex);
+       if (mvdev->mdev_detach)
                return -ENOTCONN;
 
        MLX5_SET(resume_vhca_in, in, opcode, MLX5_CMD_OP_RESUME_VHCA);
-       MLX5_SET(resume_vhca_in, in, vhca_id, vhca_id);
+       MLX5_SET(resume_vhca_in, in, vhca_id, mvdev->vhca_id);
        MLX5_SET(resume_vhca_in, in, op_mod, op_mod);
 
-       ret = mlx5_cmd_exec_inout(mdev, resume_vhca, in, out);
-       mlx5_vf_put_core_dev(mdev);
-       return ret;
+       return mlx5_cmd_exec_inout(mvdev->mdev, resume_vhca, in, out);
 }
 
-int mlx5vf_cmd_query_vhca_migration_state(struct pci_dev *pdev, u16 vhca_id,
+int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
                                          size_t *state_size)
 {
-       struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
        u32 out[MLX5_ST_SZ_DW(query_vhca_migration_state_out)] = {};
        u32 in[MLX5_ST_SZ_DW(query_vhca_migration_state_in)] = {};
        int ret;
 
-       if (!mdev)
+       lockdep_assert_held(&mvdev->state_mutex);
+       if (mvdev->mdev_detach)
                return -ENOTCONN;
 
        MLX5_SET(query_vhca_migration_state_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE);
-       MLX5_SET(query_vhca_migration_state_in, in, vhca_id, vhca_id);
+       MLX5_SET(query_vhca_migration_state_in, in, vhca_id, mvdev->vhca_id);
        MLX5_SET(query_vhca_migration_state_in, in, op_mod, 0);
 
-       ret = mlx5_cmd_exec_inout(mdev, query_vhca_migration_state, in, out);
+       ret = mlx5_cmd_exec_inout(mvdev->mdev, query_vhca_migration_state, in,
+                                 out);
        if (ret)
-               goto end;
+               return ret;
 
        *state_size = MLX5_GET(query_vhca_migration_state_out, out,
                               required_umem_size);
+       return 0;
+}
+
+static int mlx5fv_vf_event(struct notifier_block *nb,
+                          unsigned long event, void *data)
+{
+       struct mlx5vf_pci_core_device *mvdev =
+               container_of(nb, struct mlx5vf_pci_core_device, nb);
+
+       mutex_lock(&mvdev->state_mutex);
+       switch (event) {
+       case MLX5_PF_NOTIFY_ENABLE_VF:
+               mvdev->mdev_detach = false;
+               break;
+       case MLX5_PF_NOTIFY_DISABLE_VF:
+               mlx5vf_disable_fds(mvdev);
+               mvdev->mdev_detach = true;
+               break;
+       default:
+               break;
+       }
+       mlx5vf_state_mutex_unlock(mvdev);
+       return 0;
+}
+
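
[Editor's note] mlx5fv_vf_event() above is a blocking notifier registered per VF: when the PF disables the VF it tears down any in-flight migration files and sets mdev_detach under state_mutex, so every later command path bails out with -ENOTCONN instead of touching the detached core device. The notifier-block shape it relies on, reduced to the generic API (the event values below are hypothetical stand-ins, not the MLX5_PF_NOTIFY_* constants):

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/types.h>

struct demo_vf_state {
	struct notifier_block nb;	/* nb.notifier_call = demo_vf_event at setup */
	bool detached;
};

static int demo_vf_event(struct notifier_block *nb, unsigned long event,
			 void *data)
{
	struct demo_vf_state *vf = container_of(nb, struct demo_vf_state, nb);

	switch (event) {
	case 0:		/* stand-in for "VF enabled" */
		vf->detached = false;
		break;
	case 1:		/* stand-in for "VF disabled": quiesce users first */
		vf->detached = true;
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
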
+void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
+{
+       if (!mvdev->migrate_cap)
+               return;
+
+       mlx5_sriov_blocking_notifier_unregister(mvdev->mdev, mvdev->vf_id,
+                                               &mvdev->nb);
+       destroy_workqueue(mvdev->cb_wq);
+}
+
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev)
+{
+       struct pci_dev *pdev = mvdev->core_device.pdev;
+       int ret;
+
+       if (!pdev->is_virtfn)
+               return;
+
+       mvdev->mdev = mlx5_vf_get_core_dev(pdev);
+       if (!mvdev->mdev)
+               return;
+
+       if (!MLX5_CAP_GEN(mvdev->mdev, migration))
+               goto end;
+
+       mvdev->vf_id = pci_iov_vf_id(pdev);
+       if (mvdev->vf_id < 0)
+               goto end;
+
+       if (mlx5vf_cmd_get_vhca_id(mvdev->mdev, mvdev->vf_id + 1,
+                                  &mvdev->vhca_id))
+               goto end;
+
+       mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0);
+       if (!mvdev->cb_wq)
+               goto end;
+
+       mutex_init(&mvdev->state_mutex);
+       spin_lock_init(&mvdev->reset_lock);
+       mvdev->nb.notifier_call = mlx5fv_vf_event;
+       ret = mlx5_sriov_blocking_notifier_register(mvdev->mdev, mvdev->vf_id,
+                                                   &mvdev->nb);
+       if (ret) {
+               destroy_workqueue(mvdev->cb_wq);
+               goto end;
+       }
+
+       mvdev->migrate_cap = 1;
+       mvdev->core_device.vdev.migration_flags =
+               VFIO_MIGRATION_STOP_COPY |
+               VFIO_MIGRATION_P2P;
 
 end:
-       mlx5_vf_put_core_dev(mdev);
-       return ret;
+       mlx5_vf_put_core_dev(mvdev->mdev);
 }
 
-int mlx5vf_cmd_get_vhca_id(struct pci_dev *pdev, u16 function_id, u16 *vhca_id)
+static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
+                                 u16 *vhca_id)
 {
-       struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
        u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
        int out_size;
        void *out;
        int ret;
 
-       if (!mdev)
-               return -ENOTCONN;
-
        out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
        out = kzalloc(out_size, GFP_KERNEL);
-       if (!out) {
-               ret = -ENOMEM;
-               goto end;
-       }
+       if (!out)
+               return -ENOMEM;
 
        MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
        MLX5_SET(query_hca_cap_in, in, other_function, 1);
@@ -105,8 +173,6 @@ int mlx5vf_cmd_get_vhca_id(struct pci_dev *pdev, u16 function_id, u16 *vhca_id)
 
 err_exec:
        kfree(out);
-end:
-       mlx5_vf_put_core_dev(mdev);
        return ret;
 }
 
@@ -151,21 +217,68 @@ static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn,
        return err;
 }
 
-int mlx5vf_cmd_save_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work)
+{
+       struct mlx5vf_async_data *async_data = container_of(_work,
+               struct mlx5vf_async_data, work);
+       struct mlx5_vf_migration_file *migf = container_of(async_data,
+               struct mlx5_vf_migration_file, async_data);
+       struct mlx5_core_dev *mdev = migf->mvdev->mdev;
+
+       mutex_lock(&migf->lock);
+       if (async_data->status) {
+               migf->is_err = true;
+               wake_up_interruptible(&migf->poll_wait);
+       }
+       mutex_unlock(&migf->lock);
+
+       mlx5_core_destroy_mkey(mdev, async_data->mkey);
+       dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
+       mlx5_core_dealloc_pd(mdev, async_data->pdn);
+       kvfree(async_data->out);
+       fput(migf->filp);
+}
+
+static void mlx5vf_save_callback(int status, struct mlx5_async_work *context)
+{
+       struct mlx5vf_async_data *async_data = container_of(context,
+                       struct mlx5vf_async_data, cb_work);
+       struct mlx5_vf_migration_file *migf = container_of(async_data,
+                       struct mlx5_vf_migration_file, async_data);
+
+       if (!status) {
+               WRITE_ONCE(migf->total_length,
+                          MLX5_GET(save_vhca_state_out, async_data->out,
+                                   actual_image_size));
+               wake_up_interruptible(&migf->poll_wait);
+       }
+
+       /*
+        * The error and the cleanup flows can't run from an
+        * interrupt context
+        */
+       async_data->status = status;
+       queue_work(migf->mvdev->cb_wq, &async_data->work);
+}
+
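
[Editor's note] As the comment above notes, the error and cleanup flows cannot run from the command completion context, so mlx5vf_save_callback() only records the status and queues mlx5vf_mig_file_cleanup_cb() on the device's ordered workqueue (cb_wq), where the mkey/PD teardown, DMA unmap, and fput() can safely sleep. The deferral skeleton, reduced to the generic workqueue API (hypothetical names):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_async {
	struct work_struct work;	/* INIT_WORK(&a->work, demo_cleanup_cb) at setup */
	int status;
};

/* may run in a context that cannot sleep: just record and defer */
static void demo_completion(struct demo_async *a, int status,
			    struct workqueue_struct *wq)
{
	a->status = status;
	queue_work(wq, &a->work);
}

/* runs later in process context: safe to sleep, unmap, drop file refs, ... */
static void demo_cleanup_cb(struct work_struct *work)
{
	struct demo_async *a = container_of(work, struct demo_async, work);

	if (a->status)
		pr_warn("save failed: %d\n", a->status);
	/* release DMA mappings / file references here */
}
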
+int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
                               struct mlx5_vf_migration_file *migf)
 {
-       struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
-       u32 out[MLX5_ST_SZ_DW(save_vhca_state_out)] = {};
+       u32 out_size = MLX5_ST_SZ_BYTES(save_vhca_state_out);
        u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};
+       struct mlx5vf_async_data *async_data;
+       struct mlx5_core_dev *mdev;
        u32 pdn, mkey;
        int err;
 
-       if (!mdev)
+       lockdep_assert_held(&mvdev->state_mutex);
+       if (mvdev->mdev_detach)
                return -ENOTCONN;
 
+       mdev = mvdev->mdev;
        err = mlx5_core_alloc_pd(mdev, &pdn);
        if (err)
-               goto end;
+               return err;
 
        err = dma_map_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE,
                              0);
@@ -179,45 +292,54 @@ int mlx5vf_cmd_save_vhca_state(struct pci_dev *pdev, u16 vhca_id,
        MLX5_SET(save_vhca_state_in, in, opcode,
                 MLX5_CMD_OP_SAVE_VHCA_STATE);
        MLX5_SET(save_vhca_state_in, in, op_mod, 0);
-       MLX5_SET(save_vhca_state_in, in, vhca_id, vhca_id);
+       MLX5_SET(save_vhca_state_in, in, vhca_id, mvdev->vhca_id);
        MLX5_SET(save_vhca_state_in, in, mkey, mkey);
        MLX5_SET(save_vhca_state_in, in, size, migf->total_length);
 
-       err = mlx5_cmd_exec_inout(mdev, save_vhca_state, in, out);
+       async_data = &migf->async_data;
+       async_data->out = kvzalloc(out_size, GFP_KERNEL);
+       if (!async_data->out) {
+               err = -ENOMEM;
+               goto err_out;
+       }
+
+       /* no data exists till the callback comes back */
+       migf->total_length = 0;
+       get_file(migf->filp);
+       async_data->mkey = mkey;
+       async_data->pdn = pdn;
+       err = mlx5_cmd_exec_cb(&migf->async_ctx, in, sizeof(in),
+                              async_data->out,
+                              out_size, mlx5vf_save_callback,
+                              &async_data->cb_work);
        if (err)
                goto err_exec;
 
-       migf->total_length =
-               MLX5_GET(save_vhca_state_out, out, actual_image_size);
-
-       mlx5_core_destroy_mkey(mdev, mkey);
-       mlx5_core_dealloc_pd(mdev, pdn);
-       dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
-       mlx5_vf_put_core_dev(mdev);
-
        return 0;
 
 err_exec:
+       fput(migf->filp);
+       kvfree(async_data->out);
+err_out:
        mlx5_core_destroy_mkey(mdev, mkey);
 err_create_mkey:
        dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
 err_dma_map:
        mlx5_core_dealloc_pd(mdev, pdn);
-end:
-       mlx5_vf_put_core_dev(mdev);
        return err;
 }
 
-int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
                               struct mlx5_vf_migration_file *migf)
 {
-       struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev);
+       struct mlx5_core_dev *mdev;
        u32 out[MLX5_ST_SZ_DW(save_vhca_state_out)] = {};
        u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};
        u32 pdn, mkey;
        int err;
 
-       if (!mdev)
+       lockdep_assert_held(&mvdev->state_mutex);
+       if (mvdev->mdev_detach)
                return -ENOTCONN;
 
        mutex_lock(&migf->lock);
@@ -226,6 +348,7 @@ int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id,
                goto end;
        }
 
+       mdev = mvdev->mdev;
        err = mlx5_core_alloc_pd(mdev, &pdn);
        if (err)
                goto end;
@@ -241,7 +364,7 @@ int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id,
        MLX5_SET(load_vhca_state_in, in, opcode,
                 MLX5_CMD_OP_LOAD_VHCA_STATE);
        MLX5_SET(load_vhca_state_in, in, op_mod, 0);
-       MLX5_SET(load_vhca_state_in, in, vhca_id, vhca_id);
+       MLX5_SET(load_vhca_state_in, in, vhca_id, mvdev->vhca_id);
        MLX5_SET(load_vhca_state_in, in, mkey, mkey);
        MLX5_SET(load_vhca_state_in, in, size, migf->total_length);
 
@@ -253,7 +376,6 @@ err_mkey:
 err_reg:
        mlx5_core_dealloc_pd(mdev, pdn);
 end:
-       mlx5_vf_put_core_dev(mdev);
        mutex_unlock(&migf->lock);
        return err;
 }
index 1392a11..6c3112f 100644 (file)
@@ -7,12 +7,23 @@
 #define MLX5_VFIO_CMD_H
 
 #include <linux/kernel.h>
+#include <linux/vfio_pci_core.h>
 #include <linux/mlx5/driver.h>
 
+struct mlx5vf_async_data {
+       struct mlx5_async_work cb_work;
+       struct work_struct work;
+       int status;
+       u32 pdn;
+       u32 mkey;
+       void *out;
+};
+
 struct mlx5_vf_migration_file {
        struct file *filp;
        struct mutex lock;
-       bool disabled;
+       u8 disabled:1;
+       u8 is_err:1;
 
        struct sg_append_table table;
        size_t total_length;
@@ -22,15 +33,42 @@ struct mlx5_vf_migration_file {
        struct scatterlist *last_offset_sg;
        unsigned int sg_last_entry;
        unsigned long last_offset;
+       struct mlx5vf_pci_core_device *mvdev;
+       wait_queue_head_t poll_wait;
+       struct mlx5_async_ctx async_ctx;
+       struct mlx5vf_async_data async_data;
+};
+
+struct mlx5vf_pci_core_device {
+       struct vfio_pci_core_device core_device;
+       int vf_id;
+       u16 vhca_id;
+       u8 migrate_cap:1;
+       u8 deferred_reset:1;
+       u8 mdev_detach:1;
+       /* protect migration state */
+       struct mutex state_mutex;
+       enum vfio_device_mig_state mig_state;
+       /* protect the reset_done flow */
+       spinlock_t reset_lock;
+       struct mlx5_vf_migration_file *resuming_migf;
+       struct mlx5_vf_migration_file *saving_migf;
+       struct workqueue_struct *cb_wq;
+       struct notifier_block nb;
+       struct mlx5_core_dev *mdev;
 };
 
-int mlx5vf_cmd_suspend_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod);
-int mlx5vf_cmd_resume_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod);
-int mlx5vf_cmd_query_vhca_migration_state(struct pci_dev *pdev, u16 vhca_id,
+int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
+int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
+int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
                                          size_t *state_size);
-int mlx5vf_cmd_get_vhca_id(struct pci_dev *pdev, u16 function_id, u16 *vhca_id);
-int mlx5vf_cmd_save_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev);
+int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
                               struct mlx5_vf_migration_file *migf);
-int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id,
+int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
                               struct mlx5_vf_migration_file *migf);
+void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work);
 #endif /* MLX5_VFIO_CMD_H */
index bbec5d2..0558d06 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/uaccess.h>
 #include <linux/vfio.h>
 #include <linux/sched/mm.h>
-#include <linux/vfio_pci_core.h>
 #include <linux/anon_inodes.h>
 
 #include "cmd.h"
 /* Arbitrary to prevent userspace from consuming endless memory */
 #define MAX_MIGRATION_SIZE (512*1024*1024)
 
-struct mlx5vf_pci_core_device {
-       struct vfio_pci_core_device core_device;
-       u16 vhca_id;
-       u8 migrate_cap:1;
-       u8 deferred_reset:1;
-       /* protect migration state */
-       struct mutex state_mutex;
-       enum vfio_device_mig_state mig_state;
-       /* protect the reset_done flow */
-       spinlock_t reset_lock;
-       struct mlx5_vf_migration_file *resuming_migf;
-       struct mlx5_vf_migration_file *saving_migf;
-};
+static struct mlx5vf_pci_core_device *mlx5vf_drvdata(struct pci_dev *pdev)
+{
+       struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
+
+       return container_of(core_device, struct mlx5vf_pci_core_device,
+                           core_device);
+}
 
 static struct page *
 mlx5vf_get_migration_page(struct mlx5_vf_migration_file *migf,
@@ -149,12 +142,22 @@ static ssize_t mlx5vf_save_read(struct file *filp, char __user *buf, size_t len,
                return -ESPIPE;
        pos = &filp->f_pos;
 
+       if (!(filp->f_flags & O_NONBLOCK)) {
+               if (wait_event_interruptible(migf->poll_wait,
+                            READ_ONCE(migf->total_length) || migf->is_err))
+                       return -ERESTARTSYS;
+       }
+
        mutex_lock(&migf->lock);
+       if ((filp->f_flags & O_NONBLOCK) && !READ_ONCE(migf->total_length)) {
+               done = -EAGAIN;
+               goto out_unlock;
+       }
        if (*pos > migf->total_length) {
                done = -EINVAL;
                goto out_unlock;
        }
-       if (migf->disabled) {
+       if (migf->disabled || migf->is_err) {
                done = -ENODEV;
                goto out_unlock;
        }
@@ -194,9 +197,28 @@ out_unlock:
        return done;
 }
 
+static __poll_t mlx5vf_save_poll(struct file *filp,
+                                struct poll_table_struct *wait)
+{
+       struct mlx5_vf_migration_file *migf = filp->private_data;
+       __poll_t pollflags = 0;
+
+       poll_wait(filp, &migf->poll_wait, wait);
+
+       mutex_lock(&migf->lock);
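+       /* Report readable with hangup on error/teardown, plain readable once data arrives */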
+       if (migf->disabled || migf->is_err)
+               pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+       else if (READ_ONCE(migf->total_length))
+               pollflags = EPOLLIN | EPOLLRDNORM;
+       mutex_unlock(&migf->lock);
+
+       return pollflags;
+}
+
 static const struct file_operations mlx5vf_save_fops = {
        .owner = THIS_MODULE,
        .read = mlx5vf_save_read,
+       .poll = mlx5vf_save_poll,
        .release = mlx5vf_release_file,
        .llseek = no_llseek,
 };
@@ -222,9 +244,11 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev)
 
        stream_open(migf->filp->f_inode, migf->filp);
        mutex_init(&migf->lock);
-
-       ret = mlx5vf_cmd_query_vhca_migration_state(
-               mvdev->core_device.pdev, mvdev->vhca_id, &migf->total_length);
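+       /*
+        * Set up the wait queue, async command context and cleanup work
+        * used by the async save flow.
+        */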
+       init_waitqueue_head(&migf->poll_wait);
+       mlx5_cmd_init_async_ctx(mvdev->mdev, &migf->async_ctx);
+       INIT_WORK(&migf->async_data.work, mlx5vf_mig_file_cleanup_cb);
+       ret = mlx5vf_cmd_query_vhca_migration_state(mvdev,
+                                                   &migf->total_length);
        if (ret)
                goto out_free;
 
@@ -233,8 +257,8 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev)
        if (ret)
                goto out_free;
 
-       ret = mlx5vf_cmd_save_vhca_state(mvdev->core_device.pdev,
-                                        mvdev->vhca_id, migf);
+       migf->mvdev = mvdev;
+       ret = mlx5vf_cmd_save_vhca_state(mvdev, migf);
        if (ret)
                goto out_free;
        return migf;
@@ -339,7 +363,7 @@ mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev)
        return migf;
 }
 
-static void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
+void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
 {
        if (mvdev->resuming_migf) {
                mlx5vf_disable_fd(mvdev->resuming_migf);
@@ -347,6 +371,8 @@ static void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
                mvdev->resuming_migf = NULL;
        }
        if (mvdev->saving_migf) {
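+               /* Quiesce the async save context and its cleanup work first */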
+               mlx5_cmd_cleanup_async_ctx(&mvdev->saving_migf->async_ctx);
+               cancel_work_sync(&mvdev->saving_migf->async_data.work);
                mlx5vf_disable_fd(mvdev->saving_migf);
                fput(mvdev->saving_migf->filp);
                mvdev->saving_migf = NULL;
@@ -361,8 +387,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
        int ret;
 
        if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) {
-               ret = mlx5vf_cmd_suspend_vhca(
-                       mvdev->core_device.pdev, mvdev->vhca_id,
+               ret = mlx5vf_cmd_suspend_vhca(mvdev,
                        MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER);
                if (ret)
                        return ERR_PTR(ret);
@@ -370,8 +395,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
        }
 
        if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
-               ret = mlx5vf_cmd_resume_vhca(
-                       mvdev->core_device.pdev, mvdev->vhca_id,
+               ret = mlx5vf_cmd_resume_vhca(mvdev,
                        MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER);
                if (ret)
                        return ERR_PTR(ret);
@@ -379,8 +403,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
        }
 
        if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
-               ret = mlx5vf_cmd_suspend_vhca(
-                       mvdev->core_device.pdev, mvdev->vhca_id,
+               ret = mlx5vf_cmd_suspend_vhca(mvdev,
                        MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR);
                if (ret)
                        return ERR_PTR(ret);
@@ -388,8 +411,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
        }
 
        if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) {
-               ret = mlx5vf_cmd_resume_vhca(
-                       mvdev->core_device.pdev, mvdev->vhca_id,
+               ret = mlx5vf_cmd_resume_vhca(mvdev,
                        MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR);
                if (ret)
                        return ERR_PTR(ret);
@@ -424,8 +446,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
        }
 
        if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
-               ret = mlx5vf_cmd_load_vhca_state(mvdev->core_device.pdev,
-                                                mvdev->vhca_id,
+               ret = mlx5vf_cmd_load_vhca_state(mvdev,
                                                 mvdev->resuming_migf);
                if (ret)
                        return ERR_PTR(ret);
@@ -444,7 +465,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
  * This function is called in all state_mutex unlock cases to
  * handle a 'deferred_reset' if exists.
  */
-static void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev)
+void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev)
 {
 again:
        spin_lock(&mvdev->reset_lock);
@@ -505,7 +526,7 @@ static int mlx5vf_pci_get_device_state(struct vfio_device *vdev,
 
 static void mlx5vf_pci_aer_reset_done(struct pci_dev *pdev)
 {
-       struct mlx5vf_pci_core_device *mvdev = dev_get_drvdata(&pdev->dev);
+       struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev);
 
        if (!mvdev->migrate_cap)
                return;
@@ -532,34 +553,16 @@ static int mlx5vf_pci_open_device(struct vfio_device *core_vdev)
        struct mlx5vf_pci_core_device *mvdev = container_of(
                core_vdev, struct mlx5vf_pci_core_device, core_device.vdev);
        struct vfio_pci_core_device *vdev = &mvdev->core_device;
-       int vf_id;
        int ret;
 
        ret = vfio_pci_core_enable(vdev);
        if (ret)
                return ret;
 
-       if (!mvdev->migrate_cap) {
-               vfio_pci_core_finish_enable(vdev);
-               return 0;
-       }
-
-       vf_id = pci_iov_vf_id(vdev->pdev);
-       if (vf_id < 0) {
-               ret = vf_id;
-               goto out_disable;
-       }
-
-       ret = mlx5vf_cmd_get_vhca_id(vdev->pdev, vf_id + 1, &mvdev->vhca_id);
-       if (ret)
-               goto out_disable;
-
-       mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+       if (mvdev->migrate_cap)
+               mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
        vfio_pci_core_finish_enable(vdev);
        return 0;
-out_disable:
-       vfio_pci_core_disable(vdev);
-       return ret;
 }
 
 static void mlx5vf_pci_close_device(struct vfio_device *core_vdev)
@@ -596,32 +599,15 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev,
        if (!mvdev)
                return -ENOMEM;
        vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops);
-
-       if (pdev->is_virtfn) {
-               struct mlx5_core_dev *mdev =
-                       mlx5_vf_get_core_dev(pdev);
-
-               if (mdev) {
-                       if (MLX5_CAP_GEN(mdev, migration)) {
-                               mvdev->migrate_cap = 1;
-                               mvdev->core_device.vdev.migration_flags =
-                                       VFIO_MIGRATION_STOP_COPY |
-                                       VFIO_MIGRATION_P2P;
-                               mutex_init(&mvdev->state_mutex);
-                               spin_lock_init(&mvdev->reset_lock);
-                       }
-                       mlx5_vf_put_core_dev(mdev);
-               }
-       }
-
+       mlx5vf_cmd_set_migratable(mvdev);
+       dev_set_drvdata(&pdev->dev, &mvdev->core_device);
        ret = vfio_pci_core_register_device(&mvdev->core_device);
        if (ret)
                goto out_free;
-
-       dev_set_drvdata(&pdev->dev, mvdev);
        return 0;
 
 out_free:
+       mlx5vf_cmd_remove_migratable(mvdev);
        vfio_pci_core_uninit_device(&mvdev->core_device);
        kfree(mvdev);
        return ret;
@@ -629,9 +615,10 @@ out_free:
 
 static void mlx5vf_pci_remove(struct pci_dev *pdev)
 {
-       struct mlx5vf_pci_core_device *mvdev = dev_get_drvdata(&pdev->dev);
+       struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev);
 
        vfio_pci_core_unregister_device(&mvdev->core_device);
+       mlx5vf_cmd_remove_migratable(mvdev);
        vfio_pci_core_uninit_device(&mvdev->core_device);
        kfree(mvdev);
 }
@@ -654,6 +641,7 @@ static struct pci_driver mlx5vf_pci_driver = {
        .probe = mlx5vf_pci_probe,
        .remove = mlx5vf_pci_remove,
        .err_handler = &mlx5vf_err_handlers,
+       .driver_managed_dma = true,
 };
 
 static void __exit mlx5vf_pci_cleanup(void)
index 2b04746..4d1a974 100644 (file)
@@ -151,10 +151,10 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                return -ENOMEM;
        vfio_pci_core_init_device(vdev, pdev, &vfio_pci_ops);
 
+       dev_set_drvdata(&pdev->dev, vdev);
        ret = vfio_pci_core_register_device(vdev);
        if (ret)
                goto out_free;
-       dev_set_drvdata(&pdev->dev, vdev);
        return 0;
 
 out_free:
@@ -174,10 +174,12 @@ static void vfio_pci_remove(struct pci_dev *pdev)
 
 static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
 {
+       struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);
+
        if (!enable_sriov)
                return -ENOENT;
 
-       return vfio_pci_core_sriov_configure(pdev, nr_virtfn);
+       return vfio_pci_core_sriov_configure(vdev, nr_virtfn);
 }
 
 static const struct pci_device_id vfio_pci_table[] = {
@@ -194,6 +196,7 @@ static struct pci_driver vfio_pci_driver = {
        .remove                 = vfio_pci_remove,
        .sriov_configure        = vfio_pci_sriov_configure,
        .err_handler            = &vfio_pci_core_err_handlers,
+       .driver_managed_dma     = true,
 };
 
 static void __init vfio_pci_fill_ids(void)
index 6e58b4b..9343f59 100644 (file)
@@ -402,11 +402,14 @@ bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev)
        u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
 
        /*
+        * Memory regions cannot be accessed if the device power state is D3.
+        *
         * SR-IOV VF memory enable is handled by the MSE bit in the
         * PF SR-IOV capability, there's therefore no need to trigger
         * faults based on the virtual value.
         */
-       return pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY);
+       return pdev->current_state < PCI_D3hot &&
+              (pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY));
 }
 
 /*
@@ -692,6 +695,22 @@ static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
        return 0;
 }
 
+/*
+ * Take all the locks required to protect the power-related variables, then
+ * invoke vfio_pci_set_power_state().
+ */
+static void vfio_lock_and_set_power_state(struct vfio_pci_core_device *vdev,
+                                         pci_power_t state)
+{
+       if (state >= PCI_D3hot)
+               vfio_pci_zap_and_down_write_memory_lock(vdev);
+       else
+               down_write(&vdev->memory_lock);
+
+       vfio_pci_set_power_state(vdev, state);
+       up_write(&vdev->memory_lock);
+}
+
 static int vfio_pm_config_write(struct vfio_pci_core_device *vdev, int pos,
                                int count, struct perm_bits *perm,
                                int offset, __le32 val)
@@ -718,7 +737,7 @@ static int vfio_pm_config_write(struct vfio_pci_core_device *vdev, int pos,
                        break;
                }
 
-               vfio_pci_set_power_state(vdev, state);
+               vfio_lock_and_set_power_state(vdev, state);
        }
 
        return count;
@@ -739,11 +758,28 @@ static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
        p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
 
        /*
+        * The guests can't process PME events. Any PME event that is
+        * generated will mostly be handled in the host, and the host will
+        * clear PME_STATUS. So virtualize the PME_Support bits.
+        * The vconfig bits will be cleared during device capability
+        * initialization.
+        */
+       p_setw(perm, PCI_PM_PMC, PCI_PM_CAP_PME_MASK, NO_WRITE);
+
+       /*
         * Power management is defined *per function*, so we can let
         * the user change power state, but we trap and initiate the
         * change ourselves, so the state bits are read-only.
+        *
+        * The guest can't process PME from D3cold, so virtualize the
+        * PME_Status and PME_En bits. The vconfig bits will be cleared
+        * during device capability initialization.
         */
-       p_setd(perm, PCI_PM_CTRL, NO_VIRT, ~PCI_PM_CTRL_STATE_MASK);
+       p_setd(perm, PCI_PM_CTRL,
+              PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS,
+              ~(PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS |
+                PCI_PM_CTRL_STATE_MASK));
+
        return 0;
 }
 
@@ -1412,6 +1448,17 @@ static int vfio_ext_cap_len(struct vfio_pci_core_device *vdev, u16 ecap, u16 epo
        return 0;
 }
 
+static void vfio_update_pm_vconfig_bytes(struct vfio_pci_core_device *vdev,
+                                        int offset)
+{
+       __le16 *pmc = (__le16 *)&vdev->vconfig[offset + PCI_PM_PMC];
+       __le16 *ctrl = (__le16 *)&vdev->vconfig[offset + PCI_PM_CTRL];
+
+       /* Clear vconfig PME_Support, PME_Status, and PME_En bits */
+       *pmc &= ~cpu_to_le16(PCI_PM_CAP_PME_MASK);
+       *ctrl &= ~cpu_to_le16(PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS);
+}
+
 static int vfio_fill_vconfig_bytes(struct vfio_pci_core_device *vdev,
                                   int offset, int size)
 {
@@ -1535,6 +1582,9 @@ static int vfio_cap_init(struct vfio_pci_core_device *vdev)
                if (ret)
                        return ret;
 
+               if (cap == PCI_CAP_ID_PM)
+                       vfio_update_pm_vconfig_bytes(vdev, pos);
+
                prev = &vdev->vconfig[pos + PCI_CAP_LIST_NEXT];
                pos = next;
                caps++;
index 06b6f35..a0d69dd 100644 (file)
@@ -156,7 +156,7 @@ no_mmap:
 }
 
 struct vfio_pci_group_info;
-static bool vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set);
+static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set);
 static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
                                      struct vfio_pci_group_info *groups);
 
@@ -217,6 +217,10 @@ int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t stat
        bool needs_restore = false, needs_save = false;
        int ret;
 
+       /* Prevent changing power state for PFs with VFs enabled */
+       if (pci_num_vf(pdev) && state > PCI_D0)
+               return -EBUSY;
+
        if (vdev->needs_pm_restore) {
                if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
                        pci_save_state(pdev);
@@ -255,6 +259,17 @@ int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t stat
        return ret;
 }
 
+/*
+ * A dev_pm_ops structure needs to be provided for pci-driver runtime PM to
+ * work, so use a structure without any callbacks.
+ *
+ * The pci-driver core runtime PM routines always save the device state
+ * before going into the suspended state. If the device goes into a low power
+ * state with only runtime PM ops, then no explicit handling is needed for
+ * devices which have NoSoftRst-.
+ */
+static const struct dev_pm_ops vfio_pci_core_pm_ops = { };
+
 int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
 {
        struct pci_dev *pdev = vdev->pdev;
@@ -262,21 +277,23 @@ int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
        u16 cmd;
        u8 msix_pos;
 
-       vfio_pci_set_power_state(vdev, PCI_D0);
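+       /* Take a runtime PM reference so the device stays resumed while it is open */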
+       if (!disable_idle_d3) {
+               ret = pm_runtime_resume_and_get(&pdev->dev);
+               if (ret < 0)
+                       return ret;
+       }
 
        /* Don't allow our initial saved state to include busmaster */
        pci_clear_master(pdev);
 
        ret = pci_enable_device(pdev);
        if (ret)
-               return ret;
+               goto out_power;
 
        /* If reset fails because of the device lock, fail this path entirely */
        ret = pci_try_reset_function(pdev);
-       if (ret == -EAGAIN) {
-               pci_disable_device(pdev);
-               return ret;
-       }
+       if (ret == -EAGAIN)
+               goto out_disable_device;
 
        vdev->reset_works = !ret;
        pci_save_state(pdev);
@@ -300,12 +317,8 @@ int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
        }
 
        ret = vfio_config_init(vdev);
-       if (ret) {
-               kfree(vdev->pci_saved_state);
-               vdev->pci_saved_state = NULL;
-               pci_disable_device(pdev);
-               return ret;
-       }
+       if (ret)
+               goto out_free_state;
 
        msix_pos = pdev->msix_cap;
        if (msix_pos) {
@@ -326,6 +339,16 @@ int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
 
 
        return 0;
+
+out_free_state:
+       kfree(vdev->pci_saved_state);
+       vdev->pci_saved_state = NULL;
+out_disable_device:
+       pci_disable_device(pdev);
+out_power:
+       if (!disable_idle_d3)
+               pm_runtime_put(&pdev->dev);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_enable);
 
@@ -433,8 +456,11 @@ void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
 out:
        pci_disable_device(pdev);
 
-       if (!vfio_pci_dev_set_try_reset(vdev->vdev.dev_set) && !disable_idle_d3)
-               vfio_pci_set_power_state(vdev, PCI_D3hot);
+       vfio_pci_dev_set_try_reset(vdev->vdev.dev_set);
+
+       /* Put the pm-runtime usage counter acquired during enable */
+       if (!disable_idle_d3)
+               pm_runtime_put(&pdev->dev);
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_disable);
 
@@ -556,7 +582,7 @@ static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
 
 struct vfio_pci_group_info {
        int count;
-       struct vfio_group **groups;
+       struct file **files;
 };
 
 static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
@@ -1018,10 +1044,10 @@ reset_info_exit:
        } else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
                struct vfio_pci_hot_reset hdr;
                int32_t *group_fds;
-               struct vfio_group **groups;
+               struct file **files;
                struct vfio_pci_group_info info;
                bool slot = false;
-               int group_idx, count = 0, ret = 0;
+               int file_idx, count = 0, ret = 0;
 
                minsz = offsetofend(struct vfio_pci_hot_reset, count);
 
@@ -1054,17 +1080,17 @@ reset_info_exit:
                        return -EINVAL;
 
                group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
-               groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
-               if (!group_fds || !groups) {
+               files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL);
+               if (!group_fds || !files) {
                        kfree(group_fds);
-                       kfree(groups);
+                       kfree(files);
                        return -ENOMEM;
                }
 
                if (copy_from_user(group_fds, (void __user *)(arg + minsz),
                                   hdr.count * sizeof(*group_fds))) {
                        kfree(group_fds);
-                       kfree(groups);
+                       kfree(files);
                        return -EFAULT;
                }
 
@@ -1073,22 +1099,22 @@ reset_info_exit:
                 * user interface and store the group and iommu ID.  This
                 * ensures the group is held across the reset.
                 */
-               for (group_idx = 0; group_idx < hdr.count; group_idx++) {
-                       struct vfio_group *group;
-                       struct fd f = fdget(group_fds[group_idx]);
-                       if (!f.file) {
+               for (file_idx = 0; file_idx < hdr.count; file_idx++) {
+                       struct file *file = fget(group_fds[file_idx]);
+
+                       if (!file) {
                                ret = -EBADF;
                                break;
                        }
 
-                       group = vfio_group_get_external_user(f.file);
-                       fdput(f);
-                       if (IS_ERR(group)) {
-                               ret = PTR_ERR(group);
+                       /* Ensure the FD is a vfio group FD. */
+                       if (!vfio_file_iommu_group(file)) {
+                               fput(file);
+                               ret = -EINVAL;
                                break;
                        }
 
-                       groups[group_idx] = group;
+                       files[file_idx] = file;
                }
 
                kfree(group_fds);
@@ -1098,15 +1124,15 @@ reset_info_exit:
                        goto hot_reset_release;
 
                info.count = hdr.count;
-               info.groups = groups;
+               info.files = files;
 
                ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);
 
 hot_reset_release:
-               for (group_idx--; group_idx >= 0; group_idx--)
-                       vfio_group_put_external_user(groups[group_idx]);
+               for (file_idx--; file_idx >= 0; file_idx--)
+                       fput(files[file_idx]);
 
-               kfree(groups);
+               kfree(files);
                return ret;
        } else if (cmd == VFIO_DEVICE_IOEVENTFD) {
                struct vfio_device_ioeventfd ioeventfd;
@@ -1819,8 +1845,13 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_uninit_device);
 int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
 {
        struct pci_dev *pdev = vdev->pdev;
+       struct device *dev = &pdev->dev;
        int ret;
 
+       /* Drivers must set the vfio_pci_core_device as their drvdata */
+       if (WARN_ON(vdev != dev_get_drvdata(dev)))
+               return -EINVAL;
+
        if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
                return -EINVAL;
 
@@ -1860,19 +1891,21 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
 
        vfio_pci_probe_power_state(vdev);
 
-       if (!disable_idle_d3) {
-               /*
-                * pci-core sets the device power state to an unknown value at
-                * bootup and after being removed from a driver.  The only
-                * transition it allows from this unknown state is to D0, which
-                * typically happens when a driver calls pci_enable_device().
-                * We're not ready to enable the device yet, but we do want to
-                * be able to get to D3.  Therefore first do a D0 transition
-                * before going to D3.
-                */
-               vfio_pci_set_power_state(vdev, PCI_D0);
-               vfio_pci_set_power_state(vdev, PCI_D3hot);
-       }
+       /*
+        * pci-core sets the device power state to an unknown value at
+        * bootup and after being removed from a driver.  The only
+        * transition it allows from this unknown state is to D0, which
+        * typically happens when a driver calls pci_enable_device().
+        * We're not ready to enable the device yet, but we do want to
+        * be able to get to D3.  Therefore first do a D0 transition
+        * before enabling runtime PM.
+        */
+       vfio_pci_set_power_state(vdev, PCI_D0);
+
+       dev->driver->pm = &vfio_pci_core_pm_ops;
+       pm_runtime_allow(dev);
+       if (!disable_idle_d3)
+               pm_runtime_put(dev);
 
        ret = vfio_register_group_dev(&vdev->vdev);
        if (ret)
@@ -1881,7 +1914,9 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
 
 out_power:
        if (!disable_idle_d3)
-               vfio_pci_set_power_state(vdev, PCI_D0);
+               pm_runtime_get_noresume(dev);
+
+       pm_runtime_forbid(dev);
 out_vf:
        vfio_pci_vf_uninit(vdev);
        return ret;
@@ -1890,9 +1925,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_register_device);
 
 void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
 {
-       struct pci_dev *pdev = vdev->pdev;
-
-       vfio_pci_core_sriov_configure(pdev, 0);
+       vfio_pci_core_sriov_configure(vdev, 0);
 
        vfio_unregister_group_dev(&vdev->vdev);
 
@@ -1900,21 +1933,16 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
        vfio_pci_vga_uninit(vdev);
 
        if (!disable_idle_d3)
-               vfio_pci_set_power_state(vdev, PCI_D0);
+               pm_runtime_get_noresume(&vdev->pdev->dev);
+
+       pm_runtime_forbid(&vdev->pdev->dev);
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_unregister_device);
 
 pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
 {
-       struct vfio_pci_core_device *vdev;
-       struct vfio_device *device;
-
-       device = vfio_device_get_from_dev(&pdev->dev);
-       if (device == NULL)
-               return PCI_ERS_RESULT_DISCONNECT;
-
-       vdev = container_of(device, struct vfio_pci_core_device, vdev);
+       struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);
 
        mutex_lock(&vdev->igate);
 
@@ -1923,26 +1951,18 @@ pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
 
        mutex_unlock(&vdev->igate);
 
-       vfio_device_put(device);
-
        return PCI_ERS_RESULT_CAN_RECOVER;
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);
 
-int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
+int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
+                                 int nr_virtfn)
 {
-       struct vfio_pci_core_device *vdev;
-       struct vfio_device *device;
+       struct pci_dev *pdev = vdev->pdev;
        int ret = 0;
 
        device_lock_assert(&pdev->dev);
 
-       device = vfio_device_get_from_dev(&pdev->dev);
-       if (!device)
-               return -ENODEV;
-
-       vdev = container_of(device, struct vfio_pci_core_device, vdev);
-
        if (nr_virtfn) {
                mutex_lock(&vfio_pci_sriov_pfs_mutex);
                /*
@@ -1957,22 +1977,42 @@ int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
                }
                list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs);
                mutex_unlock(&vfio_pci_sriov_pfs_mutex);
-               ret = pci_enable_sriov(pdev, nr_virtfn);
+
+               /*
+                * The PF power state should always be higher than the VF power
+                * state. The PF can be in a low power state either through
+                * runtime power management (when there is no user) or through
+                * a PCI_PM_CTRL register write by the user. If the PF is in a
+                * low power state, change the power state to D0 first before
+                * enabling SR-IOV. Also, this function can be called at any
+                * time, and a userspace PCI_PM_CTRL write can race against
+                * this code path, so protect it with 'memory_lock'.
+                */
+               ret = pm_runtime_resume_and_get(&pdev->dev);
                if (ret)
                        goto out_del;
-               ret = nr_virtfn;
-               goto out_put;
+
+               down_write(&vdev->memory_lock);
+               vfio_pci_set_power_state(vdev, PCI_D0);
+               ret = pci_enable_sriov(pdev, nr_virtfn);
+               up_write(&vdev->memory_lock);
+               if (ret) {
+                       pm_runtime_put(&pdev->dev);
+                       goto out_del;
+               }
+               return nr_virtfn;
        }
 
-       pci_disable_sriov(pdev);
+       if (pci_num_vf(pdev)) {
+               pci_disable_sriov(pdev);
+               pm_runtime_put(&pdev->dev);
+       }
 
 out_del:
        mutex_lock(&vfio_pci_sriov_pfs_mutex);
        list_del_init(&vdev->sriov_pfs_item);
 out_unlock:
        mutex_unlock(&vfio_pci_sriov_pfs_mutex);
-out_put:
-       vfio_device_put(device);
        return ret;
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
@@ -1988,7 +2028,7 @@ static bool vfio_dev_in_groups(struct vfio_pci_core_device *vdev,
        unsigned int i;
 
        for (i = 0; i < groups->count; i++)
-               if (groups->groups[i] == vdev->vdev.group)
+               if (vfio_file_has_dev(groups->files[i], &vdev->vdev))
                        return true;
        return false;
 }
@@ -2041,6 +2081,27 @@ vfio_pci_dev_set_resettable(struct vfio_device_set *dev_set)
        return pdev;
 }
 
+static int vfio_pci_dev_set_pm_runtime_get(struct vfio_device_set *dev_set)
+{
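+       /* Resume every device in the dev_set; drop references already taken on failure */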
+       struct vfio_pci_core_device *cur;
+       int ret;
+
+       list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
+               ret = pm_runtime_resume_and_get(&cur->pdev->dev);
+               if (ret)
+                       goto unwind;
+       }
+
+       return 0;
+
+unwind:
+       list_for_each_entry_continue_reverse(cur, &dev_set->device_list,
+                                            vdev.dev_set_list)
+               pm_runtime_put(&cur->pdev->dev);
+
+       return ret;
+}
+
 /*
  * We need to get memory_lock for each device, but devices can share mmap_lock,
  * therefore we need to zap and hold the vma_lock for each device, and only then
@@ -2147,43 +2208,38 @@ static bool vfio_pci_dev_set_needs_reset(struct vfio_device_set *dev_set)
  *  - At least one of the affected devices is marked dirty via
  *    needs_reset (such as by lack of FLR support)
  * Then attempt to perform that bus or slot reset.
- * Returns true if the dev_set was reset.
  */
-static bool vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set)
+static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set)
 {
        struct vfio_pci_core_device *cur;
        struct pci_dev *pdev;
-       int ret;
+       bool reset_done = false;
 
        if (!vfio_pci_dev_set_needs_reset(dev_set))
-               return false;
+               return;
 
        pdev = vfio_pci_dev_set_resettable(dev_set);
        if (!pdev)
-               return false;
+               return;
 
        /*
-        * The pci_reset_bus() will reset all the devices in the bus.
-        * The power state can be non-D0 for some of the devices in the bus.
-        * For these devices, the pci_reset_bus() will internally set
-        * the power state to D0 without vfio driver involvement.
-        * For the devices which have NoSoftRst-, the reset function can
-        * cause the PCI config space reset without restoring the original
-        * state (saved locally in 'vdev->pm_save').
+        * Some of the devices in the bus can be runtime suspended. Increment
+        * the usage count for all the devices in the dev_set before the reset
+        * and decrement it again once the reset is done.
         */
-       list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
-               vfio_pci_set_power_state(cur, PCI_D0);
+       if (!disable_idle_d3 && vfio_pci_dev_set_pm_runtime_get(dev_set))
+               return;
 
-       ret = pci_reset_bus(pdev);
-       if (ret)
-               return false;
+       if (!pci_reset_bus(pdev))
+               reset_done = true;
 
        list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
-               cur->needs_reset = false;
+               if (reset_done)
+                       cur->needs_reset = false;
+
                if (!disable_idle_d3)
-                       vfio_pci_set_power_state(cur, PCI_D3hot);
+                       pm_runtime_put(&cur->pdev->dev);
        }
-       return true;
 }
 
 void vfio_pci_core_set_params(bool is_nointxmask, bool is_disable_vga,
index badfffe..1aaa4f7 100644 (file)
@@ -95,6 +95,7 @@ static struct amba_driver vfio_amba_driver = {
                .name = "vfio-amba",
                .owner = THIS_MODULE,
        },
+       .driver_managed_dma = true,
 };
 
 module_amba_driver(vfio_amba_driver);
index 68a1c87..04f40c5 100644 (file)
@@ -76,6 +76,7 @@ static struct platform_driver vfio_platform_driver = {
        .driver = {
                .name   = "vfio-platform",
        },
+       .driver_managed_dma = true,
 };
 
 module_platform_driver(vfio_platform_driver);
index a455501..61e71c1 100644 (file)
@@ -62,30 +62,22 @@ struct vfio_container {
        bool                            noiommu;
 };
 
-struct vfio_unbound_dev {
-       struct device                   *dev;
-       struct list_head                unbound_next;
-};
-
 struct vfio_group {
        struct device                   dev;
        struct cdev                     cdev;
        refcount_t                      users;
-       atomic_t                        container_users;
+       unsigned int                    container_users;
        struct iommu_group              *iommu_group;
        struct vfio_container           *container;
        struct list_head                device_list;
        struct mutex                    device_lock;
-       struct notifier_block           nb;
        struct list_head                vfio_next;
        struct list_head                container_next;
-       struct list_head                unbound_list;
-       struct mutex                    unbound_lock;
-       atomic_t                        opened;
-       wait_queue_head_t               container_q;
        enum vfio_group_type            type;
        unsigned int                    dev_counter;
+       struct rw_semaphore             group_rwsem;
        struct kvm                      *kvm;
+       struct file                     *opened_file;
        struct blocking_notifier_head   notifier;
 };
 
@@ -281,8 +273,6 @@ void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
 }
 EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
 
-static int vfio_iommu_group_notifier(struct notifier_block *nb,
-                                    unsigned long action, void *data);
 static void vfio_group_get(struct vfio_group *group);
 
 /*
@@ -340,16 +330,8 @@ vfio_group_get_from_iommu(struct iommu_group *iommu_group)
 static void vfio_group_release(struct device *dev)
 {
        struct vfio_group *group = container_of(dev, struct vfio_group, dev);
-       struct vfio_unbound_dev *unbound, *tmp;
-
-       list_for_each_entry_safe(unbound, tmp,
-                                &group->unbound_list, unbound_next) {
-               list_del(&unbound->unbound_next);
-               kfree(unbound);
-       }
 
        mutex_destroy(&group->device_lock);
-       mutex_destroy(&group->unbound_lock);
        iommu_group_put(group->iommu_group);
        ida_free(&vfio.group_ida, MINOR(group->dev.devt));
        kfree(group);
@@ -379,11 +361,9 @@ static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group,
        group->cdev.owner = THIS_MODULE;
 
        refcount_set(&group->users, 1);
+       init_rwsem(&group->group_rwsem);
        INIT_LIST_HEAD(&group->device_list);
        mutex_init(&group->device_lock);
-       INIT_LIST_HEAD(&group->unbound_list);
-       mutex_init(&group->unbound_lock);
-       init_waitqueue_head(&group->container_q);
        group->iommu_group = iommu_group;
        /* put in vfio_group_release() */
        iommu_group_ref_get(iommu_group);
@@ -412,13 +392,6 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
                goto err_put;
        }
 
-       group->nb.notifier_call = vfio_iommu_group_notifier;
-       err = iommu_group_register_notifier(iommu_group, &group->nb);
-       if (err) {
-               ret = ERR_PTR(err);
-               goto err_put;
-       }
-
        mutex_lock(&vfio.group_lock);
 
        /* Did we race creating this group? */
@@ -439,7 +412,6 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
 
 err_unlock:
        mutex_unlock(&vfio.group_lock);
-       iommu_group_unregister_notifier(group->iommu_group, &group->nb);
 err_put:
        put_device(&group->dev);
        return ret;
@@ -457,14 +429,13 @@ static void vfio_group_put(struct vfio_group *group)
         * properly hold the group reference.
         */
        WARN_ON(!list_empty(&group->device_list));
-       WARN_ON(atomic_read(&group->container_users));
+       WARN_ON(group->container || group->container_users);
        WARN_ON(group->notifier.head);
 
        list_del(&group->vfio_next);
        cdev_device_del(&group->cdev, &group->dev);
        mutex_unlock(&vfio.group_lock);
 
-       iommu_group_unregister_notifier(group->iommu_group, &group->nb);
        put_device(&group->dev);
 }
 
@@ -473,31 +444,15 @@ static void vfio_group_get(struct vfio_group *group)
        refcount_inc(&group->users);
 }
 
-static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
-{
-       struct iommu_group *iommu_group;
-       struct vfio_group *group;
-
-       iommu_group = iommu_group_get(dev);
-       if (!iommu_group)
-               return NULL;
-
-       group = vfio_group_get_from_iommu(iommu_group);
-       iommu_group_put(iommu_group);
-
-       return group;
-}
-
 /*
  * Device objects - create, release, get, put, search
  */
 /* Device reference always implies a group reference */
-void vfio_device_put(struct vfio_device *device)
+static void vfio_device_put(struct vfio_device *device)
 {
        if (refcount_dec_and_test(&device->refcount))
                complete(&device->comp);
 }
-EXPORT_SYMBOL_GPL(vfio_device_put);
 
 static bool vfio_device_try_get(struct vfio_device *device)
 {
@@ -521,175 +476,6 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
 }
 
 /*
- * Some drivers, like pci-stub, are only used to prevent other drivers from
- * claiming a device and are therefore perfectly legitimate for a user owned
- * group.  The pci-stub driver has no dependencies on DMA or the IOVA mapping
- * of the device, but it does prevent the user from having direct access to
- * the device, which is useful in some circumstances.
- *
- * We also assume that we can include PCI interconnect devices, ie. bridges.
- * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge
- * then all of the downstream devices will be part of the same IOMMU group as
- * the bridge.  Thus, if placing the bridge into the user owned IOVA space
- * breaks anything, it only does so for user owned devices downstream.  Note
- * that error notification via MSI can be affected for platforms that handle
- * MSI within the same IOVA space as DMA.
- */
-static const char * const vfio_driver_allowed[] = { "pci-stub" };
-
-static bool vfio_dev_driver_allowed(struct device *dev,
-                                   struct device_driver *drv)
-{
-       if (dev_is_pci(dev)) {
-               struct pci_dev *pdev = to_pci_dev(dev);
-
-               if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
-                       return true;
-       }
-
-       return match_string(vfio_driver_allowed,
-                           ARRAY_SIZE(vfio_driver_allowed),
-                           drv->name) >= 0;
-}
-
-/*
- * A vfio group is viable for use by userspace if all devices are in
- * one of the following states:
- *  - driver-less
- *  - bound to a vfio driver
- *  - bound to an otherwise allowed driver
- *  - a PCI interconnect device
- *
- * We use two methods to determine whether a device is bound to a vfio
- * driver.  The first is to test whether the device exists in the vfio
- * group.  The second is to test if the device exists on the group
- * unbound_list, indicating it's in the middle of transitioning from
- * a vfio driver to driver-less.
- */
-static int vfio_dev_viable(struct device *dev, void *data)
-{
-       struct vfio_group *group = data;
-       struct vfio_device *device;
-       struct device_driver *drv = READ_ONCE(dev->driver);
-       struct vfio_unbound_dev *unbound;
-       int ret = -EINVAL;
-
-       mutex_lock(&group->unbound_lock);
-       list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
-               if (dev == unbound->dev) {
-                       ret = 0;
-                       break;
-               }
-       }
-       mutex_unlock(&group->unbound_lock);
-
-       if (!ret || !drv || vfio_dev_driver_allowed(dev, drv))
-               return 0;
-
-       device = vfio_group_get_device(group, dev);
-       if (device) {
-               vfio_device_put(device);
-               return 0;
-       }
-
-       return ret;
-}
-
-/*
- * Async device support
- */
-static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
-{
-       struct vfio_device *device;
-
-       /* Do we already know about it?  We shouldn't */
-       device = vfio_group_get_device(group, dev);
-       if (WARN_ON_ONCE(device)) {
-               vfio_device_put(device);
-               return 0;
-       }
-
-       /* Nothing to do for idle groups */
-       if (!atomic_read(&group->container_users))
-               return 0;
-
-       /* TODO Prevent device auto probing */
-       dev_WARN(dev, "Device added to live group %d!\n",
-                iommu_group_id(group->iommu_group));
-
-       return 0;
-}
-
-static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
-{
-       /* We don't care what happens when the group isn't in use */
-       if (!atomic_read(&group->container_users))
-               return 0;
-
-       return vfio_dev_viable(dev, group);
-}
-
-static int vfio_iommu_group_notifier(struct notifier_block *nb,
-                                    unsigned long action, void *data)
-{
-       struct vfio_group *group = container_of(nb, struct vfio_group, nb);
-       struct device *dev = data;
-       struct vfio_unbound_dev *unbound;
-
-       switch (action) {
-       case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
-               vfio_group_nb_add_dev(group, dev);
-               break;
-       case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
-               /*
-                * Nothing to do here.  If the device is in use, then the
-                * vfio sub-driver should block the remove callback until
-                * it is unused.  If the device is unused or attached to a
-                * stub driver, then it should be released and we don't
-                * care that it will be going away.
-                */
-               break;
-       case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
-               dev_dbg(dev, "%s: group %d binding to driver\n", __func__,
-                       iommu_group_id(group->iommu_group));
-               break;
-       case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
-               dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__,
-                       iommu_group_id(group->iommu_group), dev->driver->name);
-               BUG_ON(vfio_group_nb_verify(group, dev));
-               break;
-       case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
-               dev_dbg(dev, "%s: group %d unbinding from driver %s\n",
-                       __func__, iommu_group_id(group->iommu_group),
-                       dev->driver->name);
-               break;
-       case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
-               dev_dbg(dev, "%s: group %d unbound from driver\n", __func__,
-                       iommu_group_id(group->iommu_group));
-               /*
-                * XXX An unbound device in a live group is ok, but we'd
-                * really like to avoid the above BUG_ON by preventing other
-                * drivers from binding to it.  Once that occurs, we have to
-                * stop the system to maintain isolation.  At a minimum, we'd
-                * want a toggle to disable driver auto probe for this device.
-                */
-
-               mutex_lock(&group->unbound_lock);
-               list_for_each_entry(unbound,
-                                   &group->unbound_list, unbound_next) {
-                       if (dev == unbound->dev) {
-                               list_del(&unbound->unbound_next);
-                               kfree(unbound);
-                               break;
-                       }
-               }
-               mutex_unlock(&group->unbound_lock);
-               break;
-       }
-       return NOTIFY_OK;
-}
-
-/*
  * VFIO driver API
  */
 void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
@@ -745,11 +531,11 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
 
        iommu_group = iommu_group_get(dev);
 #ifdef CONFIG_VFIO_NOIOMMU
-       if (!iommu_group && noiommu && !iommu_present(dev->bus)) {
+       if (!iommu_group && noiommu) {
                /*
                 * With noiommu enabled, create an IOMMU group for devices that
-                * don't already have one and don't have an iommu_ops on their
-                * bus.  Taint the kernel because we're about to give a DMA
+                * don't already have one, implying no IOMMU hardware/driver
+                * exists.  Taint the kernel because we're about to give a DMA
                 * capable device to a user without IOMMU protection.
                 */
                group = vfio_noiommu_group_alloc(dev, VFIO_NO_IOMMU);
@@ -815,6 +601,13 @@ static int __vfio_register_dev(struct vfio_device *device,
 
 int vfio_register_group_dev(struct vfio_device *device)
 {
+       /*
+        * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
+        * restore cache coherency.
+        */
+       if (!iommu_capable(device->dev->bus, IOMMU_CAP_CACHE_COHERENCY))
+               return -EINVAL;
+
        return __vfio_register_dev(device,
                vfio_group_find_or_alloc(device->dev));
 }
@@ -831,29 +624,6 @@ int vfio_register_emulated_iommu_dev(struct vfio_device *device)
 }
 EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev);
 
-/*
- * Get a reference to the vfio_device for a device.  Even if the
- * caller thinks they own the device, they could be racing with a
- * release call path, so we can't trust drvdata for the shortcut.
- * Go the long way around, from the iommu_group to the vfio_group
- * to the vfio_device.
- */
-struct vfio_device *vfio_device_get_from_dev(struct device *dev)
-{
-       struct vfio_group *group;
-       struct vfio_device *device;
-
-       group = vfio_group_get_from_dev(dev);
-       if (!group)
-               return NULL;
-
-       device = vfio_group_get_device(group, dev);
-       vfio_group_put(group);
-
-       return device;
-}
-EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
-
 static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
                                                     char *buf)
 {
@@ -889,29 +659,10 @@ static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
 void vfio_unregister_group_dev(struct vfio_device *device)
 {
        struct vfio_group *group = device->group;
-       struct vfio_unbound_dev *unbound;
        unsigned int i = 0;
        bool interrupted = false;
        long rc;
 
-       /*
-        * When the device is removed from the group, the group suddenly
-        * becomes non-viable; the device has a driver (until the unbind
-        * completes), but it's not present in the group.  This is bad news
-        * for any external users that need to re-acquire a group reference
-        * in order to match and release their existing reference.  To
-        * solve this, we track such devices on the unbound_list to bridge
-        * the gap until they're fully unbound.
-        */
-       unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
-       if (unbound) {
-               unbound->dev = device->dev;
-               mutex_lock(&group->unbound_lock);
-               list_add(&unbound->unbound_next, &group->unbound_list);
-               mutex_unlock(&group->unbound_lock);
-       }
-       WARN_ON(!unbound);
-
        vfio_device_put(device);
        rc = try_wait_for_completion(&device->comp);
        while (rc <= 0) {
@@ -940,23 +691,6 @@ void vfio_unregister_group_dev(struct vfio_device *device)
        group->dev_counter--;
        mutex_unlock(&group->device_lock);
 
-       /*
-        * In order to support multiple devices per group, devices can be
-        * plucked from the group while other devices in the group are still
-        * in use.  The container persists with this group and those remaining
-        * devices still attached.  If the user creates an isolation violation
-        * by binding this device to another driver while the group is still in
-        * use, that's their fault.  However, in the case of removing the last,
-        * or potentially the only, device in the group there can be no other
-        * in-use devices in the group.  The user has done their due diligence
-        * and we should lay no claims to those devices.  In order to do that,
-        * we need to make sure the group is detached from the container.
-        * Without this stall, we're potentially racing with a user process
-        * that may attempt to immediately bind this device to another driver.
-        */
-       if (list_empty(&group->device_list))
-               wait_event(group->container_q, !group->container);
-
        if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU)
                iommu_group_remove_device(device->dev);
 
@@ -1191,6 +925,8 @@ static void __vfio_group_unset_container(struct vfio_group *group)
        struct vfio_container *container = group->container;
        struct vfio_iommu_driver *driver;
 
+       lockdep_assert_held_write(&group->group_rwsem);
+
        down_write(&container->group_lock);
 
        driver = container->iommu_driver;
@@ -1198,8 +934,11 @@ static void __vfio_group_unset_container(struct vfio_group *group)
                driver->ops->detach_group(container->iommu_data,
                                          group->iommu_group);
 
+       if (group->type == VFIO_IOMMU)
+               iommu_group_release_dma_owner(group->iommu_group);
+
        group->container = NULL;
-       wake_up(&group->container_q);
+       group->container_users = 0;
        list_del(&group->container_next);
 
        /* Detaching the last group deprivileges a container, remove iommu */
@@ -1223,30 +962,16 @@ static void __vfio_group_unset_container(struct vfio_group *group)
  */
 static int vfio_group_unset_container(struct vfio_group *group)
 {
-       int users = atomic_cmpxchg(&group->container_users, 1, 0);
+       lockdep_assert_held_write(&group->group_rwsem);
 
-       if (!users)
+       if (!group->container)
                return -EINVAL;
-       if (users != 1)
+       if (group->container_users != 1)
                return -EBUSY;
-
        __vfio_group_unset_container(group);
-
        return 0;
 }
 
-/*
- * When removing container users, anything that removes the last user
- * implicitly removes the group from the container.  That is, if the
- * group file descriptor is closed, as well as any device file descriptors,
- * the group is free.
- */
-static void vfio_group_try_dissolve_container(struct vfio_group *group)
-{
-       if (0 == atomic_dec_if_positive(&group->container_users))
-               __vfio_group_unset_container(group);
-}
-
 static int vfio_group_set_container(struct vfio_group *group, int container_fd)
 {
        struct fd f;
@@ -1254,7 +979,9 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
        struct vfio_iommu_driver *driver;
        int ret = 0;
 
-       if (atomic_read(&group->container_users))
+       lockdep_assert_held_write(&group->group_rwsem);
+
+       if (group->container || WARN_ON(group->container_users))
                return -EINVAL;
 
        if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
@@ -1282,22 +1009,32 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
                goto unlock_out;
        }
 
+       if (group->type == VFIO_IOMMU) {
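+               /* Claim exclusive DMA ownership of the group for this container fd */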
+               ret = iommu_group_claim_dma_owner(group->iommu_group, f.file);
+               if (ret)
+                       goto unlock_out;
+       }
+
        driver = container->iommu_driver;
        if (driver) {
                ret = driver->ops->attach_group(container->iommu_data,
                                                group->iommu_group,
                                                group->type);
-               if (ret)
+               if (ret) {
+                       if (group->type == VFIO_IOMMU)
+                               iommu_group_release_dma_owner(
+                                       group->iommu_group);
                        goto unlock_out;
+               }
        }
 
        group->container = container;
+       group->container_users = 1;
        container->noiommu = (group->type == VFIO_NO_IOMMU);
        list_add(&group->container_next, &container->group_list);
 
        /* Get a reference on the container and mark a user within the group */
        vfio_container_get(container);
-       atomic_inc(&group->container_users);
 
 unlock_out:
        up_write(&container->group_lock);
@@ -1305,60 +1042,74 @@ unlock_out:
        return ret;
 }
 
-static bool vfio_group_viable(struct vfio_group *group)
+static const struct file_operations vfio_device_fops;
+
+/* true if the vfio_device has open_device() called but not close_device() */
+static bool vfio_assert_device_open(struct vfio_device *device)
 {
-       return (iommu_group_for_each_dev(group->iommu_group,
-                                        group, vfio_dev_viable) == 0);
+       return !WARN_ON_ONCE(!READ_ONCE(device->open_count));
 }
 
-static int vfio_group_add_container_user(struct vfio_group *group)
+static int vfio_device_assign_container(struct vfio_device *device)
 {
-       if (!atomic_inc_not_zero(&group->container_users))
+       struct vfio_group *group = device->group;
+
+       lockdep_assert_held_write(&group->group_rwsem);
+
+       if (!group->container || !group->container->iommu_driver ||
+           WARN_ON(!group->container_users))
                return -EINVAL;
 
-       if (group->type == VFIO_NO_IOMMU) {
-               atomic_dec(&group->container_users);
+       if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
                return -EPERM;
-       }
-       if (!group->container->iommu_driver || !vfio_group_viable(group)) {
-               atomic_dec(&group->container_users);
-               return -EINVAL;
-       }
 
+       get_file(group->opened_file);
+       group->container_users++;
        return 0;
 }
 
-static const struct file_operations vfio_device_fops;
+static void vfio_device_unassign_container(struct vfio_device *device)
+{
+       down_write(&device->group->group_rwsem);
+       WARN_ON(device->group->container_users <= 1);
+       device->group->container_users--;
+       fput(device->group->opened_file);
+       up_write(&device->group->group_rwsem);
+}
 
-static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
+static struct file *vfio_device_open(struct vfio_device *device)
 {
-       struct vfio_device *device;
        struct file *filep;
-       int fdno;
-       int ret = 0;
-
-       if (0 == atomic_read(&group->container_users) ||
-           !group->container->iommu_driver || !vfio_group_viable(group))
-               return -EINVAL;
-
-       if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
-               return -EPERM;
+       int ret;
 
-       device = vfio_device_get_from_name(group, buf);
-       if (IS_ERR(device))
-               return PTR_ERR(device);
+       down_write(&device->group->group_rwsem);
+       ret = vfio_device_assign_container(device);
+       up_write(&device->group->group_rwsem);
+       if (ret)
+               return ERR_PTR(ret);
 
        if (!try_module_get(device->dev->driver->owner)) {
                ret = -ENODEV;
-               goto err_device_put;
+               goto err_unassign_container;
        }
 
        mutex_lock(&device->dev_set->lock);
        device->open_count++;
-       if (device->open_count == 1 && device->ops->open_device) {
-               ret = device->ops->open_device(device);
-               if (ret)
-                       goto err_undo_count;
+       if (device->open_count == 1) {
+               /*
+                * Here we pass the KVM pointer with the group under the read
+                * lock.  If the device driver will use it, it must obtain a
+                * reference and release it during close_device.
+                */
+               down_read(&device->group->group_rwsem);
+               device->kvm = device->group->kvm;
+
+               if (device->ops->open_device) {
+                       ret = device->ops->open_device(device);
+                       if (ret)
+                               goto err_undo_count;
+               }
+               up_read(&device->group->group_rwsem);
        }
        mutex_unlock(&device->dev_set->lock);
 
@@ -1366,15 +1117,11 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
         * We can't use anon_inode_getfd() because we need to modify
         * the f_mode flags directly to allow more than just ioctls
         */
-       fdno = ret = get_unused_fd_flags(O_CLOEXEC);
-       if (ret < 0)
-               goto err_close_device;
-
        filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
                                   device, O_RDWR);
        if (IS_ERR(filep)) {
                ret = PTR_ERR(filep);
-               goto err_fd;
+               goto err_close_device;
        }
 
        /*
@@ -1384,26 +1131,61 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
         */
        filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
 
-       atomic_inc(&group->container_users);
-
-       fd_install(fdno, filep);
-
-       if (group->type == VFIO_NO_IOMMU)
+       if (device->group->type == VFIO_NO_IOMMU)
                dev_warn(device->dev, "vfio-noiommu device opened by user "
                         "(%s:%d)\n", current->comm, task_pid_nr(current));
-       return fdno;
+       /*
+        * On success the ref of device is moved to the file and
+        * put in vfio_device_fops_release()
+        */
+       return filep;
 
-err_fd:
-       put_unused_fd(fdno);
 err_close_device:
        mutex_lock(&device->dev_set->lock);
+       down_read(&device->group->group_rwsem);
        if (device->open_count == 1 && device->ops->close_device)
                device->ops->close_device(device);
 err_undo_count:
        device->open_count--;
+       if (device->open_count == 0 && device->kvm)
+               device->kvm = NULL;
+       up_read(&device->group->group_rwsem);
        mutex_unlock(&device->dev_set->lock);
        module_put(device->dev->driver->owner);
-err_device_put:
+err_unassign_container:
+       vfio_device_unassign_container(device);
+       return ERR_PTR(ret);
+}
+
+static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
+{
+       struct vfio_device *device;
+       struct file *filep;
+       int fdno;
+       int ret;
+
+       device = vfio_device_get_from_name(group, buf);
+       if (IS_ERR(device))
+               return PTR_ERR(device);
+
+       fdno = get_unused_fd_flags(O_CLOEXEC);
+       if (fdno < 0) {
+               ret = fdno;
+               goto err_put_device;
+       }
+
+       filep = vfio_device_open(device);
+       if (IS_ERR(filep)) {
+               ret = PTR_ERR(filep);
+               goto err_put_fdno;
+       }
+
+       fd_install(fdno, filep);
+       return fdno;
+
+err_put_fdno:
+       put_unused_fd(fdno);
+err_put_device:
        vfio_device_put(device);
        return ret;
 }
@@ -1430,11 +1212,13 @@ static long vfio_group_fops_unl_ioctl(struct file *filep,
 
                status.flags = 0;
 
-               if (vfio_group_viable(group))
-                       status.flags |= VFIO_GROUP_FLAGS_VIABLE;
-
+               down_read(&group->group_rwsem);
                if (group->container)
-                       status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;
+                       status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET |
+                                       VFIO_GROUP_FLAGS_VIABLE;
+               else if (!iommu_group_dma_owner_claimed(group->iommu_group))
+                       status.flags |= VFIO_GROUP_FLAGS_VIABLE;
+               up_read(&group->group_rwsem);
 
                if (copy_to_user((void __user *)arg, &status, minsz))
                        return -EFAULT;
@@ -1452,11 +1236,15 @@ static long vfio_group_fops_unl_ioctl(struct file *filep,
                if (fd < 0)
                        return -EINVAL;
 
+               down_write(&group->group_rwsem);
                ret = vfio_group_set_container(group, fd);
+               up_write(&group->group_rwsem);
                break;
        }
        case VFIO_GROUP_UNSET_CONTAINER:
+               down_write(&group->group_rwsem);
                ret = vfio_group_unset_container(group);
+               up_write(&group->group_rwsem);
                break;
        case VFIO_GROUP_GET_DEVICE_FD:
        {
@@ -1479,38 +1267,38 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
 {
        struct vfio_group *group =
                container_of(inode->i_cdev, struct vfio_group, cdev);
-       int opened;
+       int ret;
 
-       /* users can be zero if this races with vfio_group_put() */
-       if (!refcount_inc_not_zero(&group->users))
-               return -ENODEV;
+       down_write(&group->group_rwsem);
 
-       if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) {
-               vfio_group_put(group);
-               return -EPERM;
+       /* users can be zero if this races with vfio_group_put() */
+       if (!refcount_inc_not_zero(&group->users)) {
+               ret = -ENODEV;
+               goto err_unlock;
        }
 
-       /* Do we need multiple instances of the group open?  Seems not. */
-       opened = atomic_cmpxchg(&group->opened, 0, 1);
-       if (opened) {
-               vfio_group_put(group);
-               return -EBUSY;
+       if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) {
+               ret = -EPERM;
+               goto err_put;
        }
 
-       /* Is something still in use from a previous open? */
-       if (group->container) {
-               atomic_dec(&group->opened);
-               vfio_group_put(group);
-               return -EBUSY;
+       /*
+        * Do we need multiple instances of the group open?  Seems not.
+        */
+       if (group->opened_file) {
+               ret = -EBUSY;
+               goto err_put;
        }
-
-       /* Warn if previous user didn't cleanup and re-init to drop them */
-       if (WARN_ON(group->notifier.head))
-               BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
-
+       group->opened_file = filep;
        filep->private_data = group;
 
+       up_write(&group->group_rwsem);
        return 0;
+err_put:
+       vfio_group_put(group);
+err_unlock:
+       up_write(&group->group_rwsem);
+       return ret;
 }
 
 static int vfio_group_fops_release(struct inode *inode, struct file *filep)
@@ -1519,9 +1307,18 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep)
 
        filep->private_data = NULL;
 
-       vfio_group_try_dissolve_container(group);
-
-       atomic_dec(&group->opened);
+       down_write(&group->group_rwsem);
+       /*
+        * Device FDs hold a group file reference, therefore the group release
+        * is only called when there are no open devices.
+        */
+       WARN_ON(group->notifier.head);
+       if (group->container) {
+               WARN_ON(group->container_users != 1);
+               __vfio_group_unset_container(group);
+       }
+       group->opened_file = NULL;
+       up_write(&group->group_rwsem);
 
        vfio_group_put(group);
 
@@ -1544,13 +1341,19 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep)
        struct vfio_device *device = filep->private_data;
 
        mutex_lock(&device->dev_set->lock);
-       if (!--device->open_count && device->ops->close_device)
+       vfio_assert_device_open(device);
+       down_read(&device->group->group_rwsem);
+       if (device->open_count == 1 && device->ops->close_device)
                device->ops->close_device(device);
+       up_read(&device->group->group_rwsem);
+       device->open_count--;
+       if (device->open_count == 0)
+               device->kvm = NULL;
        mutex_unlock(&device->dev_set->lock);
 
        module_put(device->dev->driver->owner);
 
-       vfio_group_try_dissolve_container(device->group);
+       vfio_device_unassign_container(device);
 
        vfio_device_put(device);
 
@@ -1899,119 +1702,94 @@ static const struct file_operations vfio_device_fops = {
        .mmap           = vfio_device_fops_mmap,
 };
 
-/*
- * External user API, exported by symbols to be linked dynamically.
- *
- * The protocol includes:
- *  1. do normal VFIO init operation:
- *     - opening a new container;
- *     - attaching group(s) to it;
- *     - setting an IOMMU driver for a container.
- * When IOMMU is set for a container, all groups in it are
- * considered ready to use by an external user.
- *
- * 2. User space passes a group fd to an external user.
- * The external user calls vfio_group_get_external_user()
- * to verify that:
- *     - the group is initialized;
- *     - IOMMU is set for it.
- * If both checks passed, vfio_group_get_external_user()
- * increments the container user counter to prevent
- * the VFIO group from disposal before KVM exits.
+/**
+ * vfio_file_iommu_group - Return the struct iommu_group for the vfio group file
+ * @file: VFIO group file
  *
- * 3. The external user calls vfio_external_user_iommu_id()
- * to know an IOMMU ID.
- *
- * 4. When the external KVM finishes, it calls
- * vfio_group_put_external_user() to release the VFIO group.
- * This call decrements the container user counter.
+ * The returned iommu_group is valid as long as a ref is held on the file.
  */
-struct vfio_group *vfio_group_get_external_user(struct file *filep)
+struct iommu_group *vfio_file_iommu_group(struct file *file)
 {
-       struct vfio_group *group = filep->private_data;
-       int ret;
+       struct vfio_group *group = file->private_data;
 
-       if (filep->f_op != &vfio_group_fops)
-               return ERR_PTR(-EINVAL);
-
-       ret = vfio_group_add_container_user(group);
-       if (ret)
-               return ERR_PTR(ret);
-
-       /*
-        * Since the caller holds the fget on the file group->users must be >= 1
-        */
-       vfio_group_get(group);
-
-       return group;
+       if (file->f_op != &vfio_group_fops)
+               return NULL;
+       return group->iommu_group;
 }
-EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
+EXPORT_SYMBOL_GPL(vfio_file_iommu_group);
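vfio_file_iommu_group() replaces vfio_group_get_external_user() plus vfio_external_user_iommu_id() for callers that only need the iommu_group. A rough consumer sketch, not part of the patch (example_group_id() is hypothetical; the caller is assumed to already hold a reference on the file):

#include <linux/fs.h>
#include <linux/iommu.h>
#include <linux/vfio.h>

static int example_group_id(struct file *filp)
{
        struct iommu_group *grp = vfio_file_iommu_group(filp);

        if (!grp)
                return -EINVAL;         /* not a VFIO group file */
        return iommu_group_id(grp);     /* valid while filp is held */
}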
 
-/*
- * External user API, exported by symbols to be linked dynamically.
- * The external user passes in a device pointer
- * to verify that:
- *     - A VFIO group is assiciated with the device;
- *     - IOMMU is set for the group.
- * If both checks passed, vfio_group_get_external_user_from_dev()
- * increments the container user counter to prevent the VFIO group
- * from disposal before external user exits and returns the pointer
- * to the VFIO group.
- *
- * When the external user finishes using the VFIO group, it calls
- * vfio_group_put_external_user() to release the VFIO group and
- * decrement the container user counter.
+/**
+ * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
+ *        is always CPU cache coherent
+ * @file: VFIO group file
  *
- * @dev [in]   : device
- * Return error PTR or pointer to VFIO group.
+ * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
+ * bit in DMA transactions. A return of false indicates that the user has
+ * rights to access additional instructions such as wbinvd on x86.
  */
-
-struct vfio_group *vfio_group_get_external_user_from_dev(struct device *dev)
+bool vfio_file_enforced_coherent(struct file *file)
 {
-       struct vfio_group *group;
-       int ret;
+       struct vfio_group *group = file->private_data;
+       bool ret;
 
-       group = vfio_group_get_from_dev(dev);
-       if (!group)
-               return ERR_PTR(-ENODEV);
+       if (file->f_op != &vfio_group_fops)
+               return true;
 
-       ret = vfio_group_add_container_user(group);
-       if (ret) {
-               vfio_group_put(group);
-               return ERR_PTR(ret);
+       down_read(&group->group_rwsem);
+       if (group->container) {
+               ret = vfio_ioctl_check_extension(group->container,
+                                                VFIO_DMA_CC_IOMMU);
+       } else {
+               /*
+                * Since the coherency state is determined only once a container
+                * is attached the user must do so before they can prove they
+                * have permission.
+                */
+               ret = true;
        }
-
-       return group;
+       up_read(&group->group_rwsem);
+       return ret;
 }
-EXPORT_SYMBOL_GPL(vfio_group_get_external_user_from_dev);
+EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
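A hedged sketch of how a hypervisor-side consumer might act on the query above; the helper name is an assumption, not code from this series:

static bool example_guest_needs_wbinvd(struct file *vfio_group_file)
{
        /*
         * If coherency is not enforced, no-snoop DMA can bypass the CPU
         * caches, so the guest legitimately needs wbinvd emulation.
         */
        return !vfio_file_enforced_coherent(vfio_group_file);
}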
 
-void vfio_group_put_external_user(struct vfio_group *group)
+/**
+ * vfio_file_set_kvm - Link a kvm with VFIO drivers
+ * @file: VFIO group file
+ * @kvm: KVM to link
+ *
+ * When a VFIO device is first opened the KVM will be available in
+ * device->kvm if one was associated with the group.
+ */
+void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
 {
-       vfio_group_try_dissolve_container(group);
-       vfio_group_put(group);
-}
-EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
+       struct vfio_group *group = file->private_data;
 
-bool vfio_external_group_match_file(struct vfio_group *test_group,
-                                   struct file *filep)
-{
-       struct vfio_group *group = filep->private_data;
+       if (file->f_op != &vfio_group_fops)
+               return;
 
-       return (filep->f_op == &vfio_group_fops) && (group == test_group);
+       down_write(&group->group_rwsem);
+       group->kvm = kvm;
+       up_write(&group->group_rwsem);
 }
-EXPORT_SYMBOL_GPL(vfio_external_group_match_file);
+EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
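The intended call pattern for vfio_file_set_kvm(), shown as an illustrative sketch (lifetime management around the group file and the kvm is assumed to be handled by the caller):

static void example_link_kvm(struct file *vfio_group_file, struct kvm *kvm)
{
        /* devices opened after this point will see kvm in device->kvm */
        vfio_file_set_kvm(vfio_group_file, kvm);
}

static void example_unlink_kvm(struct file *vfio_group_file)
{
        /* clear the link before the kvm is destroyed */
        vfio_file_set_kvm(vfio_group_file, NULL);
}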
 
-int vfio_external_user_iommu_id(struct vfio_group *group)
+/**
+ * vfio_file_has_dev - True if the VFIO file is a handle for device
+ * @file: VFIO file to check
+ * @device: Device that must be part of the file
+ *
+ * Returns true if given file has permission to manipulate the given device.
+ */
+bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
 {
-       return iommu_group_id(group->iommu_group);
-}
-EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);
+       struct vfio_group *group = file->private_data;
 
-long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
-{
-       return vfio_ioctl_check_extension(group->container, arg);
+       if (file->f_op != &vfio_group_fops)
+               return false;
+
+       return group == device->group;
 }
-EXPORT_SYMBOL_GPL(vfio_external_check_extension);
+EXPORT_SYMBOL_GPL(vfio_file_has_dev);
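vfio_file_has_dev() takes over the role of vfio_external_group_match_file(): it lets a kernel consumer check that a user-supplied group fd really covers a given device. A sketch under that assumption (fdget()/fdput() from <linux/file.h>; the helper name is illustrative):

#include <linux/file.h>

static int example_validate_group_fd(int fd, struct vfio_device *vdev)
{
        struct fd f = fdget(fd);
        int ret = 0;

        if (!f.file)
                return -EBADF;
        if (!vfio_file_has_dev(f.file, vdev))
                ret = -EINVAL;          /* fd does not cover this device */
        fdput(f);
        return ret;
}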
 
 /*
  * Sub-module support
@@ -2134,7 +1912,7 @@ EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
 /*
  * Pin a set of guest PFNs and return their associated host PFNs for local
  * domain only.
- * @dev [in]     : device
+ * @device [in]  : device
  * @user_pfn [in]: array of user/guest PFNs to be pinned.
  * @npage [in]   : count of elements in user_pfn array.  This count should not
 *                be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
@@ -2142,33 +1920,25 @@ EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
  * @phys_pfn[out]: array of host PFNs
  * Return error or number of pages pinned.
  */
-int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
-                  int prot, unsigned long *phys_pfn)
+int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
+                  int npage, int prot, unsigned long *phys_pfn)
 {
        struct vfio_container *container;
-       struct vfio_group *group;
+       struct vfio_group *group = device->group;
        struct vfio_iommu_driver *driver;
        int ret;
 
-       if (!dev || !user_pfn || !phys_pfn || !npage)
+       if (!user_pfn || !phys_pfn || !npage ||
+           !vfio_assert_device_open(device))
                return -EINVAL;
 
        if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
                return -E2BIG;
 
-       group = vfio_group_get_from_dev(dev);
-       if (!group)
-               return -ENODEV;
-
-       if (group->dev_counter > 1) {
-               ret = -EINVAL;
-               goto err_pin_pages;
-       }
-
-       ret = vfio_group_add_container_user(group);
-       if (ret)
-               goto err_pin_pages;
+       if (group->dev_counter > 1)
+               return -EINVAL;
 
+       /* group->container cannot change while a vfio device is open */
        container = group->container;
        driver = container->iommu_driver;
        if (likely(driver && driver->ops->pin_pages))
@@ -2178,45 +1948,34 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
        else
                ret = -ENOTTY;
 
-       vfio_group_try_dissolve_container(group);
-
-err_pin_pages:
-       vfio_group_put(group);
        return ret;
 }
 EXPORT_SYMBOL(vfio_pin_pages);
 
 /*
  * Unpin set of host PFNs for local domain only.
- * @dev [in]     : device
+ * @device [in]  : device
  * @user_pfn [in]: array of user/guest PFNs to be unpinned. Number of user/guest
  *                PFNs should not be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
  * @npage [in]   : count of elements in user_pfn array.  This count should not
  *                 be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
  * Return error or number of pages unpinned.
  */
-int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
+int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
+                    int npage)
 {
        struct vfio_container *container;
-       struct vfio_group *group;
        struct vfio_iommu_driver *driver;
        int ret;
 
-       if (!dev || !user_pfn || !npage)
+       if (!user_pfn || !npage || !vfio_assert_device_open(device))
                return -EINVAL;
 
        if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
                return -E2BIG;
 
-       group = vfio_group_get_from_dev(dev);
-       if (!group)
-               return -ENODEV;
-
-       ret = vfio_group_add_container_user(group);
-       if (ret)
-               goto err_unpin_pages;
-
-       container = group->container;
+       /* group->container cannot change while a vfio device is open */
+       container = device->group->container;
        driver = container->iommu_driver;
        if (likely(driver && driver->ops->unpin_pages))
                ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
@@ -2224,110 +1983,11 @@ int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
        else
                ret = -ENOTTY;
 
-       vfio_group_try_dissolve_container(group);
-
-err_unpin_pages:
-       vfio_group_put(group);
        return ret;
 }
 EXPORT_SYMBOL(vfio_unpin_pages);
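With the conversion above, vfio_pin_pages()/vfio_unpin_pages() take the vfio_device itself and are only valid between open_device() and close_device(). A hypothetical driver-side sketch pinning and later unpinning a single guest PFN (gpfn/hpfn naming is illustrative; IOMMU_READ/IOMMU_WRITE come from <linux/iommu.h>):

static int example_pin_one(struct vfio_device *vdev, unsigned long gpfn,
                           unsigned long *hpfn)
{
        /* returns the number of pages pinned, or a negative errno */
        int ret = vfio_pin_pages(vdev, &gpfn, 1,
                                 IOMMU_READ | IOMMU_WRITE, hpfn);

        if (ret < 0)
                return ret;
        return ret == 1 ? 0 : -EFAULT;
}

static void example_unpin_one(struct vfio_device *vdev, unsigned long gpfn)
{
        vfio_unpin_pages(vdev, &gpfn, 1);
}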
 
 /*
- * Pin a set of guest IOVA PFNs and return their associated host PFNs for a
- * VFIO group.
- *
- * The caller needs to call vfio_group_get_external_user() or
- * vfio_group_get_external_user_from_dev() prior to calling this interface,
- * so as to prevent the VFIO group from disposal in the middle of the call.
- * But it can keep the reference to the VFIO group for several calls into
- * this interface.
- * After finishing using of the VFIO group, the caller needs to release the
- * VFIO group by calling vfio_group_put_external_user().
- *
- * @group [in]         : VFIO group
- * @user_iova_pfn [in] : array of user/guest IOVA PFNs to be pinned.
- * @npage [in]         : count of elements in user_iova_pfn array.
- *                       This count should not be greater
- *                       VFIO_PIN_PAGES_MAX_ENTRIES.
- * @prot [in]          : protection flags
- * @phys_pfn [out]     : array of host PFNs
- * Return error or number of pages pinned.
- */
-int vfio_group_pin_pages(struct vfio_group *group,
-                        unsigned long *user_iova_pfn, int npage,
-                        int prot, unsigned long *phys_pfn)
-{
-       struct vfio_container *container;
-       struct vfio_iommu_driver *driver;
-       int ret;
-
-       if (!group || !user_iova_pfn || !phys_pfn || !npage)
-               return -EINVAL;
-
-       if (group->dev_counter > 1)
-               return -EINVAL;
-
-       if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
-               return -E2BIG;
-
-       container = group->container;
-       driver = container->iommu_driver;
-       if (likely(driver && driver->ops->pin_pages))
-               ret = driver->ops->pin_pages(container->iommu_data,
-                                            group->iommu_group, user_iova_pfn,
-                                            npage, prot, phys_pfn);
-       else
-               ret = -ENOTTY;
-
-       return ret;
-}
-EXPORT_SYMBOL(vfio_group_pin_pages);
-
-/*
- * Unpin a set of guest IOVA PFNs for a VFIO group.
- *
- * The caller needs to call vfio_group_get_external_user() or
- * vfio_group_get_external_user_from_dev() prior to calling this interface,
- * so as to prevent the VFIO group from disposal in the middle of the call.
- * But it can keep the reference to the VFIO group for several calls into
- * this interface.
- * After finishing using of the VFIO group, the caller needs to release the
- * VFIO group by calling vfio_group_put_external_user().
- *
- * @group [in]         : vfio group
- * @user_iova_pfn [in] : array of user/guest IOVA PFNs to be unpinned.
- * @npage [in]         : count of elements in user_iova_pfn array.
- *                       This count should not be greater than
- *                       VFIO_PIN_PAGES_MAX_ENTRIES.
- * Return error or number of pages unpinned.
- */
-int vfio_group_unpin_pages(struct vfio_group *group,
-                          unsigned long *user_iova_pfn, int npage)
-{
-       struct vfio_container *container;
-       struct vfio_iommu_driver *driver;
-       int ret;
-
-       if (!group || !user_iova_pfn || !npage)
-               return -EINVAL;
-
-       if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
-               return -E2BIG;
-
-       container = group->container;
-       driver = container->iommu_driver;
-       if (likely(driver && driver->ops->unpin_pages))
-               ret = driver->ops->unpin_pages(container->iommu_data,
-                                              user_iova_pfn, npage);
-       else
-               ret = -ENOTTY;
-
-       return ret;
-}
-EXPORT_SYMBOL(vfio_group_unpin_pages);
-
-
-/*
  * This interface allows the CPUs to perform some sort of virtual DMA on
  * behalf of the device.
  *
@@ -2337,32 +1997,25 @@ EXPORT_SYMBOL(vfio_group_unpin_pages);
  * As the read/write of user space memory is conducted via the CPUs and is
  * not a real device DMA, it is not necessary to pin the user space memory.
  *
- * The caller needs to call vfio_group_get_external_user() or
- * vfio_group_get_external_user_from_dev() prior to calling this interface,
- * so as to prevent the VFIO group from disposal in the middle of the call.
- * But it can keep the reference to the VFIO group for several calls into
- * this interface.
- * After finishing using of the VFIO group, the caller needs to release the
- * VFIO group by calling vfio_group_put_external_user().
- *
- * @group [in]         : VFIO group
+ * @device [in]                : VFIO device
  * @user_iova [in]     : base IOVA of a user space buffer
  * @data [in]          : pointer to kernel buffer
  * @len [in]           : kernel buffer length
  * @write              : indicate read or write
  * Return error code on failure or 0 on success.
  */
-int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
-               void *data, size_t len, bool write)
+int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova, void *data,
+               size_t len, bool write)
 {
        struct vfio_container *container;
        struct vfio_iommu_driver *driver;
        int ret = 0;
 
-       if (!group || !data || len <= 0)
+       if (!data || len <= 0 || !vfio_assert_device_open(device))
                return -EINVAL;
 
-       container = group->container;
+       /* group->container cannot change while a vfio device is open */
+       container = device->group->container;
        driver = container->iommu_driver;
 
        if (likely(driver && driver->ops->dma_rw))
@@ -2370,7 +2023,6 @@ int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
                                          user_iova, data, len, write);
        else
                ret = -ENOTTY;
-
        return ret;
 }
 EXPORT_SYMBOL(vfio_dma_rw);
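vfio_dma_rw() likewise now keys off the open vfio_device rather than an external group reference. A short illustrative sketch reading guest memory by IOVA into a kernel buffer:

static int example_read_guest(struct vfio_device *vdev, dma_addr_t iova,
                              void *buf, size_t len)
{
        /* write = false: copy from the guest IOVA range into buf */
        return vfio_dma_rw(vdev, iova, buf, len, false);
}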
@@ -2383,9 +2035,7 @@ static int vfio_register_iommu_notifier(struct vfio_group *group,
        struct vfio_iommu_driver *driver;
        int ret;
 
-       ret = vfio_group_add_container_user(group);
-       if (ret)
-               return -EINVAL;
+       lockdep_assert_held_read(&group->group_rwsem);
 
        container = group->container;
        driver = container->iommu_driver;
@@ -2395,8 +2045,6 @@ static int vfio_register_iommu_notifier(struct vfio_group *group,
        else
                ret = -ENOTTY;
 
-       vfio_group_try_dissolve_container(group);
-
        return ret;
 }
 
@@ -2407,9 +2055,7 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group,
        struct vfio_iommu_driver *driver;
        int ret;
 
-       ret = vfio_group_add_container_user(group);
-       if (ret)
-               return -EINVAL;
+       lockdep_assert_held_read(&group->group_rwsem);
 
        container = group->container;
        driver = container->iommu_driver;
@@ -2419,147 +2065,52 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group,
        else
                ret = -ENOTTY;
 
-       vfio_group_try_dissolve_container(group);
-
-       return ret;
-}
-
-void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
-{
-       group->kvm = kvm;
-       blocking_notifier_call_chain(&group->notifier,
-                               VFIO_GROUP_NOTIFY_SET_KVM, kvm);
-}
-EXPORT_SYMBOL_GPL(vfio_group_set_kvm);
-
-static int vfio_register_group_notifier(struct vfio_group *group,
-                                       unsigned long *events,
-                                       struct notifier_block *nb)
-{
-       int ret;
-       bool set_kvm = false;
-
-       if (*events & VFIO_GROUP_NOTIFY_SET_KVM)
-               set_kvm = true;
-
-       /* clear known events */
-       *events &= ~VFIO_GROUP_NOTIFY_SET_KVM;
-
-       /* refuse to continue if still events remaining */
-       if (*events)
-               return -EINVAL;
-
-       ret = vfio_group_add_container_user(group);
-       if (ret)
-               return -EINVAL;
-
-       ret = blocking_notifier_chain_register(&group->notifier, nb);
-
-       /*
-        * The attaching of kvm and vfio_group might already happen, so
-        * here we replay once upon registration.
-        */
-       if (!ret && set_kvm && group->kvm)
-               blocking_notifier_call_chain(&group->notifier,
-                                       VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);
-
-       vfio_group_try_dissolve_container(group);
-
-       return ret;
-}
-
-static int vfio_unregister_group_notifier(struct vfio_group *group,
-                                        struct notifier_block *nb)
-{
-       int ret;
-
-       ret = vfio_group_add_container_user(group);
-       if (ret)
-               return -EINVAL;
-
-       ret = blocking_notifier_chain_unregister(&group->notifier, nb);
-
-       vfio_group_try_dissolve_container(group);
-
        return ret;
 }
 
-int vfio_register_notifier(struct device *dev, enum vfio_notify_type type,
-                          unsigned long *events, struct notifier_block *nb)
+int vfio_register_notifier(struct vfio_device *device,
+                          enum vfio_notify_type type, unsigned long *events,
+                          struct notifier_block *nb)
 {
-       struct vfio_group *group;
+       struct vfio_group *group = device->group;
        int ret;
 
-       if (!dev || !nb || !events || (*events == 0))
+       if (!nb || !events || (*events == 0) ||
+           !vfio_assert_device_open(device))
                return -EINVAL;
 
-       group = vfio_group_get_from_dev(dev);
-       if (!group)
-               return -ENODEV;
-
        switch (type) {
        case VFIO_IOMMU_NOTIFY:
                ret = vfio_register_iommu_notifier(group, events, nb);
                break;
-       case VFIO_GROUP_NOTIFY:
-               ret = vfio_register_group_notifier(group, events, nb);
-               break;
        default:
                ret = -EINVAL;
        }
-
-       vfio_group_put(group);
        return ret;
 }
 EXPORT_SYMBOL(vfio_register_notifier);
 
-int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type,
+int vfio_unregister_notifier(struct vfio_device *device,
+                            enum vfio_notify_type type,
                             struct notifier_block *nb)
 {
-       struct vfio_group *group;
+       struct vfio_group *group = device->group;
        int ret;
 
-       if (!dev || !nb)
+       if (!nb || !vfio_assert_device_open(device))
                return -EINVAL;
 
-       group = vfio_group_get_from_dev(dev);
-       if (!group)
-               return -ENODEV;
-
        switch (type) {
        case VFIO_IOMMU_NOTIFY:
                ret = vfio_unregister_iommu_notifier(group, nb);
                break;
-       case VFIO_GROUP_NOTIFY:
-               ret = vfio_unregister_group_notifier(group, nb);
-               break;
        default:
                ret = -EINVAL;
        }
-
-       vfio_group_put(group);
        return ret;
 }
 EXPORT_SYMBOL(vfio_unregister_notifier);
 
-struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group)
-{
-       struct vfio_container *container;
-       struct vfio_iommu_driver *driver;
-
-       if (!group)
-               return ERR_PTR(-EINVAL);
-
-       container = group->container;
-       driver = container->iommu_driver;
-       if (likely(driver && driver->ops->group_iommu_domain))
-               return driver->ops->group_iommu_domain(container->iommu_data,
-                                                      group->iommu_group);
-
-       return ERR_PTR(-ENOTTY);
-}
-EXPORT_SYMBOL_GPL(vfio_group_iommu_domain);
-
 /*
  * Module/class support
  */
index 9394aa9..c13b929 100644
@@ -84,8 +84,8 @@ struct vfio_domain {
        struct iommu_domain     *domain;
        struct list_head        next;
        struct list_head        group_list;
-       int                     prot;           /* IOMMU_CACHE */
-       bool                    fgsp;           /* Fine-grained super pages */
+       bool                    fgsp : 1;       /* Fine-grained super pages */
+       bool                    enforce_cache_coherency : 1;
 };
 
 struct vfio_dma {
@@ -1461,7 +1461,7 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
 
        list_for_each_entry(d, &iommu->domain_list, next) {
                ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
-                               npage << PAGE_SHIFT, prot | d->prot);
+                               npage << PAGE_SHIFT, prot | IOMMU_CACHE);
                if (ret)
                        goto unwind;
 
@@ -1771,7 +1771,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
                        }
 
                        ret = iommu_map(domain->domain, iova, phys,
-                                       size, dma->prot | domain->prot);
+                                       size, dma->prot | IOMMU_CACHE);
                        if (ret) {
                                if (!dma->iommu_mapped) {
                                        vfio_unpin_pages_remote(dma, iova,
@@ -1859,7 +1859,7 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain)
                return;
 
        ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
-                       IOMMU_READ | IOMMU_WRITE | domain->prot);
+                       IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
        if (!ret) {
                size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
 
@@ -2267,8 +2267,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
                goto out_detach;
        }
 
-       if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
-               domain->prot |= IOMMU_CACHE;
+       /*
+        * If the IOMMU can block non-coherent operations (ie PCIe TLPs with
+        * no-snoop set) then VFIO always turns this feature on because on Intel
+        * platforms it optimizes KVM to disable wbinvd emulation.
+        */
+       if (domain->domain->ops->enforce_cache_coherency)
+               domain->enforce_cache_coherency =
+                       domain->domain->ops->enforce_cache_coherency(
+                               domain->domain);
 
        /*
         * Try to match an existing compatible domain.  We don't want to
@@ -2279,7 +2286,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
         */
        list_for_each_entry(d, &iommu->domain_list, next) {
                if (d->domain->ops == domain->domain->ops &&
-                   d->prot == domain->prot) {
+                   d->enforce_cache_coherency ==
+                           domain->enforce_cache_coherency) {
                        iommu_detach_group(domain->domain, group->iommu_group);
                        if (!iommu_attach_group(d->domain,
                                                group->iommu_group)) {
@@ -2611,14 +2619,14 @@ static void vfio_iommu_type1_release(void *iommu_data)
        kfree(iommu);
 }
 
-static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
+static int vfio_domains_have_enforce_cache_coherency(struct vfio_iommu *iommu)
 {
        struct vfio_domain *domain;
        int ret = 1;
 
        mutex_lock(&iommu->lock);
        list_for_each_entry(domain, &iommu->domain_list, next) {
-               if (!(domain->prot & IOMMU_CACHE)) {
+               if (!(domain->enforce_cache_coherency)) {
                        ret = 0;
                        break;
                }
@@ -2641,7 +2649,7 @@ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
        case VFIO_DMA_CC_IOMMU:
                if (!iommu)
                        return 0;
-               return vfio_domains_have_iommu_cache(iommu);
+               return vfio_domains_have_enforce_cache_coherency(iommu);
        default:
                return 0;
        }
index 5829cf2..ea61330 100644
@@ -126,6 +126,23 @@ void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last)
 EXPORT_SYMBOL_GPL(vhost_iotlb_del_range);
 
 /**
+ * vhost_iotlb_init - initialize a vhost IOTLB
+ * @iotlb: the IOTLB that needs to be initialized
+ * @limit: maximum number of IOTLB entries
+ * @flags: VHOST_IOTLB_FLAG_XXX
+ */
+void vhost_iotlb_init(struct vhost_iotlb *iotlb, unsigned int limit,
+                     unsigned int flags)
+{
+       iotlb->root = RB_ROOT_CACHED;
+       iotlb->limit = limit;
+       iotlb->nmaps = 0;
+       iotlb->flags = flags;
+       INIT_LIST_HEAD(&iotlb->list);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_init);
+
+/**
  * vhost_iotlb_alloc - add a new vhost IOTLB
  * @limit: maximum number of IOTLB entries
  * @flags: VHOST_IOTLB_FLAG_XXX
@@ -139,11 +156,7 @@ struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags)
        if (!iotlb)
                return NULL;
 
-       iotlb->root = RB_ROOT_CACHED;
-       iotlb->limit = limit;
-       iotlb->nmaps = 0;
-       iotlb->flags = flags;
-       INIT_LIST_HEAD(&iotlb->list);
+       vhost_iotlb_init(iotlb, limit, flags);
 
        return iotlb;
 }
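vhost_iotlb_init() exists so an IOTLB can be embedded in a containing structure instead of heap-allocated through vhost_iotlb_alloc(); the per-ASID vhost_vdpa_as introduced further below uses it exactly that way. A small stand-alone sketch (struct example_ctx is hypothetical):

#include <linux/vhost_iotlb.h>

struct example_ctx {
        struct vhost_iotlb iotlb;       /* embedded, not allocated */
};

static void example_ctx_setup(struct example_ctx *ctx)
{
        /* limit 0 = unbounded entries, no flags, as vhost_vdpa_alloc_as() does */
        vhost_iotlb_init(&ctx->iotlb, 0, 0);
}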
index 297b5db..68e4ecd 100644
@@ -1374,16 +1374,9 @@ static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
        *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
 }
 
-static void vhost_net_flush_vq(struct vhost_net *n, int index)
-{
-       vhost_poll_flush(n->poll + index);
-       vhost_poll_flush(&n->vqs[index].vq.poll);
-}
-
 static void vhost_net_flush(struct vhost_net *n)
 {
-       vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
-       vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
+       vhost_dev_flush(&n->dev);
        if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
                mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                n->tx_flush = true;
@@ -1572,7 +1565,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
        }
 
        if (oldsock) {
-               vhost_net_flush_vq(n, index);
+               vhost_dev_flush(&n->dev);
                sockfd_put(oldsock);
        }
 
index 532e204..ffd9e6c 100644
@@ -1436,7 +1436,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
                kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
 
        /* Flush both the vhost poll and vhost work */
-       vhost_work_dev_flush(&vs->dev);
+       vhost_dev_flush(&vs->dev);
 
        /* Wait for all reqs issued before the flush to be finished */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
@@ -1827,8 +1827,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
        vhost_scsi_clear_endpoint(vs, &t);
        vhost_dev_stop(&vs->dev);
        vhost_dev_cleanup(&vs->dev);
-       /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
-       vhost_scsi_flush(vs);
        kfree(vs->dev.vqs);
        kvfree(vs);
        return 0;
index 05740cb..bc8e7fb 100644
@@ -144,14 +144,9 @@ static void vhost_test_stop(struct vhost_test *n, void **privatep)
        *privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
 }
 
-static void vhost_test_flush_vq(struct vhost_test *n, int index)
-{
-       vhost_poll_flush(&n->vqs[index].poll);
-}
-
 static void vhost_test_flush(struct vhost_test *n)
 {
-       vhost_test_flush_vq(n, VHOST_TEST_VQ);
+       vhost_dev_flush(&n->dev);
 }
 
 static int vhost_test_release(struct inode *inode, struct file *f)
@@ -163,9 +158,6 @@ static int vhost_test_release(struct inode *inode, struct file *f)
        vhost_test_flush(n);
        vhost_dev_stop(&n->dev);
        vhost_dev_cleanup(&n->dev);
-       /* We do an extra flush before freeing memory,
-        * since jobs can re-queue themselves. */
-       vhost_test_flush(n);
        kfree(n->dev.vqs);
        kfree(n);
        return 0;
@@ -210,7 +202,7 @@ static long vhost_test_run(struct vhost_test *n, int test)
                        goto err;
 
                if (oldpriv) {
-                       vhost_test_flush_vq(n, index);
+                       vhost_test_flush(n);
                }
        }
 
@@ -303,7 +295,7 @@ static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
        mutex_unlock(&vq->mutex);
 
        if (enable) {
-               vhost_test_flush_vq(n, index);
+               vhost_test_flush(n);
        }
 
        mutex_unlock(&n->dev.mutex);
index 4c2f0bd..935a1d0 100644
 enum {
        VHOST_VDPA_BACKEND_FEATURES =
        (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
-       (1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
+       (1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
+       (1ULL << VHOST_BACKEND_F_IOTLB_ASID),
 };
 
 #define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
 
+#define VHOST_VDPA_IOTLB_BUCKETS 16
+
+struct vhost_vdpa_as {
+       struct hlist_node hash_link;
+       struct vhost_iotlb iotlb;
+       u32 id;
+};
+
 struct vhost_vdpa {
        struct vhost_dev vdev;
        struct iommu_domain *domain;
        struct vhost_virtqueue *vqs;
        struct completion completion;
        struct vdpa_device *vdpa;
+       struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
        struct device dev;
        struct cdev cdev;
        atomic_t opened;
@@ -48,12 +58,89 @@ struct vhost_vdpa {
        struct eventfd_ctx *config_ctx;
        int in_batch;
        struct vdpa_iova_range range;
+       u32 batch_asid;
 };
 
 static DEFINE_IDA(vhost_vdpa_ida);
 
 static dev_t vhost_vdpa_major;
 
+static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
+{
+       struct vhost_vdpa_as *as = container_of(iotlb, struct
+                                               vhost_vdpa_as, iotlb);
+       return as->id;
+}
+
+static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
+{
+       struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
+       struct vhost_vdpa_as *as;
+
+       hlist_for_each_entry(as, head, hash_link)
+               if (as->id == asid)
+                       return as;
+
+       return NULL;
+}
+
+static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
+{
+       struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+       if (!as)
+               return NULL;
+
+       return &as->iotlb;
+}
+
+static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
+{
+       struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
+       struct vhost_vdpa_as *as;
+
+       if (asid_to_as(v, asid))
+               return NULL;
+
+       if (asid >= v->vdpa->nas)
+               return NULL;
+
+       as = kmalloc(sizeof(*as), GFP_KERNEL);
+       if (!as)
+               return NULL;
+
+       vhost_iotlb_init(&as->iotlb, 0, 0);
+       as->id = asid;
+       hlist_add_head(&as->hash_link, head);
+
+       return as;
+}
+
+static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
+                                                     u32 asid)
+{
+       struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+       if (as)
+               return as;
+
+       return vhost_vdpa_alloc_as(v, asid);
+}
+
+static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
+{
+       struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+       if (!as)
+               return -EINVAL;
+
+       hlist_del(&as->hash_link);
+       vhost_iotlb_reset(&as->iotlb);
+       kfree(as);
+
+       return 0;
+}
+
 static void handle_vq_kick(struct vhost_work *work)
 {
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -411,6 +498,22 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                        return -EFAULT;
                ops->set_vq_ready(vdpa, idx, s.num);
                return 0;
+       case VHOST_VDPA_GET_VRING_GROUP:
+               s.index = idx;
+               s.num = ops->get_vq_group(vdpa, idx);
+               if (s.num >= vdpa->ngroups)
+                       return -EIO;
+               else if (copy_to_user(argp, &s, sizeof(s)))
+                       return -EFAULT;
+               return 0;
+       case VHOST_VDPA_SET_GROUP_ASID:
+               if (copy_from_user(&s, argp, sizeof(s)))
+                       return -EFAULT;
+               if (s.num >= vdpa->nas)
+                       return -EINVAL;
+               if (!ops->set_group_asid)
+                       return -EOPNOTSUPP;
+               return ops->set_group_asid(vdpa, idx, s.num);
        case VHOST_GET_VRING_BASE:
                r = ops->get_vq_state(v->vdpa, idx, &vq_state);
                if (r)
@@ -505,6 +608,15 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
        case VHOST_VDPA_GET_VRING_NUM:
                r = vhost_vdpa_get_vring_num(v, argp);
                break;
+       case VHOST_VDPA_GET_GROUP_NUM:
+               if (copy_to_user(argp, &v->vdpa->ngroups,
+                                sizeof(v->vdpa->ngroups)))
+                       r = -EFAULT;
+               break;
+       case VHOST_VDPA_GET_AS_NUM:
+               if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
+                       r = -EFAULT;
+               break;
        case VHOST_SET_LOG_BASE:
        case VHOST_SET_LOG_FD:
                r = -ENOIOCTLCMD;
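Taken together, the new ioctls above expose the device's virtqueue-group and address-space topology to userspace. A hedged userspace sketch (uapi names as added by this series; errno handling trimmed) that queries the counts, looks up the group of virtqueue 0, and binds that group to ASID 1:

#include <sys/ioctl.h>
#include <linux/vhost.h>

static int example_bind_vq0_group_to_asid1(int vdpa_fd)
{
        unsigned int ngroups, nas;
        struct vhost_vring_state s = { .index = 0 };

        if (ioctl(vdpa_fd, VHOST_VDPA_GET_GROUP_NUM, &ngroups) ||
            ioctl(vdpa_fd, VHOST_VDPA_GET_AS_NUM, &nas))
                return -1;
        if (!ngroups || nas < 2)
                return -1;              /* device cannot isolate this group */

        /* which group does virtqueue 0 belong to? the answer lands in s.num */
        if (ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_GROUP, &s))
                return -1;

        /* bind that group to ASID 1: .index carries the group, .num the ASID */
        s.index = s.num;
        s.num = 1;
        return ioctl(vdpa_fd, VHOST_VDPA_SET_GROUP_ASID, &s);
}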
@@ -537,10 +649,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
        return r;
 }
 
-static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
+                               struct vhost_iotlb *iotlb,
+                               u64 start, u64 last)
 {
        struct vhost_dev *dev = &v->vdev;
-       struct vhost_iotlb *iotlb = dev->iotlb;
        struct vhost_iotlb_map *map;
        struct page *page;
        unsigned long pfn, pinned;
@@ -559,10 +672,10 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
        }
 }
 
-static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
+                               struct vhost_iotlb *iotlb,
+                               u64 start, u64 last)
 {
-       struct vhost_dev *dev = &v->vdev;
-       struct vhost_iotlb *iotlb = dev->iotlb;
        struct vhost_iotlb_map *map;
        struct vdpa_map_file *map_file;
 
@@ -574,23 +687,16 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
        }
 }
 
-static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+                                  struct vhost_iotlb *iotlb,
+                                  u64 start, u64 last)
 {
        struct vdpa_device *vdpa = v->vdpa;
 
        if (vdpa->use_va)
-               return vhost_vdpa_va_unmap(v, start, last);
-
-       return vhost_vdpa_pa_unmap(v, start, last);
-}
-
-static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
-{
-       struct vhost_dev *dev = &v->vdev;
+               return vhost_vdpa_va_unmap(v, iotlb, start, last);
 
-       vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
-       kfree(dev->iotlb);
-       dev->iotlb = NULL;
+       return vhost_vdpa_pa_unmap(v, iotlb, start, last);
 }
 
 static int perm_to_iommu_flags(u32 perm)
@@ -615,30 +721,31 @@ static int perm_to_iommu_flags(u32 perm)
        return flags | IOMMU_CACHE;
 }
 
-static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
-                         u64 size, u64 pa, u32 perm, void *opaque)
+static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+                         u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
 {
        struct vhost_dev *dev = &v->vdev;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
+       u32 asid = iotlb_to_asid(iotlb);
        int r = 0;
 
-       r = vhost_iotlb_add_range_ctx(dev->iotlb, iova, iova + size - 1,
+       r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
                                      pa, perm, opaque);
        if (r)
                return r;
 
        if (ops->dma_map) {
-               r = ops->dma_map(vdpa, iova, size, pa, perm, opaque);
+               r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
        } else if (ops->set_map) {
                if (!v->in_batch)
-                       r = ops->set_map(vdpa, dev->iotlb);
+                       r = ops->set_map(vdpa, asid, iotlb);
        } else {
                r = iommu_map(v->domain, iova, pa, size,
                              perm_to_iommu_flags(perm));
        }
        if (r) {
-               vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
+               vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
                return r;
        }
 
@@ -648,25 +755,34 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
        return 0;
 }
 
-static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
+static void vhost_vdpa_unmap(struct vhost_vdpa *v,
+                            struct vhost_iotlb *iotlb,
+                            u64 iova, u64 size)
 {
-       struct vhost_dev *dev = &v->vdev;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
+       u32 asid = iotlb_to_asid(iotlb);
 
-       vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);
+       vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
 
        if (ops->dma_map) {
-               ops->dma_unmap(vdpa, iova, size);
+               ops->dma_unmap(vdpa, asid, iova, size);
        } else if (ops->set_map) {
                if (!v->in_batch)
-                       ops->set_map(vdpa, dev->iotlb);
+                       ops->set_map(vdpa, asid, iotlb);
        } else {
                iommu_unmap(v->domain, iova, size);
        }
+
+       /* If we are in the middle of batch processing, delay the free
+        * of AS until BATCH_END.
+        */
+       if (!v->in_batch && !iotlb->nmaps)
+               vhost_vdpa_remove_as(v, asid);
 }
 
 static int vhost_vdpa_va_map(struct vhost_vdpa *v,
+                            struct vhost_iotlb *iotlb,
                             u64 iova, u64 size, u64 uaddr, u32 perm)
 {
        struct vhost_dev *dev = &v->vdev;
@@ -696,7 +812,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
                offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
                map_file->offset = offset;
                map_file->file = get_file(vma->vm_file);
-               ret = vhost_vdpa_map(v, map_iova, map_size, uaddr,
+               ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
                                     perm, map_file);
                if (ret) {
                        fput(map_file->file);
@@ -709,7 +825,7 @@ next:
                map_iova += map_size;
        }
        if (ret)
-               vhost_vdpa_unmap(v, iova, map_iova - iova);
+               vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);
 
        mmap_read_unlock(dev->mm);
 
@@ -717,6 +833,7 @@ next:
 }
 
 static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
+                            struct vhost_iotlb *iotlb,
                             u64 iova, u64 size, u64 uaddr, u32 perm)
 {
        struct vhost_dev *dev = &v->vdev;
@@ -780,7 +897,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
                        if (last_pfn && (this_pfn != last_pfn + 1)) {
                                /* Pin a contiguous chunk of memory */
                                csize = PFN_PHYS(last_pfn - map_pfn + 1);
-                               ret = vhost_vdpa_map(v, iova, csize,
+                               ret = vhost_vdpa_map(v, iotlb, iova, csize,
                                                     PFN_PHYS(map_pfn),
                                                     perm, NULL);
                                if (ret) {
@@ -810,7 +927,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
        }
 
        /* Pin the rest chunk */
-       ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+       ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
                             PFN_PHYS(map_pfn), perm, NULL);
 out:
        if (ret) {
@@ -830,7 +947,7 @@ out:
                        for (pfn = map_pfn; pfn <= last_pfn; pfn++)
                                unpin_user_page(pfn_to_page(pfn));
                }
-               vhost_vdpa_unmap(v, start, size);
+               vhost_vdpa_unmap(v, iotlb, start, size);
        }
 unlock:
        mmap_read_unlock(dev->mm);
@@ -841,11 +958,10 @@ free:
 }
 
 static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+                                          struct vhost_iotlb *iotlb,
                                           struct vhost_iotlb_msg *msg)
 {
-       struct vhost_dev *dev = &v->vdev;
        struct vdpa_device *vdpa = v->vdpa;
-       struct vhost_iotlb *iotlb = dev->iotlb;
 
        if (msg->iova < v->range.first || !msg->size ||
            msg->iova > U64_MAX - msg->size + 1 ||
@@ -857,19 +973,21 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
                return -EEXIST;
 
        if (vdpa->use_va)
-               return vhost_vdpa_va_map(v, msg->iova, msg->size,
+               return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
                                         msg->uaddr, msg->perm);
 
-       return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
+       return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
                                 msg->perm);
 }
 
-static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
+static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
                                        struct vhost_iotlb_msg *msg)
 {
        struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
+       struct vhost_iotlb *iotlb = NULL;
+       struct vhost_vdpa_as *as = NULL;
        int r = 0;
 
        mutex_lock(&dev->mutex);
@@ -878,20 +996,47 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
        if (r)
                goto unlock;
 
+       if (msg->type == VHOST_IOTLB_UPDATE ||
+           msg->type == VHOST_IOTLB_BATCH_BEGIN) {
+               as = vhost_vdpa_find_alloc_as(v, asid);
+               if (!as) {
+                       dev_err(&v->dev, "can't find and alloc asid %d\n",
+                               asid);
+                       r = -EINVAL;
+                       goto unlock;
+               }
+               iotlb = &as->iotlb;
+       } else
+               iotlb = asid_to_iotlb(v, asid);
+
+       if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
+               if (v->in_batch && v->batch_asid != asid) {
+                       dev_info(&v->dev, "batch id %d asid %d\n",
+                                v->batch_asid, asid);
+               }
+               if (!iotlb)
+                       dev_err(&v->dev, "no iotlb for asid %d\n", asid);
+               r = -EINVAL;
+               goto unlock;
+       }
+
        switch (msg->type) {
        case VHOST_IOTLB_UPDATE:
-               r = vhost_vdpa_process_iotlb_update(v, msg);
+               r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
                break;
        case VHOST_IOTLB_INVALIDATE:
-               vhost_vdpa_unmap(v, msg->iova, msg->size);
+               vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
                break;
        case VHOST_IOTLB_BATCH_BEGIN:
+               v->batch_asid = asid;
                v->in_batch = true;
                break;
        case VHOST_IOTLB_BATCH_END:
                if (v->in_batch && ops->set_map)
-                       ops->set_map(vdpa, dev->iotlb);
+                       ops->set_map(vdpa, asid, iotlb);
                v->in_batch = false;
+               if (!iotlb->nmaps)
+                       vhost_vdpa_remove_as(v, asid);
                break;
        default:
                r = -EINVAL;
@@ -977,6 +1122,21 @@ static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
        }
 }
 
+static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
+{
+       struct vhost_vdpa_as *as;
+       u32 asid;
+
+       vhost_dev_cleanup(&v->vdev);
+       kfree(v->vdev.vqs);
+
+       for (asid = 0; asid < v->vdpa->nas; asid++) {
+               as = asid_to_as(v, asid);
+               if (as)
+                       vhost_vdpa_remove_as(v, asid);
+       }
+}
+
 static int vhost_vdpa_open(struct inode *inode, struct file *filep)
 {
        struct vhost_vdpa *v;
@@ -1010,15 +1170,9 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
        vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
                       vhost_vdpa_process_iotlb_msg);
 
-       dev->iotlb = vhost_iotlb_alloc(0, 0);
-       if (!dev->iotlb) {
-               r = -ENOMEM;
-               goto err_init_iotlb;
-       }
-
        r = vhost_vdpa_alloc_domain(v);
        if (r)
-               goto err_init_iotlb;
+               goto err_alloc_domain;
 
        vhost_vdpa_set_iova_range(v);
 
@@ -1026,9 +1180,8 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
 
        return 0;
 
-err_init_iotlb:
-       vhost_dev_cleanup(&v->vdev);
-       kfree(vqs);
+err_alloc_domain:
+       vhost_vdpa_cleanup(v);
 err:
        atomic_dec(&v->opened);
        return r;
@@ -1052,11 +1205,9 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
        vhost_vdpa_clean_irq(v);
        vhost_vdpa_reset(v);
        vhost_dev_stop(&v->vdev);
-       vhost_vdpa_iotlb_free(v);
        vhost_vdpa_free_domain(v);
        vhost_vdpa_config_put(v);
        vhost_dev_cleanup(&v->vdev);
-       kfree(v->vdev.vqs);
        mutex_unlock(&d->mutex);
 
        atomic_dec(&v->opened);
@@ -1152,7 +1303,14 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vhost_vdpa *v;
        int minor;
-       int r;
+       int i, r;
+
+       /* We can't support platform IOMMU devices with more than one
+        * group or address space (AS).
+        */
+       if (!ops->set_map && !ops->dma_map &&
+           (vdpa->ngroups > 1 || vdpa->nas > 1))
+               return -EOPNOTSUPP;
 
        v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!v)
@@ -1196,6 +1354,9 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
        init_completion(&v->completion);
        vdpa_set_drvdata(vdpa, v);
 
+       for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
+               INIT_HLIST_HEAD(&v->as[i]);
+
        return 0;
 
 err:
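
The hunks above route every IOTLB message to a per-ASID address space: vhost_vdpa_find_alloc_as() creates one on UPDATE/BATCH_BEGIN, asid_to_iotlb() resolves existing ones, and an address space with no remaining maps is dropped at BATCH_END. The lookup helpers themselves live in an earlier hunk of the patch; the sketch below only illustrates the bucketed-hash shape they rely on, and the member names (hash_link, id) and the exact layout of struct vhost_vdpa_as are assumptions.

/* Illustrative sketch only -- assumed struct members, not the patch's code. */
static struct vhost_vdpa_as *asid_to_as_sketch(struct vhost_vdpa *v, u32 asid)
{
        struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
        struct vhost_vdpa_as *as;

        /* walk the bucket; each address space carries its own vhost_iotlb */
        hlist_for_each_entry(as, head, hash_link)
                if (as->id == asid)
                        return as;

        return NULL;
}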
index d02173f..4009782 100644 (file)
@@ -231,7 +231,7 @@ void vhost_poll_stop(struct vhost_poll *poll)
 }
 EXPORT_SYMBOL_GPL(vhost_poll_stop);
 
-void vhost_work_dev_flush(struct vhost_dev *dev)
+void vhost_dev_flush(struct vhost_dev *dev)
 {
        struct vhost_flush_struct flush;
 
@@ -243,15 +243,7 @@ void vhost_work_dev_flush(struct vhost_dev *dev)
                wait_for_completion(&flush.wait_event);
        }
 }
-EXPORT_SYMBOL_GPL(vhost_work_dev_flush);
-
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
-{
-       vhost_work_dev_flush(poll->dev);
-}
-EXPORT_SYMBOL_GPL(vhost_poll_flush);
+EXPORT_SYMBOL_GPL(vhost_dev_flush);
 
 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 {
@@ -468,7 +460,7 @@ void vhost_dev_init(struct vhost_dev *dev,
                    struct vhost_virtqueue **vqs, int nvqs,
                    int iov_limit, int weight, int byte_weight,
                    bool use_worker,
-                   int (*msg_handler)(struct vhost_dev *dev,
+                   int (*msg_handler)(struct vhost_dev *dev, u32 asid,
                                       struct vhost_iotlb_msg *msg))
 {
        struct vhost_virtqueue *vq;
@@ -538,7 +530,7 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
        attach.owner = current;
        vhost_work_init(&attach.work, vhost_attach_cgroups_work);
        vhost_work_queue(dev, &attach.work);
-       vhost_work_dev_flush(dev);
+       vhost_dev_flush(dev);
        return attach.ret;
 }
 
@@ -661,11 +653,11 @@ void vhost_dev_stop(struct vhost_dev *dev)
        int i;
 
        for (i = 0; i < dev->nvqs; ++i) {
-               if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
+               if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick)
                        vhost_poll_stop(&dev->vqs[i]->poll);
-                       vhost_poll_flush(&dev->vqs[i]->poll);
-               }
        }
+
+       vhost_dev_flush(dev);
 }
 EXPORT_SYMBOL_GPL(vhost_dev_stop);
 
@@ -1090,11 +1082,14 @@ static bool umem_access_ok(u64 uaddr, u64 size, int access)
        return true;
 }
 
-static int vhost_process_iotlb_msg(struct vhost_dev *dev,
+static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
                                   struct vhost_iotlb_msg *msg)
 {
        int ret = 0;
 
+       if (asid != 0)
+               return -EINVAL;
+
        mutex_lock(&dev->mutex);
        vhost_dev_lock_vqs(dev);
        switch (msg->type) {
@@ -1141,6 +1136,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
        struct vhost_iotlb_msg msg;
        size_t offset;
        int type, ret;
+       u32 asid = 0;
 
        ret = copy_from_iter(&type, sizeof(type), from);
        if (ret != sizeof(type)) {
@@ -1156,7 +1152,16 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
                offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
                break;
        case VHOST_IOTLB_MSG_V2:
-               offset = sizeof(__u32);
+               if (vhost_backend_has_feature(dev->vqs[0],
+                                             VHOST_BACKEND_F_IOTLB_ASID)) {
+                       ret = copy_from_iter(&asid, sizeof(asid), from);
+                       if (ret != sizeof(asid)) {
+                               ret = -EINVAL;
+                               goto done;
+                       }
+                       offset = 0;
+               } else
+                       offset = sizeof(__u32);
                break;
        default:
                ret = -EINVAL;
@@ -1178,9 +1183,9 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
        }
 
        if (dev->msg_handler)
-               ret = dev->msg_handler(dev, &msg);
+               ret = dev->msg_handler(dev, asid, &msg);
        else
-               ret = vhost_process_iotlb_msg(dev, &msg);
+               ret = vhost_process_iotlb_msg(dev, asid, &msg);
        if (ret) {
                ret = -EFAULT;
                goto done;
@@ -1719,7 +1724,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
        mutex_unlock(&vq->mutex);
 
        if (pollstop && vq->handle_kick)
-               vhost_poll_flush(&vq->poll);
+               vhost_dev_flush(vq->poll.dev);
        return r;
 }
 EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
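
The vhost_chr_write_iter() change above only parses the extra 32-bit ASID when VHOST_BACKEND_F_IOTLB_ASID has been negotiated (via VHOST_SET_BACKEND_FEATURES); otherwise that word keeps its old role as a reserved field that is skipped. A hedged userspace sketch of the resulting message layout, assuming the uapi vhost_msg_v2 field is named asid as in this series:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <linux/vhost.h>
#include <linux/vhost_types.h>

/* Sketch: map one range into a given address space of a vhost-vdpa fd. */
static int send_iotlb_update(int vdpa_fd, uint32_t asid,
                             uint64_t iova, uint64_t size, uint64_t uaddr)
{
        struct vhost_msg_v2 msg;

        memset(&msg, 0, sizeof(msg));
        msg.type = VHOST_IOTLB_MSG_V2;
        msg.asid = asid;        /* only honoured with VHOST_BACKEND_F_IOTLB_ASID */
        msg.iotlb.iova = iova;
        msg.iotlb.size = size;
        msg.iotlb.uaddr = uaddr;
        msg.iotlb.perm = VHOST_ACCESS_RW;
        msg.iotlb.type = VHOST_IOTLB_UPDATE;

        /* vhost_chr_write_iter() consumes the whole struct from one write() */
        return write(vdpa_fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}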
index 638bb64..d910910 100644 (file)
@@ -44,9 +44,8 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     __poll_t mask, struct vhost_dev *dev);
 int vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
-void vhost_poll_flush(struct vhost_poll *poll);
 void vhost_poll_queue(struct vhost_poll *poll);
-void vhost_work_dev_flush(struct vhost_dev *dev);
+void vhost_dev_flush(struct vhost_dev *dev);
 
 struct vhost_log {
        u64 addr;
@@ -161,7 +160,7 @@ struct vhost_dev {
        int byte_weight;
        u64 kcov_handle;
        bool use_worker;
-       int (*msg_handler)(struct vhost_dev *dev,
+       int (*msg_handler)(struct vhost_dev *dev, u32 asid,
                           struct vhost_iotlb_msg *msg);
 };
 
@@ -169,7 +168,7 @@ bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
 void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
                    int nvqs, int iov_limit, int weight, int byte_weight,
                    bool use_worker,
-                   int (*msg_handler)(struct vhost_dev *dev,
+                   int (*msg_handler)(struct vhost_dev *dev, u32 asid,
                                       struct vhost_iotlb_msg *msg));
 long vhost_dev_set_owner(struct vhost_dev *dev);
 bool vhost_dev_has_owner(struct vhost_dev *dev);
index e6c9d41..3683304 100644 (file)
@@ -705,12 +705,7 @@ out:
 
 static void vhost_vsock_flush(struct vhost_vsock *vsock)
 {
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
-               if (vsock->vqs[i].handle_kick)
-                       vhost_poll_flush(&vsock->vqs[i].poll);
-       vhost_work_dev_flush(&vsock->dev);
+       vhost_dev_flush(&vsock->dev);
 }
 
 static void vhost_vsock_reset_orphans(struct sock *sk)
index 40496e9..f304163 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/slab.h>
 #include <linux/font.h>
 #include <linux/crc32.h>
+#include <linux/fb.h>
 
 #include <asm/io.h>
 
@@ -392,7 +393,9 @@ static int __init sticonsole_init(void)
     for (i = 0; i < MAX_NR_CONSOLES; i++)
        font_data[i] = STI_DEF_FONT;
 
-    pr_info("sticon: Initializing STI text console.\n");
+    pr_info("sticon: Initializing STI text console on %s at [%s]\n",
+       sticon_sti->sti_data->inq_outptr.dev_name,
+       sticon_sti->pa_path);
     console_lock();
     err = do_take_over_console(&sti_con, 0, MAX_NR_CONSOLES - 1,
                PAGE0->mem_cons.cl_class != CL_DUPLEX);
index f869b72..fa23bf0 100644 (file)
 #include <asm/pdc.h>
 #include <asm/cacheflush.h>
 #include <asm/grfioctl.h>
+#include <asm/fb.h>
 
 #include "../fbdev/sticore.h"
 
-#define STI_DRIVERVERSION "Version 0.9b"
+#define STI_DRIVERVERSION "Version 0.9c"
 
 static struct sti_struct *default_sti __read_mostly;
 
@@ -502,7 +503,7 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
        if (!fbfont)
                return NULL;
 
-       pr_info("STI selected %ux%u framebuffer font %s for sticon\n",
+       pr_info("    using %ux%u framebuffer font %s\n",
                        fbfont->width, fbfont->height, fbfont->name);
                        
        bpc = ((fbfont->width+7)/8) * fbfont->height; 
@@ -549,6 +550,26 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
 }
 #endif
 
+static void sti_dump_font(struct sti_cooked_font *font)
+{
+#ifdef STI_DUMP_FONT
+       unsigned char *p = (unsigned char *)font->raw;
+       int n;
+
+       p += sizeof(struct sti_rom_font);
+       pr_debug("  w %d h %d bpc %d\n", font->width, font->height,
+                                       font->raw->bytes_per_char);
+
+       for (n = 0; n < 256 * font->raw->bytes_per_char; n += 16, p += 16) {
+               pr_debug("        0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x,"
+                       " 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x,"
+                       " 0x%02x, 0x%02x, 0x%02x, 0x%02x,\n",
+                       p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8],
+                       p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
+       }
+#endif
+}
+
 static int sti_search_font(struct sti_cooked_rom *rom, int height, int width)
 {
        struct sti_cooked_font *font;
@@ -796,6 +817,7 @@ static int sti_read_rom(int wordmode, struct sti_struct *sti,
        sti->font->width = sti->font->raw->width;
        sti->font->height = sti->font->raw->height;
        sti_font_convert_bytemode(sti, sti->font);
+       sti_dump_font(sti->font);
 
        sti->sti_mem_request = raw->sti_mem_req;
        sti->graphics_id[0] = raw->graphics_id[0];
@@ -946,6 +968,7 @@ out_err:
 
 static void sticore_check_for_default_sti(struct sti_struct *sti, char *path)
 {
+       pr_info("    located at [%s]\n", sti->pa_path);
        if (strcmp (path, default_sti_path) == 0)
                default_sti = sti;
 }
@@ -957,7 +980,6 @@ static void sticore_check_for_default_sti(struct sti_struct *sti, char *path)
  */
 static int __init sticore_pa_init(struct parisc_device *dev)
 {
-       char pa_path[21];
        struct sti_struct *sti = NULL;
        int hpa = dev->hpa.start;
 
@@ -970,8 +992,8 @@ static int __init sticore_pa_init(struct parisc_device *dev)
        if (!sti)
                return 1;
 
-       print_pa_hwpath(dev, pa_path);
-       sticore_check_for_default_sti(sti, pa_path);
+       print_pa_hwpath(dev, sti->pa_path);
+       sticore_check_for_default_sti(sti, sti->pa_path);
        return 0;
 }
 
@@ -1007,9 +1029,8 @@ static int sticore_pci_init(struct pci_dev *pd, const struct pci_device_id *ent)
 
        sti = sti_try_rom_generic(rom_base, fb_base, pd);
        if (sti) {
-               char pa_path[30];
-               print_pci_hwpath(pd, pa_path);
-               sticore_check_for_default_sti(sti, pa_path);
+               print_pci_hwpath(pd, sti->pa_path);
+               sticore_check_for_default_sti(sti, sti->pa_path);
        }
        
        if (!sti) {
@@ -1127,6 +1148,22 @@ int sti_call(const struct sti_struct *sti, unsigned long func,
        return ret;
 }
 
+/* check if given fb_info is the primary device */
+int fb_is_primary_device(struct fb_info *info)
+{
+       struct sti_struct *sti;
+
+       sti = sti_get_rom(0);
+
+       /* if no built-in graphics card found, allow any fb driver as default */
+       if (!sti)
+               return true;
+
+       /* return true if it's the default built-in framebuffer driver */
+       return (sti->info == info);
+}
+EXPORT_SYMBOL(fb_is_primary_device);
+
 MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer");
 MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines");
 MODULE_LICENSE("GPL v2");
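
A minimal usage sketch (not from this patch) of the new fb_is_primary_device() export: a console setup path could use it to prefer the built-in STI framebuffer when several fb_info instances are registered.

#include <linux/fb.h>

/* Sketch only: pick the primary framebuffer out of n registered ones. */
static struct fb_info *pick_primary_fb(struct fb_info **infos, int n)
{
        int i;

        for (i = 0; i < n; i++)
                if (fb_is_primary_device(infos[i]))
                        return infos[i];

        return n ? infos[0] : NULL;
}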
index 9ec969e..8080116 100644 (file)
@@ -758,12 +758,15 @@ static int clcdfb_of_vram_setup(struct clcd_fb *fb)
                return -ENODEV;
 
        fb->fb.screen_base = of_iomap(memory, 0);
-       if (!fb->fb.screen_base)
+       if (!fb->fb.screen_base) {
+               of_node_put(memory);
                return -ENOMEM;
+       }
 
        fb->fb.fix.smem_start = of_translate_address(memory,
                        of_get_address(memory, 0, &size, NULL));
        fb->fb.fix.smem_len = size;
+       of_node_put(memory);
 
        return 0;
 }
index 18dc2fc..886c564 100644 (file)
@@ -992,7 +992,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
        struct pci_dev *pdev  = NULL;
        void __iomem *fb_virt;
        int gen2vm = efi_enabled(EFI_BOOT);
-       resource_size_t pot_start, pot_end;
        phys_addr_t paddr;
        int ret;
 
@@ -1043,23 +1042,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
        dio_fb_size =
                screen_width * screen_height * screen_depth / 8;
 
-       if (gen2vm) {
-               pot_start = 0;
-               pot_end = -1;
-       } else {
-               if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
-                   pci_resource_len(pdev, 0) < screen_fb_size) {
-                       pr_err("Resource not available or (0x%lx < 0x%lx)\n",
-                              (unsigned long) pci_resource_len(pdev, 0),
-                              (unsigned long) screen_fb_size);
-                       goto err1;
-               }
-
-               pot_end = pci_resource_end(pdev, 0);
-               pot_start = pot_end - screen_fb_size + 1;
-       }
-
-       ret = vmbus_allocate_mmio(&par->mem, hdev, pot_start, pot_end,
+       ret = vmbus_allocate_mmio(&par->mem, hdev, 0, -1,
                                  screen_fb_size, 0x100000, true);
        if (ret != 0) {
                pr_err("Unable to allocate framebuffer memory\n");
index 313a051..beb841c 100644 (file)
@@ -231,5 +231,9 @@ extern int  omapfb_update_window_async(struct fb_info *fbi,
                                       struct omapfb_update_window *win,
                                       void (*callback)(void *),
                                       void *callback_data);
+extern int  hwa742_update_window_async(struct fb_info *fbi,
+                                      struct omapfb_update_window *win,
+                                      void (*callback)(void *),
+                                      void *callback_data);
 
 #endif /* __OMAPFB_H */
index be9910f..b407173 100644 (file)
@@ -117,16 +117,11 @@ static int nec_8048_connect(struct omap_dss_device *dssdev)
 {
        struct panel_drv_data *ddata = to_panel_data(dssdev);
        struct omap_dss_device *in = ddata->in;
-       int r;
 
        if (omapdss_device_is_connected(dssdev))
                return 0;
 
-       r = in->ops.dpi->connect(in, dssdev);
-       if (r)
-               return r;
-
-       return 0;
+       return in->ops.dpi->connect(in, dssdev);
 }
 
 static void nec_8048_disconnect(struct omap_dss_device *dssdev)
index c5f8912..531b36d 100644 (file)
@@ -173,7 +173,6 @@ static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data
 {
        struct dss_pll *pll = &hpll->pll;
        struct clk *clk;
-       int r;
 
        clk = devm_clk_get(&pdev->dev, "sys_clk");
        if (IS_ERR(clk)) {
@@ -203,12 +202,7 @@ static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data
        }
 
        pll->ops = &dsi_pll_ops;
-
-       r = dss_pll_register(pll);
-       if (r)
-               return r;
-
-       return 0;
+       return dss_pll_register(pll);
 }
 
 int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
index 350b313..043cc8f 100644 (file)
@@ -646,6 +646,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
        for (i = 0; i < 8; i++) {
                ret = pxa3xx_gcu_add_buffer(dev, priv);
                if (ret) {
+                       pxa3xx_gcu_free_buffers(dev, priv);
                        dev_err(dev, "failed to allocate DMA memory\n");
                        goto err_disable_clk;
                }
@@ -662,15 +663,15 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
                        SHARED_SIZE, irq);
        return 0;
 
-err_free_dma:
-       dma_free_coherent(dev, SHARED_SIZE,
-                       priv->shared, priv->shared_phys);
+err_disable_clk:
+       clk_disable_unprepare(priv->clk);
 
 err_misc_deregister:
        misc_deregister(&priv->misc_dev);
 
-err_disable_clk:
-       clk_disable_unprepare(priv->clk);
+err_free_dma:
+       dma_free_coherent(dev, SHARED_SIZE,
+                         priv->shared, priv->shared_phys);
 
        return ret;
 }
@@ -683,6 +684,7 @@ static int pxa3xx_gcu_remove(struct platform_device *pdev)
        pxa3xx_gcu_wait_idle(priv);
        misc_deregister(&priv->misc_dev);
        dma_free_coherent(dev, SHARED_SIZE, priv->shared, priv->shared_phys);
+       clk_disable_unprepare(priv->clk);
        pxa3xx_gcu_free_buffers(dev, priv);
 
        return 0;
similarity index 90%
rename from arch/arm/mach-pxa/include/mach/regs-lcd.h
rename to drivers/video/fbdev/pxa3xx-regs.h
index e2b6e3d..6a96610 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef __ASM_ARCH_REGS_LCD_H
 #define __ASM_ARCH_REGS_LCD_H
 
-#include <mach/bitfield.h>
-
 /*
  * LCD Controller Registers and Bits Definitions
  */
@@ -86,6 +84,9 @@
 #define LCCR0_OUC      (1 << 25)       /* Overlay Underlay control bit */
 #define LCCR0_LDDALT   (1 << 26)       /* LDD alternate mapping control */
 
+#define Fld(Size, Shft)        (((Size) << 16) + (Shft))
+#define FShft(Field)   ((Field) & 0x0000FFFF)
+
 #define LCCR1_PPL      Fld (10, 0)     /* Pixels Per Line - 1 */
 #define LCCR1_DisWdth(Pixel)   (((Pixel) - 1) << FShft (LCCR1_PPL))
 
 #define PRSR_ST_OK     (1 << 9)        /* Status OK */
 #define PRSR_CON_NT    (1 << 10)       /* Continue to Next Command */
 
-#define SMART_CMD_A0                    (0x1 << 8)
-#define SMART_CMD_READ_STATUS_REG       (0x0 << 9)
-#define SMART_CMD_READ_FRAME_BUFFER    ((0x0 << 9) | SMART_CMD_A0)
-#define SMART_CMD_WRITE_COMMAND                 (0x1 << 9)
-#define SMART_CMD_WRITE_DATA           ((0x1 << 9) | SMART_CMD_A0)
-#define SMART_CMD_WRITE_FRAME          ((0x2 << 9) | SMART_CMD_A0)
-#define SMART_CMD_WAIT_FOR_VSYNC        (0x3 << 9)
-#define SMART_CMD_NOOP                  (0x4 << 9)
-#define SMART_CMD_INTERRUPT             (0x5 << 9)
-
-#define SMART_CMD(x)   (SMART_CMD_WRITE_COMMAND | ((x) & 0xff))
-#define SMART_DAT(x)   (SMART_CMD_WRITE_DATA | ((x) & 0xff))
-
-/* SMART_DELAY() is introduced for software controlled delay primitive which
- * can be inserted between command sequences, unused command 0x6 is used here
- * and delay ranges from 0ms ~ 255ms
- */
-#define SMART_CMD_DELAY                (0x6 << 9)
-#define SMART_DELAY(ms)                (SMART_CMD_DELAY | ((ms) & 0xff))
 #endif /* __ASM_ARCH_REGS_LCD_H */
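
Worked example of the Fld()/FShft() helpers added above: a field descriptor packs the field width into the upper 16 bits and the shift into the lower 16 bits, so:

/* Fld(10, 0)          == (10 << 16) + 0 == 0x000A0000 (10-bit field at bit 0) */
/* FShft(Fld(10, 0))   == 0x000A0000 & 0xFFFF == 0                             */
/* LCCR1_DisWdth(640)  therefore expands to (640 - 1) << 0 == 639              */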
index 8ad91c2..66cfc3e 100644 (file)
 #include <linux/console.h>
 #include <linux/of_graph.h>
 #include <linux/regulator/consumer.h>
+#include <linux/soc/pxa/cpu.h>
 #include <video/of_display_timing.h>
 #include <video/videomode.h>
 
-#include <mach/hardware.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/div64.h>
-#include <mach/bitfield.h>
 #include <linux/platform_data/video-pxafb.h>
 
 /*
@@ -73,6 +72,7 @@
 #define DEBUG_VAR 1
 
 #include "pxafb.h"
+#include "pxa3xx-regs.h"
 
 /* Bits which should not be set in machine configuration structures */
 #define LCCR0_INVALID_CONFIG_MASK      (LCCR0_OUM | LCCR0_BM | LCCR0_QDM |\
index c338f78..0ebdd28 100644 (file)
@@ -370,6 +370,9 @@ struct sti_struct {
 
        /* pointer to all internal data */
        struct sti_all_data *sti_data;
+
+       /* pa_path of this device */
+       char pa_path[24];
 };
 
 
index bebb2ee..38a861e 100644 (file)
@@ -1358,11 +1358,11 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
                goto out_err3;
        }
 
+       /* save for primary gfx device detection & unregister_framebuffer() */
+       sti->info = info;
        if (register_framebuffer(&fb->info) < 0)
                goto out_err4;
 
-       sti->info = info; /* save for unregister_framebuffer() */
-
        fb_info(&fb->info, "%s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n",
                fix->id,
                var->xres, 
index e25e8de..929d477 100644 (file)
@@ -490,11 +490,12 @@ static int vesafb_remove(struct platform_device *pdev)
 {
        struct fb_info *info = platform_get_drvdata(pdev);
 
-       /* vesafb_destroy takes care of info cleanup */
-       unregister_framebuffer(info);
        if (((struct vesafb_par *)(info->par))->region)
                release_region(0x3c0, 32);
 
+       /* vesafb_destroy takes care of info cleanup */
+       unregister_framebuffer(info);
+
        return 0;
 }
 
index 3bed357..4d2694d 100644 (file)
@@ -223,7 +223,6 @@ static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
        red = CNVT_TOHW(red, info->var.red.length);
        green = CNVT_TOHW(green, info->var.green.length);
        blue = CNVT_TOHW(blue, info->var.blue.length);
-       transp = CNVT_TOHW(transp, info->var.transp.length);
 #undef CNVT_TOHW
 
        v = (red << info->var.red.offset) |
index e49bec8..0703524 100644 (file)
@@ -659,7 +659,6 @@ static int fsl_hv_open(struct inode *inode, struct file *filp)
 {
        struct doorbell_queue *dbq;
        unsigned long flags;
-       int ret = 0;
 
        dbq = kzalloc(sizeof(struct doorbell_queue), GFP_KERNEL);
        if (!dbq) {
@@ -676,7 +675,7 @@ static int fsl_hv_open(struct inode *inode, struct file *filp)
 
        filp->private_data = dbq;
 
-       return ret;
+       return 0;
 }
 
 /*
index 22f15f4..ef04a96 100644 (file)
@@ -169,7 +169,7 @@ EXPORT_SYMBOL_GPL(virtio_add_status);
 /* Do some validation, then set FEATURES_OK */
 static int virtio_features_ok(struct virtio_device *dev)
 {
-       unsigned status;
+       unsigned int status;
        int ret;
 
        might_sleep();
@@ -220,6 +220,15 @@ static int virtio_features_ok(struct virtio_device *dev)
  * */
 void virtio_reset_device(struct virtio_device *dev)
 {
+       /*
+        * The below virtio_synchronize_cbs() guarantees that any
+        * interrupt for this line arriving after
+        * virtio_synchronize_cbs() has completed will see
+        * vq->broken as true.
+        */
+       virtio_break_device(dev);
+       virtio_synchronize_cbs(dev);
+
        dev->config->reset(dev);
 }
 EXPORT_SYMBOL_GPL(virtio_reset_device);
@@ -413,7 +422,7 @@ int register_virtio_device(struct virtio_device *dev)
        device_initialize(&dev->dev);
 
        /* Assign a unique device index and hence name. */
-       err = ida_simple_get(&virtio_index_ida, 0, 0, GFP_KERNEL);
+       err = ida_alloc(&virtio_index_ida, GFP_KERNEL);
        if (err < 0)
                goto out;
 
@@ -428,16 +437,16 @@ int register_virtio_device(struct virtio_device *dev)
        dev->config_enabled = false;
        dev->config_change_pending = false;
 
+       INIT_LIST_HEAD(&dev->vqs);
+       spin_lock_init(&dev->vqs_list_lock);
+
        /* We always start by resetting the device, in case a previous
         * driver messed it up.  This also tests that code path a little. */
-       dev->config->reset(dev);
+       virtio_reset_device(dev);
 
        /* Acknowledge that we've seen the device. */
        virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
 
-       INIT_LIST_HEAD(&dev->vqs);
-       spin_lock_init(&dev->vqs_list_lock);
-
        /*
         * device_add() causes the bus infrastructure to look for a matching
         * driver.
@@ -451,7 +460,7 @@ int register_virtio_device(struct virtio_device *dev)
 out_of_node_put:
        of_node_put(dev->dev.of_node);
 out_ida_remove:
-       ida_simple_remove(&virtio_index_ida, dev->index);
+       ida_free(&virtio_index_ida, dev->index);
 out:
        virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
        return err;
@@ -469,7 +478,7 @@ void unregister_virtio_device(struct virtio_device *dev)
        int index = dev->index; /* save for after device release */
 
        device_unregister(&dev->dev);
-       ida_simple_remove(&virtio_index_ida, index);
+       ida_free(&virtio_index_ida, index);
 }
 EXPORT_SYMBOL_GPL(unregister_virtio_device);
 
@@ -496,7 +505,7 @@ int virtio_device_restore(struct virtio_device *dev)
 
        /* We always start by resetting the device, in case a previous
         * driver messed it up. */
-       dev->config->reset(dev);
+       virtio_reset_device(dev);
 
        /* Acknowledge that we've seen the device. */
        virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
@@ -526,8 +535,9 @@ int virtio_device_restore(struct virtio_device *dev)
                        goto err;
        }
 
-       /* Finally, tell the device we're all set */
-       virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
+       /* If restore didn't do it, mark device DRIVER_OK ourselves. */
+       if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
+               virtio_device_ready(dev);
 
        virtio_config_enable(dev);
 
index f4c34a2..b9737da 100644 (file)
@@ -27,7 +27,7 @@
  * multiple balloon pages.  All memory counters in this driver are in balloon
  * page units.
  */
-#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
+#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned int)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
 #define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
 /* Maximum number of (4k) pages to deflate on OOM notifications. */
 #define VIRTIO_BALLOON_OOM_NR_PAGES 256
@@ -208,10 +208,10 @@ static void set_page_pfns(struct virtio_balloon *vb,
                                          page_to_balloon_pfn(page) + i);
 }
 
-static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
+static unsigned int fill_balloon(struct virtio_balloon *vb, size_t num)
 {
-       unsigned num_allocated_pages;
-       unsigned num_pfns;
+       unsigned int num_allocated_pages;
+       unsigned int num_pfns;
        struct page *page;
        LIST_HEAD(pages);
 
@@ -272,9 +272,9 @@ static void release_pages_balloon(struct virtio_balloon *vb,
        }
 }
 
-static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
+static unsigned int leak_balloon(struct virtio_balloon *vb, size_t num)
 {
-       unsigned num_freed_pages;
+       unsigned int num_freed_pages;
        struct page *page;
        struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
        LIST_HEAD(pages);
index 56128b9..f9a36bc 100644 (file)
@@ -144,8 +144,8 @@ static int vm_finalize_features(struct virtio_device *vdev)
        return 0;
 }
 
-static void vm_get(struct virtio_device *vdev, unsigned offset,
-                  void *buf, unsigned len)
+static void vm_get(struct virtio_device *vdev, unsigned int offset,
+                  void *buf, unsigned int len)
 {
        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
        void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
@@ -186,8 +186,8 @@ static void vm_get(struct virtio_device *vdev, unsigned offset,
        }
 }
 
-static void vm_set(struct virtio_device *vdev, unsigned offset,
-                  const void *buf, unsigned len)
+static void vm_set(struct virtio_device *vdev, unsigned int offset,
+                  const void *buf, unsigned int len)
 {
        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
        void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
@@ -253,6 +253,11 @@ static void vm_set_status(struct virtio_device *vdev, u8 status)
        /* We should never be setting status to 0. */
        BUG_ON(status == 0);
 
+       /*
+        * Per memory-barriers.txt, wmb() is not needed to guarantee
+        * that the cache coherent memory writes have completed
+        * before writing to the MMIO region.
+        */
        writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
 }
 
@@ -345,7 +350,14 @@ static void vm_del_vqs(struct virtio_device *vdev)
        free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
 }
 
-static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
+static void vm_synchronize_cbs(struct virtio_device *vdev)
+{
+       struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+       synchronize_irq(platform_get_irq(vm_dev->pdev, 0));
+}
+
+static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name, bool ctx)
 {
@@ -455,7 +467,7 @@ error_available:
        return ERR_PTR(err);
 }
 
-static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                       struct virtqueue *vqs[],
                       vq_callback_t *callbacks[],
                       const char * const names[],
@@ -541,6 +553,7 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
        .finalize_features = vm_finalize_features,
        .bus_name       = vm_bus_name,
        .get_shm_region = vm_get_shm_region,
+       .synchronize_cbs = vm_synchronize_cbs,
 };
 
 
@@ -657,7 +670,7 @@ static int vm_cmdline_set(const char *device,
        int err;
        struct resource resources[2] = {};
        char *str;
-       long long int base, size;
+       long long base, size;
        unsigned int irq;
        int processed, consumed = 0;
        struct platform_device *pdev;
index d724f67..ca51fcc 100644 (file)
@@ -104,8 +104,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        const char *name = dev_name(&vp_dev->vdev.dev);
-       unsigned flags = PCI_IRQ_MSIX;
-       unsigned i, v;
+       unsigned int flags = PCI_IRQ_MSIX;
+       unsigned int i, v;
        int err = -ENOMEM;
 
        vp_dev->msix_vectors = nvectors;
@@ -171,7 +171,7 @@ error:
        return err;
 }
 
-static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
+static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
                                     void (*callback)(struct virtqueue *vq),
                                     const char *name,
                                     bool ctx,
@@ -254,8 +254,7 @@ void vp_del_vqs(struct virtio_device *vdev)
 
        if (vp_dev->msix_affinity_masks) {
                for (i = 0; i < vp_dev->msix_vectors; i++)
-                       if (vp_dev->msix_affinity_masks[i])
-                               free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+                       free_cpumask_var(vp_dev->msix_affinity_masks[i]);
        }
 
        if (vp_dev->msix_enabled) {
@@ -276,7 +275,7 @@ void vp_del_vqs(struct virtio_device *vdev)
        vp_dev->vqs = NULL;
 }
 
-static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
+static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
                const char * const names[], bool per_vq_vectors,
                const bool *ctx,
@@ -350,7 +349,7 @@ error_find:
        return err;
 }
 
-static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
+static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
                const char * const names[], const bool *ctx)
 {
@@ -389,7 +388,7 @@ out_del_vqs:
 }
 
 /* the config->find_vqs() implementation */
-int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
                const char * const names[], const bool *ctx,
                struct irq_affinity *desc)
index eb17a29..23112d8 100644 (file)
@@ -38,7 +38,7 @@ struct virtio_pci_vq_info {
        struct list_head node;
 
        /* MSI-X vector (or none) */
-       unsigned msix_vector;
+       unsigned int msix_vector;
 };
 
 /* Our device structure */
@@ -68,16 +68,16 @@ struct virtio_pci_device {
         * and I'm too lazy to allocate each name separately. */
        char (*msix_names)[256];
        /* Number of available vectors */
-       unsigned msix_vectors;
+       unsigned int msix_vectors;
        /* Vectors allocated, excluding per-vq vectors if any */
-       unsigned msix_used_vectors;
+       unsigned int msix_used_vectors;
 
        /* Whether we have vector per vq */
        bool per_vq_vectors;
 
        struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
                                      struct virtio_pci_vq_info *info,
-                                     unsigned idx,
+                                     unsigned int idx,
                                      void (*callback)(struct virtqueue *vq),
                                      const char *name,
                                      bool ctx,
@@ -108,7 +108,7 @@ bool vp_notify(struct virtqueue *vq);
 /* the config->del_vqs() implementation */
 void vp_del_vqs(struct virtio_device *vdev);
 /* the config->find_vqs() implementation */
-int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
                const char * const names[], const bool *ctx,
                struct irq_affinity *desc);
index 6f4e34c..a5e5721 100644 (file)
@@ -45,8 +45,8 @@ static int vp_finalize_features(struct virtio_device *vdev)
 }
 
 /* virtio config->get() implementation */
-static void vp_get(struct virtio_device *vdev, unsigned offset,
-                  void *buf, unsigned len)
+static void vp_get(struct virtio_device *vdev, unsigned int offset,
+                  void *buf, unsigned int len)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        void __iomem *ioaddr = vp_dev->ldev.ioaddr +
@@ -61,8 +61,8 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
 
 /* the config->set() implementation.  it's symmetric to the config->get()
  * implementation */
-static void vp_set(struct virtio_device *vdev, unsigned offset,
-                  const void *buf, unsigned len)
+static void vp_set(struct virtio_device *vdev, unsigned int offset,
+                  const void *buf, unsigned int len)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        void __iomem *ioaddr = vp_dev->ldev.ioaddr +
@@ -109,7 +109,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
                                  struct virtio_pci_vq_info *info,
-                                 unsigned index,
+                                 unsigned int index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
                                  bool ctx,
@@ -192,6 +192,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
        .reset          = vp_reset,
        .find_vqs       = vp_find_vqs,
        .del_vqs        = vp_del_vqs,
+       .synchronize_cbs = vp_synchronize_vectors,
        .get_features   = vp_get_features,
        .finalize_features = vp_finalize_features,
        .bus_name       = vp_bus_name,
index a2671a2..623906b 100644 (file)
@@ -60,8 +60,8 @@ static int vp_finalize_features(struct virtio_device *vdev)
 }
 
 /* virtio config->get() implementation */
-static void vp_get(struct virtio_device *vdev, unsigned offset,
-                  void *buf, unsigned len)
+static void vp_get(struct virtio_device *vdev, unsigned int offset,
+                  void *buf, unsigned int len)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
@@ -98,8 +98,8 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
 
 /* the config->set() implementation.  it's symmetric to the config->get()
  * implementation */
-static void vp_set(struct virtio_device *vdev, unsigned offset,
-                  const void *buf, unsigned len)
+static void vp_set(struct virtio_device *vdev, unsigned int offset,
+                  const void *buf, unsigned int len)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
@@ -183,7 +183,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
                                  struct virtio_pci_vq_info *info,
-                                 unsigned index,
+                                 unsigned int index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
                                  bool ctx,
@@ -248,7 +248,7 @@ err_map_notify:
        return ERR_PTR(err);
 }
 
-static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                              struct virtqueue *vqs[],
                              vq_callback_t *callbacks[],
                              const char * const names[], const bool *ctx,
@@ -394,6 +394,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
        .reset          = vp_reset,
        .find_vqs       = vp_modern_find_vqs,
        .del_vqs        = vp_del_vqs,
+       .synchronize_cbs = vp_synchronize_vectors,
        .get_features   = vp_get_features,
        .finalize_features = vp_finalize_features,
        .bus_name       = vp_bus_name,
@@ -411,6 +412,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
        .reset          = vp_reset,
        .find_vqs       = vp_modern_find_vqs,
        .del_vqs        = vp_del_vqs,
+       .synchronize_cbs = vp_synchronize_vectors,
        .get_features   = vp_get_features,
        .finalize_features = vp_finalize_features,
        .bus_name       = vp_bus_name,
index 591738a..a0fa14f 100644 (file)
@@ -347,6 +347,7 @@ err_map_notify:
 err_map_isr:
        pci_iounmap(pci_dev, mdev->common);
 err_map_common:
+       pci_release_selected_regions(pci_dev, mdev->modern_bars);
        return err;
 }
 EXPORT_SYMBOL_GPL(vp_modern_probe);
@@ -466,6 +467,11 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
 {
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
 
+       /*
+        * Per memory-barriers.txt, wmb() is not needed to guarantee
+        * that the cache coherent memory writes have completed
+        * before writing to the MMIO region.
+        */
        vp_iowrite8(status, &cfg->device_status);
 }
 EXPORT_SYMBOL_GPL(vp_modern_set_status);
index cfb028c..13a7348 100644 (file)
@@ -205,11 +205,9 @@ struct vring_virtqueue {
 
 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
 
-static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
+static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
                                          unsigned int total_sg)
 {
-       struct vring_virtqueue *vq = to_vvq(_vq);
-
        /*
         * If the host supports indirect descriptor tables, and we have multiple
         * buffers, then go indirect. FIXME: tune this threshold
@@ -499,7 +497,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 
        head = vq->free_head;
 
-       if (virtqueue_use_indirect(_vq, total_sg))
+       if (virtqueue_use_indirect(vq, total_sg))
                desc = alloc_indirect_split(_vq, total_sg, gfp);
        else {
                desc = NULL;
@@ -519,7 +517,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
                descs_used = total_sg;
        }
 
-       if (vq->vq.num_free < descs_used) {
+       if (unlikely(vq->vq.num_free < descs_used)) {
                pr_debug("Can't add buf len %i - avail = %i\n",
                         descs_used, vq->vq.num_free);
                /* FIXME: for historical reasons, we force a notify here if
@@ -811,7 +809,7 @@ static void virtqueue_disable_cb_split(struct virtqueue *_vq)
        }
 }
 
-static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
+static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 last_used_idx;
@@ -836,7 +834,7 @@ static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
        return last_used_idx;
 }
 
-static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
+static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -1178,7 +1176,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 
        BUG_ON(total_sg == 0);
 
-       if (virtqueue_use_indirect(_vq, total_sg)) {
+       if (virtqueue_use_indirect(vq, total_sg)) {
                err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
                                                    in_sgs, data, gfp);
                if (err != -ENOMEM) {
@@ -1488,7 +1486,7 @@ static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
        }
 }
 
-static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
+static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -1690,7 +1688,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
        vq->we_own_ring = true;
        vq->notify = notify;
        vq->weak_barriers = weak_barriers;
-       vq->broken = false;
+       vq->broken = true;
        vq->last_used_idx = 0;
        vq->event_triggered = false;
        vq->num_added = 0;
@@ -2027,7 +2025,7 @@ EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
  * Caller must ensure we don't call this with other virtqueue
  * operations at the same time (except where noted).
  */
-unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
+unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -2048,7 +2046,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
  *
  * This does not need to be serialized.
  */
-bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -2074,7 +2072,7 @@ EXPORT_SYMBOL_GPL(virtqueue_poll);
  */
 bool virtqueue_enable_cb(struct virtqueue *_vq)
 {
-       unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
+       unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);
 
        return !virtqueue_poll(_vq, last_used_idx);
 }
@@ -2136,8 +2134,11 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
                return IRQ_NONE;
        }
 
-       if (unlikely(vq->broken))
-               return IRQ_HANDLED;
+       if (unlikely(vq->broken)) {
+               dev_warn_once(&vq->vq.vdev->dev,
+                             "virtio vring IRQ raised before DRIVER_OK");
+               return IRQ_NONE;
+       }
 
        /* Just a hint for performance: so it's ok that this can be racy! */
        if (vq->event)
@@ -2179,7 +2180,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
        vq->we_own_ring = false;
        vq->notify = notify;
        vq->weak_barriers = weak_barriers;
-       vq->broken = false;
+       vq->broken = true;
        vq->last_used_idx = 0;
        vq->event_triggered = false;
        vq->num_added = 0;
@@ -2397,6 +2398,28 @@ void virtio_break_device(struct virtio_device *dev)
 }
 EXPORT_SYMBOL_GPL(virtio_break_device);
 
+/*
+ * This should allow the device to be used by the driver. You may
+ * need to grab appropriate locks to flush the write to
+ * vq->broken. This should only be used in specific cases, e.g.
+ * probing and restoring, and should only be called by the
+ * core, never directly by a driver.
+ */
+void __virtio_unbreak_device(struct virtio_device *dev)
+{
+       struct virtqueue *_vq;
+
+       spin_lock(&dev->vqs_list_lock);
+       list_for_each_entry(_vq, &dev->vqs, list) {
+               struct vring_virtqueue *vq = to_vvq(_vq);
+
+               /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+               WRITE_ONCE(vq->broken, false);
+       }
+       spin_unlock(&dev->vqs_list_lock);
+}
+EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
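
__virtio_unbreak_device() is the counterpart of the vq->broken = true defaults above: virtqueues now start out broken and are only unbroken once the driver has finished its setup. A hedged sketch of the ordering the core is expected to follow before announcing DRIVER_OK (not the exact virtio_config.h implementation):

#include <linux/virtio_config.h>

/* Sketch only: sync callbacks, unbreak the vqs, then set DRIVER_OK. */
static inline void virtio_device_ready_sketch(struct virtio_device *dev)
{
        unsigned int status = dev->config->get_status(dev);

        WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);

        /* a handler that later sees vq->broken == false also sees the setup */
        if (dev->config->synchronize_cbs)
                dev->config->synchronize_cbs(dev);
        __virtio_unbreak_device(dev);

        /* only now are vq interrupts serviced instead of returning IRQ_NONE */
        dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}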
+
 dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
index 7650455..c40f7de 100644 (file)
@@ -53,16 +53,16 @@ static struct vdpa_device *vd_get_vdpa(struct virtio_device *vdev)
        return to_virtio_vdpa_device(vdev)->vdpa;
 }
 
-static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset,
-                           void *buf, unsigned len)
+static void virtio_vdpa_get(struct virtio_device *vdev, unsigned int offset,
+                           void *buf, unsigned int len)
 {
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
 
        vdpa_get_config(vdpa, offset, buf, len);
 }
 
-static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset,
-                           const void *buf, unsigned len)
+static void virtio_vdpa_set(struct virtio_device *vdev, unsigned int offset,
+                           const void *buf, unsigned int len)
 {
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
 
@@ -184,7 +184,7 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
        }
 
        /* Setup virtqueue callback */
-       cb.callback = virtio_vdpa_virtqueue_cb;
+       cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
        cb.private = info;
        ops->set_vq_cb(vdpa, index, &cb);
        ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));
@@ -263,7 +263,7 @@ static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
                virtio_vdpa_del_vq(vq);
 }
 
-static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                                struct virtqueue *vqs[],
                                vq_callback_t *callbacks[],
                                const char * const names[],
diff --git a/drivers/visorbus/Kconfig b/drivers/visorbus/Kconfig
deleted file mode 100644 (file)
index fa947a7..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Unisys visorbus configuration
-#
-
-config UNISYS_VISORBUS
-       tristate "Unisys visorbus driver"
-       depends on X86_64 && ACPI
-       help
-       The visorbus driver is a virtualized bus for the Unisys s-Par firmware.
-       Virtualized devices allow Linux guests on a system to share disks and
-       network cards that do not have SR-IOV support, and to be accessed using
-       the partition desktop application. The visorbus driver is required to
-       discover devices on an s-Par guest, and must be present for any other
-       s-Par guest driver to function correctly.
diff --git a/drivers/visorbus/Makefile b/drivers/visorbus/Makefile
deleted file mode 100644 (file)
index e8df59d..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Unisys visorbus
-#
-
-obj-$(CONFIG_UNISYS_VISORBUS)  += visorbus.o
-
-visorbus-y := visorbus_main.o
-visorbus-y += visorchannel.o
-visorbus-y += visorchipset.o
diff --git a/drivers/visorbus/controlvmchannel.h b/drivers/visorbus/controlvmchannel.h
deleted file mode 100644 (file)
index c872135..0000000
+++ /dev/null
@@ -1,650 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#ifndef __CONTROLVMCHANNEL_H__
-#define __CONTROLVMCHANNEL_H__
-
-#include <linux/uuid.h>
-#include <linux/visorbus.h>
-
-/* {2B3C2D10-7EF5-4ad8-B966-3448B7386B3D} */
-#define VISOR_CONTROLVM_CHANNEL_GUID \
-       GUID_INIT(0x2b3c2d10, 0x7ef5, 0x4ad8, \
-                 0xb9, 0x66, 0x34, 0x48, 0xb7, 0x38, 0x6b, 0x3d)
-
-#define CONTROLVM_MESSAGE_MAX 64
-
-/*
- * Must increment this whenever you insert or delete fields within this channel
- * struct.  Also increment whenever you change the meaning of fields within this
- * channel struct so as to break pre-existing software. Note that you can
- * usually add fields to the END of the channel struct withOUT needing to
- * increment this.
- */
-#define VISOR_CONTROLVM_CHANNEL_VERSIONID 1
-
-/* Defines for various channel queues */
-#define CONTROLVM_QUEUE_REQUEST                0
-#define CONTROLVM_QUEUE_RESPONSE       1
-#define CONTROLVM_QUEUE_EVENT          2
-#define CONTROLVM_QUEUE_ACK            3
-
-/* Max num of messages stored during IOVM creation to be reused after crash */
-#define CONTROLVM_CRASHMSG_MAX 2
-
-/*
- * struct visor_segment_state
- * @enabled:   May enter other states.
- * @active:    Assigned to active partition.
- * @alive:     Configure message sent to service/server.
- * @revoked:   Similar to partition state ShuttingDown.
- * @allocated: Memory (device/port number) has been selected by Command.
- * @known:     Has been introduced to the service/guest partition.
- * @ready:     Service/Guest partition has responded to introduction.
- * @operating: Resource is configured and operating.
- * @reserved:  Natural alignment.
- *
- * Note: Don't use high bit unless we need to switch to ushort which is
- * non-compliant.
- */
-struct visor_segment_state  {
-       u16 enabled:1;
-       u16 active:1;
-       u16 alive:1;
-       u16 revoked:1;
-       u16 allocated:1;
-       u16 known:1;
-       u16 ready:1;
-       u16 operating:1;
-       u16 reserved:8;
-} __packed;
-
-static const struct visor_segment_state segment_state_running = {
-       1, 1, 1, 0, 1, 1, 1, 1
-};
-
-static const struct visor_segment_state segment_state_paused = {
-       1, 1, 1, 0, 1, 1, 1, 0
-};
-
-static const struct visor_segment_state segment_state_standby = {
-       1, 1, 0, 0, 1, 1, 1, 0
-};
-
-/*
- * enum controlvm_id
- * @CONTROLVM_INVALID:
- * @CONTROLVM_BUS_CREATE:              CP --> SP, GP.
- * @CONTROLVM_BUS_DESTROY:             CP --> SP, GP.
- * @CONTROLVM_BUS_CONFIGURE:           CP --> SP.
- * @CONTROLVM_BUS_CHANGESTATE:         CP --> SP, GP.
- * @CONTROLVM_BUS_CHANGESTATE_EVENT:   SP, GP --> CP.
- * @CONTROLVM_DEVICE_CREATE:           CP --> SP, GP.
- * @CONTROLVM_DEVICE_DESTROY:          CP --> SP, GP.
- * @CONTROLVM_DEVICE_CONFIGURE:                CP --> SP.
- * @CONTROLVM_DEVICE_CHANGESTATE:      CP --> SP, GP.
- * @CONTROLVM_DEVICE_CHANGESTATE_EVENT:        SP, GP --> CP.
- * @CONTROLVM_DEVICE_RECONFIGURE:      CP --> Boot.
- * @CONTROLVM_CHIPSET_INIT:            CP --> SP, GP.
- * @CONTROLVM_CHIPSET_STOP:            CP --> SP, GP.
- * @CONTROLVM_CHIPSET_READY:           CP --> SP.
- * @CONTROLVM_CHIPSET_SELFTEST:                CP --> SP.
- *
- * Ids for commands that may appear in either queue of a ControlVm channel.
- *
- * Commands that are initiated by the command partition (CP), by an IO or
- * console service partition (SP), or by a guest partition (GP) are:
- * - issued on the RequestQueue queue (q #0) in the ControlVm channel
- * - responded to on the ResponseQueue queue (q #1) in the ControlVm channel
- *
- * Events that are initiated by an IO or console service partition (SP) or
- * by a guest partition (GP) are:
- * - issued on the EventQueue queue (q #2) in the ControlVm channel
- * - responded to on the EventAckQueue queue (q #3) in the ControlVm channel
- */
-enum controlvm_id {
-       CONTROLVM_INVALID = 0,
-       /*
-        * SWITCH commands required Parameter: SwitchNumber.
-        * BUS commands required Parameter: BusNumber
-        */
-       CONTROLVM_BUS_CREATE = 0x101,
-       CONTROLVM_BUS_DESTROY = 0x102,
-       CONTROLVM_BUS_CONFIGURE = 0x104,
-       CONTROLVM_BUS_CHANGESTATE = 0x105,
-       CONTROLVM_BUS_CHANGESTATE_EVENT = 0x106,
-       /* DEVICE commands required Parameter: BusNumber, DeviceNumber */
-       CONTROLVM_DEVICE_CREATE = 0x201,
-       CONTROLVM_DEVICE_DESTROY = 0x202,
-       CONTROLVM_DEVICE_CONFIGURE = 0x203,
-       CONTROLVM_DEVICE_CHANGESTATE = 0x204,
-       CONTROLVM_DEVICE_CHANGESTATE_EVENT = 0x205,
-       CONTROLVM_DEVICE_RECONFIGURE = 0x206,
-       /* CHIPSET commands */
-       CONTROLVM_CHIPSET_INIT = 0x301,
-       CONTROLVM_CHIPSET_STOP = 0x302,
-       CONTROLVM_CHIPSET_READY = 0x304,
-       CONTROLVM_CHIPSET_SELFTEST = 0x305,
-};
-
-/*
- * struct irq_info
- * @reserved1:      Natural alignment purposes
- * @recv_irq_handle: Specifies interrupt handle. It is used to retrieve the
- *                  corresponding interrupt pin from Monitor; and the interrupt
- *                  pin is used to connect to the corresponding interrupt.
- *                  Used by IOPart-GP only.
- * @recv_irq_vector: Specifies interrupt vector. It, interrupt pin, and shared
- *                  are used to connect to the corresponding interrupt.
- *                  Used by IOPart-GP only.
- * @recv_irq_shared: Specifies if the recvInterrupt is shared.  It, interrupt
- *                  pin and vector are used to connect to 0 = not shared;
- *                  1 = shared the corresponding interrupt.
- *                  Used by IOPart-GP only.
- * @reserved:       Natural alignment purposes
- */
-struct irq_info {
-       u64 reserved1;
-       u64 recv_irq_handle;
-       u32 recv_irq_vector;
-       u8 recv_irq_shared;
-       u8 reserved[3];
-} __packed;
-
-/*
- * struct efi_visor_indication
- * @boot_to_fw_ui: Stop in UEFI UI
- * @clear_nvram:   Clear NVRAM
- * @clear_cmos:           Clear CMOS
- * @boot_to_tool:  Run install tool
- * @reserved:     Natural alignment
- */
-struct efi_visor_indication  {
-       u64 boot_to_fw_ui:1;
-       u64 clear_nvram:1;
-       u64 clear_cmos:1;
-       u64 boot_to_tool:1;
-       /* Remaining bits are available */
-       u64 reserved:60;
-} __packed;
-
-enum visor_chipset_feature {
-       VISOR_CHIPSET_FEATURE_REPLY = 0x00000001,
-       VISOR_CHIPSET_FEATURE_PARA_HOTPLUG = 0x00000002,
-};
-
-/*
- * struct controlvm_message_header
- * @id:                       See CONTROLVM_ID.
- * @message_size:      Includes size of this struct + size of message.
- * @segment_index:     Index of segment containing Vm message/information.
- * @completion_status: Error status code or result of  message completion.
- * @struct flags:
- *     @failed:             =1 in a response to signify failure.
- *     @response_expected:  =1 in all messages that expect a response.
- *     @server:             =1 in all bus & device-related messages where the
- *                          message receiver is to act as the bus or device
- *                          server.
- *     @test_message:       =1 for testing use only (Control and Command
- *                          ignore this).
- *     @partial_completion: =1 if there are forthcoming responses/acks
- *                           associated with this message.
- *      @preserve:          =1 this is to let us know to preserve channel
- *                          contents.
- *     @writer_in_diag:     =1 the DiagWriter is active in the Diagnostic
- *                          Partition.
- *     @reserve:            Natural alignment.
- * @reserved:         Natural alignment.
- * @message_handle:    Identifies the particular message instance.
- * @payload_vm_offset: Offset of payload area from start of this instance.
- * @payload_max_bytes: Maximum bytes allocated in payload area of ControlVm
- *                    segment.
- * @payload_bytes:     Actual number of bytes of payload area to copy between
- *                    IO/Command. If non-zero, there is a payload to copy.
- *
- * This is the common structure that is at the beginning of every
- * ControlVm message (both commands and responses) in any ControlVm
- * queue.  Commands are easily distinguished from responses by
- * looking at the flags.response field.
- */
-struct controlvm_message_header  {
-       u32 id;
-       /*
-        * For requests, indicates the message type. For responses, indicates
-        * the type of message we are responding to.
-        */
-       u32 message_size;
-       u32 segment_index;
-       u32 completion_status;
-       struct  {
-               u32 failed:1;
-               u32 response_expected:1;
-               u32 server:1;
-               u32 test_message:1;
-               u32 partial_completion:1;
-               u32 preserve:1;
-               u32 writer_in_diag:1;
-               u32 reserve:25;
-       } __packed flags;
-       u32 reserved;
-       u64 message_handle;
-       u64 payload_vm_offset;
-       u32 payload_max_bytes;
-       u32 payload_bytes;
-} __packed;
-
-/*
- * struct controlvm_packet_device_create - For CONTROLVM_DEVICE_CREATE
- * @bus_no:        Bus # (0..n-1) from the msg receiver's end.
- * @dev_no:        Bus-relative (0..n-1) device number.
- * @channel_addr:   Guest physical address of the channel, which can be
- *                 dereferenced by the receiver of this ControlVm command.
- * @channel_bytes:  Specifies size of the channel in bytes.
- * @data_type_guid: Specifies format of data in channel.
- * @dev_inst_guid:  Instance guid for the device.
- * @intr:           Specifies interrupt information (struct irq_info).
- */
-struct controlvm_packet_device_create  {
-       u32 bus_no;
-       u32 dev_no;
-       u64 channel_addr;
-       u64 channel_bytes;
-       guid_t data_type_guid;
-       guid_t dev_inst_guid;
-       struct irq_info intr;
-} __packed;
-
-/*
- * struct controlvm_packet_device_configure - For CONTROLVM_DEVICE_CONFIGURE
- * @bus_no: Bus number (0..n-1) from the msg receiver's perspective.
- * @dev_no: Bus-relative (0..n-1) device number.
- */
-struct controlvm_packet_device_configure  {
-       u32 bus_no;
-       u32 dev_no;
-} __packed;
-
-/* Total 128 bytes */
-struct controlvm_message_device_create {
-       struct controlvm_message_header header;
-       struct controlvm_packet_device_create packet;
-} __packed;
-
-/* Total 56 bytes */
-struct controlvm_message_device_configure  {
-       struct controlvm_message_header header;
-       struct controlvm_packet_device_configure packet;
-} __packed;
-
-/*
- * struct controlvm_message_packet - This is the format for a message in any
- *                                   ControlVm queue.
- * @struct create_bus:         For CONTROLVM_BUS_CREATE.
- *     @bus_no:             Bus # (0..n-1) from the msg receiver's perspective.
- *     @dev_count:          Indicates the max number of devices on this bus.
- *     @channel_addr:       Guest physical address of the channel, which can be
- *                          dereferenced by the receiver of this ControlVM
- *                          command.
- *     @channel_bytes:      Size of the channel.
- *     @bus_data_type_guid: Indicates format of data in bus channel.
- *     @bus_inst_guid:      Instance guid for the bus.
- *
- * @struct destroy_bus:                For CONTROLVM_BUS_DESTROY.
- *     @bus_no: Bus # (0..n-1) from the msg receiver's perspective.
- *     @reserved: Natural alignment purposes.
- *
- * @struct configure_bus:      For CONTROLVM_BUS_CONFIGURE.
- *     @bus_no:              Bus # (0..n-1) from the receiver's perspective.
- *     @reserved1:           For alignment purposes.
- *     @guest_handle:        This is used to convert guest physical address to
- *                           physical address.
- *     @recv_bus_irq_handle: Specifies interrupt info. It is used by SP to
- *                           register to receive interrupts from the CP. This
- *                           interrupt is used for bus level notifications.
- *                           The corresponding sendBusInterruptHandle is kept
- *                           in CP.
- *
- * @struct create_device:      For CONTROLVM_DEVICE_CREATE.
- *
- * @struct destroy_device:     For CONTROLVM_DEVICE_DESTROY.
- *     @bus_no: Bus # (0..n-1) from the msg receiver's perspective.
- *     @dev_no: Bus-relative (0..n-1) device number.
- *
- * @struct configure_device:   For CONTROLVM_DEVICE_CONFIGURE.
- *
- * @struct reconfigure_device: For CONTROLVM_DEVICE_RECONFIGURE.
- *     @bus_no: Bus # (0..n-1) from the msg receiver's perspective.
- *     @dev_no: Bus-relative (0..n-1) device number.
- *
- * @struct bus_change_state:   For CONTROLVM_BUS_CHANGESTATE.
- *     @bus_no:
- *     @struct state:
- *     @reserved: Natural alignment purposes.
- *
- * @struct device_change_state:        For CONTROLVM_DEVICE_CHANGESTATE.
- *     @bus_no:
- *     @dev_no:
- *     @struct state:
- *     @struct flags:
- *             @phys_device: =1 if message is for a physical device.
- *             @reserved:    Natural alignment.
- *             @reserved1:   Natural alignment.
- *     @reserved:    Natural alignment purposes.
- *
- * @struct device_change_state_event:  For CONTROLVM_DEVICE_CHANGESTATE_EVENT.
- *     @bus_no:
- *     @dev_no:
- *     @struct state:
- *     @reserved:     Natural alignment purposes.
- *
- * @struct init_chipset:       For CONTROLVM_CHIPSET_INIT.
- *     @bus_count:       Indicates the max number of busses.
- *     @switch_count:    Indicates the max number of switches.
- *     @enum features:
- *     @platform_number:
- *
- * @struct chipset_selftest:   For CONTROLVM_CHIPSET_SELFTEST.
- *      @options: Reserved.
- *      @test:   Bit 0 set to run embedded selftest.
- *
- * @addr:   A physical address of something that can be dereferenced by the
- *         receiver of this ControlVm command.
- *
- * @handle: A handle of something (depends on command id).
- */
-struct controlvm_message_packet  {
-       union  {
-               struct  {
-                       u32 bus_no;
-                       u32 dev_count;
-                       u64 channel_addr;
-                       u64 channel_bytes;
-                       guid_t bus_data_type_guid;
-                       guid_t bus_inst_guid;
-               } __packed create_bus;
-               struct  {
-                       u32 bus_no;
-                       u32 reserved;
-               } __packed destroy_bus;
-               struct  {
-                       u32 bus_no;
-                       u32 reserved1;
-                       u64 guest_handle;
-                       u64 recv_bus_irq_handle;
-               } __packed configure_bus;
-               struct controlvm_packet_device_create create_device;
-               struct  {
-                       u32 bus_no;
-                       u32 dev_no;
-               } __packed destroy_device;
-               struct controlvm_packet_device_configure configure_device;
-               struct  {
-                       u32 bus_no;
-                       u32 dev_no;
-               } __packed reconfigure_device;
-               struct  {
-                       u32 bus_no;
-                       struct visor_segment_state state;
-                       u8 reserved[2];
-               } __packed bus_change_state;
-               struct  {
-                       u32 bus_no;
-                       u32 dev_no;
-                       struct visor_segment_state state;
-                       struct  {
-                               u32 phys_device:1;
-                               u32 reserved:31;
-                               u32 reserved1;
-                       } __packed flags;
-                       u8 reserved[2];
-               } __packed device_change_state;
-               struct  {
-                       u32 bus_no;
-                       u32 dev_no;
-                       struct visor_segment_state state;
-                       u8 reserved[6];
-               } __packed device_change_state_event;
-               struct  {
-                       u32 bus_count;
-                       u32 switch_count;
-                       enum visor_chipset_feature features;
-                       u32 platform_number;
-               } __packed init_chipset;
-               struct  {
-                       u32 options;
-                       u32 test;
-               } __packed chipset_selftest;
-               u64 addr;
-               u64 handle;
-       };
-} __packed;
-
-/* All messages in any ControlVm queue have this layout. */
-struct controlvm_message {
-       struct controlvm_message_header hdr;
-       struct controlvm_message_packet cmd;
-} __packed;
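For illustration only, a minimal sketch (not code from this driver) of how a sender could populate one of these fixed-size messages for CONTROLVM_DEVICE_CREATE using the structures above; the GUID and channel-address values are placeholders supplied by the caller.

/* Sketch only: assumes the controlvmchannel.h definitions above and
 * <linux/string.h> for memset().
 */
static void example_fill_device_create(struct controlvm_message *msg,
				       u32 bus_no, u32 dev_no,
				       u64 channel_addr, u64 channel_bytes,
				       const guid_t *type_guid,
				       const guid_t *inst_guid)
{
	memset(msg, 0, sizeof(*msg));
	msg->hdr.id = CONTROLVM_DEVICE_CREATE;
	msg->hdr.message_size = sizeof(*msg);
	msg->hdr.flags.response_expected = 1;
	msg->cmd.create_device.bus_no = bus_no;
	msg->cmd.create_device.dev_no = dev_no;
	msg->cmd.create_device.channel_addr = channel_addr;
	msg->cmd.create_device.channel_bytes = channel_bytes;
	msg->cmd.create_device.data_type_guid = *type_guid;
	msg->cmd.create_device.dev_inst_guid = *inst_guid;
}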
-
-/*
- * struct visor_controlvm_channel
- * @struct header:
- * @gp_controlvm:                      Guest phys addr of this channel.
- * @gp_partition_tables:               Guest phys addr of partition tables.
- * @gp_diag_guest:                     Guest phys addr of diagnostic channel.
- * @gp_boot_romdisk:                   Guest phys addr of (read-only) Boot
- *                                     ROM disk.
- * @gp_boot_ramdisk:                   Guest phys addr of writable Boot RAM
- *                                     disk.
- * @gp_acpi_table:                     Guest phys addr of acpi table.
- * @gp_control_channel:                        Guest phys addr of control channel.
- * @gp_diag_romdisk:                   Guest phys addr of diagnostic ROM disk.
- * @gp_nvram:                          Guest phys addr of NVRAM channel.
- * @request_payload_offset:            Offset to request payload area.
- * @event_payload_offset:              Offset to event payload area.
- * @request_payload_bytes:             Bytes available in request payload area.
- * @event_payload_bytes:               Bytes available in event payload area.
- * @control_channel_bytes:
- * @nvram_channel_bytes:               Bytes in PartitionNvram segment.
- * @message_bytes:                     sizeof(CONTROLVM_MESSAGE).
- * @message_count:                     CONTROLVM_MESSAGE_MAX.
- * @gp_smbios_table:                   Guest phys addr of SMBIOS tables.
- * @gp_physical_smbios_table:          Guest phys addr of SMBIOS table.
- * @gp_reserved:                       VISOR_MAX_GUESTS_PER_SERVICE.
- * @virtual_guest_firmware_image_base: Guest physical address of EFI firmware
- *                                     image base.
- * @virtual_guest_firmware_entry_point:        Guest physical address of EFI firmware
- *                                     entry point.
- * @virtual_guest_firmware_image_size: Guest EFI firmware image size.
- * @virtual_guest_firmware_boot_base:  GPA = 1MB where EFI firmware image is
- *                                     copied to.
- * @virtual_guest_image_base:
- * @virtual_guest_image_size:
- * @prototype_control_channel_offset:
- * @virtual_guest_partition_handle:
- * @restore_action:                    Restore Action field to restore the
- *                                     guest partition.
- * @dump_action:                       For Windows guests it shows if the
- *                                     visordisk is in dump mode.
- * @nvram_fail_count:
- * @saved_crash_message_count:         = CONTROLVM_CRASHMSG_MAX.
- * @saved_crash_message_offset:                Offset to request payload area needed
- *                                     for crash dump.
- * @installation_error:                        Type of error encountered during
- *                                     installation.
- * @installation_text_id:              Id of string to display.
- * @installation_remaining_steps:      Number of remaining installation steps
- *                                     (for progress bars).
- * @tool_action:                       VISOR_TOOL_ACTIONS Installation Action
- *                                     field.
- * @reserved: Alignment.
- * @struct efi_visor_ind:
- * @sp_reserved:
- * @reserved2:                         Force signals to begin on 128-byte
- *                                     cache line.
- * @struct request_queue:              Guest partition uses this queue to send
- *                                     requests to Control.
- * @struct response_queue:             Control uses this queue to respond to
- *                                     service or guest partition request.
- * @struct event_queue:                        Control uses this queue to send events
- *                                     to guest partition.
- * @struct event_ack_queue:            Service or guest partition uses this
- *                                     queue to ack Control events.
- * @struct request_msg:                        Request fixed-size message pool -
- *                                     does not include payload.
- * @struct response_msg:               Response fixed-size message pool -
- *                                     does not include payload.
- * @struct event_msg:                  Event fixed-size message pool -
- *                                     does not include payload.
- * @struct event_ack_msg:              Ack fixed-size message pool -
- *                                     does not include payload.
- * @struct saved_crash_msg:            Message stored during IOVM creation to
- *                                     be reused after crash.
- */
-struct visor_controlvm_channel {
-       struct channel_header header;
-       u64 gp_controlvm;
-       u64 gp_partition_tables;
-       u64 gp_diag_guest;
-       u64 gp_boot_romdisk;
-       u64 gp_boot_ramdisk;
-       u64 gp_acpi_table;
-       u64 gp_control_channel;
-       u64 gp_diag_romdisk;
-       u64 gp_nvram;
-       u64 request_payload_offset;
-       u64 event_payload_offset;
-       u32 request_payload_bytes;
-       u32 event_payload_bytes;
-       u32 control_channel_bytes;
-       u32 nvram_channel_bytes;
-       u32 message_bytes;
-       u32 message_count;
-       u64 gp_smbios_table;
-       u64 gp_physical_smbios_table;
-       char gp_reserved[2688];
-       u64 virtual_guest_firmware_image_base;
-       u64 virtual_guest_firmware_entry_point;
-       u64 virtual_guest_firmware_image_size;
-       u64 virtual_guest_firmware_boot_base;
-       u64 virtual_guest_image_base;
-       u64 virtual_guest_image_size;
-       u64 prototype_control_channel_offset;
-       u64 virtual_guest_partition_handle;
-       u16 restore_action;
-       u16 dump_action;
-       u16 nvram_fail_count;
-       u16 saved_crash_message_count;
-       u32 saved_crash_message_offset;
-       u32 installation_error;
-       u32 installation_text_id;
-       u16 installation_remaining_steps;
-       u8 tool_action;
-       u8 reserved;
-       struct efi_visor_indication efi_visor_ind;
-       u32 sp_reserved;
-       u8 reserved2[28];
-       struct signal_queue_header request_queue;
-       struct signal_queue_header response_queue;
-       struct signal_queue_header event_queue;
-       struct signal_queue_header event_ack_queue;
-       struct controlvm_message request_msg[CONTROLVM_MESSAGE_MAX];
-       struct controlvm_message response_msg[CONTROLVM_MESSAGE_MAX];
-       struct controlvm_message event_msg[CONTROLVM_MESSAGE_MAX];
-       struct controlvm_message event_ack_msg[CONTROLVM_MESSAGE_MAX];
-       struct controlvm_message saved_crash_msg[CONTROLVM_CRASHMSG_MAX];
-} __packed;
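As a hedged illustration of the offset fields documented above (a sketch, not part of the original source): the request payload area starts request_payload_offset bytes from the channel's guest-physical base and spans request_payload_bytes, so its guest-physical address follows directly from the struct.

/* Sketch only: ch is assumed to be a readable, mapped copy of the channel. */
static u64 example_request_payload_gpa(const struct visor_controlvm_channel *ch)
{
	/* gp_controlvm is the guest-physical address of the channel itself */
	return ch->gp_controlvm + ch->request_payload_offset;
}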
-
-/*
- * struct visor_controlvm_parameters_header
- *
- * The following header will be located at the beginning of PayloadVmOffset for
- * various ControlVm commands. The receiver of a ControlVm command with a
- * PayloadVmOffset will dereference this address and then use connection_offset,
- * initiator_offset, and target_offset to get the location of UTF-8 formatted
- * strings that can be parsed to obtain command-specific information. The value
- * of total_length should equal PayloadBytes. The format of the strings at
- * PayloadVmOffset will take different forms depending on the message.
- */
-struct visor_controlvm_parameters_header {
-       u32 total_length;
-       u32 header_length;
-       u32 connection_offset;
-       u32 connection_length;
-       u32 initiator_offset;
-       u32 initiator_length;
-       u32 target_offset;
-       u32 target_length;
-       u32 client_offset;
-       u32 client_length;
-       u32 name_offset;
-       u32 name_length;
-       guid_t id;
-       u32 revision;
-       /* Natural alignment */
-       u32 reserved;
-} __packed;
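A hedged sketch of the lookup described in the comment above: given a copy of the payload area, the initiator string is located via initiator_offset/initiator_length (the strings are UTF-8 and are not guaranteed to be NUL-terminated). example_get_initiator and its bounds check are illustrative, not part of this driver.

/* Sketch only: payload points at a copy of the payload area, payload_len
 * bytes long (expected to equal total_length).  Returns NULL if the
 * initiator string does not fit in the buffer.
 */
static const char *example_get_initiator(const u8 *payload, u32 payload_len,
					 u32 *len_out)
{
	const struct visor_controlvm_parameters_header *ph =
		(const struct visor_controlvm_parameters_header *)payload;

	if (payload_len < sizeof(*ph) ||
	    ph->initiator_offset > payload_len ||
	    ph->initiator_length > payload_len - ph->initiator_offset)
		return NULL;
	*len_out = ph->initiator_length;
	return (const char *)payload + ph->initiator_offset;
}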
-
-/* General Errors------------------------------------------------------[0-99] */
-#define CONTROLVM_RESP_SUCCESS                    0
-#define CONTROLVM_RESP_ALREADY_DONE               1
-#define CONTROLVM_RESP_IOREMAP_FAILED             2
-#define CONTROLVM_RESP_KMALLOC_FAILED             3
-#define CONTROLVM_RESP_ID_UNKNOWN                 4
-#define CONTROLVM_RESP_ID_INVALID_FOR_CLIENT      5
-/* CONTROLVM_INIT_CHIPSET-------------------------------------------[100-199] */
-#define CONTROLVM_RESP_CLIENT_SWITCHCOUNT_NONZERO  100
-#define CONTROLVM_RESP_EXPECTED_CHIPSET_INIT      101
-/* Maximum Limit----------------------------------------------------[200-299] */
-/* BUS_CREATE */
-#define CONTROLVM_RESP_ERROR_MAX_BUSES            201
-/* DEVICE_CREATE */
-#define CONTROLVM_RESP_ERROR_MAX_DEVICES          202
-/* Payload and Parameter Related------------------------------------[400-499] */
-/* SWITCH_ATTACHEXTPORT, DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_PAYLOAD_INVALID            400
-/* Multiple */
-#define CONTROLVM_RESP_INITIATOR_PARAMETER_INVALID 401
-/* DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_TARGET_PARAMETER_INVALID           402
-/* DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_CLIENT_PARAMETER_INVALID           403
-/* Specified[Packet Structure] Value--------------------------------[500-599] */
-/* SWITCH_ATTACHINTPORT */
-/* BUS_CONFIGURE, DEVICE_CREATE, DEVICE_CONFIG, DEVICE_DESTROY */
-#define CONTROLVM_RESP_BUS_INVALID                500
-/* SWITCH_ATTACHINTPORT*/
-/* DEVICE_CREATE, DEVICE_CONFIGURE, DEVICE_DESTROY */
-#define CONTROLVM_RESP_DEVICE_INVALID             501
-/* DEVICE_CREATE, DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_CHANNEL_INVALID            502
-/* Partition Driver Callback Interface------------------------------[600-699] */
-/* BUS_CREATE, BUS_DESTROY, DEVICE_CREATE, DEVICE_DESTROY */
-#define CONTROLVM_RESP_VIRTPCI_DRIVER_FAILURE     604
-/* Unable to invoke VIRTPCI callback. VIRTPCI Callback returned error. */
-/* BUS_CREATE, BUS_DESTROY, DEVICE_CREATE, DEVICE_DESTROY */
-#define CONTROLVM_RESP_VIRTPCI_DRIVER_CALLBACK_ERROR   605
-/* Generic device callback returned error. */
-/* SWITCH_ATTACHEXTPORT, SWITCH_DETACHEXTPORT, DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_GENERIC_DRIVER_CALLBACK_ERROR   606
-/* Bus Related------------------------------------------------------[700-799] */
-/* BUS_DESTROY */
-#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED       700
-/* Channel Related--------------------------------------------------[800-899] */
-/* GET_CHANNELINFO, DEVICE_DESTROY */
-#define CONTROLVM_RESP_CHANNEL_TYPE_UNKNOWN           800
-/* DEVICE_CREATE */
-#define CONTROLVM_RESP_CHANNEL_SIZE_TOO_SMALL         801
-/* Chipset Shutdown Related---------------------------------------[1000-1099] */
-#define CONTROLVM_RESP_CHIPSET_SHUTDOWN_FAILED        1000
-#define CONTROLVM_RESP_CHIPSET_SHUTDOWN_ALREADY_ACTIVE 1001
-/* Chipset Stop Related-------------------------------------------[1100-1199] */
-#define CONTROLVM_RESP_CHIPSET_STOP_FAILED_BUS        1100
-#define CONTROLVM_RESP_CHIPSET_STOP_FAILED_SWITCH      1101
-/* Device Related-------------------------------------------------[1400-1499] */
-#define CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT            1400
-
-/* __CONTROLVMCHANNEL_H__ */
-#endif
diff --git a/drivers/visorbus/vbuschannel.h b/drivers/visorbus/vbuschannel.h
deleted file mode 100644 (file)
index 98711fb..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#ifndef __VBUSCHANNEL_H__
-#define __VBUSCHANNEL_H__
-
-/*
- * The vbus channel is the channel area provided via the BUS_CREATE controlvm
- * message for each virtual bus.  This channel area is provided to both server
- * and client ends of the bus.  The channel header area is initialized by
- * the server, and the remaining information is filled in by the client.
- * We currently use this for the client to provide various information about
- * the client devices and client drivers for the server end to see.
- */
-
-#include <linux/uuid.h>
-#include <linux/visorbus.h>
-
-/* {193b331b-c58f-11da-95a9-00e08161165f} */
-#define VISOR_VBUS_CHANNEL_GUID                                                \
-       GUID_INIT(0x193b331b, 0xc58f, 0x11da,                           \
-                 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-
-/*
- * Must increment this whenever you insert or delete fields within this channel
- * struct.  Also increment whenever you change the meaning of fields within this
- * channel struct so as to break pre-existing software.  Note that you can
- * usually add fields to the END of the channel struct withOUT needing to
- * increment this.
- */
-#define VISOR_VBUS_CHANNEL_VERSIONID 1
-
-/*
- * struct visor_vbus_deviceinfo
- * @devtype:  Short string identifying the device type.
- * @drvname:  Driver .sys file name.
- * @infostrs: Kernel version.
- * @reserved: Pad size to 256 bytes.
- *
- * An array of this struct is present in the channel area for each vbus. It is
- * filled in by the client side to provide info about the device and driver from
- * the client's perspective.
- */
-struct visor_vbus_deviceinfo {
-       u8 devtype[16];
-       u8 drvname[16];
-       u8 infostrs[96];
-       u8 reserved[128];
-} __packed;
-
-/*
- * struct visor_vbus_headerinfo
- * @struct_bytes:            Size of this struct in bytes.
- * @device_info_struct_bytes: Size of VISOR_VBUS_DEVICEINFO.
- * @dev_info_count:          Num of items in DevInfo member. This is the
- *                           allocated size.
- * @chp_info_offset:         Byte offset from beginning of this struct to the
- *                           ChpInfo struct.
- * @bus_info_offset:         Byte offset from beginning of this struct to the
- *                           BusInfo struct.
- * @dev_info_offset:         Byte offset from beginning of this struct to the
- *                           DevInfo array.
- * @reserved:                Natural alignment.
- */
-struct visor_vbus_headerinfo {
-       u32 struct_bytes;
-       u32 device_info_struct_bytes;
-       u32 dev_info_count;
-       u32 chp_info_offset;
-       u32 bus_info_offset;
-       u32 dev_info_offset;
-       u8 reserved[104];
-} __packed;
-
-/*
- * struct visor_vbus_channel
- * @channel_header: Initialized by server.
- * @hdr_info:      Initialized by server.
- * @chp_info:      Describes client chipset device and driver.
- * @bus_info:      Describes client bus device and driver.
- * @dev_info:      Describes client device and driver for each device on the
- *                 bus.
- */
-struct visor_vbus_channel {
-       struct channel_header channel_header;
-       struct visor_vbus_headerinfo hdr_info;
-       struct visor_vbus_deviceinfo chp_info;
-       struct visor_vbus_deviceinfo bus_info;
-       struct visor_vbus_deviceinfo dev_info[];
-} __packed;
-
-#endif
diff --git a/drivers/visorbus/visorbus_main.c b/drivers/visorbus/visorbus_main.c
deleted file mode 100644 (file)
index 152fd29..0000000
+++ /dev/null
@@ -1,1234 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#include <linux/ctype.h>
-#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/visorbus.h>
-#include <linux/uuid.h>
-
-#include "visorbus_private.h"
-
-static const guid_t visor_vbus_channel_guid = VISOR_VBUS_CHANNEL_GUID;
-
-/* Display string that is guaranteed to be no longer than 99 characters */
-#define LINESIZE 99
-#define POLLJIFFIES_NORMALCHANNEL 10
-
-/* stores whether bus_registration was successful */
-static bool initialized;
-static struct dentry *visorbus_debugfs_dir;
-
-/*
- * DEVICE type attributes
- *
- * The modalias file will contain the guid of the device.
- */
-static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
-                            char *buf)
-{
-       struct visor_device *vdev;
-       const guid_t *guid;
-
-       vdev = to_visor_device(dev);
-       guid = visorchannel_get_guid(vdev->visorchannel);
-       return sprintf(buf, "visorbus:%pUl\n", guid);
-}
-static DEVICE_ATTR_RO(modalias);
-
-static struct attribute *visorbus_dev_attrs[] = {
-       &dev_attr_modalias.attr,
-       NULL,
-};
-
-ATTRIBUTE_GROUPS(visorbus_dev);
-
-/* filled in with info about parent chipset driver when we register with it */
-static struct visor_vbus_deviceinfo chipset_driverinfo;
-/* filled in with info about this driver, wrt it servicing client busses */
-static struct visor_vbus_deviceinfo clientbus_driverinfo;
-
-/* list of visor_device structs, linked via .list_all */
-static LIST_HEAD(list_all_bus_instances);
-/* list of visor_device structs, linked via .list_all */
-static LIST_HEAD(list_all_device_instances);
-
-/*
- * Generic function useful for validating any type of channel when it is
- * received by the client that will be accessing the channel.
- * Note that <logCtx> is only needed for callers in the EFI environment, and
- * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
- */
-int visor_check_channel(struct channel_header *ch, struct device *dev,
-                       const guid_t *expected_guid, char *chname,
-                       u64 expected_min_bytes, u32 expected_version,
-                       u64 expected_signature)
-{
-       if (!guid_is_null(expected_guid)) {
-               /* caller wants us to verify type GUID */
-               if (!guid_equal(&ch->chtype, expected_guid)) {
-                       dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=type expected=%pUL actual=%pUL\n",
-                               chname, expected_guid, expected_guid,
-                               &ch->chtype);
-                       return 0;
-               }
-       }
-       /* verify channel size */
-       if (expected_min_bytes > 0) {
-               if (ch->size < expected_min_bytes) {
-                       dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
-                               chname, expected_guid,
-                               (unsigned long long)expected_min_bytes,
-                               ch->size);
-                       return 0;
-               }
-       }
-       /* verify channel version */
-       if (expected_version > 0) {
-               if (ch->version_id != expected_version) {
-                       dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=version expected=0x%-8.8lx actual=0x%-8.8x\n",
-                               chname, expected_guid,
-                               (unsigned long)expected_version,
-                               ch->version_id);
-                       return 0;
-               }
-       }
-       /* verify channel signature */
-       if (expected_signature > 0) {
-               if (ch->signature != expected_signature) {
-                       dev_err(dev, "Channel mismatch on channel=%s(%pUL) field=signature expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
-                               chname, expected_guid,  expected_signature,
-                               ch->signature);
-                       return 0;
-               }
-       }
-       return 1;
-}
-
-static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env)
-{
-       struct visor_device *dev;
-       const guid_t *guid;
-
-       dev = to_visor_device(xdev);
-       guid = visorchannel_get_guid(dev->visorchannel);
-       return add_uevent_var(env, "MODALIAS=visorbus:%pUl", guid);
-}
-
-/*
- * visorbus_match() - called automatically upon adding a visor_device
- *                    (device_add), or adding a visor_driver
- *                    (visorbus_register_visor_driver)
- * @xdev: struct device for the device being matched
- * @xdrv: struct device_driver for driver to match device against
- *
- * Return: 1 iff the provided driver can control the specified device
- */
-static int visorbus_match(struct device *xdev, struct device_driver *xdrv)
-{
-       const guid_t *channel_type;
-       int i;
-       struct visor_device *dev;
-       struct visor_driver *drv;
-       struct visorchannel *chan;
-
-       dev = to_visor_device(xdev);
-       channel_type = visorchannel_get_guid(dev->visorchannel);
-       drv = to_visor_driver(xdrv);
-       chan = dev->visorchannel;
-       if (!drv->channel_types)
-               return 0;
-       for (i = 0; !guid_is_null(&drv->channel_types[i].guid); i++)
-               if (guid_equal(&drv->channel_types[i].guid, channel_type) &&
-                   visor_check_channel(visorchannel_get_header(chan),
-                                       xdev,
-                                       &drv->channel_types[i].guid,
-                                       (char *)drv->channel_types[i].name,
-                                       drv->channel_types[i].min_bytes,
-                                       drv->channel_types[i].version,
-                                       VISOR_CHANNEL_SIGNATURE))
-                       return i + 1;
-       return 0;
-}
-
-/*
- * This describes the TYPE of bus.
- * (Don't confuse this with an INSTANCE of the bus.)
- */
-static struct bus_type visorbus_type = {
-       .name = "visorbus",
-       .match = visorbus_match,
-       .uevent = visorbus_uevent,
-       .dev_groups = visorbus_dev_groups,
-};
-
-struct visor_busdev {
-       u32 bus_no;
-       u32 dev_no;
-};
-
-static int match_visorbus_dev_by_id(struct device *dev, const void *data)
-{
-       struct visor_device *vdev = to_visor_device(dev);
-       const struct visor_busdev *id = data;
-
-       if (vdev->chipset_bus_no == id->bus_no &&
-           vdev->chipset_dev_no == id->dev_no)
-               return 1;
-       return 0;
-}
-
-struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
-                                              struct visor_device *from)
-{
-       struct device *dev;
-       struct device *dev_start = NULL;
-       struct visor_busdev id = {
-               .bus_no = bus_no,
-               .dev_no = dev_no
-       };
-
-       if (from)
-               dev_start = &from->device;
-       dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
-                             match_visorbus_dev_by_id);
-       if (!dev)
-               return NULL;
-       return to_visor_device(dev);
-}
-
-/*
- * visorbus_release_busdevice() - called when device_unregister() is called for
- *                                the bus device instance, after all other tasks
- *                                involved with destroying the dev are complete
- * @xdev: struct device for the bus being released
- */
-static void visorbus_release_busdevice(struct device *xdev)
-{
-       struct visor_device *dev = dev_get_drvdata(xdev);
-
-       debugfs_remove(dev->debugfs_bus_info);
-       debugfs_remove_recursive(dev->debugfs_dir);
-       visorchannel_destroy(dev->visorchannel);
-       kfree(dev);
-}
-
-/*
- * visorbus_release_device() - called when device_unregister() is called for
- *                             each child device instance
- * @xdev: struct device for the visor device being released
- */
-static void visorbus_release_device(struct device *xdev)
-{
-       struct visor_device *dev = to_visor_device(xdev);
-
-       visorchannel_destroy(dev->visorchannel);
-       kfree(dev);
-}
-
-/*
- * BUS specific channel attributes to appear under
- * /sys/bus/visorbus<x>/dev<y>/channel
- */
-
-static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr,
-                            char *buf)
-{
-       struct visor_device *vdev = to_visor_device(dev);
-
-       return sprintf(buf, "0x%llx\n",
-                      visorchannel_get_physaddr(vdev->visorchannel));
-}
-static DEVICE_ATTR_RO(physaddr);
-
-static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
-                          char *buf)
-{
-       struct visor_device *vdev = to_visor_device(dev);
-
-       return sprintf(buf, "0x%lx\n",
-                      visorchannel_get_nbytes(vdev->visorchannel));
-}
-static DEVICE_ATTR_RO(nbytes);
-
-static ssize_t clientpartition_show(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-       struct visor_device *vdev = to_visor_device(dev);
-
-       return sprintf(buf, "0x%llx\n",
-                      visorchannel_get_clientpartition(vdev->visorchannel));
-}
-static DEVICE_ATTR_RO(clientpartition);
-
-static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
-                            char *buf)
-{
-       struct visor_device *vdev = to_visor_device(dev);
-       char typeid[LINESIZE];
-
-       return sprintf(buf, "%s\n",
-                      visorchannel_id(vdev->visorchannel, typeid));
-}
-static DEVICE_ATTR_RO(typeguid);
-
-static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
-                            char *buf)
-{
-       struct visor_device *vdev = to_visor_device(dev);
-       char zoneid[LINESIZE];
-
-       return sprintf(buf, "%s\n",
-                      visorchannel_zoneid(vdev->visorchannel, zoneid));
-}
-static DEVICE_ATTR_RO(zoneguid);
-
-static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
-                            char *buf)
-{
-       int i = 0;
-       struct bus_type *xbus = dev->bus;
-       struct device_driver *xdrv = dev->driver;
-       struct visor_driver *drv = NULL;
-
-       if (!xdrv)
-               return 0;
-       i = xbus->match(dev, xdrv);
-       if (!i)
-               return 0;
-       drv = to_visor_driver(xdrv);
-       return sprintf(buf, "%s\n", drv->channel_types[i - 1].name);
-}
-static DEVICE_ATTR_RO(typename);
-
-static struct attribute *channel_attrs[] = {
-       &dev_attr_physaddr.attr,
-       &dev_attr_nbytes.attr,
-       &dev_attr_clientpartition.attr,
-       &dev_attr_typeguid.attr,
-       &dev_attr_zoneguid.attr,
-       &dev_attr_typename.attr,
-       NULL
-};
-
-ATTRIBUTE_GROUPS(channel);
-
-/*
- *  BUS instance attributes
- *
- *  define & implement display of bus attributes under
- *  /sys/bus/visorbus/devices/visorbus<n>.
- */
-static ssize_t partition_handle_show(struct device *dev,
-                                    struct device_attribute *attr, char *buf)
-{
-       struct visor_device *vdev = to_visor_device(dev);
-       u64 handle = visorchannel_get_clientpartition(vdev->visorchannel);
-
-       return sprintf(buf, "0x%llx\n", handle);
-}
-static DEVICE_ATTR_RO(partition_handle);
-
-static ssize_t partition_guid_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
-{
-       struct visor_device *vdev = to_visor_device(dev);
-
-       return sprintf(buf, "{%pUb}\n", &vdev->partition_guid);
-}
-static DEVICE_ATTR_RO(partition_guid);
-
-static ssize_t partition_name_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
-{
-       struct visor_device *vdev = to_visor_device(dev);
-
-       return sprintf(buf, "%s\n", vdev->name);
-}
-static DEVICE_ATTR_RO(partition_name);
-
-static ssize_t channel_addr_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       struct visor_device *vdev = to_visor_device(dev);
-       u64 addr = visorchannel_get_physaddr(vdev->visorchannel);
-
-       return sprintf(buf, "0x%llx\n", addr);
-}
-static DEVICE_ATTR_RO(channel_addr);
-
-static ssize_t channel_bytes_show(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
-{
-       struct visor_device *vdev = to_visor_device(dev);
-       u64 nbytes = visorchannel_get_nbytes(vdev->visorchannel);
-
-       return sprintf(buf, "0x%llx\n", nbytes);
-}
-static DEVICE_ATTR_RO(channel_bytes);
-
-static ssize_t channel_id_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
-{
-       struct visor_device *vdev = to_visor_device(dev);
-       int len = 0;
-
-       visorchannel_id(vdev->visorchannel, buf);
-       len = strlen(buf);
-       buf[len++] = '\n';
-       return len;
-}
-static DEVICE_ATTR_RO(channel_id);
-
-static struct attribute *visorbus_attrs[] = {
-       &dev_attr_partition_handle.attr,
-       &dev_attr_partition_guid.attr,
-       &dev_attr_partition_name.attr,
-       &dev_attr_channel_addr.attr,
-       &dev_attr_channel_bytes.attr,
-       &dev_attr_channel_id.attr,
-       NULL
-};
-
-ATTRIBUTE_GROUPS(visorbus);
-
-/*
- *  BUS debugfs entries
- *
- *  define & implement display of debugfs attributes under
- *  /sys/kernel/debug/visorbus/visorbus<n>.
- */
-
-/*
- * vbuschannel_print_devinfo() - format a struct visor_vbus_deviceinfo
- *                               and write it to a seq_file
- * @devinfo: the struct visor_vbus_deviceinfo to format
- * @seq: seq_file to write to
- * @devix: the device index to be included in the output data, or -1 if no
- *         device index is to be included
- *
- * Reads @devinfo and writes it in human-readable notation to @seq.
- */
-static void vbuschannel_print_devinfo(struct visor_vbus_deviceinfo *devinfo,
-                                     struct seq_file *seq, int devix)
-{
-       /* uninitialized vbus device entry */
-       if (!isprint(devinfo->devtype[0]))
-               return;
-       if (devix >= 0)
-               seq_printf(seq, "[%d]", devix);
-       else
-               /* vbus device entry is for bus or chipset */
-               seq_puts(seq, "   ");
-       /*
-        * Note: because the s-Par back-end is free to scribble in this area,
-        * we never assume '\0'-termination.
-        */
-       seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->devtype),
-                  (int)sizeof(devinfo->devtype), devinfo->devtype);
-       seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->drvname),
-                  (int)sizeof(devinfo->drvname), devinfo->drvname);
-       seq_printf(seq, "%.*s\n", (int)sizeof(devinfo->infostrs),
-                  devinfo->infostrs);
-}
-
-static int bus_info_debugfs_show(struct seq_file *seq, void *v)
-{
-       int i = 0;
-       unsigned long off;
-       struct visor_vbus_deviceinfo dev_info;
-       struct visor_device *vdev = seq->private;
-       struct visorchannel *channel = vdev->visorchannel;
-
-       if (!channel)
-               return 0;
-
-       seq_printf(seq,
-                  "Client device/driver info for %s partition (vbus #%u):\n",
-                  ((vdev->name) ? (char *)(vdev->name) : ""),
-                  vdev->chipset_bus_no);
-       if (visorchannel_read(channel,
-                             offsetof(struct visor_vbus_channel, chp_info),
-                             &dev_info, sizeof(dev_info)) >= 0)
-               vbuschannel_print_devinfo(&dev_info, seq, -1);
-       if (visorchannel_read(channel,
-                             offsetof(struct visor_vbus_channel, bus_info),
-                             &dev_info, sizeof(dev_info)) >= 0)
-               vbuschannel_print_devinfo(&dev_info, seq, -1);
-
-       off = offsetof(struct visor_vbus_channel, dev_info);
-       while (off + sizeof(dev_info) <= visorchannel_get_nbytes(channel)) {
-               if (visorchannel_read(channel, off, &dev_info,
-                                     sizeof(dev_info)) >= 0)
-                       vbuschannel_print_devinfo(&dev_info, seq, i);
-               off += sizeof(dev_info);
-               i++;
-       }
-       return 0;
-}
-
-static int bus_info_debugfs_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, bus_info_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations bus_info_debugfs_fops = {
-       .owner = THIS_MODULE,
-       .open = bus_info_debugfs_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static void dev_periodic_work(struct timer_list *t)
-{
-       struct visor_device *dev = from_timer(dev, t, timer);
-       struct visor_driver *drv = to_visor_driver(dev->device.driver);
-
-       drv->channel_interrupt(dev);
-       mod_timer(&dev->timer, jiffies + POLLJIFFIES_NORMALCHANNEL);
-}
-
-static int dev_start_periodic_work(struct visor_device *dev)
-{
-       if (dev->being_removed || dev->timer_active)
-               return -EINVAL;
-
-       /* now up by at least 2 */
-       get_device(&dev->device);
-       dev->timer.expires = jiffies + POLLJIFFIES_NORMALCHANNEL;
-       add_timer(&dev->timer);
-       dev->timer_active = true;
-       return 0;
-}
-
-static void dev_stop_periodic_work(struct visor_device *dev)
-{
-       if (!dev->timer_active)
-               return;
-
-       del_timer_sync(&dev->timer);
-       dev->timer_active = false;
-       put_device(&dev->device);
-}
-
-/*
- * visordriver_remove_device() - handle visor device going away
- * @xdev: struct device for the visor device being removed
- *
- * This is called when device_unregister() is called for each child device
- * instance, to notify the appropriate visorbus function driver that the device
- * is going away, and to decrease the reference count of the device.
- *
- * Return: 0 iff successful
- */
-static int visordriver_remove_device(struct device *xdev)
-{
-       struct visor_device *dev = to_visor_device(xdev);
-       struct visor_driver *drv = to_visor_driver(xdev->driver);
-
-       mutex_lock(&dev->visordriver_callback_lock);
-       dev->being_removed = true;
-       drv->remove(dev);
-       mutex_unlock(&dev->visordriver_callback_lock);
-       dev_stop_periodic_work(dev);
-       put_device(&dev->device);
-       return 0;
-}
-
-/*
- * visorbus_unregister_visor_driver() - unregisters the provided driver
- * @drv: the driver to unregister
- *
- * A visor function driver calls this function to unregister the driver,
- * i.e., within its module_exit function.
- */
-void visorbus_unregister_visor_driver(struct visor_driver *drv)
-{
-       driver_unregister(&drv->driver);
-}
-EXPORT_SYMBOL_GPL(visorbus_unregister_visor_driver);
-
-/*
- * visorbus_read_channel() - reads from the designated channel into
- *                           the provided buffer
- * @dev:    the device whose channel is read from
- * @offset: the offset into the channel at which reading starts
- * @dest:   the destination buffer that is written into from the channel
- * @nbytes: the number of bytes to read from the channel
- *
- * If receiving a message, use the visorchannel_signalremove() function instead.
- *
- * Return: integer indicating success (zero) or failure (non-zero)
- */
-int visorbus_read_channel(struct visor_device *dev, unsigned long offset,
-                         void *dest, unsigned long nbytes)
-{
-       return visorchannel_read(dev->visorchannel, offset, dest, nbytes);
-}
-EXPORT_SYMBOL_GPL(visorbus_read_channel);
-
-/*
- * visorbus_write_channel() - writes the provided buffer into the designated
- *                            channel
- * @dev:    the device whose channel is written to
- * @offset: the offset into the channel at which writing starts
- * @src:    the source buffer that is written into the channel
- * @nbytes: the number of bytes to write into the channel
- *
- * If sending a message, use the visorchannel_signalinsert() function instead.
- *
- * Return: integer indicating success (zero) or failure (non-zero)
- */
-int visorbus_write_channel(struct visor_device *dev, unsigned long offset,
-                          void *src, unsigned long nbytes)
-{
-       return visorchannel_write(dev->visorchannel, offset, src, nbytes);
-}
-EXPORT_SYMBOL_GPL(visorbus_write_channel);
-
-/*
- * visorbus_enable_channel_interrupts() - enables interrupts on the
- *                                        designated device
- * @dev: the device on which to enable interrupts
- *
- * Currently we don't yet have a real interrupt, so for now we just call the
- * interrupt function periodically via a timer.
- */
-int visorbus_enable_channel_interrupts(struct visor_device *dev)
-{
-       struct visor_driver *drv = to_visor_driver(dev->device.driver);
-
-       if (!drv->channel_interrupt) {
-               dev_err(&dev->device, "%s no interrupt function!\n", __func__);
-               return -ENOENT;
-       }
-
-       return dev_start_periodic_work(dev);
-}
-EXPORT_SYMBOL_GPL(visorbus_enable_channel_interrupts);
-
-/*
- * visorbus_disable_channel_interrupts() - disables interrupts on the
- *                                         designated device
- * @dev: the device on which to disable interrupts
- */
-void visorbus_disable_channel_interrupts(struct visor_device *dev)
-{
-       dev_stop_periodic_work(dev);
-}
-EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts);
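A hedged sketch of how a visorbus function driver could combine the exported helpers above in its probe: read a value out of its channel, then turn on the timer-polled channel interrupt. Both example_probe and EXAMPLE_VERSION_OFFSET are hypothetical names, not part of this driver.

/* Sketch only: EXAMPLE_VERSION_OFFSET is a hypothetical channel offset. */
#define EXAMPLE_VERSION_OFFSET 0

static int example_probe(struct visor_device *dev)
{
	u32 version;
	int err;

	err = visorbus_read_channel(dev, EXAMPLE_VERSION_OFFSET,
				    &version, sizeof(version));
	if (err)
		return err;
	dev_info(&dev->device, "channel version field: %u\n", version);

	/* begins periodic calls into visor_driver.channel_interrupt */
	return visorbus_enable_channel_interrupts(dev);
}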
-
-/*
- * create_visor_device() - create visor device as a result of receiving the
- *                         controlvm device_create message for a new device
- * @dev: a freshly-zeroed struct visor_device, containing only filled-in values
- *       for chipset_bus_no and chipset_dev_no, that will be initialized
- *
- * This is how everything starts from the device end.
- * This function is called when a channel first appears via a ControlVM
- * message.  In response, this function allocates a visor_device to correspond
- * to the new channel, and attempts to connect it to the appropriate driver. If
- * the appropriate driver is found, the visor_driver.probe() function for that
- * driver will be called and will be passed the new visor_device that we just
- * created.
- *
- * It's ok if the appropriate driver is not yet loaded, because in that case
- * the new device struct will just stick around in the bus' list of devices.
- * When the appropriate driver calls visorbus_register_visor_driver(), the
- * visor_driver.probe() for the new driver will be called with the new device.
- *
- * Return: 0 if successful, otherwise the negative value returned by
- *         device_add() indicating the reason for failure
- */
-int create_visor_device(struct visor_device *dev)
-{
-       int err;
-       u32 chipset_bus_no = dev->chipset_bus_no;
-       u32 chipset_dev_no = dev->chipset_dev_no;
-
-       mutex_init(&dev->visordriver_callback_lock);
-       dev->device.bus = &visorbus_type;
-       dev->device.groups = channel_groups;
-       device_initialize(&dev->device);
-       dev->device.release = visorbus_release_device;
-       /* keep a reference just for us (now 2) */
-       get_device(&dev->device);
-       timer_setup(&dev->timer, dev_periodic_work, 0);
-       /*
-        * bus_id must be a unique name with respect to this bus TYPE (NOT bus
-        * instance).  That's why we need to include the bus number within the
-        * name.
-        */
-       err = dev_set_name(&dev->device, "vbus%u:dev%u",
-                          chipset_bus_no, chipset_dev_no);
-       if (err)
-               goto err_put;
-       /*
-        * device_add does this:
-        *    bus_add_device(dev)
-        *    ->device_attach(dev)
-        *      ->for each driver drv registered on the bus that dev is on
-        *          if (dev.drv)  **  device already has a driver **
-        *            ** not sure we could ever get here... **
-        *          else
-        *            if (bus.match(dev,drv)) [visorbus_match]
-        *              dev.drv = drv
-        *              if (!drv.probe(dev))  [visordriver_probe_device]
-        *                dev.drv = NULL
-        *
-        * Note that device_add does NOT fail even if no driver claims the
-        * device.  The device will be linked onto bus_type.klist_devices
-        * regardless (use bus_for_each_dev).
-        */
-       err = device_add(&dev->device);
-       if (err < 0)
-               goto err_put;
-       list_add_tail(&dev->list_all, &list_all_device_instances);
-       dev->state.created = 1;
-       visorbus_response(dev, err, CONTROLVM_DEVICE_CREATE);
-       /* success: reference kept via unmatched get_device() */
-       return 0;
-
-err_put:
-       put_device(&dev->device);
-       dev_err(&dev->device, "Creating visor device failed. %d\n", err);
-       return err;
-}
-
-void remove_visor_device(struct visor_device *dev)
-{
-       list_del(&dev->list_all);
-       put_device(&dev->device);
-       if (dev->pending_msg_hdr)
-               visorbus_response(dev, 0, CONTROLVM_DEVICE_DESTROY);
-       device_unregister(&dev->device);
-}
-
-static int get_vbus_header_info(struct visorchannel *chan,
-                               struct device *dev,
-                               struct visor_vbus_headerinfo *hdr_info)
-{
-       int err;
-
-       if (!visor_check_channel(visorchannel_get_header(chan),
-                                dev,
-                                &visor_vbus_channel_guid,
-                                "vbus",
-                                sizeof(struct visor_vbus_channel),
-                                VISOR_VBUS_CHANNEL_VERSIONID,
-                                VISOR_CHANNEL_SIGNATURE))
-               return -EINVAL;
-
-       err = visorchannel_read(chan, sizeof(struct channel_header), hdr_info,
-                               sizeof(*hdr_info));
-       if (err < 0)
-               return err;
-       if (hdr_info->struct_bytes < sizeof(struct visor_vbus_headerinfo))
-               return -EINVAL;
-       if (hdr_info->device_info_struct_bytes <
-           sizeof(struct visor_vbus_deviceinfo))
-               return -EINVAL;
-       return 0;
-}
-
-/*
- * write_vbus_chp_info() - write the contents of <info> to the struct
- *                         visor_vbus_channel.chp_info
- * @chan:     identifies the s-Par channel that will be updated
- * @hdr_info: used to find appropriate channel offset to write data
- * @info:     contains the information to write
- *
- * Writes chipset info into the channel memory to be used for diagnostic
- * purposes.
- *
- * Returns no value since this is debug information and not needed for
- * device functionality.
- */
-static void write_vbus_chp_info(struct visorchannel *chan,
-                               struct visor_vbus_headerinfo *hdr_info,
-                               struct visor_vbus_deviceinfo *info)
-{
-       int off;
-
-       if (hdr_info->chp_info_offset == 0)
-               return;
-
-       off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
-       visorchannel_write(chan, off, info, sizeof(*info));
-}
-
-/*
- * write_vbus_bus_info() - write the contents of <info> to the struct
- *                         visor_vbus_channel.bus_info
- * @chan:     identifies the s-Par channel that will be updated
- * @hdr_info: used to find appropriate channel offset to write data
- * @info:     contains the information to write
- *
- * Writes bus info into the channel memory to be used for diagnostic
- * purposes.
- *
- * Returns no value since this is debug information and not needed for
- * device functionality.
- */
-static void write_vbus_bus_info(struct visorchannel *chan,
-                               struct visor_vbus_headerinfo *hdr_info,
-                               struct visor_vbus_deviceinfo *info)
-{
-       int off;
-
-       if (hdr_info->bus_info_offset == 0)
-               return;
-
-       off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
-       visorchannel_write(chan, off, info, sizeof(*info));
-}
-
-/*
- * write_vbus_dev_info() - write the contents of <info> to the struct
- *                         visor_vbus_channel.dev_info[<devix>]
- * @chan:     identifies the s-Par channel that will be updated
- * @hdr_info: used to find appropriate channel offset to write data
- * @info:     contains the information to write
- * @devix:    the relative device number (0..n-1) of the device on the bus
- *
- * Writes device info into the channel memory to be used for diagnostic
- * purposes.
- *
- * Returns no value since this is debug information and not needed for
- * device functionality.
- */
-static void write_vbus_dev_info(struct visorchannel *chan,
-                               struct visor_vbus_headerinfo *hdr_info,
-                               struct visor_vbus_deviceinfo *info,
-                               unsigned int devix)
-{
-       int off;
-
-       if (hdr_info->dev_info_offset == 0)
-               return;
-       off = (sizeof(struct channel_header) + hdr_info->dev_info_offset) +
-             (hdr_info->device_info_struct_bytes * devix);
-       visorchannel_write(chan, off, info, sizeof(*info));
-}
-
-static void bus_device_info_init(
-               struct visor_vbus_deviceinfo *bus_device_info_ptr,
-               const char *dev_type, const char *drv_name)
-{
-       memset(bus_device_info_ptr, 0, sizeof(struct visor_vbus_deviceinfo));
-       snprintf(bus_device_info_ptr->devtype,
-                sizeof(bus_device_info_ptr->devtype),
-                "%s", (dev_type) ? dev_type : "unknownType");
-       snprintf(bus_device_info_ptr->drvname,
-                sizeof(bus_device_info_ptr->drvname),
-                "%s", (drv_name) ? drv_name : "unknownDriver");
-       snprintf(bus_device_info_ptr->infostrs,
-                sizeof(bus_device_info_ptr->infostrs), "kernel ver. %s",
-                utsname()->release);
-}
-
-/*
- * publish_vbus_dev_info() - for a child device just created on a client bus,
- *                          fill in information about the driver that is
- *                          controlling this device into the appropriate slot
- *                          within the vbus channel of the bus instance
- * @visordev: struct visor_device for the desired device
- */
-static void publish_vbus_dev_info(struct visor_device *visordev)
-{
-       int i;
-       struct visor_device *bdev;
-       struct visor_driver *visordrv;
-       u32 bus_no = visordev->chipset_bus_no;
-       u32 dev_no = visordev->chipset_dev_no;
-       struct visor_vbus_deviceinfo dev_info;
-       const char *chan_type_name = NULL;
-       struct visor_vbus_headerinfo *hdr_info;
-
-       if (!visordev->device.driver)
-               return;
-       bdev = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
-       if (!bdev)
-               return;
-       hdr_info = (struct visor_vbus_headerinfo *)bdev->vbus_hdr_info;
-       if (!hdr_info)
-               return;
-       visordrv = to_visor_driver(visordev->device.driver);
-
-       /*
-        * Within the list of device types (by GUID) that the driver
-        * says it supports, find out which one of those types matches
-        * the type of this device, so that we can include the device
-        * type name
-        */
-       for (i = 0; visordrv->channel_types[i].name; i++) {
-               if (guid_equal(&visordrv->channel_types[i].guid,
-                              &visordev->channel_type_guid)) {
-                       chan_type_name = visordrv->channel_types[i].name;
-                       break;
-               }
-       }
-       bus_device_info_init(&dev_info, chan_type_name, visordrv->name);
-       write_vbus_dev_info(bdev->visorchannel, hdr_info, &dev_info, dev_no);
-       write_vbus_chp_info(bdev->visorchannel, hdr_info, &chipset_driverinfo);
-       write_vbus_bus_info(bdev->visorchannel, hdr_info,
-                           &clientbus_driverinfo);
-}
-
-/*
- * visordriver_probe_device() - handle new visor device coming online
- * @xdev: struct device for the visor device being probed
- *
- * This is called automatically upon adding a visor_device (device_add), or
- * adding a visor_driver (visorbus_register_visor_driver), but only after
- * visorbus_match() has returned 1 to indicate a successful match between
- * driver and device.
- *
- * If successful, a reference to the device will be held onto via get_device().
- *
- * Return: 0 if successful, meaning the function driver's probe() function
- *         was successful with this device, otherwise a negative errno
- *         value indicating failure reason
- */
-static int visordriver_probe_device(struct device *xdev)
-{
-       int err;
-       struct visor_driver *drv = to_visor_driver(xdev->driver);
-       struct visor_device *dev = to_visor_device(xdev);
-
-       mutex_lock(&dev->visordriver_callback_lock);
-       dev->being_removed = false;
-       err = drv->probe(dev);
-       if (err) {
-               mutex_unlock(&dev->visordriver_callback_lock);
-               return err;
-       }
-       /* success: reference kept via unmatched get_device() */
-       get_device(&dev->device);
-       publish_vbus_dev_info(dev);
-       mutex_unlock(&dev->visordriver_callback_lock);
-       return 0;
-}
-
-/*
- * visorbus_register_visor_driver() - registers the provided visor driver for
- *                                   handling one or more visor device
- *                                    types (channel_types)
- * @drv: the driver to register
- *
- * A visor function driver calls this function to register the driver. The
- * caller MUST fill in the following fields within the #drv structure:
- *     name, owner, channel_types, probe, remove, pause, resume
- *
- * Here's how the whole Linux bus / driver / device model works.
- *
- * At system start-up, the visorbus kernel module is loaded, which registers
- * visorbus_type as a bus type, using bus_register().
- *
- * All kernel modules that support particular device types on a
- * visorbus bus are loaded.  Each of these kernel modules calls
- * visorbus_register_visor_driver() in their init functions, passing a
- * visor_driver struct.  visorbus_register_visor_driver() in turn calls
- * register_driver(&visor_driver.driver).  This .driver member is
- * initialized with generic methods (like probe), whose sole responsibility
- * is to act as a broker for the real methods, which are within the
- * visor_driver struct.  (This is the way the subclass behavior is
- * implemented, since visor_driver is essentially a subclass of the
- * generic driver.)  Whenever a driver_register() happens, core bus code in
- * the kernel does (see device_attach() in drivers/base/dd.c):
- *
- *     for each dev associated with the bus (the bus that driver is on) that
- *     does not yet have a driver
- *         if bus.match(dev,newdriver) == yes_matched  ** .match specified
- *                                                ** during bus_register().
- *             newdriver.probe(dev)  ** for visor drivers, this will call
- *                   ** the generic driver.probe implemented in visorbus.c,
- *                   ** which in turn calls the probe specified within the
- *                   ** struct visor_driver (which was specified by the
- *                   ** actual device driver as part of
- *                   ** visorbus_register_visor_driver()).
- *
- * The above dance also happens when a new device appears.
- * So the question is, how are devices created within the system?
- * Basically, just call device_add(dev).  See pci_bus_add_devices().
- * pci_scan_device() shows an example of how to build a device struct.  It
- * returns the newly-created struct to pci_scan_single_device(), which adds it
- * to the list of devices at PCIBUS.devices.  That list of devices is what
- * is traversed by pci_bus_add_devices().
- *
- * Return: integer indicating success (zero) or failure (non-zero)
- */
-int visorbus_register_visor_driver(struct visor_driver *drv)
-{
-       /* can't register on a nonexistent bus */
-       if (!initialized)
-               return -ENODEV;
-       if (!drv->probe)
-               return -EINVAL;
-       if (!drv->remove)
-               return -EINVAL;
-       if (!drv->pause)
-               return -EINVAL;
-       if (!drv->resume)
-               return -EINVAL;
-
-       drv->driver.name = drv->name;
-       drv->driver.bus = &visorbus_type;
-       drv->driver.probe = visordriver_probe_device;
-       drv->driver.remove = visordriver_remove_device;
-       drv->driver.owner = drv->owner;
-       /*
-        * driver_register does this:
-        *   bus_add_driver(drv)
-        *   ->if (drv.bus)  ** (bus_type) **
-        *       driver_attach(drv)
-        *         for each dev with bus type of drv.bus
-        *           if (!dev.drv)  ** no driver assigned yet **
-        *             if (bus.match(dev,drv))  [visorbus_match]
-        *               dev.drv = drv
-        *               if (!drv.probe(dev))   [visordriver_probe_device]
-        *                 dev.drv = NULL
-        */
-       return driver_register(&drv->driver);
-}
-EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
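/*
 * Illustrative sketch (not part of the original file): a minimal,
 * hypothetical visor function driver registering with
 * visorbus_register_visor_driver().  Every name, the GUID, and the callback
 * bodies below are invented for illustration; a real driver supplies its
 * own channel-type GUID and real probe/remove/pause/resume logic.
 */
static int example_probe(struct visor_device *dev)
{
	/* set up the device here; 0 means the probe succeeded */
	return 0;
}

static void example_remove(struct visor_device *dev)
{
	/* undo whatever example_probe() set up */
}

static int example_pause(struct visor_device *dev,
			 void (*complete)(struct visor_device *dev, int status))
{
	/* quiesce the device, then report completion (may be asynchronous) */
	complete(dev, 0);
	return 0;
}

static int example_resume(struct visor_device *dev,
			  void (*complete)(struct visor_device *dev, int status))
{
	complete(dev, 0);
	return 0;
}

/* placeholder GUID; a real driver uses the GUID of the channel it services */
static struct visor_channeltype_descriptor example_channel_types[] = {
	{ .guid = GUID_INIT(0x12345678, 0x1234, 0x1234, 0x00, 0x11, 0x22,
			    0x33, 0x44, 0x55, 0x66, 0x77),
	  .name = "example" },
	{}
};

static struct visor_driver example_driver = {
	.name = "example_visor_driver",
	.owner = THIS_MODULE,
	.channel_types = example_channel_types,
	.probe = example_probe,
	.remove = example_remove,
	.pause = example_pause,
	.resume = example_resume,
};

static int __init example_init(void)
{
	/* fails with -EINVAL if any of the callbacks above were left NULL */
	return visorbus_register_visor_driver(&example_driver);
}
module_init(example_init);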
-
-/*
- * visorbus_create_instance() - create a device instance for the visorbus itself
- * @dev: struct visor_device indicating the bus instance
- *
- * Return: 0 for success, otherwise negative errno value indicating reason for
- *         failure
- */
-int visorbus_create_instance(struct visor_device *dev)
-{
-       int id = dev->chipset_bus_no;
-       int err;
-       struct visor_vbus_headerinfo *hdr_info;
-
-       hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL);
-       if (!hdr_info)
-               return -ENOMEM;
-       dev_set_name(&dev->device, "visorbus%d", id);
-       dev->device.bus = &visorbus_type;
-       dev->device.groups = visorbus_groups;
-       dev->device.release = visorbus_release_busdevice;
-       dev->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
-                                             visorbus_debugfs_dir);
-       dev->debugfs_bus_info = debugfs_create_file("client_bus_info", 0440,
-                                                   dev->debugfs_dir, dev,
-                                                   &bus_info_debugfs_fops);
-       dev_set_drvdata(&dev->device, dev);
-       err = get_vbus_header_info(dev->visorchannel, &dev->device, hdr_info);
-       if (err < 0)
-               goto err_debugfs_dir;
-       err = device_register(&dev->device);
-       if (err < 0)
-               goto err_debugfs_dir;
-       list_add_tail(&dev->list_all, &list_all_bus_instances);
-       dev->state.created = 1;
-       dev->vbus_hdr_info = (void *)hdr_info;
-       write_vbus_chp_info(dev->visorchannel, hdr_info, &chipset_driverinfo);
-       write_vbus_bus_info(dev->visorchannel, hdr_info, &clientbus_driverinfo);
-       visorbus_response(dev, err, CONTROLVM_BUS_CREATE);
-       return 0;
-
-err_debugfs_dir:
-       debugfs_remove_recursive(dev->debugfs_dir);
-       kfree(hdr_info);
-       dev_err(&dev->device, "%s failed: %d\n", __func__, err);
-       return err;
-}
-
-/*
- * visorbus_remove_instance() - remove a device instance for the visorbus itself
- * @dev: struct visor_device identifying the bus to remove
- */
-void visorbus_remove_instance(struct visor_device *dev)
-{
-       /*
-        * Note that device_unregister() ends with a put_device(); once that
-        * drops the last reference, the driver core calls the release method
-        * set up in visorbus_create_instance(), i.e.
-        * visorbus_release_busdevice().
-        */
-       kfree(dev->vbus_hdr_info);
-       list_del(&dev->list_all);
-       if (dev->pending_msg_hdr)
-               visorbus_response(dev, 0, CONTROLVM_BUS_DESTROY);
-       device_unregister(&dev->device);
-}
-
-/*
- * remove_all_visor_devices() - remove all child visorbus device instances
- */
-static void remove_all_visor_devices(void)
-{
-       struct list_head *listentry, *listtmp;
-
-       list_for_each_safe(listentry, listtmp, &list_all_device_instances) {
-               struct visor_device *dev;
-
-               dev = list_entry(listentry, struct visor_device, list_all);
-               remove_visor_device(dev);
-       }
-}
-
-/*
- * pause_state_change_complete() - the callback function to be called by a
- *                                 visorbus function driver when a
- *                                 pending "pause device" operation has
- *                                 completed
- * @dev: struct visor_device identifying the paused device
- * @status: 0 iff the pause state change completed successfully, otherwise
- *          a negative errno value indicating the reason for failure
- */
-static void pause_state_change_complete(struct visor_device *dev, int status)
-{
-       if (!dev->pausing)
-               return;
-
-       dev->pausing = false;
-       visorbus_device_changestate_response(dev, status,
-                                            segment_state_standby);
-}
-
-/*
- * resume_state_change_complete() - the callback function to be called by a
- *                                  visorbus function driver when a
- *                                  pending "resume device" operation has
- *                                  completed
- * @dev: struct visor_device identifying the resumed device
- * @status: 0 iff the resume state change completed successfully, otherwise
- *          a negative errno value indicating the reason for failure
- */
-static void resume_state_change_complete(struct visor_device *dev, int status)
-{
-       if (!dev->resuming)
-               return;
-
-       dev->resuming = false;
-       /*
-        * Notify the chipset driver that the resume is complete,
-        * which will presumably want to send some sort of response to
-        * the initiator.
-        */
-       visorbus_device_changestate_response(dev, status,
-                                            segment_state_running);
-}
-
-/*
- * visorchipset_initiate_device_pause_resume() - start a pause or resume
- *                                               operation for a visor device
- * @dev: struct visor_device identifying the device being paused or resumed
- * @is_pause: true to indicate pause operation, false to indicate resume
- *
- * Tell the subordinate function driver for a specific device to pause
- * or resume that device.  Success/failure result is returned asynchronously
- * via a callback function; see pause_state_change_complete() and
- * resume_state_change_complete().
- */
-static int visorchipset_initiate_device_pause_resume(struct visor_device *dev,
-                                                    bool is_pause)
-{
-       int err;
-       struct visor_driver *drv;
-
-       /* If no driver associated with the device nothing to pause/resume */
-       if (!dev->device.driver)
-               return 0;
-       if (dev->pausing || dev->resuming)
-               return -EBUSY;
-
-       drv = to_visor_driver(dev->device.driver);
-       if (is_pause) {
-               dev->pausing = true;
-               err = drv->pause(dev, pause_state_change_complete);
-       } else {
-               /*
-                * The vbus_dev_info structure in the channel has been
-                * cleared, so make sure it is valid before resuming.
-                */
-               publish_vbus_dev_info(dev);
-               dev->resuming = true;
-               err = drv->resume(dev, resume_state_change_complete);
-       }
-       return err;
-}
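/*
 * Illustrative sketch (hypothetical, not from the original file): the
 * pause/resume callbacks handed to the function driver above do not have to
 * be invoked synchronously.  A driver may finish quiescing from another
 * context, e.g. a workqueue, as long as it eventually calls the completion
 * function, which is what lands in pause_state_change_complete() /
 * resume_state_change_complete().  All names below are invented.
 */
struct example_pause_work {
	struct work_struct work;
	struct visor_device *dev;
	void (*complete)(struct visor_device *dev, int status);
};

static void example_pause_worker(struct work_struct *work)
{
	struct example_pause_work *pw =
		container_of(work, struct example_pause_work, work);

	/* ... quiesce outstanding I/O for pw->dev here ... */
	pw->complete(pw->dev, 0);	/* ends up clearing dev->pausing */
	kfree(pw);
}

static int example_deferred_pause(struct visor_device *dev,
				  void (*complete)(struct visor_device *dev,
						   int status))
{
	struct example_pause_work *pw = kzalloc(sizeof(*pw), GFP_KERNEL);

	if (!pw)
		return -ENOMEM;
	pw->dev = dev;
	pw->complete = complete;
	INIT_WORK(&pw->work, example_pause_worker);
	schedule_work(&pw->work);
	return 0;
}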
-
-/*
- * visorchipset_device_pause() - start a pause operation for a visor device
- * @dev_info: struct visor_device identifying the device being paused
- *
- * Tell the subordinate function driver for a specific device to pause
- * that device.  Success/failure result is returned asynchronously
- * via a callback function; see pause_state_change_complete().
- */
-int visorchipset_device_pause(struct visor_device *dev_info)
-{
-       int err;
-
-       err = visorchipset_initiate_device_pause_resume(dev_info, true);
-       if (err < 0) {
-               dev_info->pausing = false;
-               return err;
-       }
-       return 0;
-}
-
-/*
- * visorchipset_device_resume() - start a resume operation for a visor device
- * @dev_info: struct visor_device identifying the device being resumed
- *
- * Tell the subordinate function driver for a specific device to resume
- * that device.  Success/failure result is returned asynchronously
- * via a callback function; see resume_state_change_complete().
- */
-int visorchipset_device_resume(struct visor_device *dev_info)
-{
-       int err;
-
-       err = visorchipset_initiate_device_pause_resume(dev_info, false);
-       if (err < 0) {
-               dev_info->resuming = false;
-               return err;
-       }
-       return 0;
-}
-
-int visorbus_init(void)
-{
-       int err;
-
-       visorbus_debugfs_dir = debugfs_create_dir("visorbus", NULL);
-       bus_device_info_init(&clientbus_driverinfo, "clientbus", "visorbus");
-       err = bus_register(&visorbus_type);
-       if (err < 0)
-               return err;
-       initialized = true;
-       bus_device_info_init(&chipset_driverinfo, "chipset", "visorchipset");
-       return 0;
-}
-
-void visorbus_exit(void)
-{
-       struct list_head *listentry, *listtmp;
-
-       remove_all_visor_devices();
-       list_for_each_safe(listentry, listtmp, &list_all_bus_instances) {
-               struct visor_device *dev;
-
-               dev = list_entry(listentry, struct visor_device, list_all);
-               visorbus_remove_instance(dev);
-       }
-       bus_unregister(&visorbus_type);
-       initialized = false;
-       debugfs_remove_recursive(visorbus_debugfs_dir);
-}
diff --git a/drivers/visorbus/visorbus_private.h b/drivers/visorbus/visorbus_private.h
deleted file mode 100644
index 6956de6..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#ifndef __VISORBUS_PRIVATE_H__
-#define __VISORBUS_PRIVATE_H__
-
-#include <linux/uuid.h>
-#include <linux/utsname.h>
-#include <linux/visorbus.h>
-
-#include "controlvmchannel.h"
-#include "vbuschannel.h"
-
-struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
-                                              struct visor_device *from);
-int visorbus_create_instance(struct visor_device *dev);
-void visorbus_remove_instance(struct visor_device *bus_info);
-int create_visor_device(struct visor_device *dev_info);
-void remove_visor_device(struct visor_device *dev_info);
-int visorchipset_device_pause(struct visor_device *dev_info);
-int visorchipset_device_resume(struct visor_device *dev_info);
-void visorbus_response(struct visor_device *p, int response, int controlvm_id);
-void visorbus_device_changestate_response(struct visor_device *p, int response,
-                                         struct visor_segment_state state);
-int visorbus_init(void);
-void visorbus_exit(void);
-
-/* visorchannel access functions */
-struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
-                                        const guid_t *guid, bool needs_lock);
-void visorchannel_destroy(struct visorchannel *channel);
-int visorchannel_read(struct visorchannel *channel, ulong offset,
-                     void *dest, ulong nbytes);
-int visorchannel_write(struct visorchannel *channel, ulong offset,
-                      void *dest, ulong nbytes);
-u64 visorchannel_get_physaddr(struct visorchannel *channel);
-ulong visorchannel_get_nbytes(struct visorchannel *channel);
-char *visorchannel_id(struct visorchannel *channel, char *s);
-char *visorchannel_zoneid(struct visorchannel *channel, char *s);
-u64 visorchannel_get_clientpartition(struct visorchannel *channel);
-int visorchannel_set_clientpartition(struct visorchannel *channel,
-                                    u64 partition_handle);
-char *visorchannel_guid_id(const guid_t *guid, char *s);
-void *visorchannel_get_header(struct visorchannel *channel);
-#endif
diff --git a/drivers/visorbus/visorchannel.c b/drivers/visorbus/visorchannel.c
deleted file mode 100644
index bd890e0..0000000
+++ /dev/null
@@ -1,434 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-/*
- *  This provides s-Par channel communication primitives, which are
- *  independent of the mechanism used to access the channel data.
- */
-
-#include <linux/uuid.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/visorbus.h>
-
-#include "visorbus_private.h"
-#include "controlvmchannel.h"
-
-#define VISOR_DRV_NAME "visorchannel"
-
-#define VISOR_CONSOLEVIDEO_CHANNEL_GUID \
-       GUID_INIT(0x3cd6e705, 0xd6a2, 0x4aa5, \
-                 0xad, 0x5c, 0x7b, 0x8, 0x88, 0x9d, 0xff, 0xe2)
-
-static const guid_t visor_video_guid = VISOR_CONSOLEVIDEO_CHANNEL_GUID;
-
-struct visorchannel {
-       u64 physaddr;
-       ulong nbytes;
-       void *mapped;
-       bool requested;
-       struct channel_header chan_hdr;
-       guid_t guid;
-       /*
-        * channel creator knows if more than one thread will be inserting or
-        * removing
-        */
-       bool needs_lock;
-       /* protect head writes in chan_hdr */
-       spinlock_t insert_lock;
-       /* protect tail writes in chan_hdr */
-       spinlock_t remove_lock;
-       guid_t type;
-       guid_t inst;
-};
-
-void visorchannel_destroy(struct visorchannel *channel)
-{
-       if (!channel)
-               return;
-
-       if (channel->mapped) {
-               memunmap(channel->mapped);
-               if (channel->requested)
-                       release_mem_region(channel->physaddr, channel->nbytes);
-       }
-       kfree(channel);
-}
-
-u64 visorchannel_get_physaddr(struct visorchannel *channel)
-{
-       return channel->physaddr;
-}
-
-ulong visorchannel_get_nbytes(struct visorchannel *channel)
-{
-       return channel->nbytes;
-}
-
-char *visorchannel_guid_id(const guid_t *guid, char *s)
-{
-       sprintf(s, "%pUL", guid);
-       return s;
-}
-
-char *visorchannel_id(struct visorchannel *channel, char *s)
-{
-       return visorchannel_guid_id(&channel->guid, s);
-}
-
-char *visorchannel_zoneid(struct visorchannel *channel, char *s)
-{
-       return visorchannel_guid_id(&channel->chan_hdr.zone_guid, s);
-}
-
-u64 visorchannel_get_clientpartition(struct visorchannel *channel)
-{
-       return channel->chan_hdr.partition_handle;
-}
-
-int visorchannel_set_clientpartition(struct visorchannel *channel,
-                                    u64 partition_handle)
-{
-       channel->chan_hdr.partition_handle = partition_handle;
-       return 0;
-}
-
-/**
- * visorchannel_get_guid() - queries the GUID of the designated channel
- * @channel: the channel to query
- *
- * Return: the GUID of the provided channel
- */
-const guid_t *visorchannel_get_guid(struct visorchannel *channel)
-{
-       return &channel->guid;
-}
-EXPORT_SYMBOL_GPL(visorchannel_get_guid);
-
-int visorchannel_read(struct visorchannel *channel, ulong offset, void *dest,
-                     ulong nbytes)
-{
-       if (offset + nbytes > channel->nbytes)
-               return -EIO;
-
-       memcpy(dest, channel->mapped + offset, nbytes);
-       return 0;
-}
-
-int visorchannel_write(struct visorchannel *channel, ulong offset, void *dest,
-                      ulong nbytes)
-{
-       size_t chdr_size = sizeof(struct channel_header);
-       size_t copy_size;
-
-       if (offset + nbytes > channel->nbytes)
-               return -EIO;
-
-       if (offset < chdr_size) {
-               copy_size = min(chdr_size - offset, nbytes);
-               memcpy(((char *)(&channel->chan_hdr)) + offset,
-                      dest, copy_size);
-       }
-       memcpy(channel->mapped + offset, dest, nbytes);
-       return 0;
-}
-
-void *visorchannel_get_header(struct visorchannel *channel)
-{
-       return &channel->chan_hdr;
-}
-
-/*
- * Return offset of a specific SIGNAL_QUEUE_HEADER from the beginning of a
- * channel header
- */
-static int sig_queue_offset(struct channel_header *chan_hdr, int q)
-{
-       return ((chan_hdr)->ch_space_offset +
-              ((q) * sizeof(struct signal_queue_header)));
-}
-
-/*
- * Return offset of a specific queue entry (data) from the beginning of a
- * channel header
- */
-static int sig_data_offset(struct channel_header *chan_hdr, int q,
-                          struct signal_queue_header *sig_hdr, int slot)
-{
-       return (sig_queue_offset(chan_hdr, q) + sig_hdr->sig_base_offset +
-              (slot * sig_hdr->signal_size));
-}
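/*
 * Worked example (illustrative numbers only, not taken from a real
 * channel): with ch_space_offset = 0x400,
 * sizeof(struct signal_queue_header) = 0x40, sig_base_offset = 0x100 and
 * signal_size = 0x20, slot 2 of queue 1 would live at:
 *
 *   sig_queue_offset(hdr, 1)        = 0x400 + 1 * 0x40          = 0x440
 *   sig_data_offset(hdr, 1, sig, 2) = 0x440 + 0x100 + 2 * 0x20  = 0x580
 *
 * Both offsets are relative to the very start of the channel header, which
 * is also offset 0 for visorchannel_read()/visorchannel_write().
 */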
-
-/*
- * Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back into
- * host memory
- */
-#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
-       visorchannel_write(channel, \
-                          sig_queue_offset(&channel->chan_hdr, queue) + \
-                          offsetof(struct signal_queue_header, FIELD), \
-                          &((sig_hdr)->FIELD), \
-                          sizeof((sig_hdr)->FIELD))
-
-static int sig_read_header(struct visorchannel *channel, u32 queue,
-                          struct signal_queue_header *sig_hdr)
-{
-       if (channel->chan_hdr.ch_space_offset < sizeof(struct channel_header))
-               return -EINVAL;
-
-       /* Read the appropriate SIGNAL_QUEUE_HEADER into local memory. */
-       return visorchannel_read(channel,
-                                sig_queue_offset(&channel->chan_hdr, queue),
-                                sig_hdr, sizeof(struct signal_queue_header));
-}
-
-static int sig_read_data(struct visorchannel *channel, u32 queue,
-                        struct signal_queue_header *sig_hdr, u32 slot,
-                        void *data)
-{
-       int signal_data_offset = sig_data_offset(&channel->chan_hdr, queue,
-                                                sig_hdr, slot);
-
-       return visorchannel_read(channel, signal_data_offset,
-                                data, sig_hdr->signal_size);
-}
-
-static int sig_write_data(struct visorchannel *channel, u32 queue,
-                         struct signal_queue_header *sig_hdr, u32 slot,
-                         void *data)
-{
-       int signal_data_offset = sig_data_offset(&channel->chan_hdr, queue,
-                                                sig_hdr, slot);
-
-       return visorchannel_write(channel, signal_data_offset,
-                                 data, sig_hdr->signal_size);
-}
-
-static int signalremove_inner(struct visorchannel *channel, u32 queue,
-                             void *msg)
-{
-       struct signal_queue_header sig_hdr;
-       int error;
-
-       error = sig_read_header(channel, queue, &sig_hdr);
-       if (error)
-               return error;
-       /* No signals to remove; have caller try again. */
-       if (sig_hdr.head == sig_hdr.tail)
-               return -EAGAIN;
-       sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
-       error = sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg);
-       if (error)
-               return error;
-       sig_hdr.num_received++;
-       /*
-        * For each data field in SIGNAL_QUEUE_HEADER that was modified, update
-        * host memory. Required for channel sync.
-        */
-       mb();
-       error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail);
-       if (error)
-               return error;
-       error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received);
-       if (error)
-               return error;
-       return 0;
-}
-
-/**
- * visorchannel_signalremove() - removes a message from the designated
- *                               channel/queue
- * @channel: the channel the message will be removed from
- * @queue:   the queue the message will be removed from
- * @msg:     the message to remove
- *
- * Return: integer error code indicating the status of the removal
- */
-int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
-                             void *msg)
-{
-       int rc;
-       unsigned long flags;
-
-       if (channel->needs_lock) {
-               spin_lock_irqsave(&channel->remove_lock, flags);
-               rc = signalremove_inner(channel, queue, msg);
-               spin_unlock_irqrestore(&channel->remove_lock, flags);
-       } else {
-               rc = signalremove_inner(channel, queue, msg);
-       }
-
-       return rc;
-}
-EXPORT_SYMBOL_GPL(visorchannel_signalremove);
-
-static bool queue_empty(struct visorchannel *channel, u32 queue)
-{
-       struct signal_queue_header sig_hdr;
-
-       if (sig_read_header(channel, queue, &sig_hdr))
-               return true;
-       return (sig_hdr.head == sig_hdr.tail);
-}
-
-/**
- * visorchannel_signalempty() - checks if the designated channel/queue contains
- *                             any messages
- * @channel: the channel to query
- * @queue:   the queue in the channel to query
- *
- * Return: boolean indicating whether any messages in the designated
- *         channel/queue are present
- */
-bool visorchannel_signalempty(struct visorchannel *channel, u32 queue)
-{
-       bool rc;
-       unsigned long flags;
-
-       if (!channel->needs_lock)
-               return queue_empty(channel, queue);
-       spin_lock_irqsave(&channel->remove_lock, flags);
-       rc = queue_empty(channel, queue);
-       spin_unlock_irqrestore(&channel->remove_lock, flags);
-       return rc;
-}
-EXPORT_SYMBOL_GPL(visorchannel_signalempty);
-
-static int signalinsert_inner(struct visorchannel *channel, u32 queue,
-                             void *msg)
-{
-       struct signal_queue_header sig_hdr;
-       int err;
-
-       err = sig_read_header(channel, queue, &sig_hdr);
-       if (err)
-               return err;
-       sig_hdr.head = (sig_hdr.head + 1) % sig_hdr.max_slots;
-       if (sig_hdr.head == sig_hdr.tail) {
-               sig_hdr.num_overflows++;
-               err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_overflows);
-               if (err)
-                       return err;
-               return -EIO;
-       }
-       err = sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg);
-       if (err)
-               return err;
-       sig_hdr.num_sent++;
-       /*
-        * For each data field in SIGNAL_QUEUE_HEADER that was modified, update
-        * host memory. Required for channel sync.
-        */
-       mb();
-       err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, head);
-       if (err)
-               return err;
-       err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent);
-       if (err)
-               return err;
-       return 0;
-}
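/*
 * Illustrative note (not in the original file): each queue is a ring of
 * max_slots entries indexed by head (producer) and tail (consumer), both
 * advanced modulo max_slots.  head == tail means "empty", and an insert
 * that would move head onto tail is reported as an overflow instead, so at
 * most max_slots - 1 signals fit at once.  For example, with max_slots = 4:
 *
 *   start                     head = 0, tail = 0   (empty)
 *   insert, insert, insert    head = 3, tail = 0   (full: 4th insert -> -EIO)
 *   remove                    head = 3, tail = 1   (one slot free again)
 */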
-
-/*
- * visorchannel_create() - creates the struct visorchannel abstraction for a
- *                         data area in memory, but does NOT modify this data
- *                         area
- * @physaddr:      physical address of start of channel
- * @gfp:           gfp_t to use when allocating memory for the data struct
- * @guid:          GUID that identifies channel type;
- * @needs_lock:    must specify true if you have multiple threads of execution
- *                 that will be calling visorchannel methods of this
- *                 visorchannel at the same time
- *
- * Return: pointer to visorchannel that was created if successful,
- *         otherwise NULL
- */
-struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
-                                        const guid_t *guid, bool needs_lock)
-{
-       struct visorchannel *channel;
-       int err;
-       size_t size = sizeof(struct channel_header);
-
-       if (physaddr == 0)
-               return NULL;
-
-       channel = kzalloc(sizeof(*channel), gfp);
-       if (!channel)
-               return NULL;
-       channel->needs_lock = needs_lock;
-       spin_lock_init(&channel->insert_lock);
-       spin_lock_init(&channel->remove_lock);
-       /*
-        * The video channel contains the efi framebuffer, so requesting its
-        * full mem region results in a resource conflict. Since we only use
-        * the efi framebuffer for video we can ignore the conflict. Remember
-        * that we did not request the region, so we do not try to release it
-        * later on.
-        */
-       channel->requested = request_mem_region(physaddr, size, VISOR_DRV_NAME);
-       if (!channel->requested && !guid_equal(guid, &visor_video_guid))
-               /* we only care about errors if this is not the video channel */
-               goto err_destroy_channel;
-       channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
-       if (!channel->mapped) {
-               release_mem_region(physaddr, size);
-               goto err_destroy_channel;
-       }
-       channel->physaddr = physaddr;
-       channel->nbytes = size;
-       err = visorchannel_read(channel, 0, &channel->chan_hdr, size);
-       if (err)
-               goto err_destroy_channel;
-       size = (ulong)channel->chan_hdr.size;
-       memunmap(channel->mapped);
-       if (channel->requested)
-               release_mem_region(channel->physaddr, channel->nbytes);
-       channel->mapped = NULL;
-       channel->requested = request_mem_region(channel->physaddr, size,
-                                               VISOR_DRV_NAME);
-       if (!channel->requested && !guid_equal(guid, &visor_video_guid))
-               /* we only care about errors if this is not the video channel */
-               goto err_destroy_channel;
-       channel->mapped = memremap(channel->physaddr, size, MEMREMAP_WB);
-       if (!channel->mapped) {
-               release_mem_region(channel->physaddr, size);
-               goto err_destroy_channel;
-       }
-       channel->nbytes = size;
-       guid_copy(&channel->guid, guid);
-       return channel;
-
-err_destroy_channel:
-       visorchannel_destroy(channel);
-       return NULL;
-}
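/*
 * Illustrative note and usage sketch (not in the original file): the
 * mapping above is intentionally done twice -- the first memremap() covers
 * only sizeof(struct channel_header) so that chan_hdr.size can be read,
 * after which the region is re-requested and re-mapped at its full size.
 * A hypothetical caller (placeholder names: phys, some_guid, msg) might do:
 *
 *	struct visorchannel *ch;
 *
 *	ch = visorchannel_create(phys, GFP_KERNEL, &some_guid, true);
 *	if (!ch)
 *		return -ENOMEM;
 *	err = visorchannel_signalinsert(ch, CONTROLVM_QUEUE_REQUEST, &msg);
 *	...
 *	visorchannel_destroy(ch);
 */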
-
-/**
- * visorchannel_signalinsert() - inserts a message into the designated
- *                               channel/queue
- * @channel: the channel the message will be added to
- * @queue:   the queue the message will be added to
- * @msg:     the message to insert
- *
- * Return: integer error code indicating the status of the insertion
- */
-int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
-                             void *msg)
-{
-       int rc;
-       unsigned long flags;
-
-       if (channel->needs_lock) {
-               spin_lock_irqsave(&channel->insert_lock, flags);
-               rc = signalinsert_inner(channel, queue, msg);
-               spin_unlock_irqrestore(&channel->insert_lock, flags);
-       } else {
-               rc = signalinsert_inner(channel, queue, msg);
-       }
-
-       return rc;
-}
-EXPORT_SYMBOL_GPL(visorchannel_signalinsert);
diff --git a/drivers/visorbus/visorchipset.c b/drivers/visorbus/visorchipset.c
deleted file mode 100644 (file)
index 5668cad..0000000
+++ /dev/null
@@ -1,1691 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- */
-
-#include <linux/acpi.h>
-#include <linux/crash_dump.h>
-#include <linux/visorbus.h>
-
-#include "visorbus_private.h"
-
-/* {72120008-4AAB-11DC-8530-444553544200} */
-#define VISOR_SIOVM_GUID GUID_INIT(0x72120008, 0x4AAB, 0x11DC, 0x85, 0x30, \
-                                  0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
-
-static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
-static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
-static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;
-
-#define POLLJIFFIES_CONTROLVM_FAST 1
-#define POLLJIFFIES_CONTROLVM_SLOW 100
-
-#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
-
-#define UNISYS_VISOR_LEAF_ID 0x40000000
-
-/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
-#define UNISYS_VISOR_ID_EBX 0x73696e55
-#define UNISYS_VISOR_ID_ECX 0x70537379
-#define UNISYS_VISOR_ID_EDX 0x34367261
-
-/*
- * When the controlvm channel is idle for at least MIN_IDLE_SECONDS, we switch
- * to slow polling mode. As soon as we get a controlvm message, we switch back
- * to fast polling mode.
- */
-#define MIN_IDLE_SECONDS 10
-
-struct parser_context {
-       unsigned long allocbytes;
-       unsigned long param_bytes;
-       u8 *curr;
-       unsigned long bytes_remaining;
-       bool byte_stream;
-       struct visor_controlvm_parameters_header data;
-};
-
-/* VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO. */
-#define VMCALL_CONTROLVM_ADDR 0x0501
-
-enum vmcall_result {
-       VMCALL_RESULT_SUCCESS = 0,
-       VMCALL_RESULT_INVALID_PARAM = 1,
-       VMCALL_RESULT_DATA_UNAVAILABLE = 2,
-       VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
-       VMCALL_RESULT_DEVICE_ERROR = 4,
-       VMCALL_RESULT_DEVICE_NOT_READY = 5
-};
-
-/*
- * struct vmcall_io_controlvm_addr_params - Structure for IO VMCALLS. Has
- *                                         parameters to VMCALL_CONTROLVM_ADDR
- *                                         interface.
- * @address:      The Guest-relative physical address of the ControlVm channel.
- *                This VMCall fills this in with the appropriate address.
- *                Contents provided by this VMCALL (OUT).
- * @channel_bytes: The size of the ControlVm channel in bytes. This VMCall
- *                fills this in with the appropriate size. Contents provided
- *                by this VMCALL (OUT).
- * @unused:       Unused Bytes in the 64-Bit Aligned Struct.
- */
-struct vmcall_io_controlvm_addr_params {
-       u64 address;
-       u32 channel_bytes;
-       u8 unused[4];
-} __packed;
-
-struct visorchipset_device {
-       struct acpi_device *acpi_device;
-       unsigned long poll_jiffies;
-       /* when we got our last controlvm message */
-       unsigned long most_recent_message_jiffies;
-       struct delayed_work periodic_controlvm_work;
-       struct visorchannel *controlvm_channel;
-       unsigned long controlvm_payload_bytes_buffered;
-       /*
-        * The following variables are used to handle the scenario where we are
-        * unable to offload the payload from a controlvm message due to memory
-        * requirements. In this scenario, we simply stash the controlvm
-        * message, then attempt to process it again the next time
-        * controlvm_periodic_work() runs.
-        */
-       struct controlvm_message controlvm_pending_msg;
-       bool controlvm_pending_msg_valid;
-       struct vmcall_io_controlvm_addr_params controlvm_params;
-};
-
-static struct visorchipset_device *chipset_dev;
-
-struct parahotplug_request {
-       struct list_head list;
-       int id;
-       unsigned long expiration;
-       struct controlvm_message msg;
-};
-
-/* device attributes exposed via sysfs */
-static ssize_t toolaction_show(struct device *dev,
-                              struct device_attribute *attr,
-                              char *buf)
-{
-       u8 tool_action = 0;
-       int err;
-
-       err = visorchannel_read(chipset_dev->controlvm_channel,
-                               offsetof(struct visor_controlvm_channel,
-                                        tool_action),
-                               &tool_action, sizeof(u8));
-       if (err)
-               return err;
-       return sprintf(buf, "%u\n", tool_action);
-}
-
-static ssize_t toolaction_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t count)
-{
-       u8 tool_action;
-       int err;
-
-       if (kstrtou8(buf, 10, &tool_action))
-               return -EINVAL;
-       err = visorchannel_write(chipset_dev->controlvm_channel,
-                                offsetof(struct visor_controlvm_channel,
-                                         tool_action),
-                                &tool_action, sizeof(u8));
-       if (err)
-               return err;
-       return count;
-}
-static DEVICE_ATTR_RW(toolaction);
-
-static ssize_t boottotool_show(struct device *dev,
-                              struct device_attribute *attr,
-                              char *buf)
-{
-       struct efi_visor_indication efi_visor_indication;
-       int err;
-
-       err = visorchannel_read(chipset_dev->controlvm_channel,
-                               offsetof(struct visor_controlvm_channel,
-                                        efi_visor_ind),
-                               &efi_visor_indication,
-                               sizeof(struct efi_visor_indication));
-       if (err)
-               return err;
-       return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
-}
-
-static ssize_t boottotool_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t count)
-{
-       int val, err;
-       struct efi_visor_indication efi_visor_indication;
-
-       if (kstrtoint(buf, 10, &val))
-               return -EINVAL;
-       efi_visor_indication.boot_to_tool = val;
-       err = visorchannel_write(chipset_dev->controlvm_channel,
-                                offsetof(struct visor_controlvm_channel,
-                                         efi_visor_ind),
-                                &(efi_visor_indication),
-                                sizeof(struct efi_visor_indication));
-       if (err)
-               return err;
-       return count;
-}
-static DEVICE_ATTR_RW(boottotool);
-
-static ssize_t error_show(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       u32 error = 0;
-       int err;
-
-       err = visorchannel_read(chipset_dev->controlvm_channel,
-                               offsetof(struct visor_controlvm_channel,
-                                        installation_error),
-                               &error, sizeof(u32));
-       if (err)
-               return err;
-       return sprintf(buf, "%u\n", error);
-}
-
-static ssize_t error_store(struct device *dev, struct device_attribute *attr,
-                          const char *buf, size_t count)
-{
-       u32 error;
-       int err;
-
-       if (kstrtou32(buf, 10, &error))
-               return -EINVAL;
-       err = visorchannel_write(chipset_dev->controlvm_channel,
-                                offsetof(struct visor_controlvm_channel,
-                                         installation_error),
-                                &error, sizeof(u32));
-       if (err)
-               return err;
-       return count;
-}
-static DEVICE_ATTR_RW(error);
-
-static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
-                          char *buf)
-{
-       u32 text_id = 0;
-       int err;
-
-       err = visorchannel_read(chipset_dev->controlvm_channel,
-                               offsetof(struct visor_controlvm_channel,
-                                        installation_text_id),
-                               &text_id, sizeof(u32));
-       if (err)
-               return err;
-       return sprintf(buf, "%u\n", text_id);
-}
-
-static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
-                           const char *buf, size_t count)
-{
-       u32 text_id;
-       int err;
-
-       if (kstrtou32(buf, 10, &text_id))
-               return -EINVAL;
-       err = visorchannel_write(chipset_dev->controlvm_channel,
-                                offsetof(struct visor_controlvm_channel,
-                                         installation_text_id),
-                                &text_id, sizeof(u32));
-       if (err)
-               return err;
-       return count;
-}
-static DEVICE_ATTR_RW(textid);
-
-static ssize_t remaining_steps_show(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-       u16 remaining_steps = 0;
-       int err;
-
-       err = visorchannel_read(chipset_dev->controlvm_channel,
-                               offsetof(struct visor_controlvm_channel,
-                                        installation_remaining_steps),
-                               &remaining_steps, sizeof(u16));
-       if (err)
-               return err;
-       return sprintf(buf, "%hu\n", remaining_steps);
-}
-
-static ssize_t remaining_steps_store(struct device *dev,
-                                    struct device_attribute *attr,
-                                    const char *buf, size_t count)
-{
-       u16 remaining_steps;
-       int err;
-
-       if (kstrtou16(buf, 10, &remaining_steps))
-               return -EINVAL;
-       err = visorchannel_write(chipset_dev->controlvm_channel,
-                                offsetof(struct visor_controlvm_channel,
-                                         installation_remaining_steps),
-                                &remaining_steps, sizeof(u16));
-       if (err)
-               return err;
-       return count;
-}
-static DEVICE_ATTR_RW(remaining_steps);
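/*
 * Illustrative usage (hypothetical shell session; the exact sysfs path
 * depends on where the visorchipset device lands): the attributes above
 * simply mirror fields of the controlvm channel, e.g.
 *
 *   # from the sysfs directory of the visorchipset device:
 *   $ cat textid
 *   $ echo 3 > remaining_steps
 */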
-
-static void controlvm_init_response(struct controlvm_message *msg,
-                                   struct controlvm_message_header *msg_hdr,
-                                   int response)
-{
-       memset(msg, 0, sizeof(struct controlvm_message));
-       memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
-       msg->hdr.payload_bytes = 0;
-       msg->hdr.payload_vm_offset = 0;
-       msg->hdr.payload_max_bytes = 0;
-       if (response < 0) {
-               msg->hdr.flags.failed = 1;
-               msg->hdr.completion_status = (u32)(-response);
-       }
-}
-
-static int controlvm_respond_chipset_init(
-                               struct controlvm_message_header *msg_hdr,
-                               int response,
-                               enum visor_chipset_feature features)
-{
-       struct controlvm_message outmsg;
-
-       controlvm_init_response(&outmsg, msg_hdr, response);
-       outmsg.cmd.init_chipset.features = features;
-       return visorchannel_signalinsert(chipset_dev->controlvm_channel,
-                                        CONTROLVM_QUEUE_REQUEST, &outmsg);
-}
-
-static int chipset_init(struct controlvm_message *inmsg)
-{
-       static int chipset_inited;
-       enum visor_chipset_feature features = 0;
-       int rc = CONTROLVM_RESP_SUCCESS;
-       int res = 0;
-
-       if (chipset_inited) {
-               rc = -CONTROLVM_RESP_ALREADY_DONE;
-               res = -EIO;
-               goto out_respond;
-       }
-       chipset_inited = 1;
-       /*
-        * Set features to indicate we support parahotplug (if Command also
-        * supports it). Set the "reply" bit so Command knows this is a
-        * features-aware driver.
-        */
-       features = inmsg->cmd.init_chipset.features &
-                  VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
-       features |= VISOR_CHIPSET_FEATURE_REPLY;
-
-out_respond:
-       if (inmsg->hdr.flags.response_expected)
-               res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
-
-       return res;
-}
-
-static int controlvm_respond(struct controlvm_message_header *msg_hdr,
-                            int response, struct visor_segment_state *state)
-{
-       struct controlvm_message outmsg;
-
-       controlvm_init_response(&outmsg, msg_hdr, response);
-       if (outmsg.hdr.flags.test_message == 1)
-               return -EINVAL;
-       if (state) {
-               outmsg.cmd.device_change_state.state = *state;
-               outmsg.cmd.device_change_state.flags.phys_device = 1;
-       }
-       return visorchannel_signalinsert(chipset_dev->controlvm_channel,
-                                        CONTROLVM_QUEUE_REQUEST, &outmsg);
-}
-
-enum crash_obj_type {
-       CRASH_DEV,
-       CRASH_BUS,
-};
-
-static int save_crash_message(struct controlvm_message *msg,
-                             enum crash_obj_type cr_type)
-{
-       u32 local_crash_msg_offset;
-       u16 local_crash_msg_count;
-       int err;
-
-       err = visorchannel_read(chipset_dev->controlvm_channel,
-                               offsetof(struct visor_controlvm_channel,
-                                        saved_crash_message_count),
-                               &local_crash_msg_count, sizeof(u16));
-       if (err) {
-               dev_err(&chipset_dev->acpi_device->dev,
-                       "failed to read message count\n");
-               return err;
-       }
-       if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
-               dev_err(&chipset_dev->acpi_device->dev,
-                       "invalid number of messages\n");
-               return -EIO;
-       }
-       err = visorchannel_read(chipset_dev->controlvm_channel,
-                               offsetof(struct visor_controlvm_channel,
-                                        saved_crash_message_offset),
-                               &local_crash_msg_offset, sizeof(u32));
-       if (err) {
-               dev_err(&chipset_dev->acpi_device->dev,
-                       "failed to read offset\n");
-               return err;
-       }
-       switch (cr_type) {
-       case CRASH_DEV:
-               local_crash_msg_offset += sizeof(struct controlvm_message);
-               err = visorchannel_write(chipset_dev->controlvm_channel,
-                                        local_crash_msg_offset, msg,
-                                        sizeof(struct controlvm_message));
-               if (err) {
-                       dev_err(&chipset_dev->acpi_device->dev,
-                               "failed to write dev msg\n");
-                       return err;
-               }
-               break;
-       case CRASH_BUS:
-               err = visorchannel_write(chipset_dev->controlvm_channel,
-                                        local_crash_msg_offset, msg,
-                                        sizeof(struct controlvm_message));
-               if (err) {
-                       dev_err(&chipset_dev->acpi_device->dev,
-                               "failed to write bus msg\n");
-                       return err;
-               }
-               break;
-       default:
-               dev_err(&chipset_dev->acpi_device->dev,
-                       "Invalid crash_obj_type\n");
-               break;
-       }
-       return 0;
-}
-
-static int controlvm_responder(enum controlvm_id cmd_id,
-                              struct controlvm_message_header *pending_msg_hdr,
-                              int response)
-{
-       if (pending_msg_hdr->id != (u32)cmd_id)
-               return -EINVAL;
-
-       return controlvm_respond(pending_msg_hdr, response, NULL);
-}
-
-static int device_changestate_responder(enum controlvm_id cmd_id,
-                                       struct visor_device *p, int response,
-                                       struct visor_segment_state state)
-{
-       struct controlvm_message outmsg;
-
-       if (p->pending_msg_hdr->id != cmd_id)
-               return -EINVAL;
-
-       controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
-       outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
-       outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
-       outmsg.cmd.device_change_state.state = state;
-       return visorchannel_signalinsert(chipset_dev->controlvm_channel,
-                                        CONTROLVM_QUEUE_REQUEST, &outmsg);
-}
-
-static int visorbus_create(struct controlvm_message *inmsg)
-{
-       struct controlvm_message_packet *cmd = &inmsg->cmd;
-       struct controlvm_message_header *pmsg_hdr;
-       u32 bus_no = cmd->create_bus.bus_no;
-       struct visor_device *bus_info;
-       struct visorchannel *visorchannel;
-       int err;
-
-       bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
-       if (bus_info && bus_info->state.created == 1) {
-               dev_err(&chipset_dev->acpi_device->dev,
-                       "failed %s: already exists\n", __func__);
-               err = -EEXIST;
-               goto err_respond;
-       }
-       bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
-       if (!bus_info) {
-               err = -ENOMEM;
-               goto err_respond;
-       }
-       INIT_LIST_HEAD(&bus_info->list_all);
-       bus_info->chipset_bus_no = bus_no;
-       bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
-       if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
-               err = save_crash_message(inmsg, CRASH_BUS);
-               if (err)
-                       goto err_free_bus_info;
-       }
-       if (inmsg->hdr.flags.response_expected == 1) {
-               pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
-               if (!pmsg_hdr) {
-                       err = -ENOMEM;
-                       goto err_free_bus_info;
-               }
-               memcpy(pmsg_hdr, &inmsg->hdr,
-                      sizeof(struct controlvm_message_header));
-               bus_info->pending_msg_hdr = pmsg_hdr;
-       }
-       visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
-                                          GFP_KERNEL,
-                                          &cmd->create_bus.bus_data_type_guid,
-                                          false);
-       if (!visorchannel) {
-               err = -ENOMEM;
-               goto err_free_pending_msg;
-       }
-       bus_info->visorchannel = visorchannel;
-       /* Response will be handled by visorbus_create_instance on success */
-       err = visorbus_create_instance(bus_info);
-       if (err)
-               goto err_destroy_channel;
-       return 0;
-
-err_destroy_channel:
-       visorchannel_destroy(visorchannel);
-
-err_free_pending_msg:
-       kfree(bus_info->pending_msg_hdr);
-
-err_free_bus_info:
-       kfree(bus_info);
-
-err_respond:
-       if (inmsg->hdr.flags.response_expected == 1)
-               controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
-       return err;
-}
-
-static int visorbus_destroy(struct controlvm_message *inmsg)
-{
-       struct controlvm_message_header *pmsg_hdr;
-       u32 bus_no = inmsg->cmd.destroy_bus.bus_no;
-       struct visor_device *bus_info;
-       int err;
-
-       bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
-       if (!bus_info) {
-               err = -ENODEV;
-               goto err_respond;
-       }
-       if (bus_info->state.created == 0) {
-               err = -ENOENT;
-               goto err_respond;
-       }
-       if (bus_info->pending_msg_hdr) {
-               /* only non-NULL if dev is still waiting on a response */
-               err = -EEXIST;
-               goto err_respond;
-       }
-       if (inmsg->hdr.flags.response_expected == 1) {
-               pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
-               if (!pmsg_hdr) {
-                       err = -ENOMEM;
-                       goto err_respond;
-               }
-               memcpy(pmsg_hdr, &inmsg->hdr,
-                      sizeof(struct controlvm_message_header));
-               bus_info->pending_msg_hdr = pmsg_hdr;
-       }
-       /* Response will be handled by visorbus_remove_instance */
-       visorbus_remove_instance(bus_info);
-       return 0;
-
-err_respond:
-       if (inmsg->hdr.flags.response_expected == 1)
-               controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
-       return err;
-}
-
-static const guid_t *parser_id_get(struct parser_context *ctx)
-{
-       return &ctx->data.id;
-}
-
-static void *parser_string_get(u8 *pscan, int nscan)
-{
-       int value_length;
-       void *value;
-
-       if (nscan == 0)
-               return NULL;
-
-       value_length = strnlen(pscan, nscan);
-       value = kzalloc(value_length + 1, GFP_KERNEL);
-       if (!value)
-               return NULL;
-       if (value_length > 0)
-               memcpy(value, pscan, value_length);
-       return value;
-}
-
-static void *parser_name_get(struct parser_context *ctx)
-{
-       struct visor_controlvm_parameters_header *phdr;
-
-       phdr = &ctx->data;
-       if ((unsigned long)phdr->name_offset +
-           (unsigned long)phdr->name_length > ctx->param_bytes)
-               return NULL;
-       ctx->curr = (u8 *)phdr + phdr->name_offset;
-       ctx->bytes_remaining = phdr->name_length;
-       return parser_string_get(ctx->curr, phdr->name_length);
-}
-
-static int visorbus_configure(struct controlvm_message *inmsg,
-                             struct parser_context *parser_ctx)
-{
-       struct controlvm_message_packet *cmd = &inmsg->cmd;
-       u32 bus_no;
-       struct visor_device *bus_info;
-       int err = 0;
-
-       bus_no = cmd->configure_bus.bus_no;
-       bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
-       if (!bus_info) {
-               err = -EINVAL;
-               goto err_respond;
-       }
-       if (bus_info->state.created == 0) {
-               err = -EINVAL;
-               goto err_respond;
-       }
-       if (bus_info->pending_msg_hdr) {
-               err = -EIO;
-               goto err_respond;
-       }
-       err = visorchannel_set_clientpartition(bus_info->visorchannel,
-                                              cmd->configure_bus.guest_handle);
-       if (err)
-               goto err_respond;
-       if (parser_ctx) {
-               const guid_t *partition_guid = parser_id_get(parser_ctx);
-
-               guid_copy(&bus_info->partition_guid, partition_guid);
-               bus_info->name = parser_name_get(parser_ctx);
-       }
-       if (inmsg->hdr.flags.response_expected == 1)
-               controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
-       return 0;
-
-err_respond:
-       dev_err(&chipset_dev->acpi_device->dev,
-               "%s exited with err: %d\n", __func__, err);
-       if (inmsg->hdr.flags.response_expected == 1)
-               controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
-       return err;
-}
-
-static int visorbus_device_create(struct controlvm_message *inmsg)
-{
-       struct controlvm_message_packet *cmd = &inmsg->cmd;
-       struct controlvm_message_header *pmsg_hdr;
-       u32 bus_no = cmd->create_device.bus_no;
-       u32 dev_no = cmd->create_device.dev_no;
-       struct visor_device *dev_info;
-       struct visor_device *bus_info;
-       struct visorchannel *visorchannel;
-       int err;
-
-       bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
-       if (!bus_info) {
-               dev_err(&chipset_dev->acpi_device->dev,
-                       "failed to get bus by id: %d\n", bus_no);
-               err = -ENODEV;
-               goto err_respond;
-       }
-       if (bus_info->state.created == 0) {
-               dev_err(&chipset_dev->acpi_device->dev,
-                       "bus not created, id: %d\n", bus_no);
-               err = -EINVAL;
-               goto err_respond;
-       }
-       dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
-       if (dev_info && dev_info->state.created == 1) {
-               dev_err(&chipset_dev->acpi_device->dev,
-                       "device already exists: %d/%d\n", bus_no, dev_no);
-               err = -EEXIST;
-               goto err_respond;
-       }
-
-       dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
-       if (!dev_info) {
-               err = -ENOMEM;
-               goto err_respond;
-       }
-       dev_info->chipset_bus_no = bus_no;
-       dev_info->chipset_dev_no = dev_no;
-       guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);
-       dev_info->device.parent = &bus_info->device;
-       visorchannel = visorchannel_create(cmd->create_device.channel_addr,
-                                          GFP_KERNEL,
-                                          &cmd->create_device.data_type_guid,
-                                          true);
-       if (!visorchannel) {
-               dev_err(&chipset_dev->acpi_device->dev,
-                       "failed to create visorchannel: %d/%d\n",
-                       bus_no, dev_no);
-               err = -ENOMEM;
-               goto err_free_dev_info;
-       }
-       dev_info->visorchannel = visorchannel;
-       guid_copy(&dev_info->channel_type_guid,
-                 &cmd->create_device.data_type_guid);
-       if (guid_equal(&cmd->create_device.data_type_guid,
-                      &visor_vhba_channel_guid)) {
-               err = save_crash_message(inmsg, CRASH_DEV);
-               if (err)
-                       goto err_destroy_visorchannel;
-       }
-       if (inmsg->hdr.flags.response_expected == 1) {
-               pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
-               if (!pmsg_hdr) {
-                       err = -ENOMEM;
-                       goto err_destroy_visorchannel;
-               }
-               memcpy(pmsg_hdr, &inmsg->hdr,
-                      sizeof(struct controlvm_message_header));
-               dev_info->pending_msg_hdr = pmsg_hdr;
-       }
-       /* create_visor_device will send response */
-       err = create_visor_device(dev_info);
-       if (err)
-               goto err_destroy_visorchannel;
-
-       return 0;
-
-err_destroy_visorchannel:
-       visorchannel_destroy(visorchannel);
-
-err_free_dev_info:
-       kfree(dev_info);
-
-err_respond:
-       if (inmsg->hdr.flags.response_expected == 1)
-               controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
-       return err;
-}
-
-static int visorbus_device_changestate(struct controlvm_message *inmsg)
-{
-       struct controlvm_message_packet *cmd = &inmsg->cmd;
-       struct controlvm_message_header *pmsg_hdr;
-       u32 bus_no = cmd->device_change_state.bus_no;
-       u32 dev_no = cmd->device_change_state.dev_no;
-       struct visor_segment_state state = cmd->device_change_state.state;
-       struct visor_device *dev_info;
-       int err = 0;
-
-       dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
-       if (!dev_info) {
-               err = -ENODEV;
-               goto err_respond;
-       }
-       if (dev_info->state.created == 0) {
-               err = -EINVAL;
-               goto err_respond;
-       }
-       if (dev_info->pending_msg_hdr) {
-               /* only non-NULL if dev is still waiting on a response */
-               err = -EIO;
-               goto err_respond;
-       }
-
-       if (inmsg->hdr.flags.response_expected == 1) {
-               pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
-               if (!pmsg_hdr) {
-                       err = -ENOMEM;
-                       goto err_respond;
-               }
-               memcpy(pmsg_hdr, &inmsg->hdr,
-                      sizeof(struct controlvm_message_header));
-               dev_info->pending_msg_hdr = pmsg_hdr;
-       }
-       if (state.alive == segment_state_running.alive &&
-           state.operating == segment_state_running.operating)
-               /* Response will be sent from visorchipset_device_resume */
-               err = visorchipset_device_resume(dev_info);
-       /* ServerNotReady / ServerLost / SegmentStateStandby */
-       else if (state.alive == segment_state_standby.alive &&
-                state.operating == segment_state_standby.operating)
-               /*
-                * technically this is the standby case where the server is lost.
-                * Response will be sent from visorchipset_device_pause.
-                */
-               err = visorchipset_device_pause(dev_info);
-       if (err)
-               goto err_respond;
-       return 0;
-
-err_respond:
-       dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err);
-       if (inmsg->hdr.flags.response_expected == 1)
-               controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
-       return err;
-}
-
-static int visorbus_device_destroy(struct controlvm_message *inmsg)
-{
-       struct controlvm_message_packet *cmd = &inmsg->cmd;
-       struct controlvm_message_header *pmsg_hdr;
-       u32 bus_no = cmd->destroy_device.bus_no;
-       u32 dev_no = cmd->destroy_device.dev_no;
-       struct visor_device *dev_info;
-       int err;
-
-       dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
-       if (!dev_info) {
-               err = -ENODEV;
-               goto err_respond;
-       }
-       if (dev_info->state.created == 0) {
-               err = -EINVAL;
-               goto err_respond;
-       }
-       if (dev_info->pending_msg_hdr) {
-               /* only non-NULL if dev is still waiting on a response */
-               err = -EIO;
-               goto err_respond;
-       }
-       if (inmsg->hdr.flags.response_expected == 1) {
-               pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
-               if (!pmsg_hdr) {
-                       err = -ENOMEM;
-                       goto err_respond;
-               }
-
-               memcpy(pmsg_hdr, &inmsg->hdr,
-                      sizeof(struct controlvm_message_header));
-               dev_info->pending_msg_hdr = pmsg_hdr;
-       }
-       kfree(dev_info->name);
-       remove_visor_device(dev_info);
-       return 0;
-
-err_respond:
-       if (inmsg->hdr.flags.response_expected == 1)
-               controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
-       return err;
-}
-
-/*
- * The general parahotplug flow works as follows. The visorchipset receives
- * a DEVICE_CHANGESTATE message from Command specifying a physical device
- * to enable or disable. The CONTROLVM message handler calls
- * parahotplug_process_message, which then adds the message to a global list
- * and kicks off a udev event which causes a user level script to enable or
- * disable the specified device. The udev script then writes to
- * /sys/devices/platform/visorchipset/parahotplug, which causes the
- * parahotplug store functions to get called, at which point the
- * appropriate CONTROLVM message is retrieved from the list and responded to.
- */
-
-#define PARAHOTPLUG_TIMEOUT_MS 2000
-
-/*
- * parahotplug_next_id() - generate unique int to match an outstanding
- *                         CONTROLVM message with a udev script /sys
- *                         response
- *
- * Return: a unique integer value
- */
-static int parahotplug_next_id(void)
-{
-       static atomic_t id = ATOMIC_INIT(0);
-
-       return atomic_inc_return(&id);
-}
-
-/*
- * parahotplug_next_expiration() - returns the time (in jiffies) when a
- *                                 CONTROLVM message on the list should expire
- *                                 -- PARAHOTPLUG_TIMEOUT_MS in the future
- *
- * Return: expected expiration time (in jiffies)
- */
-static unsigned long parahotplug_next_expiration(void)
-{
-       return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
-}
-
-/*
- * parahotplug_request_create() - create a parahotplug_request, which is
- *                                basically a wrapper for a CONTROLVM_MESSAGE
- *                                that we can stick on a list
- * @msg: the message to insert in the request
- *
- * Return: the request containing the provided message
- */
-static struct parahotplug_request *parahotplug_request_create(
-                                               struct controlvm_message *msg)
-{
-       struct parahotplug_request *req;
-
-       req = kmalloc(sizeof(*req), GFP_KERNEL);
-       if (!req)
-               return NULL;
-       req->id = parahotplug_next_id();
-       req->expiration = parahotplug_next_expiration();
-       req->msg = *msg;
-       return req;
-}
-
-/*
- * parahotplug_request_destroy() - free a parahotplug_request
- * @req: the request to deallocate
- */
-static void parahotplug_request_destroy(struct parahotplug_request *req)
-{
-       kfree(req);
-}
-
-static LIST_HEAD(parahotplug_request_list);
-/* lock for above */
-static DEFINE_SPINLOCK(parahotplug_request_list_lock);
-
-/*
- * parahotplug_request_complete() - mark request as complete
- * @id:     the id of the request
- * @active: indicates whether the request is assigned to active partition
- *
- * Called from the /sys handler, which means the user script has
- * finished the enable/disable. Find the matching identifier, and
- * respond to the CONTROLVM message with success.
- *
- * Return: 0 on success or -EINVAL on failure
- */
-static int parahotplug_request_complete(int id, u16 active)
-{
-       struct list_head *pos;
-       struct list_head *tmp;
-       struct parahotplug_request *req;
-
-       spin_lock(&parahotplug_request_list_lock);
-       /* Look for a request matching "id". */
-       list_for_each_safe(pos, tmp, &parahotplug_request_list) {
-               req = list_entry(pos, struct parahotplug_request, list);
-               if (req->id == id) {
-                       /*
-                        * Found a match. Remove it from the list and
-                        * respond.
-                        */
-                       list_del(pos);
-                       spin_unlock(&parahotplug_request_list_lock);
-                       req->msg.cmd.device_change_state.state.active = active;
-                       if (req->msg.hdr.flags.response_expected)
-                               controlvm_respond(
-                                      &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
-                                      &req->msg.cmd.device_change_state.state);
-                       parahotplug_request_destroy(req);
-                       return 0;
-               }
-       }
-       spin_unlock(&parahotplug_request_list_lock);
-       return -EINVAL;
-}
-
-/*
- * devicedisabled_store() - disables the hotplug device
- * @dev:   sysfs interface variable not utilized in this function
- * @attr:  sysfs interface variable not utilized in this function
- * @buf:   buffer containing the device id
- * @count: the size of the buffer
- *
- * The parahotplug/devicedisabled interface gets called by our support script
- * when an SR-IOV device has been shut down. The ID is passed to the script
- * and then passed back when the device has been removed.
- *
- * Return: the size of the buffer for success or negative for error
- */
-static ssize_t devicedisabled_store(struct device *dev,
-                                   struct device_attribute *attr,
-                                   const char *buf, size_t count)
-{
-       unsigned int id;
-       int err;
-
-       if (kstrtouint(buf, 10, &id))
-               return -EINVAL;
-       err = parahotplug_request_complete(id, 0);
-       if (err < 0)
-               return err;
-       return count;
-}
-static DEVICE_ATTR_WO(devicedisabled);
-
-/*
- * deviceenabled_store() - enables the hotplug device
- * @dev:   sysfs interface variable not utilized in this function
- * @attr:  sysfs interface variable not utilized in this function
- * @buf:   buffer containing the device id
- * @count: the size of the buffer
- *
- * The parahotplug/deviceenabled interface gets called by our support script
- * when an SR-IOV device has been recovered. The ID is passed to the script
- * and then passed back when the device has been brought back up.
- *
- * Return: the size of the buffer for success or negative for error
- */
-static ssize_t deviceenabled_store(struct device *dev,
-                                  struct device_attribute *attr,
-                                  const char *buf, size_t count)
-{
-       unsigned int id;
-
-       if (kstrtouint(buf, 10, &id))
-               return -EINVAL;
-       parahotplug_request_complete(id, 1);
-       return count;
-}
-static DEVICE_ATTR_WO(deviceenabled);
-
-static struct attribute *visorchipset_install_attrs[] = {
-       &dev_attr_toolaction.attr,
-       &dev_attr_boottotool.attr,
-       &dev_attr_error.attr,
-       &dev_attr_textid.attr,
-       &dev_attr_remaining_steps.attr,
-       NULL
-};
-
-static const struct attribute_group visorchipset_install_group = {
-       .name = "install",
-       .attrs = visorchipset_install_attrs
-};
-
-static struct attribute *visorchipset_parahotplug_attrs[] = {
-       &dev_attr_devicedisabled.attr,
-       &dev_attr_deviceenabled.attr,
-       NULL
-};
-
-static const struct attribute_group visorchipset_parahotplug_group = {
-       .name = "parahotplug",
-       .attrs = visorchipset_parahotplug_attrs
-};
-
-static const struct attribute_group *visorchipset_dev_groups[] = {
-       &visorchipset_install_group,
-       &visorchipset_parahotplug_group,
-       NULL
-};
-
-/*
- * parahotplug_request_kickoff() - initiate parahotplug request
- * @req: the request to initiate
- *
- * Cause uevent to run the user level script to do the disable/enable specified
- * in the parahotplug_request.
- */
-static int parahotplug_request_kickoff(struct parahotplug_request *req)
-{
-       struct controlvm_message_packet *cmd = &req->msg.cmd;
-       char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
-            env_func[40];
-       char *envp[] = { env_cmd, env_id, env_state, env_bus, env_dev,
-                        env_func, NULL
-       };
-
-       sprintf(env_cmd, "VISOR_PARAHOTPLUG=1");
-       sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id);
-       sprintf(env_state, "VISOR_PARAHOTPLUG_STATE=%d",
-               cmd->device_change_state.state.active);
-       sprintf(env_bus, "VISOR_PARAHOTPLUG_BUS=%d",
-               cmd->device_change_state.bus_no);
-       sprintf(env_dev, "VISOR_PARAHOTPLUG_DEVICE=%d",
-               cmd->device_change_state.dev_no >> 3);
-       sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d",
-               cmd->device_change_state.dev_no & 0x7);
-       return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
-                                 KOBJ_CHANGE, envp);
-}
-
-/*
- * parahotplug_process_message() - enables or disables a PCI device by kicking
- *                                 off a udev script
- * @inmsg: the message indicating whether to enable or disable
- */
-static int parahotplug_process_message(struct controlvm_message *inmsg)
-{
-       struct parahotplug_request *req;
-       int err;
-
-       req = parahotplug_request_create(inmsg);
-       if (!req)
-               return -ENOMEM;
-       /*
-        * For enable messages, just respond with success right away, we don't
-        * need to wait to see if the enable was successful.
-        */
-       if (inmsg->cmd.device_change_state.state.active) {
-               err = parahotplug_request_kickoff(req);
-               if (err)
-                       goto err_respond;
-               controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
-                                 &inmsg->cmd.device_change_state.state);
-               parahotplug_request_destroy(req);
-               return 0;
-       }
-       /*
-        * For disable messages, add the request to the request list before
-        * kicking off the udev script. It won't get responded to until the
-        * script has indicated it's done.
-        */
-       spin_lock(&parahotplug_request_list_lock);
-       list_add_tail(&req->list, &parahotplug_request_list);
-       spin_unlock(&parahotplug_request_list_lock);
-       err = parahotplug_request_kickoff(req);
-       if (err)
-               goto err_respond;
-       return 0;
-
-err_respond:
-       controlvm_respond(&inmsg->hdr, err,
-                         &inmsg->cmd.device_change_state.state);
-       return err;
-}
-
-/*
- * chipset_ready_uevent() - sends chipset_ready action
- *
- * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
- *
- * Return: 0 on success, negative on failure
- */
-static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
-{
-       int res;
-
-       res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj, KOBJ_ONLINE);
-       if (msg_hdr->flags.response_expected)
-               controlvm_respond(msg_hdr, res, NULL);
-       return res;
-}
-
-/*
- * chipset_selftest_uevent() - sends chipset_selftest action
- *
- * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
- *
- * Return: 0 on success, negative on failure
- */
-static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
-{
-       char env_selftest[20];
-       char *envp[] = { env_selftest, NULL };
-       int res;
-
-       sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
-       res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
-                                KOBJ_CHANGE, envp);
-       if (msg_hdr->flags.response_expected)
-               controlvm_respond(msg_hdr, res, NULL);
-       return res;
-}
-
-/*
- * chipset_notready_uevent() - sends chipset_notready action
- *
- * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
- *
- * Return: 0 on success, negative on failure
- */
-static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
-{
-       int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
-                                KOBJ_OFFLINE);
-
-       if (msg_hdr->flags.response_expected)
-               controlvm_respond(msg_hdr, res, NULL);
-       return res;
-}
-
-static int unisys_vmcall(unsigned long tuple, unsigned long param)
-{
-       int result = 0;
-       unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
-       unsigned long reg_ebx;
-       unsigned long reg_ecx;
-
-       reg_ebx = param & 0xFFFFFFFF;
-       reg_ecx = param >> 32;
-       cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
-       if (!(cpuid_ecx & 0x80000000))
-               return -EPERM;
-       __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
-                            "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
-       if (result)
-               goto error;
-       return 0;
-
-/* Need to convert from VMCALL error codes to Linux */
-error:
-       switch (result) {
-       case VMCALL_RESULT_INVALID_PARAM:
-               return -EINVAL;
-       case VMCALL_RESULT_DATA_UNAVAILABLE:
-               return -ENODEV;
-       default:
-               return -EFAULT;
-       }
-}
-
-static int controlvm_channel_create(struct visorchipset_device *dev)
-{
-       struct visorchannel *chan;
-       u64 addr;
-       int err;
-
-       err = unisys_vmcall(VMCALL_CONTROLVM_ADDR,
-                           virt_to_phys(&dev->controlvm_params));
-       if (err)
-               return err;
-       addr = dev->controlvm_params.address;
-       chan = visorchannel_create(addr, GFP_KERNEL,
-                                  &visor_controlvm_channel_guid, true);
-       if (!chan)
-               return -ENOMEM;
-       dev->controlvm_channel = chan;
-       return 0;
-}
-
-static void setup_crash_devices_work_queue(struct work_struct *work)
-{
-       struct controlvm_message local_crash_bus_msg;
-       struct controlvm_message local_crash_dev_msg;
-       struct controlvm_message msg = {
-               .hdr.id = CONTROLVM_CHIPSET_INIT,
-               .cmd.init_chipset = {
-                       .bus_count = 23,
-                       .switch_count = 0,
-               },
-       };
-       u32 local_crash_msg_offset;
-       u16 local_crash_msg_count;
-
-       /* send init chipset msg */
-       chipset_init(&msg);
-       /* get saved message count */
-       if (visorchannel_read(chipset_dev->controlvm_channel,
-                             offsetof(struct visor_controlvm_channel,
-                                      saved_crash_message_count),
-                             &local_crash_msg_count, sizeof(u16)) < 0) {
-               dev_err(&chipset_dev->acpi_device->dev,
-                       "failed to read channel\n");
-               return;
-       }
-       if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
-               dev_err(&chipset_dev->acpi_device->dev, "invalid count\n");
-               return;
-       }
-       /* get saved crash message offset */
-       if (visorchannel_read(chipset_dev->controlvm_channel,
-                             offsetof(struct visor_controlvm_channel,
-                                      saved_crash_message_offset),
-                             &local_crash_msg_offset, sizeof(u32)) < 0) {
-               dev_err(&chipset_dev->acpi_device->dev,
-                       "failed to read channel\n");
-               return;
-       }
-       /* read create device message for storage bus offset */
-       if (visorchannel_read(chipset_dev->controlvm_channel,
-                             local_crash_msg_offset,
-                             &local_crash_bus_msg,
-                             sizeof(struct controlvm_message)) < 0) {
-               dev_err(&chipset_dev->acpi_device->dev,
-                       "failed to read channel\n");
-               return;
-       }
-       /* read create device message for storage device */
-       if (visorchannel_read(chipset_dev->controlvm_channel,
-                             local_crash_msg_offset +
-                             sizeof(struct controlvm_message),
-                             &local_crash_dev_msg,
-                             sizeof(struct controlvm_message)) < 0) {
-               dev_err(&chipset_dev->acpi_device->dev,
-                       "failed to read channel\n");
-               return;
-       }
-       /* reuse IOVM create bus message */
-       if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
-               dev_err(&chipset_dev->acpi_device->dev,
-                       "no valid create_bus message\n");
-               return;
-       }
-       visorbus_create(&local_crash_bus_msg);
-       /* reuse create device message for storage device */
-       if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
-               dev_err(&chipset_dev->acpi_device->dev,
-                       "no valid create_device message\n");
-               return;
-       }
-       visorbus_device_create(&local_crash_dev_msg);
-}
-
-void visorbus_response(struct visor_device *bus_info, int response,
-                      int controlvm_id)
-{
-       if (!bus_info->pending_msg_hdr)
-               return;
-
-       controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
-       kfree(bus_info->pending_msg_hdr);
-       bus_info->pending_msg_hdr = NULL;
-}
-
-void visorbus_device_changestate_response(struct visor_device *dev_info,
-                                         int response,
-                                         struct visor_segment_state state)
-{
-       if (!dev_info->pending_msg_hdr)
-               return;
-
-       device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, dev_info,
-                                    response, state);
-       kfree(dev_info->pending_msg_hdr);
-       dev_info->pending_msg_hdr = NULL;
-}
-
-static void parser_done(struct parser_context *ctx)
-{
-       chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
-       kfree(ctx);
-}
-
-static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
-                                                bool *retry)
-{
-       unsigned long allocbytes;
-       struct parser_context *ctx;
-       void *mapping;
-
-       *retry = false;
-       /* alloc an extra byte to ensure payload is \0 terminated */
-       allocbytes = (unsigned long)bytes + 1 + (sizeof(struct parser_context) -
-                    sizeof(struct visor_controlvm_parameters_header));
-       if ((chipset_dev->controlvm_payload_bytes_buffered + bytes) >
-            MAX_CONTROLVM_PAYLOAD_BYTES) {
-               *retry = true;
-               return NULL;
-       }
-       ctx = kzalloc(allocbytes, GFP_KERNEL);
-       if (!ctx) {
-               *retry = true;
-               return NULL;
-       }
-       ctx->allocbytes = allocbytes;
-       ctx->param_bytes = bytes;
-       mapping = memremap(addr, bytes, MEMREMAP_WB);
-       if (!mapping)
-               goto err_finish_ctx;
-       memcpy(&ctx->data, mapping, bytes);
-       memunmap(mapping);
-       ctx->byte_stream = true;
-       chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
-       return ctx;
-
-err_finish_ctx:
-       kfree(ctx);
-       return NULL;
-}
-
-/*
- * handle_command() - process a controlvm message
- * @inmsg:        the message to process
- * @channel_addr: address of the controlvm channel
- *
- * Return:
- *     0       - Successfully processed the message
- *     -EAGAIN - ControlVM message was not processed and should be retried
- *               before reading the next controlvm message; a scenario where
- *               this can occur is when we need to throttle the allocation of memory in
- *               which to copy out controlvm payload data.
- *     < 0     - error: ControlVM message was processed but an error occurred.
- */
-static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
-{
-       struct controlvm_message_packet *cmd = &inmsg.cmd;
-       u64 parm_addr;
-       u32 parm_bytes;
-       struct parser_context *parser_ctx = NULL;
-       struct controlvm_message ackmsg;
-       int err = 0;
-
-       /* create parsing context if necessary */
-       parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
-       parm_bytes = inmsg.hdr.payload_bytes;
-       /*
-        * Parameter and channel addresses within test messages actually lie
-        * within our OS-controlled memory. We need to know that, because it
-        * makes a difference in how we compute the virtual address.
-        */
-       if (parm_bytes) {
-               bool retry;
-
-               parser_ctx = parser_init_stream(parm_addr, parm_bytes, &retry);
-               if (!parser_ctx && retry)
-                       return -EAGAIN;
-       }
-       controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS);
-       err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
-                                       CONTROLVM_QUEUE_ACK, &ackmsg);
-       if (err)
-               return err;
-       switch (inmsg.hdr.id) {
-       case CONTROLVM_CHIPSET_INIT:
-               err = chipset_init(&inmsg);
-               break;
-       case CONTROLVM_BUS_CREATE:
-               err = visorbus_create(&inmsg);
-               break;
-       case CONTROLVM_BUS_DESTROY:
-               err = visorbus_destroy(&inmsg);
-               break;
-       case CONTROLVM_BUS_CONFIGURE:
-               err = visorbus_configure(&inmsg, parser_ctx);
-               break;
-       case CONTROLVM_DEVICE_CREATE:
-               err = visorbus_device_create(&inmsg);
-               break;
-       case CONTROLVM_DEVICE_CHANGESTATE:
-               if (cmd->device_change_state.flags.phys_device) {
-                       err = parahotplug_process_message(&inmsg);
-               } else {
-                       /*
-                        * save the hdr and cmd structures for later use when
-                        * sending back the response to Command
-                        */
-                       err = visorbus_device_changestate(&inmsg);
-                       break;
-               }
-               break;
-       case CONTROLVM_DEVICE_DESTROY:
-               err = visorbus_device_destroy(&inmsg);
-               break;
-       case CONTROLVM_DEVICE_CONFIGURE:
-               /* no op; just respond with success */
-               if (inmsg.hdr.flags.response_expected)
-                       controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
-                                         NULL);
-               break;
-       case CONTROLVM_CHIPSET_READY:
-               err = chipset_ready_uevent(&inmsg.hdr);
-               break;
-       case CONTROLVM_CHIPSET_SELFTEST:
-               err = chipset_selftest_uevent(&inmsg.hdr);
-               break;
-       case CONTROLVM_CHIPSET_STOP:
-               err = chipset_notready_uevent(&inmsg.hdr);
-               break;
-       default:
-               err = -ENOMSG;
-               if (inmsg.hdr.flags.response_expected)
-                       controlvm_respond(&inmsg.hdr,
-                                         -CONTROLVM_RESP_ID_UNKNOWN, NULL);
-               break;
-       }
-       if (parser_ctx) {
-               parser_done(parser_ctx);
-               parser_ctx = NULL;
-       }
-       return err;
-}
-
-/*
- * read_controlvm_event() - retrieves the next message from the
- *                          CONTROLVM_QUEUE_EVENT queue in the controlvm
- *                          channel
- * @msg: pointer to the retrieved message
- *
- * Return: 0 if valid message was retrieved or -error
- */
-static int read_controlvm_event(struct controlvm_message *msg)
-{
-       int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
-                                           CONTROLVM_QUEUE_EVENT, msg);
-
-       if (err)
-               return err;
-       /* got a message */
-       if (msg->hdr.flags.test_message == 1)
-               return -EINVAL;
-       return 0;
-}
-
-/*
- * parahotplug_process_list() - remove any request that has been on the list
- *                              too long and respond with an error
- */
-static void parahotplug_process_list(void)
-{
-       struct list_head *pos;
-       struct list_head *tmp;
-
-       spin_lock(&parahotplug_request_list_lock);
-       list_for_each_safe(pos, tmp, &parahotplug_request_list) {
-               struct parahotplug_request *req =
-                   list_entry(pos, struct parahotplug_request, list);
-
-               if (!time_after_eq(jiffies, req->expiration))
-                       continue;
-               list_del(pos);
-               if (req->msg.hdr.flags.response_expected)
-                       controlvm_respond(
-                               &req->msg.hdr,
-                               CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
-                               &req->msg.cmd.device_change_state.state);
-               parahotplug_request_destroy(req);
-       }
-       spin_unlock(&parahotplug_request_list_lock);
-}
-
-static void controlvm_periodic_work(struct work_struct *work)
-{
-       struct controlvm_message inmsg;
-       int count = 0;
-       int err;
-
-       /* Drain the RESPONSE queue to make it empty */
-       do {
-               err = visorchannel_signalremove(chipset_dev->controlvm_channel,
-                                               CONTROLVM_QUEUE_RESPONSE,
-                                               &inmsg);
-       } while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));
-       if (err != -EAGAIN)
-               goto schedule_out;
-       if (chipset_dev->controlvm_pending_msg_valid) {
-               /*
-                * we throttled processing of a prior msg, so try to process
-                * it again rather than reading a new one
-                */
-               inmsg = chipset_dev->controlvm_pending_msg;
-               chipset_dev->controlvm_pending_msg_valid = false;
-               err = 0;
-       } else {
-               err = read_controlvm_event(&inmsg);
-       }
-       while (!err) {
-               chipset_dev->most_recent_message_jiffies = jiffies;
-               err = handle_command(inmsg,
-                                    visorchannel_get_physaddr
-                                    (chipset_dev->controlvm_channel));
-               if (err == -EAGAIN) {
-                       chipset_dev->controlvm_pending_msg = inmsg;
-                       chipset_dev->controlvm_pending_msg_valid = true;
-                       break;
-               }
-
-               err = read_controlvm_event(&inmsg);
-       }
-       /* parahotplug_worker */
-       parahotplug_process_list();
-
-/*
- * The controlvm messages are sent in bulk. If we start receiving messages, we
- * want the polling to be fast. If we do not receive any message for
- * MIN_IDLE_SECONDS, we can slow down the polling.
- */
-schedule_out:
-       if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
-                               (HZ * MIN_IDLE_SECONDS))) {
-               /*
-                * it's been longer than MIN_IDLE_SECONDS since we processed
-                * our last controlvm message; slow down the polling
-                */
-               if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_SLOW)
-                       chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_SLOW;
-       } else {
-               if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_FAST)
-                       chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
-       }
-       schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
-                             chipset_dev->poll_jiffies);
-}
-
-static int visorchipset_init(struct acpi_device *acpi_device)
-{
-       int err = -ENOMEM;
-       struct visorchannel *controlvm_channel;
-
-       chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
-       if (!chipset_dev)
-               goto error;
-       err = controlvm_channel_create(chipset_dev);
-       if (err)
-               goto error_free_chipset_dev;
-       acpi_device->driver_data = chipset_dev;
-       chipset_dev->acpi_device = acpi_device;
-       chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
-       err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
-                                 visorchipset_dev_groups);
-       if (err < 0)
-               goto error_destroy_channel;
-       controlvm_channel = chipset_dev->controlvm_channel;
-       if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
-                                &chipset_dev->acpi_device->dev,
-                                &visor_controlvm_channel_guid,
-                                "controlvm",
-                                sizeof(struct visor_controlvm_channel),
-                                VISOR_CONTROLVM_CHANNEL_VERSIONID,
-                                VISOR_CHANNEL_SIGNATURE)) {
-               err = -ENODEV;
-               goto error_delete_groups;
-       }
-       /* if booting in a crash kernel */
-       if (is_kdump_kernel())
-               INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
-                                 setup_crash_devices_work_queue);
-       else
-               INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
-                                 controlvm_periodic_work);
-       chipset_dev->most_recent_message_jiffies = jiffies;
-       chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
-       schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
-                             chipset_dev->poll_jiffies);
-       err = visorbus_init();
-       if (err < 0)
-               goto error_cancel_work;
-       return 0;
-
-error_cancel_work:
-       cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
-
-error_delete_groups:
-       sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
-                           visorchipset_dev_groups);
-
-error_destroy_channel:
-       visorchannel_destroy(chipset_dev->controlvm_channel);
-
-error_free_chipset_dev:
-       kfree(chipset_dev);
-
-error:
-       dev_err(&acpi_device->dev, "failed with error %d\n", err);
-       return err;
-}
-
-static int visorchipset_exit(struct acpi_device *acpi_device)
-{
-       visorbus_exit();
-       cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
-       sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
-                           visorchipset_dev_groups);
-       visorchannel_destroy(chipset_dev->controlvm_channel);
-       kfree(chipset_dev);
-       return 0;
-}
-
-static const struct acpi_device_id unisys_device_ids[] = {
-       {"PNP0A07", 0},
-       {"", 0},
-};
-
-static struct acpi_driver unisys_acpi_driver = {
-       .name = "unisys_acpi",
-       .class = "unisys_acpi_class",
-       .owner = THIS_MODULE,
-       .ids = unisys_device_ids,
-       .ops = {
-               .add = visorchipset_init,
-               .remove = visorchipset_exit,
-       },
-};
-
-MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
-
-static __init int visorutil_spar_detect(void)
-{
-       unsigned int eax, ebx, ecx, edx;
-
-       if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
-               /* check the ID */
-               cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
-               return  (ebx == UNISYS_VISOR_ID_EBX) &&
-                       (ecx == UNISYS_VISOR_ID_ECX) &&
-                       (edx == UNISYS_VISOR_ID_EDX);
-       }
-       return 0;
-}
-
-static int __init init_unisys(void)
-{
-       int result;
-
-       if (!visorutil_spar_detect())
-               return -ENODEV;
-       result = acpi_bus_register_driver(&unisys_acpi_driver);
-       if (result)
-               return -ENODEV;
-       pr_info("Unisys Visorchipset Driver Loaded.\n");
-       return 0;
-}
-
-static void __exit exit_unisys(void)
-{
-       acpi_bus_unregister_driver(&unisys_acpi_driver);
-}
-
-module_init(init_unisys);
-module_exit(exit_unisys);
-
-MODULE_AUTHOR("Unisys");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");
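
The parahotplug flow removed above hinges on a small userspace handshake: the DEVICE_CHANGESTATE uevent carries VISOR_PARAHOTPLUG_ID (plus bus/device/function, with the PCI devfn split as dev_no >> 3 and dev_no & 0x7), the udev-run script disables or enables the SR-IOV device, and for disables the parked CONTROLVM message is only answered once that ID is written back to parahotplug/devicedisabled (enables are acknowledged immediately in parahotplug_process_message). A minimal sketch of the disable acknowledgement from userspace, assuming the ID arrives in the VISOR_PARAHOTPLUG_ID environment variable; the sysfs path comes from the flow comment above, and the program itself is illustrative rather than part of this series:

/* Illustrative only: acknowledge a parahotplug disable from userspace. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *id = getenv("VISOR_PARAHOTPLUG_ID");
	const char *path =
		"/sys/devices/platform/visorchipset/parahotplug/devicedisabled";
	FILE *f;

	if (!id)
		return 1;
	f = fopen(path, "w");
	if (!f)
		return 1;
	/* devicedisabled_store() matches this ID against the pending request
	 * list and responds to the parked CONTROLVM message. */
	fprintf(f, "%s\n", id);
	return fclose(f) ? 1 : 0;
}
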
index 936392c..c13dd9d 100644
@@ -15,6 +15,4 @@ source "drivers/vme/bridges/Kconfig"
 
 source "drivers/vme/boards/Kconfig"
 
-source "drivers/staging/vme/devices/Kconfig"
-
 endif # VME
index f6664fc..0eb560f 100644
@@ -172,8 +172,9 @@ static int ds_send_control_cmd(struct ds_device *dev, u16 value, u16 index)
        err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
                        CONTROL_CMD, VENDOR, value, index, NULL, 0, 1000);
        if (err < 0) {
-               pr_err("Failed to send command control message %x.%x: err=%d.\n",
-                               value, index, err);
+               dev_err(&dev->udev->dev,
+                       "Failed to send command control message %x.%x: err=%d.\n",
+                       value, index, err);
                return err;
        }
 
@@ -187,8 +188,9 @@ static int ds_send_control_mode(struct ds_device *dev, u16 value, u16 index)
        err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
                        MODE_CMD, VENDOR, value, index, NULL, 0, 1000);
        if (err < 0) {
-               pr_err("Failed to send mode control message %x.%x: err=%d.\n",
-                               value, index, err);
+               dev_err(&dev->udev->dev,
+                       "Failed to send mode control message %x.%x: err=%d.\n",
+                       value, index, err);
                return err;
        }
 
@@ -202,72 +204,68 @@ static int ds_send_control(struct ds_device *dev, u16 value, u16 index)
        err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
                        COMM_CMD, VENDOR, value, index, NULL, 0, 1000);
        if (err < 0) {
-               pr_err("Failed to send control message %x.%x: err=%d.\n",
-                               value, index, err);
+               dev_err(&dev->udev->dev,
+                       "Failed to send control message %x.%x: err=%d.\n",
+                       value, index, err);
                return err;
        }
 
        return err;
 }
 
-static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off)
-{
-       pr_info("%45s: %8x\n", str, buf[off]);
-}
-
-static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count)
+static void ds_dump_status(struct ds_device *ds_dev, unsigned char *buf, int count)
 {
+       struct device *dev = &ds_dev->udev->dev;
        int i;
 
-       dev_info(&dev->udev->dev, "ep_status=0x%x, count=%d, status=%*phC",
-               dev->ep[EP_STATUS], count, count, buf);
+       dev_info(dev, "ep_status=0x%x, count=%d, status=%*phC",
+               ds_dev->ep[EP_STATUS], count, count, buf);
 
        if (count >= 16) {
-               ds_print_msg(buf, "enable flag", 0);
-               ds_print_msg(buf, "1-wire speed", 1);
-               ds_print_msg(buf, "strong pullup duration", 2);
-               ds_print_msg(buf, "programming pulse duration", 3);
-               ds_print_msg(buf, "pulldown slew rate control", 4);
-               ds_print_msg(buf, "write-1 low time", 5);
-               ds_print_msg(buf, "data sample offset/write-0 recovery time",
-                       6);
-               ds_print_msg(buf, "reserved (test register)", 7);
-               ds_print_msg(buf, "device status flags", 8);
-               ds_print_msg(buf, "communication command byte 1", 9);
-               ds_print_msg(buf, "communication command byte 2", 10);
-               ds_print_msg(buf, "communication command buffer status", 11);
-               ds_print_msg(buf, "1-wire data output buffer status", 12);
-               ds_print_msg(buf, "1-wire data input buffer status", 13);
-               ds_print_msg(buf, "reserved", 14);
-               ds_print_msg(buf, "reserved", 15);
+               dev_dbg(dev, "enable flag: 0x%02x", buf[0]);
+               dev_dbg(dev, "1-wire speed: 0x%02x", buf[1]);
+               dev_dbg(dev, "strong pullup duration: 0x%02x", buf[2]);
+               dev_dbg(dev, "programming pulse duration: 0x%02x", buf[3]);
+               dev_dbg(dev, "pulldown slew rate control: 0x%02x", buf[4]);
+               dev_dbg(dev, "write-1 low time: 0x%02x", buf[5]);
+               dev_dbg(dev, "data sample offset/write-0 recovery time: 0x%02x", buf[6]);
+               dev_dbg(dev, "reserved (test register): 0x%02x", buf[7]);
+               dev_dbg(dev, "device status flags: 0x%02x", buf[8]);
+               dev_dbg(dev, "communication command byte 1: 0x%02x", buf[9]);
+               dev_dbg(dev, "communication command byte 2: 0x%02x", buf[10]);
+               dev_dbg(dev, "communication command buffer status: 0x%02x", buf[11]);
+               dev_dbg(dev, "1-wire data output buffer status: 0x%02x", buf[12]);
+               dev_dbg(dev, "1-wire data input buffer status: 0x%02x", buf[13]);
+               dev_dbg(dev, "reserved: 0x%02x", buf[14]);
+               dev_dbg(dev, "reserved: 0x%02x", buf[15]);
        }
+
        for (i = 16; i < count; ++i) {
                if (buf[i] == RR_DETECT) {
-                       ds_print_msg(buf, "new device detect", i);
+                       dev_dbg(dev, "New device detect.\n");
                        continue;
                }
-               ds_print_msg(buf, "Result Register Value: ", i);
+               dev_dbg(dev, "Result Register Value: 0x%02x", buf[i]);
                if (buf[i] & RR_NRS)
-                       pr_info("NRS: Reset no presence or ...\n");
+                       dev_dbg(dev, "NRS: Reset no presence or ...\n");
                if (buf[i] & RR_SH)
-                       pr_info("SH: short on reset or set path\n");
+                       dev_dbg(dev, "SH: short on reset or set path\n");
                if (buf[i] & RR_APP)
-                       pr_info("APP: alarming presence on reset\n");
+                       dev_dbg(dev, "APP: alarming presence on reset\n");
                if (buf[i] & RR_VPP)
-                       pr_info("VPP: 12V expected not seen\n");
+                       dev_dbg(dev, "VPP: 12V expected not seen\n");
                if (buf[i] & RR_CMP)
-                       pr_info("CMP: compare error\n");
+                       dev_dbg(dev, "CMP: compare error\n");
                if (buf[i] & RR_CRC)
-                       pr_info("CRC: CRC error detected\n");
+                       dev_dbg(dev, "CRC: CRC error detected\n");
                if (buf[i] & RR_RDP)
-                       pr_info("RDP: redirected page\n");
+                       dev_dbg(dev, "RDP: redirected page\n");
                if (buf[i] & RR_EOS)
-                       pr_info("EOS: end of search error\n");
+                       dev_dbg(dev, "EOS: end of search error\n");
        }
 }
 
-static int ds_recv_status(struct ds_device *dev, struct ds_status *st,
-                         bool dump)
+static int ds_recv_status(struct ds_device *dev, struct ds_status *st)
 {
        int count, err;
 
@@ -281,14 +279,12 @@ static int ds_recv_status(struct ds_device *dev, struct ds_status *st,
                                dev->st_buf, sizeof(dev->st_buf),
                                &count, 1000);
        if (err < 0) {
-               pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n",
-                      dev->ep[EP_STATUS], err);
+               dev_err(&dev->udev->dev,
+                       "Failed to read 1-wire data from 0x%x: err=%d.\n",
+                       dev->ep[EP_STATUS], err);
                return err;
        }
 
-       if (dump)
-               ds_dump_status(dev, dev->st_buf, count);
-
        if (st && count >= sizeof(*st))
                memcpy(st, dev->st_buf, sizeof(*st));
 
@@ -302,13 +298,15 @@ static void ds_reset_device(struct ds_device *dev)
         * the strong pullup.
         */
        if (ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_SPUE))
-               pr_err("ds_reset_device: Error allowing strong pullup\n");
+               dev_err(&dev->udev->dev,
+                       "%s: Error allowing strong pullup\n", __func__);
        /* Chip strong pullup time was cleared. */
        if (dev->spu_sleep) {
                /* lower 4 bits are 0, see ds_set_pullup */
                u8 del = dev->spu_sleep>>4;
                if (ds_send_control(dev, COMM_SET_DURATION | COMM_IM, del))
-                       pr_err("ds_reset_device: Error setting duration\n");
+                       dev_err(&dev->udev->dev,
+                               "%s: Error setting duration\n", __func__);
        }
 }
 
@@ -329,9 +327,16 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
        err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]),
                                buf, size, &count, 1000);
        if (err < 0) {
+               int recv_len;
+
                dev_info(&dev->udev->dev, "Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
                usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]));
-               ds_recv_status(dev, NULL, true);
+
+               /* status might tell us why endpoint is stuck? */
+               recv_len = ds_recv_status(dev, NULL);
+               if (recv_len >= 0)
+                       ds_dump_status(dev, dev->st_buf, recv_len);
+
                return err;
        }
 
@@ -355,7 +360,7 @@ static int ds_send_data(struct ds_device *dev, unsigned char *buf, int len)
        count = 0;
        err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->ep[EP_DATA_OUT]), buf, len, &count, 1000);
        if (err < 0) {
-               pr_err("Failed to write 1-wire data to ep0x%x: "
+               dev_err(&dev->udev->dev, "Failed to write 1-wire data to ep0x%x: "
                        "err=%d.\n", dev->ep[EP_DATA_OUT], err);
                return err;
        }
@@ -377,7 +382,7 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
                err = ds_send_control(dev, CTL_RESUME_EXE, 0);
                if (err)
                        break;
-               err = ds_recv_status(dev, &st, false);
+               err = ds_recv_status(dev, &st);
                if (err)
                        break;
 
@@ -424,7 +429,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
 
        do {
                st->status = 0;
-               err = ds_recv_status(dev, st, false);
+               err = ds_recv_status(dev, st);
 #if 0
                if (err >= 0) {
                        int i;
@@ -437,7 +442,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
        } while (!(st->status & ST_IDLE) && !(err < 0) && ++count < 100);
 
        if (err >= 16 && st->status & ST_EPOF) {
-               pr_info("Resetting device after ST_EPOF.\n");
+               dev_info(&dev->udev->dev, "Resetting device after ST_EPOF.\n");
                ds_reset_device(dev);
                /* Always dump the device status. */
                count = 101;
@@ -721,7 +726,7 @@ static void ds9490r_search(void *data, struct w1_master *master,
        do {
                schedule_timeout(jtime);
 
-               err = ds_recv_status(dev, &st, false);
+               err = ds_recv_status(dev, &st);
                if (err < 0 || err < sizeof(st))
                        break;
 
@@ -992,10 +997,9 @@ static int ds_probe(struct usb_interface *intf,
        int i, err, alt;
 
        dev = kzalloc(sizeof(struct ds_device), GFP_KERNEL);
-       if (!dev) {
-               pr_info("Failed to allocate new DS9490R structure.\n");
+       if (!dev)
                return -ENOMEM;
-       }
+
        dev->udev = usb_get_dev(udev);
        if (!dev->udev) {
                err = -ENOMEM;
@@ -1025,7 +1029,7 @@ static int ds_probe(struct usb_interface *intf,
 
        iface_desc = intf->cur_altsetting;
        if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
-               pr_info("Num endpoints=%d. It is not DS9490R.\n",
+               dev_err(&dev->udev->dev, "Num endpoints=%d. It is not DS9490R.\n",
                        iface_desc->desc.bNumEndpoints);
                err = -EINVAL;
                goto err_out_clear;
index c4e82a8..32fd376 100644
@@ -883,6 +883,14 @@ config RENESAS_RZAWDT
          This driver adds watchdog support for the integrated watchdogs in the
          Renesas RZ/A SoCs. These watchdogs can be used to reset a system.
 
+config RENESAS_RZN1WDT
+       tristate "Renesas RZ/N1 watchdog"
+       depends on ARCH_RENESAS || COMPILE_TEST
+       select WATCHDOG_CORE
+       help
+         This driver adds watchdog support for the integrated watchdogs in the
+         Renesas RZ/N1 SoCs. These watchdogs can be used to reset a system.
+
 config RENESAS_RZG2LWDT
        tristate "Renesas RZ/G2L WDT Watchdog"
        depends on ARCH_RENESAS || COMPILE_TEST
@@ -1011,6 +1019,17 @@ config APPLE_WATCHDOG
          To compile this driver as a module, choose M here: the
          module will be called apple_wdt.
 
+config SUNPLUS_WATCHDOG
+       tristate "Sunplus watchdog support"
+       depends on ARCH_SUNPLUS || COMPILE_TEST
+       select WATCHDOG_CORE
+       help
+         Say Y here to include support for the watchdog timer
+         in Sunplus SoCs.
+
+         To compile this driver as a module, choose M here: the
+         module will be called sunplus_wdt.
+
 # X86 (i386 + ia64 + x86_64) Architecture
 
 config ACQUIRE_WDT
@@ -1820,6 +1839,17 @@ config RALINK_WDT
        help
          Hardware driver for the Ralink SoC Watchdog Timer.
 
+config GXP_WATCHDOG
+       tristate "HPE GXP watchdog support"
+       depends on ARCH_HPE_GXP
+       select WATCHDOG_CORE
+       help
+         Say Y here to include support for the watchdog timer
+         in HPE GXP SoCs.
+
+         To compile this driver as a module, choose M here.
+         The module will be called gxp-wdt.
+
 config MT7621_WDT
        tristate "Mediatek SoC watchdog"
        select WATCHDOG_CORE
index f7da867..c324e9d 100644
@@ -84,6 +84,7 @@ obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o
 obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o
 obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o
 obj-$(CONFIG_RENESAS_RZAWDT) += rza_wdt.o
+obj-$(CONFIG_RENESAS_RZN1WDT) += rzn1_wdt.o
 obj-$(CONFIG_RENESAS_RZG2LWDT) += rzg2l_wdt.o
 obj-$(CONFIG_ASPEED_WATCHDOG) += aspeed_wdt.o
 obj-$(CONFIG_STM32_WATCHDOG) += stm32_iwdg.o
@@ -92,9 +93,11 @@ obj-$(CONFIG_RTD119X_WATCHDOG) += rtd119x_wdt.o
 obj-$(CONFIG_SPRD_WATCHDOG) += sprd_wdt.o
 obj-$(CONFIG_PM8916_WATCHDOG) += pm8916_wdt.o
 obj-$(CONFIG_ARM_SMC_WATCHDOG) += arm_smc_wdt.o
+obj-$(CONFIG_GXP_WATCHDOG) += gxp-wdt.o
 obj-$(CONFIG_VISCONTI_WATCHDOG) += visconti_wdt.o
 obj-$(CONFIG_MSC313E_WATCHDOG) += msc313e_wdt.o
 obj-$(CONFIG_APPLE_WATCHDOG) += apple_wdt.o
+obj-$(CONFIG_SUNPLUS_WATCHDOG) += sunplus_wdt.o
 
 # X86 (i386 + ia64 + x86_64) Architecture
 obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
index 8656a13..1ffcf6a 100644
@@ -218,6 +218,7 @@ static SIMPLE_DEV_PM_OPS(bcm7038_wdt_pm_ops, bcm7038_wdt_suspend,
                         bcm7038_wdt_resume);
 
 static const struct of_device_id bcm7038_wdt_match[] = {
+       { .compatible = "brcm,bcm6345-wdt" },
        { .compatible = "brcm,bcm7038-wdt" },
        {},
 };
index 9adad18..09a4af4 100644
@@ -18,6 +18,7 @@
 #include <linux/delay.h>
 #include <linux/mfd/da9063/registers.h>
 #include <linux/mfd/da9063/core.h>
+#include <linux/property.h>
 #include <linux/regmap.h>
 
 /*
@@ -26,6 +27,8 @@
  *   others: timeout = 2048 ms * 2^(TWDSCALE-1).
  */
 static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
+static bool use_sw_pm;
+
 #define DA9063_TWDSCALE_DISABLE                0
 #define DA9063_TWDSCALE_MIN            1
 #define DA9063_TWDSCALE_MAX            (ARRAY_SIZE(wdt_timeout) - 1)
@@ -218,6 +221,8 @@ static int da9063_wdt_probe(struct platform_device *pdev)
        if (!wdd)
                return -ENOMEM;
 
+       use_sw_pm = device_property_present(dev, "dlg,use-sw-pm");
+
        wdd->info = &da9063_watchdog_info;
        wdd->ops = &da9063_watchdog_ops;
        wdd->min_timeout = DA9063_WDT_MIN_TIMEOUT;
@@ -228,6 +233,7 @@ static int da9063_wdt_probe(struct platform_device *pdev)
 
        watchdog_set_restart_priority(wdd, 128);
        watchdog_set_drvdata(wdd, da9063);
+       dev_set_drvdata(dev, wdd);
 
        wdd->timeout = DA9063_WDG_TIMEOUT;
 
@@ -249,10 +255,40 @@ static int da9063_wdt_probe(struct platform_device *pdev)
        return devm_watchdog_register_device(dev, wdd);
 }
 
+static int __maybe_unused da9063_wdt_suspend(struct device *dev)
+{
+       struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+       if (!use_sw_pm)
+               return 0;
+
+       if (watchdog_active(wdd))
+               return da9063_wdt_stop(wdd);
+
+       return 0;
+}
+
+static int __maybe_unused da9063_wdt_resume(struct device *dev)
+{
+       struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+       if (!use_sw_pm)
+               return 0;
+
+       if (watchdog_active(wdd))
+               return da9063_wdt_start(wdd);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(da9063_wdt_pm_ops,
+                       da9063_wdt_suspend, da9063_wdt_resume);
+
 static struct platform_driver da9063_wdt_driver = {
        .probe = da9063_wdt_probe,
        .driver = {
                .name = DA9063_DRVNAME_WATCHDOG,
+               .pm = &da9063_wdt_pm_ops,
        },
 };
 module_platform_driver(da9063_wdt_driver);
diff --git a/drivers/watchdog/gxp-wdt.c b/drivers/watchdog/gxp-wdt.c
new file mode 100644 (file)
index 0000000..b0b2d7a
--- /dev/null
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+#define MASK_WDGCS_ENABLE      0x01
+#define MASK_WDGCS_RELOAD      0x04
+#define MASK_WDGCS_NMIEN       0x08
+#define MASK_WDGCS_WARN                0x80
+
+#define WDT_MAX_TIMEOUT_MS     655350
+#define WDT_DEFAULT_TIMEOUT    30
+#define SECS_TO_WDOG_TICKS(x) ((x) * 100)
+#define WDOG_TICKS_TO_SECS(x) ((x) / 100)
+
+#define GXP_WDT_CNT_OFS                0x10
+#define GXP_WDT_CTRL_OFS       0x16
+
+struct gxp_wdt {
+       void __iomem *base;
+       struct watchdog_device wdd;
+};
+
+static void gxp_wdt_enable_reload(struct gxp_wdt *drvdata)
+{
+       u8 val;
+
+       val = readb(drvdata->base + GXP_WDT_CTRL_OFS);
+       val |= (MASK_WDGCS_ENABLE | MASK_WDGCS_RELOAD);
+       writeb(val, drvdata->base + GXP_WDT_CTRL_OFS);
+}
+
+static int gxp_wdt_start(struct watchdog_device *wdd)
+{
+       struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+
+       writew(SECS_TO_WDOG_TICKS(wdd->timeout), drvdata->base + GXP_WDT_CNT_OFS);
+       gxp_wdt_enable_reload(drvdata);
+       return 0;
+}
+
+static int gxp_wdt_stop(struct watchdog_device *wdd)
+{
+       struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+       u8 val;
+
+       val = readb_relaxed(drvdata->base + GXP_WDT_CTRL_OFS);
+       val &= ~MASK_WDGCS_ENABLE;
+       writeb(val, drvdata->base + GXP_WDT_CTRL_OFS);
+       return 0;
+}
+
+static int gxp_wdt_set_timeout(struct watchdog_device *wdd,
+                              unsigned int timeout)
+{
+       struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+       u32 actual;
+
+       wdd->timeout = timeout;
+       actual = min(timeout * 100, wdd->max_hw_heartbeat_ms / 10);
+       writew(actual, drvdata->base + GXP_WDT_CNT_OFS);
+
+       return 0;
+}
+
+static unsigned int gxp_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+       struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+       u32 val = readw(drvdata->base + GXP_WDT_CNT_OFS);
+
+       return WDOG_TICKS_TO_SECS(val);
+}
+
+static int gxp_wdt_ping(struct watchdog_device *wdd)
+{
+       struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+
+       gxp_wdt_enable_reload(drvdata);
+       return 0;
+}
+
+static int gxp_restart(struct watchdog_device *wdd, unsigned long action,
+                      void *data)
+{
+       struct gxp_wdt *drvdata = watchdog_get_drvdata(wdd);
+
+       writew(1, drvdata->base + GXP_WDT_CNT_OFS);
+       gxp_wdt_enable_reload(drvdata);
+       mdelay(100);
+       return 0;
+}
+
+static const struct watchdog_ops gxp_wdt_ops = {
+       .owner =        THIS_MODULE,
+       .start =        gxp_wdt_start,
+       .stop =         gxp_wdt_stop,
+       .ping =         gxp_wdt_ping,
+       .set_timeout =  gxp_wdt_set_timeout,
+       .get_timeleft = gxp_wdt_get_timeleft,
+       .restart =      gxp_restart,
+};
+
+static const struct watchdog_info gxp_wdt_info = {
+       .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
+       .identity = "HPE GXP Watchdog timer",
+};
+
+static int gxp_wdt_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct gxp_wdt *drvdata;
+       int err;
+       u8 val;
+
+       drvdata = devm_kzalloc(dev, sizeof(struct gxp_wdt), GFP_KERNEL);
+       if (!drvdata)
+               return -ENOMEM;
+
+       /*
+        * The register area where the timer and watchdog reside is interleaved.
+        * Hence, mapping individual register blocks for the timer and watchdog
+        * is not recommended, as each would have access to the other's
+        * registers. Based on feedback, the watchdog is no longer part of the
+        * device tree file and the timer driver now creates the watchdog as a
+        * child device. During the watchdog's creation, the timer driver passes
+        * the base address to the watchdog over the private interface.
+        */
+
+       drvdata->base = (void __iomem *)dev->platform_data;
+
+       drvdata->wdd.info = &gxp_wdt_info;
+       drvdata->wdd.ops = &gxp_wdt_ops;
+       drvdata->wdd.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT_MS;
+       drvdata->wdd.parent = dev;
+       drvdata->wdd.timeout = WDT_DEFAULT_TIMEOUT;
+
+       watchdog_set_drvdata(&drvdata->wdd, drvdata);
+       watchdog_set_nowayout(&drvdata->wdd, WATCHDOG_NOWAYOUT);
+
+       val = readb(drvdata->base + GXP_WDT_CTRL_OFS);
+
+       if (val & MASK_WDGCS_ENABLE)
+               set_bit(WDOG_HW_RUNNING, &drvdata->wdd.status);
+
+       watchdog_set_restart_priority(&drvdata->wdd, 128);
+
+       watchdog_stop_on_reboot(&drvdata->wdd);
+       err = devm_watchdog_register_device(dev, &drvdata->wdd);
+       if (err) {
+               dev_err(dev, "Failed to register watchdog device\n");
+               return err;
+       }
+
+       dev_info(dev, "HPE GXP watchdog timer\n");
+
+       return 0;
+}
+
+static struct platform_driver gxp_wdt_driver = {
+       .probe = gxp_wdt_probe,
+       .driver = {
+               .name = "gxp-wdt",
+       },
+};
+module_platform_driver(gxp_wdt_driver);
+
+MODULE_AUTHOR("Nick Hawkins <nick.hawkins@hpe.com>");
+MODULE_AUTHOR("Jean-Marie Verdun <verdun@hpe.com>");
+MODULE_DESCRIPTION("Driver for GXP watchdog timer");
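A quick illustration of the tick arithmetic used by gxp_wdt_start() and gxp_wdt_set_timeout() above: the counter runs in 10 ms ticks (SECS_TO_WDOG_TICKS multiplies by 100) and is clamped to max_hw_heartbeat_ms / 10 ticks. The following is a standalone userspace sketch of that conversion, not driver code; the constants are copied from the patch.

/* Standalone sketch of the GXP tick conversion (illustrative, not driver code). */
#include <stdio.h>

#define WDT_MAX_TIMEOUT_MS    655350U            /* 0xffff ticks * 10 ms */
#define SECS_TO_WDOG_TICKS(x) ((x) * 100U)       /* 1 tick = 10 ms */

static unsigned int gxp_ticks(unsigned int timeout_s)
{
	unsigned int ticks = SECS_TO_WDOG_TICKS(timeout_s);
	unsigned int max_ticks = WDT_MAX_TIMEOUT_MS / 10U;

	/* Same clamp as gxp_wdt_set_timeout(). */
	return ticks < max_ticks ? ticks : max_ticks;
}

int main(void)
{
	printf("30 s   -> %u ticks\n", gxp_ticks(30));    /* 3000 */
	printf("7000 s -> %u ticks\n", gxp_ticks(7000));  /* clamped to 65535 */
	return 0;
}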
index 3f2f434..34693f1 100644 (file)
@@ -596,7 +596,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
 /*
  * Suspend-to-idle requires this, because it stops the ticks and timekeeping, so
  * the watchdog cannot be pinged while in that state.  In ACPI sleep states the
@@ -604,15 +603,15 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
  */
 
 #ifdef CONFIG_ACPI
-static inline bool need_suspend(void)
+static inline bool __maybe_unused need_suspend(void)
 {
        return acpi_target_system_state() == ACPI_STATE_S0;
 }
 #else
-static inline bool need_suspend(void) { return true; }
+static inline bool __maybe_unused need_suspend(void) { return true; }
 #endif
 
-static int iTCO_wdt_suspend_noirq(struct device *dev)
+static int __maybe_unused iTCO_wdt_suspend_noirq(struct device *dev)
 {
        struct iTCO_wdt_private *p = dev_get_drvdata(dev);
        int ret = 0;
@@ -626,7 +625,7 @@ static int iTCO_wdt_suspend_noirq(struct device *dev)
        return ret;
 }
 
-static int iTCO_wdt_resume_noirq(struct device *dev)
+static int __maybe_unused iTCO_wdt_resume_noirq(struct device *dev)
 {
        struct iTCO_wdt_private *p = dev_get_drvdata(dev);
 
@@ -637,20 +636,15 @@ static int iTCO_wdt_resume_noirq(struct device *dev)
 }
 
 static const struct dev_pm_ops iTCO_wdt_pm = {
-       .suspend_noirq = iTCO_wdt_suspend_noirq,
-       .resume_noirq = iTCO_wdt_resume_noirq,
+       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(iTCO_wdt_suspend_noirq,
+                                     iTCO_wdt_resume_noirq)
 };
 
-#define ITCO_WDT_PM_OPS        (&iTCO_wdt_pm)
-#else
-#define ITCO_WDT_PM_OPS        NULL
-#endif /* CONFIG_PM_SLEEP */
-
 static struct platform_driver iTCO_wdt_driver = {
        .probe          = iTCO_wdt_probe,
        .driver         = {
                .name   = DRV_NAME,
-               .pm     = ITCO_WDT_PM_OPS,
+               .pm     = &iTCO_wdt_pm,
        },
 };
 
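The iTCO hunk above drops the CONFIG_PM_SLEEP #ifdef block in favour of __maybe_unused handlers and SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(), which compiles the callbacks out automatically when sleep support is disabled. Below is a minimal sketch of the same pattern for a hypothetical "foo-wdt" platform driver; all names are made up and it is not the iTCO code.

/* Minimal sketch of the __maybe_unused + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS pattern
 * for a hypothetical "foo-wdt" driver. When CONFIG_PM_SLEEP is off, the macro
 * expands to nothing and __maybe_unused silences unused-function warnings. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static int __maybe_unused foo_suspend_noirq(struct device *dev)
{
	/* stop the hardware watchdog here */
	return 0;
}

static int __maybe_unused foo_resume_noirq(struct device *dev)
{
	/* restart the hardware watchdog here */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
};

static struct platform_driver foo_driver = {
	.probe  = foo_probe,
	.driver = {
		.name = "foo-wdt",
		.pm   = &foo_pm_ops,
	},
};
module_platform_driver(foo_driver);

MODULE_DESCRIPTION("Sketch of the sleep PM ops pattern");
MODULE_LICENSE("GPL");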
index 4577a76..f0d4e3c 100644 (file)
@@ -10,7 +10,9 @@
  */
 
 #include <dt-bindings/reset/mt2712-resets.h>
+#include <dt-bindings/reset/mt7986-resets.h>
 #include <dt-bindings/reset/mt8183-resets.h>
+#include <dt-bindings/reset/mt8186-resets.h>
 #include <dt-bindings/reset/mt8192-resets.h>
 #include <dt-bindings/reset/mt8195-resets.h>
 #include <linux/delay.h>
@@ -76,10 +78,18 @@ static const struct mtk_wdt_data mt2712_data = {
        .toprgu_sw_rst_num = MT2712_TOPRGU_SW_RST_NUM,
 };
 
+static const struct mtk_wdt_data mt7986_data = {
+       .toprgu_sw_rst_num = MT7986_TOPRGU_SW_RST_NUM,
+};
+
 static const struct mtk_wdt_data mt8183_data = {
        .toprgu_sw_rst_num = MT8183_TOPRGU_SW_RST_NUM,
 };
 
+static const struct mtk_wdt_data mt8186_data = {
+       .toprgu_sw_rst_num = MT8186_TOPRGU_SW_RST_NUM,
+};
+
 static const struct mtk_wdt_data mt8192_data = {
        .toprgu_sw_rst_num = MT8192_TOPRGU_SW_RST_NUM,
 };
@@ -418,7 +428,9 @@ static int mtk_wdt_resume(struct device *dev)
 static const struct of_device_id mtk_wdt_dt_ids[] = {
        { .compatible = "mediatek,mt2712-wdt", .data = &mt2712_data },
        { .compatible = "mediatek,mt6589-wdt" },
+       { .compatible = "mediatek,mt7986-wdt", .data = &mt7986_data },
        { .compatible = "mediatek,mt8183-wdt", .data = &mt8183_data },
+       { .compatible = "mediatek,mt8186-wdt", .data = &mt8186_data },
        { .compatible = "mediatek,mt8192-wdt", .data = &mt8192_data },
        { .compatible = "mediatek,mt8195-wdt", .data = &mt8195_data },
        { /* sentinel */ }
index db843f8..053ef3b 100644 (file)
@@ -226,7 +226,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
 
        pm_runtime_enable(dev);
        ret = pm_runtime_get_sync(dev);
-       if (ret) {
+       if (ret < 0) {
                pm_runtime_put_noidle(dev);
                pm_runtime_disable(&pdev->dev);
                return dev_err_probe(dev, ret, "runtime pm failed\n");
@@ -253,6 +253,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
        }
 
        if (readl(wdt->base + RTIDWDCTRL) == WDENABLE_KEY) {
+               int preset_heartbeat;
                u32 time_left_ms;
                u64 heartbeat_ms;
                u32 wsize;
@@ -263,11 +264,12 @@ static int rti_wdt_probe(struct platform_device *pdev)
                heartbeat_ms <<= WDT_PRELOAD_SHIFT;
                heartbeat_ms *= 1000;
                do_div(heartbeat_ms, wdt->freq);
-               if (heartbeat_ms != heartbeat * 1000)
+               preset_heartbeat = heartbeat_ms + 500;
+               preset_heartbeat /= 1000;
+               if (preset_heartbeat != heartbeat)
                        dev_warn(dev, "watchdog already running, ignoring heartbeat config!\n");
 
-               heartbeat = heartbeat_ms;
-               heartbeat /= 1000;
+               heartbeat = preset_heartbeat;
 
                wsize = readl(wdt->base + RTIWWDSIZECTRL);
                ret = rti_wdt_setup_hw_hb(wdd, wsize);
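The rti_wdt change above compares the already-programmed preload against the configured heartbeat only after rounding to the nearest second, instead of comparing raw millisecond values that almost never match exactly. A small userspace illustration of that rounding; the input values are examples, not taken from real hardware.

/* Illustration of the preset_heartbeat rounding in rti_wdt_probe(). */
#include <stdio.h>

static int round_ms_to_s(unsigned long long heartbeat_ms)
{
	return (int)((heartbeat_ms + 500) / 1000);   /* nearest whole second */
}

int main(void)
{
	/* A preload worth 59999 ms used to compare 59999 != 60000 and warn;
	 * rounded, it matches a configured 60 s heartbeat. */
	printf("%d\n", round_ms_to_s(59999ULL));   /* 60 */
	printf("%d\n", round_ms_to_s(60400ULL));   /* 60 */
	printf("%d\n", round_ms_to_s(60501ULL));   /* 61 */
	return 0;
}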
index 6b426df..6eea0ee 100644 (file)
 #define WDTSET         0x04
 #define WDTTIM         0x08
 #define WDTINT         0x0C
+#define PECR           0x10
+#define PEEN           0x14
 #define WDTCNT_WDTEN   BIT(0)
 #define WDTINT_INTDISP BIT(0)
+#define PEEN_FORCE     BIT(0)
 
 #define WDT_DEFAULT_TIMEOUT            60U
 
@@ -43,6 +46,8 @@ struct rzg2l_wdt_priv {
        struct reset_control *rstc;
        unsigned long osc_clk_rate;
        unsigned long delay;
+       struct clk *pclk;
+       struct clk *osc_clk;
 };
 
 static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv)
@@ -53,7 +58,7 @@ static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv)
 
 static u32 rzg2l_wdt_get_cycle_usec(unsigned long cycle, u32 wdttime)
 {
-       u64 timer_cycle_us = 1024 * 1024 * (wdttime + 1) * MICRO;
+       u64 timer_cycle_us = 1024 * 1024ULL * (wdttime + 1) * MICRO;
 
        return div64_ul(timer_cycle_us, cycle);
 }
@@ -86,7 +91,6 @@ static int rzg2l_wdt_start(struct watchdog_device *wdev)
 {
        struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
 
-       reset_control_deassert(priv->rstc);
        pm_runtime_get_sync(wdev->parent);
 
        /* Initialize time out */
@@ -106,7 +110,26 @@ static int rzg2l_wdt_stop(struct watchdog_device *wdev)
        struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
 
        pm_runtime_put(wdev->parent);
-       reset_control_assert(priv->rstc);
+       reset_control_reset(priv->rstc);
+
+       return 0;
+}
+
+static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int timeout)
+{
+       struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+       wdev->timeout = timeout;
+
+       /*
+        * If the watchdog is active, reset the module so that the WDTSET
+        * register can be reprogrammed with the new timeout value.
+        */
+       if (watchdog_active(wdev)) {
+               pm_runtime_put(wdev->parent);
+               reset_control_reset(priv->rstc);
+               rzg2l_wdt_start(wdev);
+       }
 
        return 0;
 }
@@ -116,15 +139,14 @@ static int rzg2l_wdt_restart(struct watchdog_device *wdev,
 {
        struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
 
-       /* Reset the module before we modify any register */
-       reset_control_reset(priv->rstc);
-       pm_runtime_get_sync(wdev->parent);
+       clk_prepare_enable(priv->pclk);
+       clk_prepare_enable(priv->osc_clk);
 
-       /* smallest counter value to reboot soon */
-       rzg2l_wdt_write(priv, WDTSET_COUNTER_VAL(1), WDTSET);
+       /* Generate Reset (WDTRSTB) Signal on parity error */
+       rzg2l_wdt_write(priv, 0, PECR);
 
-       /* Enable watchdog timer*/
-       rzg2l_wdt_write(priv, WDTCNT_WDTEN, WDTCNT);
+       /* Force parity error */
+       rzg2l_wdt_write(priv, PEEN_FORCE, PEEN);
 
        return 0;
 }
@@ -148,15 +170,15 @@ static const struct watchdog_ops rzg2l_wdt_ops = {
        .start = rzg2l_wdt_start,
        .stop = rzg2l_wdt_stop,
        .ping = rzg2l_wdt_ping,
+       .set_timeout = rzg2l_wdt_set_timeout,
        .restart = rzg2l_wdt_restart,
 };
 
-static void rzg2l_wdt_reset_assert_pm_disable_put(void *data)
+static void rzg2l_wdt_reset_assert_pm_disable(void *data)
 {
        struct watchdog_device *wdev = data;
        struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
 
-       pm_runtime_put(wdev->parent);
        pm_runtime_disable(wdev->parent);
        reset_control_assert(priv->rstc);
 }
@@ -166,7 +188,6 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct rzg2l_wdt_priv *priv;
        unsigned long pclk_rate;
-       struct clk *wdt_clk;
        int ret;
 
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -178,22 +199,20 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
                return PTR_ERR(priv->base);
 
        /* Get watchdog main clock */
-       wdt_clk = clk_get(&pdev->dev, "oscclk");
-       if (IS_ERR(wdt_clk))
-               return dev_err_probe(&pdev->dev, PTR_ERR(wdt_clk), "no oscclk");
+       priv->osc_clk = devm_clk_get(&pdev->dev, "oscclk");
+       if (IS_ERR(priv->osc_clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(priv->osc_clk), "no oscclk");
 
-       priv->osc_clk_rate = clk_get_rate(wdt_clk);
-       clk_put(wdt_clk);
+       priv->osc_clk_rate = clk_get_rate(priv->osc_clk);
        if (!priv->osc_clk_rate)
                return dev_err_probe(&pdev->dev, -EINVAL, "oscclk rate is 0");
 
        /* Get Peripheral clock */
-       wdt_clk = clk_get(&pdev->dev, "pclk");
-       if (IS_ERR(wdt_clk))
-               return dev_err_probe(&pdev->dev, PTR_ERR(wdt_clk), "no pclk");
+       priv->pclk = devm_clk_get(&pdev->dev, "pclk");
+       if (IS_ERR(priv->pclk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(priv->pclk), "no pclk");
 
-       pclk_rate = clk_get_rate(wdt_clk);
-       clk_put(wdt_clk);
+       pclk_rate = clk_get_rate(priv->pclk);
        if (!pclk_rate)
                return dev_err_probe(&pdev->dev, -EINVAL, "pclk rate is 0");
 
@@ -204,13 +223,11 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
                return dev_err_probe(&pdev->dev, PTR_ERR(priv->rstc),
                                     "failed to get cpg reset");
 
-       reset_control_deassert(priv->rstc);
+       ret = reset_control_deassert(priv->rstc);
+       if (ret)
+               return dev_err_probe(dev, ret, "failed to deassert");
+
        pm_runtime_enable(&pdev->dev);
-       ret = pm_runtime_resume_and_get(&pdev->dev);
-       if (ret < 0) {
-               dev_err(dev, "pm_runtime_resume_and_get failed ret=%pe", ERR_PTR(ret));
-               goto out_pm_get;
-       }
 
        priv->wdev.info = &rzg2l_wdt_ident;
        priv->wdev.ops = &rzg2l_wdt_ops;
@@ -222,7 +239,7 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
 
        watchdog_set_drvdata(&priv->wdev, priv);
        ret = devm_add_action_or_reset(&pdev->dev,
-                                      rzg2l_wdt_reset_assert_pm_disable_put,
+                                      rzg2l_wdt_reset_assert_pm_disable,
                                       &priv->wdev);
        if (ret < 0)
                return ret;
@@ -235,12 +252,6 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
                dev_warn(dev, "Specified timeout invalid, using default");
 
        return devm_watchdog_register_device(&pdev->dev, &priv->wdev);
-
-out_pm_get:
-       pm_runtime_disable(dev);
-       reset_control_assert(priv->rstc);
-
-       return ret;
 }
 
 static const struct of_device_id rzg2l_wdt_ids[] = {
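The 1024 * 1024ULL change in rzg2l_wdt_get_cycle_usec() above matters because the intermediate product otherwise overflows 32-bit arithmetic: assuming MICRO is 1000000, even wdttime = 0 yields roughly 1.05e12. A standalone sketch of the overflow under that assumption:

/* Why the 1024 * 1024ULL promotion is needed (MICRO assumed to be 1000000). */
#include <stdio.h>
#include <stdint.h>

#define MICRO 1000000UL

int main(void)
{
	unsigned int wdttime = 0;

	/* 32-bit arithmetic: 1048576 * 1000000 wraps around. */
	uint32_t bad = (uint32_t)(1024 * 1024) * (wdttime + 1) * (uint32_t)MICRO;

	/* 64-bit arithmetic, as in the fixed rzg2l_wdt_get_cycle_usec(). */
	uint64_t good = 1024 * 1024ULL * (wdttime + 1) * MICRO;

	printf("32-bit: %u\n", bad);                          /* truncated result */
	printf("64-bit: %llu\n", (unsigned long long)good);   /* 1048576000000 */
	return 0;
}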
diff --git a/drivers/watchdog/rzn1_wdt.c b/drivers/watchdog/rzn1_wdt.c
new file mode 100644 (file)
index 0000000..55ab384
--- /dev/null
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/N1 Watchdog timer.
+ * This is a driver for a 12-bit timer fed by a (62.5/16384) MHz clock, so the
+ * hardware cannot even cover a 2-second timeout on its own.
+ *
+ * Copyright 2018 Renesas Electronics Europe Ltd.
+ *
+ * Derived from Ralink RT288x watchdog timer.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+#define DEFAULT_TIMEOUT                60
+
+#define RZN1_WDT_RETRIGGER                     0x0
+#define RZN1_WDT_RETRIGGER_RELOAD_VAL          0
+#define RZN1_WDT_RETRIGGER_RELOAD_VAL_MASK     0xfff
+#define RZN1_WDT_RETRIGGER_PRESCALE            BIT(12)
+#define RZN1_WDT_RETRIGGER_ENABLE              BIT(13)
+#define RZN1_WDT_RETRIGGER_WDSI                        (0x2 << 14)
+
+#define RZN1_WDT_PRESCALER                     16384
+#define RZN1_WDT_MAX                           4095
+
+struct rzn1_watchdog {
+       struct watchdog_device          wdtdev;
+       void __iomem                    *base;
+       unsigned long                   clk_rate_khz;
+};
+
+static inline uint32_t max_heart_beat_ms(unsigned long clk_rate_khz)
+{
+       return (RZN1_WDT_MAX * RZN1_WDT_PRESCALER) / clk_rate_khz;
+}
+
+static inline uint32_t compute_reload_value(uint32_t tick_ms,
+                                           unsigned long clk_rate_khz)
+{
+       return (tick_ms * clk_rate_khz) / RZN1_WDT_PRESCALER;
+}
+
+static int rzn1_wdt_ping(struct watchdog_device *w)
+{
+       struct rzn1_watchdog *wdt = watchdog_get_drvdata(w);
+
+       /* Any value retriggers the watchdog */
+       writel(0, wdt->base + RZN1_WDT_RETRIGGER);
+
+       return 0;
+}
+
+static int rzn1_wdt_start(struct watchdog_device *w)
+{
+       struct rzn1_watchdog *wdt = watchdog_get_drvdata(w);
+       u32 val;
+
+       /*
+        * The hardware allows you to write to this reg only once.
+        * Since this includes the reload value, there is no way to change the
+        * timeout once started. Also note that the WDT clock is half the bus
+        * fabric clock rate, so if the bus fabric clock rate is changed after
+        * the WDT is started, the WDT interval will be wrong.
+        */
+       val = RZN1_WDT_RETRIGGER_WDSI;
+       val |= RZN1_WDT_RETRIGGER_ENABLE;
+       val |= RZN1_WDT_RETRIGGER_PRESCALE;
+       val |= compute_reload_value(w->max_hw_heartbeat_ms, wdt->clk_rate_khz);
+       writel(val, wdt->base + RZN1_WDT_RETRIGGER);
+
+       return 0;
+}
+
+static irqreturn_t rzn1_wdt_irq(int irq, void *_wdt)
+{
+       pr_crit("RZN1 Watchdog. Initiating system reboot\n");
+       emergency_restart();
+
+       return IRQ_HANDLED;
+}
+
+static struct watchdog_info rzn1_wdt_info = {
+       .identity = "RZ/N1 Watchdog",
+       .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+};
+
+static const struct watchdog_ops rzn1_wdt_ops = {
+       .owner = THIS_MODULE,
+       .start = rzn1_wdt_start,
+       .ping = rzn1_wdt_ping,
+};
+
+static void rzn1_wdt_clk_disable_unprepare(void *data)
+{
+       clk_disable_unprepare(data);
+}
+
+static int rzn1_wdt_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct rzn1_watchdog *wdt;
+       struct device_node *np = dev->of_node;
+       struct clk *clk;
+       unsigned long clk_rate;
+       int ret;
+       int irq;
+
+       wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
+       if (!wdt)
+               return -ENOMEM;
+
+       wdt->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(wdt->base))
+               return PTR_ERR(wdt->base);
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       ret = devm_request_irq(dev, irq, rzn1_wdt_irq, 0,
+                              np->name, wdt);
+       if (ret) {
+               dev_err(dev, "failed to request irq %d\n", irq);
+               return ret;
+       }
+
+       clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(clk)) {
+               dev_err(dev, "failed to get the clock\n");
+               return PTR_ERR(clk);
+       }
+
+       ret = clk_prepare_enable(clk);
+       if (ret) {
+               dev_err(dev, "failed to prepare/enable the clock\n");
+               return ret;
+       }
+
+       ret = devm_add_action_or_reset(dev, rzn1_wdt_clk_disable_unprepare,
+                                      clk);
+       if (ret)
+               return ret;
+
+       clk_rate = clk_get_rate(clk);
+       if (!clk_rate) {
+               dev_err(dev, "failed to get the clock rate\n");
+               return -EINVAL;
+       }
+
+       wdt->clk_rate_khz = clk_rate / 1000;
+       wdt->wdtdev.info = &rzn1_wdt_info,
+       wdt->wdtdev.ops = &rzn1_wdt_ops,
+       wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS,
+       wdt->wdtdev.parent = dev;
+       /*
+        * The watchdog period cannot be changed once set and is limited
+        * to a very short maximum. Configure it for a 1 s period once and
+        * for all, and rely on the heartbeat provided by the watchdog core
+        * to make this usable from user space.
+        */
+       wdt->wdtdev.max_hw_heartbeat_ms = max_heart_beat_ms(wdt->clk_rate_khz);
+       if (wdt->wdtdev.max_hw_heartbeat_ms > 1000)
+               wdt->wdtdev.max_hw_heartbeat_ms = 1000;
+
+       wdt->wdtdev.timeout = DEFAULT_TIMEOUT;
+       ret = watchdog_init_timeout(&wdt->wdtdev, 0, dev);
+       if (ret)
+               return ret;
+
+       watchdog_set_drvdata(&wdt->wdtdev, wdt);
+
+       return devm_watchdog_register_device(dev, &wdt->wdtdev);
+}
+
+
+static const struct of_device_id rzn1_wdt_match[] = {
+       { .compatible = "renesas,rzn1-wdt" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, rzn1_wdt_match);
+
+static struct platform_driver rzn1_wdt_driver = {
+       .probe          = rzn1_wdt_probe,
+       .driver         = {
+               .name           = KBUILD_MODNAME,
+               .of_match_table = rzn1_wdt_match,
+       },
+};
+
+module_platform_driver(rzn1_wdt_driver);
+
+MODULE_DESCRIPTION("Renesas RZ/N1 hardware watchdog");
+MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
+MODULE_LICENSE("GPL");
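To make the RZ/N1 numbers concrete: with the 62.5 MHz clock mentioned in the header comment (clk_rate_khz = 62500), the 12-bit counter and /16384 prescaler limit the hardware heartbeat to roughly one second, which is why the driver caps max_hw_heartbeat_ms at 1000 and relies on the watchdog core to ping on the user's behalf. A standalone check of that arithmetic, assuming that clock rate:

/* Worked numbers for the RZ/N1 heartbeat limit (62.5 MHz clock assumed). */
#include <stdio.h>

#define RZN1_WDT_PRESCALER  16384
#define RZN1_WDT_MAX        4095

int main(void)
{
	unsigned long clk_rate_khz = 62500;   /* 62.5 MHz */

	unsigned long max_ms = (RZN1_WDT_MAX * RZN1_WDT_PRESCALER) / clk_rate_khz;
	unsigned long reload_1s = (1000 * clk_rate_khz) / RZN1_WDT_PRESCALER;

	printf("max hardware heartbeat ~= %lu ms\n", max_ms);   /* ~1073 ms */
	printf("reload value for 1000 ms = %lu\n", reload_1s);  /* 3814, fits in 12 bits */
	return 0;
}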
index 27846c6..2d0a06a 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
+#include <linux/platform_device.h>
 #include <linux/miscdevice.h>
 #include <linux/watchdog.h>
 #include <linux/init.h>
 #include <linux/uaccess.h>
 #include <linux/timex.h>
 
-#ifdef CONFIG_ARCH_PXA
-#include <mach/regs-ost.h>
-#endif
+#define REG_OSMR0      0x0000  /* OS timer Match Reg. 0 */
+#define REG_OSMR1      0x0004  /* OS timer Match Reg. 1 */
+#define REG_OSMR2      0x0008  /* OS timer Match Reg. 2 */
+#define REG_OSMR3      0x000c  /* OS timer Match Reg. 3 */
+#define REG_OSCR       0x0010  /* OS timer Counter Reg. */
+#define REG_OSSR       0x0014  /* OS timer Status Reg. */
+#define REG_OWER       0x0018  /* OS timer Watch-dog Enable Reg. */
+#define REG_OIER       0x001C  /* OS timer Interrupt Enable Reg. */
 
-#include <mach/reset.h>
-#include <mach/hardware.h>
+#define OSSR_M3                (1 << 3)        /* Match status channel 3 */
+#define OSSR_M2                (1 << 2)        /* Match status channel 2 */
+#define OSSR_M1                (1 << 1)        /* Match status channel 1 */
+#define OSSR_M0                (1 << 0)        /* Match status channel 0 */
+
+#define OWER_WME       (1 << 0)        /* Watchdog Match Enable */
+
+#define OIER_E3                (1 << 3)        /* Interrupt enable channel 3 */
+#define OIER_E2                (1 << 2)        /* Interrupt enable channel 2 */
+#define OIER_E1                (1 << 1)        /* Interrupt enable channel 1 */
+#define OIER_E0                (1 << 0)        /* Interrupt enable channel 0 */
 
 static unsigned long oscr_freq;
 static unsigned long sa1100wdt_users;
 static unsigned int pre_margin;
 static int boot_status;
+static void __iomem *reg_base;
+
+static inline void sa1100_wr(u32 val, u32 offset)
+{
+       writel_relaxed(val, reg_base + offset);
+}
+
+static inline u32 sa1100_rd(u32 offset)
+{
+       return readl_relaxed(reg_base + offset);
+}
 
 /*
  *     Allow only one person to hold it open
@@ -51,10 +77,10 @@ static int sa1100dog_open(struct inode *inode, struct file *file)
                return -EBUSY;
 
        /* Activate SA1100 Watchdog timer */
-       writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
-       writel_relaxed(OSSR_M3, OSSR);
-       writel_relaxed(OWER_WME, OWER);
-       writel_relaxed(readl_relaxed(OIER) | OIER_E3, OIER);
+       sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
+       sa1100_wr(OSSR_M3, REG_OSSR);
+       sa1100_wr(OWER_WME, REG_OWER);
+       sa1100_wr(sa1100_rd(REG_OIER) | OIER_E3, REG_OIER);
        return stream_open(inode, file);
 }
 
@@ -62,7 +88,7 @@ static int sa1100dog_open(struct inode *inode, struct file *file)
  * The watchdog cannot be disabled.
  *
  * Previous comments suggested that turning off the interrupt by
- * clearing OIER[E3] would prevent the watchdog timing out but this
+ * clearing REG_OIER[E3] would prevent the watchdog timing out but this
  * does not appear to be true (at least on the PXA255).
  */
 static int sa1100dog_release(struct inode *inode, struct file *file)
@@ -77,7 +103,7 @@ static ssize_t sa1100dog_write(struct file *file, const char __user *data,
 {
        if (len)
                /* Refresh OSMR3 timer. */
-               writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
+               sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
        return len;
 }
 
@@ -111,7 +137,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
                break;
 
        case WDIOC_KEEPALIVE:
-               writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
+               sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
                ret = 0;
                break;
 
@@ -126,7 +152,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
                }
 
                pre_margin = oscr_freq * time;
-               writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
+               sa1100_wr(sa1100_rd(REG_OSCR) + pre_margin, REG_OSMR3);
                fallthrough;
 
        case WDIOC_GETTIMEOUT:
@@ -152,12 +178,22 @@ static struct miscdevice sa1100dog_miscdev = {
        .fops           = &sa1100dog_fops,
 };
 
-static int margin __initdata = 60;             /* (secs) Default is 1 minute */
+static int margin = 60;                /* (secs) Default is 1 minute */
 static struct clk *clk;
 
-static int __init sa1100dog_init(void)
+static int sa1100dog_probe(struct platform_device *pdev)
 {
        int ret;
+       int *platform_data;
+       struct resource *res;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENXIO;
+       reg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+       ret = PTR_ERR_OR_ZERO(reg_base);
+       if (ret)
+               return ret;
 
        clk = clk_get(NULL, "OSTIMER0");
        if (IS_ERR(clk)) {
@@ -175,13 +211,9 @@ static int __init sa1100dog_init(void)
 
        oscr_freq = clk_get_rate(clk);
 
-       /*
-        * Read the reset status, and save it for later.  If
-        * we suspend, RCSR will be cleared, and the watchdog
-        * reset reason will be lost.
-        */
-       boot_status = (reset_status & RESET_STATUS_WATCHDOG) ?
-                               WDIOF_CARDRESET : 0;
+       platform_data = pdev->dev.platform_data;
+       if (platform_data && *platform_data)
+               boot_status = WDIOF_CARDRESET;
        pre_margin = oscr_freq * margin;
 
        ret = misc_register(&sa1100dog_miscdev);
@@ -197,15 +229,21 @@ err:
        return ret;
 }
 
-static void __exit sa1100dog_exit(void)
+static int sa1100dog_remove(struct platform_device *pdev)
 {
        misc_deregister(&sa1100dog_miscdev);
        clk_disable_unprepare(clk);
        clk_put(clk);
+
+       return 0;
 }
 
-module_init(sa1100dog_init);
-module_exit(sa1100dog_exit);
+struct platform_driver sa1100dog_driver = {
+       .driver.name = "sa1100_wdt",
+       .probe    = sa1100dog_probe,
+       .remove   = sa1100dog_remove,
+};
+module_platform_driver(sa1100dog_driver);
 
 MODULE_AUTHOR("Oleg Drokin <green@crimea.edu>");
 MODULE_DESCRIPTION("SA1100/PXA2xx Watchdog");
index dbeb214..f9479a3 100644 (file)
@@ -272,6 +272,7 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
        watchdog_set_nowayout(&wdt->wdd, nowayout);
        watchdog_set_drvdata(&wdt->wdd, wdt);
        watchdog_set_restart_priority(&wdt->wdd, 128);
+       watchdog_stop_on_unregister(&wdt->wdd);
 
        /*
         * If 'timeout-sec' devicetree property is specified, use that.
diff --git a/drivers/watchdog/sunplus_wdt.c b/drivers/watchdog/sunplus_wdt.c
new file mode 100644 (file)
index 0000000..e2d8c53
--- /dev/null
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sunplus Watchdog Driver
+ *
+ * Copyright (C) 2021 Sunplus Technology Co., Ltd.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/watchdog.h>
+
+#define WDT_CTRL               0x00
+#define WDT_CNT                        0x04
+
+#define WDT_STOP               0x3877
+#define WDT_RESUME             0x4A4B
+#define WDT_CLRIRQ             0x7482
+#define WDT_UNLOCK             0xAB00
+#define WDT_LOCK               0xAB01
+#define WDT_CONMAX             0xDEAF
+
+/* TIMEOUT_MAX = 0xffff0 / 90 kHz = 11.65 s, so the longest supported timeout is 11 seconds. */
+#define SP_WDT_MAX_TIMEOUT     11U
+#define SP_WDT_DEFAULT_TIMEOUT 10
+
+#define STC_CLK                        90000
+
+#define DEVICE_NAME            "sunplus-wdt"
+
+static unsigned int timeout;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+                       __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+struct sp_wdt_priv {
+       struct watchdog_device wdev;
+       void __iomem *base;
+       struct clk *clk;
+       struct reset_control *rstc;
+};
+
+static int sp_wdt_restart(struct watchdog_device *wdev,
+                         unsigned long action, void *data)
+{
+       struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
+       void __iomem *base = priv->base;
+
+       writel(WDT_STOP, base + WDT_CTRL);
+       writel(WDT_UNLOCK, base + WDT_CTRL);
+       writel(0x0001, base + WDT_CNT);
+       writel(WDT_LOCK, base + WDT_CTRL);
+       writel(WDT_RESUME, base + WDT_CTRL);
+
+       return 0;
+}
+
+static int sp_wdt_ping(struct watchdog_device *wdev)
+{
+       struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
+       void __iomem *base = priv->base;
+       u32 count;
+
+       if (wdev->timeout > SP_WDT_MAX_TIMEOUT) {
+               /* WDT_CONMAX sets the count to the maximum (down-counting). */
+               writel(WDT_CONMAX, base + WDT_CTRL);
+       } else {
+               writel(WDT_UNLOCK, base + WDT_CTRL);
+               /*
+                * The watchdog timer is a 20-bit down-counter clocked by STC_CLK.
+                * Bits [16:0] of this register map to bits [19:4] of the
+                * watchdog timer counter.
+                */
+               count = (wdev->timeout * STC_CLK) >> 4;
+               writel(count, base + WDT_CNT);
+               writel(WDT_LOCK, base + WDT_CTRL);
+       }
+
+       return 0;
+}
+
+static int sp_wdt_stop(struct watchdog_device *wdev)
+{
+       struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
+       void __iomem *base = priv->base;
+
+       writel(WDT_STOP, base + WDT_CTRL);
+
+       return 0;
+}
+
+static int sp_wdt_start(struct watchdog_device *wdev)
+{
+       struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
+       void __iomem *base = priv->base;
+
+       writel(WDT_RESUME, base + WDT_CTRL);
+
+       return 0;
+}
+
+static unsigned int sp_wdt_get_timeleft(struct watchdog_device *wdev)
+{
+       struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev);
+       void __iomem *base = priv->base;
+       u32 val;
+
+       val = readl(base + WDT_CNT);
+       val &= 0xffff;
+       val = val << 4;
+
+       return val;
+}
+
+static const struct watchdog_info sp_wdt_info = {
+       .identity       = DEVICE_NAME,
+       .options        = WDIOF_SETTIMEOUT |
+                         WDIOF_MAGICCLOSE |
+                         WDIOF_KEEPALIVEPING,
+};
+
+static const struct watchdog_ops sp_wdt_ops = {
+       .owner          = THIS_MODULE,
+       .start          = sp_wdt_start,
+       .stop           = sp_wdt_stop,
+       .ping           = sp_wdt_ping,
+       .get_timeleft   = sp_wdt_get_timeleft,
+       .restart        = sp_wdt_restart,
+};
+
+static void sp_clk_disable_unprepare(void *data)
+{
+       clk_disable_unprepare(data);
+}
+
+static void sp_reset_control_assert(void *data)
+{
+       reset_control_assert(data);
+}
+
+static int sp_wdt_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct sp_wdt_priv *priv;
+       int ret;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(priv->clk))
+               return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to get clock\n");
+
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return dev_err_probe(dev, ret, "Failed to enable clock\n");
+
+       ret = devm_add_action_or_reset(dev, sp_clk_disable_unprepare, priv->clk);
+       if (ret)
+               return ret;
+
+       /* The timer and watchdog share the STC reset */
+       priv->rstc = devm_reset_control_get_shared(dev, NULL);
+       if (IS_ERR(priv->rstc))
+               return dev_err_probe(dev, PTR_ERR(priv->rstc), "Failed to get reset\n");
+
+       reset_control_deassert(priv->rstc);
+
+       ret = devm_add_action_or_reset(dev, sp_reset_control_assert, priv->rstc);
+       if (ret)
+               return ret;
+
+       priv->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(priv->base))
+               return PTR_ERR(priv->base);
+
+       priv->wdev.info = &sp_wdt_info;
+       priv->wdev.ops = &sp_wdt_ops;
+       priv->wdev.timeout = SP_WDT_DEFAULT_TIMEOUT;
+       priv->wdev.max_hw_heartbeat_ms = SP_WDT_MAX_TIMEOUT * 1000;
+       priv->wdev.min_timeout = 1;
+       priv->wdev.parent = dev;
+
+       watchdog_set_drvdata(&priv->wdev, priv);
+       watchdog_init_timeout(&priv->wdev, timeout, dev);
+       watchdog_set_nowayout(&priv->wdev, nowayout);
+       watchdog_stop_on_reboot(&priv->wdev);
+       watchdog_set_restart_priority(&priv->wdev, 128);
+
+       return devm_watchdog_register_device(dev, &priv->wdev);
+}
+
+static const struct of_device_id sp_wdt_of_match[] = {
+       {.compatible = "sunplus,sp7021-wdt", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sp_wdt_of_match);
+
+static struct platform_driver sp_wdt_driver = {
+       .probe = sp_wdt_probe,
+       .driver = {
+                  .name = DEVICE_NAME,
+                  .of_match_table = sp_wdt_of_match,
+       },
+};
+
+module_platform_driver(sp_wdt_driver);
+
+MODULE_AUTHOR("Xiantao Hu <xt.hu@cqplus1.com>");
+MODULE_DESCRIPTION("Sunplus Watchdog Timer Driver");
+MODULE_LICENSE("GPL");
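A quick check of the Sunplus counter arithmetic above: the 20-bit down-counter runs at 90 kHz, the value written to WDT_CNT is the tick count shifted right by 4, and the usable maximum works out to about 11.65 s, which is where SP_WDT_MAX_TIMEOUT = 11 comes from. This is a standalone illustration, not driver code.

/* Worked numbers for the Sunplus 20-bit, 90 kHz watchdog counter. */
#include <stdio.h>

#define STC_CLK 90000U   /* 90 kHz */

int main(void)
{
	unsigned int timeout_s = 10;
	unsigned int wdt_cnt = (timeout_s * STC_CLK) >> 4;   /* value written to WDT_CNT */

	unsigned int max_ticks = 0xffff0U;                   /* counter bits [19:4] */
	unsigned int max_s = max_ticks / STC_CLK;
	unsigned int max_frac = (max_ticks % STC_CLK) * 100U / STC_CLK;

	printf("WDT_CNT for %u s = %u (0x%x)\n", timeout_s, wdt_cnt, wdt_cnt);  /* 56250 */
	printf("max timeout ~= %u.%02u s\n", max_s, max_frac);                  /* 11.65 s */
	return 0;
}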
index c137ad2..0ea554c 100644 (file)
@@ -125,13 +125,16 @@ static int ts4800_wdt_probe(struct platform_device *pdev)
        ret = of_property_read_u32_index(np, "syscon", 1, &reg);
        if (ret < 0) {
                dev_err(dev, "no offset in syscon\n");
+               of_node_put(syscon_np);
                return ret;
        }
 
        /* allocate memory for watchdog struct */
        wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
-       if (!wdt)
+       if (!wdt) {
+               of_node_put(syscon_np);
                return -ENOMEM;
+       }
 
        /* set regmap and offset to know where to write */
        wdt->feed_offset = reg;
index 195c8c0..e6f95e9 100644 (file)
@@ -344,6 +344,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
        wdat->period = tbl->timer_period;
        wdat->wdd.min_hw_heartbeat_ms = wdat->period * tbl->min_count;
        wdat->wdd.max_hw_heartbeat_ms = wdat->period * tbl->max_count;
+       wdat->wdd.min_timeout = 1;
        wdat->stopped_in_sleep = tbl->flags & ACPI_WDAT_STOPPED;
        wdat->wdd.info = &wdat_wdt_info;
        wdat->wdd.ops = &wdat_wdt_ops;
@@ -450,8 +451,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
         * watchdog properly after it has opened the device. In some cases
         * the BIOS default is too short and causes immediate reboot.
         */
-       if (timeout * 1000 < wdat->wdd.min_hw_heartbeat_ms ||
-           timeout * 1000 > wdat->wdd.max_hw_heartbeat_ms) {
+       if (watchdog_timeout_invalid(&wdat->wdd, timeout)) {
                dev_warn(dev, "Invalid timeout %d given, using %d\n",
                         timeout, WDAT_DEFAULT_TIMEOUT);
                timeout = WDAT_DEFAULT_TIMEOUT;
@@ -462,6 +462,8 @@ static int wdat_wdt_probe(struct platform_device *pdev)
                return ret;
 
        watchdog_set_nowayout(&wdat->wdd, nowayout);
+       watchdog_stop_on_reboot(&wdat->wdd);
+       watchdog_stop_on_unregister(&wdat->wdd);
        return devm_watchdog_register_device(dev, &wdat->wdd);
 }
 
index 55acb32..a15729b 100644 (file)
@@ -175,8 +175,6 @@ undo:
 
 static void __del_gref(struct gntalloc_gref *gref)
 {
-       unsigned long addr;
-
        if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
                uint8_t *tmp = kmap_local_page(gref->page);
                tmp[gref->notify.pgoff] = 0;
@@ -190,10 +188,9 @@ static void __del_gref(struct gntalloc_gref *gref)
        gref->notify.flags = 0;
 
        if (gref->gref_id) {
-               if (gref->page) {
-                       addr = (unsigned long)page_to_virt(gref->page);
-                       gnttab_end_foreign_access(gref->gref_id, addr);
-               } else
+               if (gref->page)
+                       gnttab_end_foreign_access(gref->gref_id, gref->page);
+               else
                        gnttab_free_grant_reference(gref->gref_id);
        }
 
index 91073b4..940e5e9 100644 (file)
@@ -524,7 +524,7 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
 
        for (i = 0; i < count; i++)
                if (refs[i] != INVALID_GRANT_REF)
-                       gnttab_end_foreign_access(refs[i], 0UL);
+                       gnttab_end_foreign_access(refs[i], NULL);
 }
 
 static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
index 1a1aec0..7a18292 100644 (file)
@@ -430,13 +430,13 @@ int gnttab_try_end_foreign_access(grant_ref_t ref)
 }
 EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
 
-void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page)
+void gnttab_end_foreign_access(grant_ref_t ref, struct page *page)
 {
        if (gnttab_try_end_foreign_access(ref)) {
-               if (page != 0)
-                       put_page(virt_to_page(page));
+               if (page)
+                       put_page(page);
        } else
-               gnttab_add_deferred(ref, page ? virt_to_page(page) : NULL);
+               gnttab_add_deferred(ref, page);
 }
 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
 
@@ -632,7 +632,7 @@ int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
        if (xen_auto_xlat_grant_frames.count)
                return -EINVAL;
 
-       vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
+       vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
        if (vaddr == NULL) {
                pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
                        &addr);
@@ -640,7 +640,7 @@ int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
        }
        pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
        if (!pfn) {
-               xen_unmap(vaddr);
+               memunmap(vaddr);
                return -ENOMEM;
        }
        for (i = 0; i < max_nr_gframes; i++)
@@ -659,7 +659,7 @@ void gnttab_free_auto_xlat_frames(void)
        if (!xen_auto_xlat_grant_frames.count)
                return;
        kfree(xen_auto_xlat_grant_frames.pfn);
-       xen_unmap(xen_auto_xlat_grant_frames.vaddr);
+       memunmap(xen_auto_xlat_grant_frames.vaddr);
 
        xen_auto_xlat_grant_frames.pfn = NULL;
        xen_auto_xlat_grant_frames.count = 0;
index e254ed1..1826e8e 100644 (file)
@@ -238,8 +238,8 @@ static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
        spin_unlock(&bedata->socket_lock);
 
        for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
-               gnttab_end_foreign_access(map->active.ring->ref[i], 0);
-       gnttab_end_foreign_access(map->active.ref, 0);
+               gnttab_end_foreign_access(map->active.ring->ref[i], NULL);
+       gnttab_end_foreign_access(map->active.ref, NULL);
        free_page((unsigned long)map->active.ring);
 
        kfree(map);
@@ -1117,7 +1117,7 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
                }
        }
        if (bedata->ref != -1)
-               gnttab_end_foreign_access(bedata->ref, 0);
+               gnttab_end_foreign_access(bedata->ref, NULL);
        kfree(bedata->ring.sring);
        kfree(bedata);
        xenbus_switch_state(dev, XenbusStateClosed);
index b643376..bef8d72 100644 (file)
@@ -135,7 +135,7 @@ void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
 
                for (i = 0; i < buf->num_grefs; i++)
                        if (buf->grefs[i] != INVALID_GRANT_REF)
-                               gnttab_end_foreign_access(buf->grefs[i], 0UL);
+                               gnttab_end_foreign_access(buf->grefs[i], NULL);
        }
        kfree(buf->grefs);
        kfree(buf->directory);
index d6fdd2d..d5f3f76 100644 (file)
@@ -439,7 +439,7 @@ void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
 
        for (i = 0; i < nr_pages; i++) {
                if (grefs[i] != INVALID_GRANT_REF) {
-                       gnttab_end_foreign_access(grefs[i], 0);
+                       gnttab_end_foreign_access(grefs[i], NULL);
                        grefs[i] = INVALID_GRANT_REF;
                }
        }
index d367f2b..58b732d 100644 (file)
@@ -752,8 +752,8 @@ static void xenbus_probe(void)
        xenstored_ready = 1;
 
        if (!xen_store_interface) {
-               xen_store_interface = xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
-                                               XEN_PAGE_SIZE);
+               xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+                                              XEN_PAGE_SIZE, MEMREMAP_WB);
                /*
                 * Now it is safe to free the IRQ used for xenstore late
                 * initialization. No need to unbind: it is about to be
@@ -1009,8 +1009,8 @@ static int __init xenbus_init(void)
 #endif
                        xen_store_gfn = (unsigned long)v;
                        xen_store_interface =
-                               xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
-                                         XEN_PAGE_SIZE);
+                               memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+                                        XEN_PAGE_SIZE, MEMREMAP_WB);
                        if (xen_store_interface->connection != XENSTORE_CONNECTED)
                                wait = true;
                }
index 32dff7b..21e1545 100644 (file)
@@ -58,7 +58,7 @@ config ARCH_USE_GNU_PROPERTY
 config BINFMT_ELF_FDPIC
        bool "Kernel support for FDPIC ELF binaries"
        default y if !BINFMT_ELF
-       depends on (ARM || (SUPERH && !MMU))
+       depends on ARM || ((M68K || SUPERH) && !MMU)
        select ELFCORE
        help
          ELF FDPIC binaries are based on ELF, but allow the individual load
index 94aa735..79f6b74 100644 (file)
@@ -463,8 +463,11 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
                }
 
                /* skip if starts before the current position */
-               if (offset < curr)
+               if (offset < curr) {
+                       if (next > curr)
+                               ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
                        continue;
+               }
 
                /* found the next entry */
                if (!dir_emit(ctx, dire->u.name, nlen,
index 7584aa6..e5221be 100644 (file)
@@ -256,6 +256,7 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
        struct iov_iter iter;
        ssize_t err = 0;
        size_t len;
+       int mode;
 
        __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
        __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
@@ -264,7 +265,8 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
                goto out;
 
        /* We need to fetch the inline data. */
-       req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
+       mode = ceph_try_to_choose_auth_mds(inode, CEPH_STAT_CAP_INLINE_DATA);
+       req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
@@ -604,8 +606,10 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
                                    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
                                    ceph_wbc.truncate_seq, ceph_wbc.truncate_size,
                                    true);
-       if (IS_ERR(req))
+       if (IS_ERR(req)) {
+               redirty_page_for_writepage(wbc, page);
                return PTR_ERR(req);
+       }
 
        set_page_writeback(page);
        if (caching)
@@ -1644,7 +1648,7 @@ int ceph_uninline_data(struct file *file)
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
-       struct ceph_osd_request *req;
+       struct ceph_osd_request *req = NULL;
        struct ceph_cap_flush *prealloc_cf;
        struct folio *folio = NULL;
        u64 inline_version = CEPH_INLINE_NONE;
@@ -1652,10 +1656,23 @@ int ceph_uninline_data(struct file *file)
        int err = 0;
        u64 len;
 
+       spin_lock(&ci->i_ceph_lock);
+       inline_version = ci->i_inline_version;
+       spin_unlock(&ci->i_ceph_lock);
+
+       dout("uninline_data %p %llx.%llx inline_version %llu\n",
+            inode, ceph_vinop(inode), inline_version);
+
+       if (inline_version == CEPH_INLINE_NONE)
+               return 0;
+
        prealloc_cf = ceph_alloc_cap_flush();
        if (!prealloc_cf)
                return -ENOMEM;
 
+       if (inline_version == 1) /* initial version, no data */
+               goto out_uninline;
+
        folio = read_mapping_folio(inode->i_mapping, 0, file);
        if (IS_ERR(folio)) {
                err = PTR_ERR(folio);
@@ -1664,17 +1681,6 @@ int ceph_uninline_data(struct file *file)
 
        folio_lock(folio);
 
-       spin_lock(&ci->i_ceph_lock);
-       inline_version = ci->i_inline_version;
-       spin_unlock(&ci->i_ceph_lock);
-
-       dout("uninline_data %p %llx.%llx inline_version %llu\n",
-            inode, ceph_vinop(inode), inline_version);
-
-       if (inline_version == 1 || /* initial version, no data */
-           inline_version == CEPH_INLINE_NONE)
-               goto out_unlock;
-
        len = i_size_read(inode);
        if (len > folio_size(folio))
                len = folio_size(folio);
@@ -1739,6 +1745,7 @@ int ceph_uninline_data(struct file *file)
        ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
                                  req->r_end_latency, len, err);
 
+out_uninline:
        if (!err) {
                int dirty;
 
@@ -1757,8 +1764,10 @@ out_put_req:
        if (err == -ECANCELED)
                err = 0;
 out_unlock:
-       folio_unlock(folio);
-       folio_put(folio);
+       if (folio) {
+               folio_unlock(folio);
+               folio_put(folio);
+       }
 out:
        ceph_free_cap_flush(prealloc_cf);
        dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
@@ -1777,7 +1786,6 @@ int ceph_mmap(struct file *file, struct vm_area_struct *vma)
 
        if (!mapping->a_ops->read_folio)
                return -ENOEXEC;
-       file_accessed(file);
        vma->vm_ops = &ceph_vmops;
        return 0;
 }
index 5c14ef0..bf2e940 100644 (file)
@@ -1577,7 +1577,7 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
 
        while (first_tid <= last_tid) {
                struct ceph_cap *cap = ci->i_auth_cap;
-               struct ceph_cap_flush *cf;
+               struct ceph_cap_flush *cf = NULL, *iter;
                int ret;
 
                if (!(cap && cap->session == session)) {
@@ -1587,8 +1587,9 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
                }
 
                ret = -ENOENT;
-               list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
-                       if (cf->tid >= first_tid) {
+               list_for_each_entry(iter, &ci->i_cap_flush_list, i_list) {
+                       if (iter->tid >= first_tid) {
+                               cf = iter;
                                ret = 0;
                                break;
                        }
@@ -1910,6 +1911,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
        struct rb_node *p;
        bool queue_invalidate = false;
        bool tried_invalidate = false;
+       bool queue_writeback = false;
 
        if (session)
                ceph_get_mds_session(session);
@@ -2062,10 +2064,27 @@ retry:
                }
 
                /* completed revocation? going down and there are no caps? */
-               if (revoking && (revoking & cap_used) == 0) {
-                       dout("completed revocation of %s\n",
-                            ceph_cap_string(cap->implemented & ~cap->issued));
-                       goto ack;
+               if (revoking) {
+                       if ((revoking & cap_used) == 0) {
+                               dout("completed revocation of %s\n",
+                                     ceph_cap_string(cap->implemented & ~cap->issued));
+                               goto ack;
+                       }
+
+                       /*
+                        * If "i_wrbuffer_ref" was increased by mmap or a generic
+                        * cache write just before ceph_check_caps() is called,
+                        * revoking the Fb capability will fail this time. We then
+                        * must wait for the BDI's delayed work to flush the dirty
+                        * pages and release "i_wrbuffer_ref", which can take up to
+                        * 5 seconds. That means the MDS may need to wait up to
+                        * 5 seconds to finish revoking the Fb capability.
+                        *
+                        * Let's queue a writeback for it.
+                        */
+                       if (S_ISREG(inode->i_mode) && ci->i_wrbuffer_ref &&
+                           (revoking & CEPH_CAP_FILE_BUFFER))
+                               queue_writeback = true;
                }
 
                /* want more caps from mds? */
@@ -2135,6 +2154,8 @@ ack:
        spin_unlock(&ci->i_ceph_lock);
 
        ceph_put_mds_session(session);
+       if (queue_writeback)
+               ceph_queue_writeback(inode);
        if (queue_invalidate)
                ceph_queue_invalidate(inode);
 }
@@ -2218,9 +2239,9 @@ static int caps_are_flushed(struct inode *inode, u64 flush_tid)
 }
 
 /*
- * wait for any unsafe requests to complete.
+ * flush the mdlog and wait for any unsafe requests to complete.
  */
-static int unsafe_request_wait(struct inode *inode)
+static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
@@ -2336,7 +2357,7 @@ retry:
                kfree(sessions);
        }
 
-       dout("unsafe_request_wait %p wait on tid %llu %llu\n",
+       dout("%s %p wait on tid %llu %llu\n", __func__,
             inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL);
        if (req1) {
                ret = !wait_for_completion_timeout(&req1->r_safe_completion,
@@ -2380,7 +2401,7 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
        dirty = try_flush_caps(inode, &flush_tid);
        dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
 
-       err = unsafe_request_wait(inode);
+       err = flush_mdlog_and_wait_inode_unsafe_requests(inode);
 
        /*
         * only wait on non-file metadata writeback (the mds
@@ -3182,10 +3203,9 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                                struct ceph_snap_context *snapc)
 {
        struct inode *inode = &ci->vfs_inode;
-       struct ceph_cap_snap *capsnap = NULL;
+       struct ceph_cap_snap *capsnap = NULL, *iter;
        int put = 0;
        bool last = false;
-       bool found = false;
        bool flush_snaps = false;
        bool complete_capsnap = false;
 
@@ -3212,14 +3232,14 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
                     last ? " LAST" : "");
        } else {
-               list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
-                       if (capsnap->context == snapc) {
-                               found = true;
+               list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
+                       if (iter->context == snapc) {
+                               capsnap = iter;
                                break;
                        }
                }
 
-               if (!found) {
+               if (!capsnap) {
                        /*
                         * The capsnap should already be removed when removing
                         * auth cap in the case of a forced unmount.
@@ -3769,8 +3789,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        u64 follows = le64_to_cpu(m->snap_follows);
-       struct ceph_cap_snap *capsnap;
-       bool flushed = false;
+       struct ceph_cap_snap *capsnap = NULL, *iter;
        bool wake_ci = false;
        bool wake_mdsc = false;
 
@@ -3778,26 +3797,26 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
             inode, ci, session->s_mds, follows);
 
        spin_lock(&ci->i_ceph_lock);
-       list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
-               if (capsnap->follows == follows) {
-                       if (capsnap->cap_flush.tid != flush_tid) {
+       list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
+               if (iter->follows == follows) {
+                       if (iter->cap_flush.tid != flush_tid) {
                                dout(" cap_snap %p follows %lld tid %lld !="
-                                    " %lld\n", capsnap, follows,
-                                    flush_tid, capsnap->cap_flush.tid);
+                                    " %lld\n", iter, follows,
+                                    flush_tid, iter->cap_flush.tid);
                                break;
                        }
-                       flushed = true;
+                       capsnap = iter;
                        break;
                } else {
                        dout(" skipping cap_snap %p follows %lld\n",
-                            capsnap, capsnap->follows);
+                            iter, iter->follows);
                }
        }
-       if (flushed)
+       if (capsnap)
                ceph_remove_capsnap(inode, capsnap, &wake_ci, &wake_mdsc);
        spin_unlock(&ci->i_ceph_lock);
 
-       if (flushed) {
+       if (capsnap) {
                ceph_put_snap_context(capsnap->context);
                ceph_put_cap_snap(capsnap);
                if (wake_ci)
index 63113e2..b7e9cac 100644 (file)
@@ -578,7 +578,7 @@ void ceph_evict_inode(struct inode *inode)
 
        __ceph_remove_caps(ci);
 
-       if (__ceph_has_any_quota(ci))
+       if (__ceph_has_quota(ci, QUOTA_GET_ANY))
                ceph_adjust_quota_realms_count(inode, false);
 
        /*
@@ -1466,10 +1466,12 @@ retry_lookup:
                        } else if (have_lease) {
                                if (d_unhashed(dn))
                                        d_add(dn, NULL);
+                       }
+
+                       if (!d_unhashed(dn) && have_lease)
                                update_dentry_lease(dir, dn,
                                                    rinfo->dlease, session,
                                                    req->r_request_started);
-                       }
                        goto done;
                }
 
@@ -1884,7 +1886,6 @@ static void ceph_do_invalidate_pages(struct inode *inode)
        orig_gen = ci->i_rdcache_gen;
        spin_unlock(&ci->i_ceph_lock);
 
-       ceph_fscache_invalidate(inode, false);
        if (invalidate_inode_pages2(inode->i_mapping) < 0) {
                pr_err("invalidate_inode_pages2 %llx.%llx failed\n",
                       ceph_vinop(inode));
@@ -2258,6 +2259,30 @@ int ceph_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
        return err;
 }
 
+int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
+{
+       int issued = ceph_caps_issued(ceph_inode(inode));
+
+       /*
+        * If any 'x' caps are issued we can just choose the auth MDS
+        * instead of a random replica MDS, because only when the Locker
+        * is in the LOCK_EXEC state can the loner client get the 'x'
+        * caps. If we instead send the getattr request to a replica MDS,
+        * it must auth-pin and try to rdlock from the auth MDS, which
+        * then needs to transition the Locker state to LOCK_SYNC; after
+        * that the lock state changes back again.
+        *
+        * These Locker state transitions are expensive and usually
+        * require revoking caps from clients.
+        */
+       if (((mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL))
+           || (mask & CEPH_STAT_RSTAT))
+               return USE_AUTH_MDS;
+       else
+               return USE_ANY_MDS;
+}
+
 /*
  * Verify that we have a lease on the given mask.  If not,
  * do a getattr against an mds.
@@ -2281,7 +2306,7 @@ int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
        if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
                        return 0;
 
-       mode = (mask & CEPH_STAT_RSTAT) ? USE_AUTH_MDS : USE_ANY_MDS;
+       mode = ceph_try_to_choose_auth_mds(inode, mask);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
        if (IS_ERR(req))
                return PTR_ERR(req);
@@ -2423,7 +2448,7 @@ int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
                return -ESTALE;
 
        /* Skip the getattr altogether if we're asked not to sync */
-       if (!(flags & AT_STATX_DONT_SYNC)) {
+       if ((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) {
                err = ceph_do_getattr(inode,
                                statx_to_caps(request_mask, inode->i_mode),
                                flags & AT_STATX_FORCE_SYNC);
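
The last hunk tightens the statx sync check: the getattr is skipped only when the caller explicitly asked for AT_STATX_DONT_SYNC, not merely when that bit happens to be set alongside others. A small userspace illustration, using the flag values defined in include/uapi/linux/fcntl.h:

#include <stdio.h>

#define AT_STATX_FORCE_SYNC  0x2000
#define AT_STATX_DONT_SYNC   0x4000
#define AT_STATX_SYNC_TYPE   0x6000

int main(void)
{
        unsigned int flags = AT_STATX_FORCE_SYNC | AT_STATX_DONT_SYNC;

        /* old check: skips the getattr because the DONT_SYNC bit is set */
        printf("old: sync getattr? %d\n", !(flags & AT_STATX_DONT_SYNC));
        /* new check: only skips when the field is exactly DONT_SYNC */
        printf("new: sync getattr? %d\n",
               (flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC);
        return 0;
}
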
index 00c3de1..f5d110d 100644 (file)
@@ -437,7 +437,7 @@ static int ceph_parse_deleg_inos(void **p, void *end,
        ceph_decode_32_safe(p, end, sets, bad);
        dout("got %u sets of delegated inodes\n", sets);
        while (sets--) {
-               u64 start, len, ino;
+               u64 start, len;
 
                ceph_decode_64_safe(p, end, start, bad);
                ceph_decode_64_safe(p, end, len, bad);
@@ -449,7 +449,7 @@ static int ceph_parse_deleg_inos(void **p, void *end,
                        continue;
                }
                while (len--) {
-                       int err = xa_insert(&s->s_delegated_inos, ino = start++,
+                       int err = xa_insert(&s->s_delegated_inos, start++,
                                            DELEGATED_INO_AVAILABLE,
                                            GFP_KERNEL);
                        if (!err) {
@@ -2651,7 +2651,28 @@ static int __prepare_send_request(struct ceph_mds_session *session,
        struct ceph_mds_client *mdsc = session->s_mdsc;
        struct ceph_mds_request_head_old *rhead;
        struct ceph_msg *msg;
-       int flags = 0;
+       int flags = 0, max_retry;
+
+       /*
+        * The type of 'r_attempts' in the kernel's 'ceph_mds_request'
+        * is 'int', while in 'ceph_mds_request_head' the type of
+        * 'num_retry' is '__u8'. So if a request is retried more than
+        * 256 times, the MDS will receive an incorrect retry seq.
+        *
+        * In that case it's usually a bug in the MDS, and continuing
+        * to retry the request makes no sense.
+        *
+        * In the future this could be fixed on the ceph side, so avoid
+        * hardcoding the limit here.
+        */
+       max_retry = sizeof_field(struct ceph_mds_request_head, num_retry);
+       max_retry = 1 << (max_retry * BITS_PER_BYTE);
+       if (req->r_attempts >= max_retry) {
+               pr_warn_ratelimited("%s request tid %llu seq overflow\n",
+                                   __func__, req->r_tid);
+               return -EMULTIHOP;
+       }
 
        req->r_attempts++;
        if (req->r_inode) {
@@ -2663,7 +2684,7 @@ static int __prepare_send_request(struct ceph_mds_session *session,
                else
                        req->r_sent_on_mseq = -1;
        }
-       dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
+       dout("%s %p tid %lld %s (attempt %d)\n", __func__, req,
             req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
 
        if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
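
The retry ceiling added above is derived from the width of the on-wire field rather than a hardcoded 256. A standalone sketch of the same arithmetic (sizeof_field and BITS_PER_BYTE are reimplemented for userspace, and the struct name is illustrative):

#include <stdio.h>
#include <stdint.h>

struct fake_request_head { uint8_t num_retry; };

#define sizeof_field(t, f)  sizeof(((t *)0)->f)
#define BITS_PER_BYTE       8

int main(void)
{
        int max_retry = sizeof_field(struct fake_request_head, num_retry);

        max_retry = 1 << (max_retry * BITS_PER_BYTE);   /* 1 << 8 == 256 */
        printf("requests abort with -EMULTIHOP once r_attempts >= %d\n",
               max_retry);
        return 0;
}
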
@@ -3265,6 +3286,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
        int err = -EINVAL;
        void *p = msg->front.iov_base;
        void *end = p + msg->front.iov_len;
+       bool aborted = false;
 
        ceph_decode_need(&p, end, 2*sizeof(u32), bad);
        next_mds = ceph_decode_32(&p);
@@ -3273,16 +3295,41 @@ static void handle_forward(struct ceph_mds_client *mdsc,
        mutex_lock(&mdsc->mutex);
        req = lookup_get_request(mdsc, tid);
        if (!req) {
+               mutex_unlock(&mdsc->mutex);
                dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
-               goto out;  /* dup reply? */
+               return;  /* dup reply? */
        }
 
        if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
                dout("forward tid %llu aborted, unregistering\n", tid);
                __unregister_request(mdsc, req);
        } else if (fwd_seq <= req->r_num_fwd) {
-               dout("forward tid %llu to mds%d - old seq %d <= %d\n",
-                    tid, next_mds, req->r_num_fwd, fwd_seq);
+               /*
+                * The type of 'num_fwd' in ceph's 'MClientRequestForward'
+                * is 'int32_t', while in 'ceph_mds_request_head' the
+                * type is '__u8'. So if the request bounces between
+                * MDSes more than 256 times, the client will get stuck.
+                *
+                * In that case it's usually a bug in the MDS, and
+                * continuing to bounce the request makes no sense.
+                *
+                * In the future this could be fixed on the ceph side, so
+                * avoid hardcoding the limit here.
+                */
+               int max = sizeof_field(struct ceph_mds_request_head, num_fwd);
+               max = 1 << (max * BITS_PER_BYTE);
+               if (req->r_num_fwd >= max) {
+                       mutex_lock(&req->r_fill_mutex);
+                       req->r_err = -EMULTIHOP;
+                       set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
+                       mutex_unlock(&req->r_fill_mutex);
+                       aborted = true;
+                       pr_warn_ratelimited("forward tid %llu seq overflow\n",
+                                           tid);
+               } else {
+                       dout("forward tid %llu to mds%d - old seq %d <= %d\n",
+                            tid, next_mds, req->r_num_fwd, fwd_seq);
+               }
        } else {
                /* resend. forward race not possible; mds would drop */
                dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
@@ -3294,9 +3341,12 @@ static void handle_forward(struct ceph_mds_client *mdsc,
                put_request_session(req);
                __do_request(mdsc, req);
        }
-       ceph_mdsc_put_request(req);
-out:
        mutex_unlock(&mdsc->mutex);
+
+       /* kick calling process */
+       if (aborted)
+               complete_request(mdsc, req);
+       ceph_mdsc_put_request(req);
        return;
 
 bad:
@@ -3375,13 +3425,17 @@ static void handle_session(struct ceph_mds_session *session,
        }
 
        if (msg_version >= 5) {
-               u32 flags;
-               /* version >= 4, struct_v, struct_cv, len, metric_spec */
-               ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 2, bad);
+               u32 flags, len;
+
+               /* version >= 4 */
+               ceph_decode_skip_16(&p, end, bad); /* struct_v, struct_cv */
+               ceph_decode_32_safe(&p, end, len, bad); /* len */
+               ceph_decode_skip_n(&p, end, len, bad); /* metric_spec */
+
                /* version >= 5, flags   */
-                ceph_decode_32_safe(&p, end, flags, bad);
+               ceph_decode_32_safe(&p, end, flags, bad);
                if (flags & CEPH_SESSION_BLOCKLISTED) {
-                       pr_warn("mds%d session blocklisted\n", session->s_mds);
+                       pr_warn("mds%d session blocklisted\n", session->s_mds);
                        blocklisted = true;
                }
        }
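
The decode fix above reads the metric_spec length and skips that many bytes before reading the flags word, instead of skipping a fixed-size blob. A userspace sketch of the v>=5 layout being parsed; the sample bytes and the flag bit are made up for illustration:

#include <stdio.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
        return p[0] | (uint32_t)p[1] << 8 | (uint32_t)p[2] << 16 |
               (uint32_t)p[3] << 24;
}

int main(void)
{
        /* struct_v, struct_cv, len=2, two metric_spec bytes, flags=0x1 */
        const uint8_t msg[] = { 1, 1, 2, 0, 0, 0, 0xaa, 0xbb, 1, 0, 0, 0 };
        const uint8_t *p = msg;
        uint32_t len, flags;

        p += 2;                      /* skip struct_v and struct_cv      */
        len = get_le32(p); p += 4;   /* metric_spec length               */
        p += len;                    /* skip the variable-length payload */
        flags = get_le32(p);         /* version >= 5: flags              */
        printf("flags=0x%x\n", flags);
        return 0;
}
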
@@ -4396,12 +4450,6 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
        memcpy((void *)(lease + 1) + 4,
               dentry->d_name.name, dentry->d_name.len);
        spin_unlock(&dentry->d_lock);
-       /*
-        * if this is a preemptive lease RELEASE, no need to
-        * flush request stream, since the actual request will
-        * soon follow.
-        */
-       msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
 
        ceph_con_send(&session->s_con, msg);
 }
@@ -4696,15 +4744,17 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
 }
 
 /*
- * wait for all write mds requests to flush.
+ * flush the mdlog and wait for all write mds requests to flush.
  */
-static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
+static void flush_mdlog_and_wait_mdsc_unsafe_requests(struct ceph_mds_client *mdsc,
+                                                u64 want_tid)
 {
        struct ceph_mds_request *req = NULL, *nextreq;
+       struct ceph_mds_session *last_session = NULL;
        struct rb_node *n;
 
        mutex_lock(&mdsc->mutex);
-       dout("wait_unsafe_requests want %lld\n", want_tid);
+       dout("%s want %lld\n", __func__, want_tid);
 restart:
        req = __get_oldest_req(mdsc);
        while (req && req->r_tid <= want_tid) {
@@ -4716,14 +4766,32 @@ restart:
                        nextreq = NULL;
                if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
                    (req->r_op & CEPH_MDS_OP_WRITE)) {
+                       struct ceph_mds_session *s = req->r_session;
+
+                       if (!s) {
+                               req = nextreq;
+                               continue;
+                       }
+
                        /* write op */
                        ceph_mdsc_get_request(req);
                        if (nextreq)
                                ceph_mdsc_get_request(nextreq);
+                       s = ceph_get_mds_session(s);
                        mutex_unlock(&mdsc->mutex);
-                       dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
+
+                       /* send flush mdlog request to MDS */
+                       if (last_session != s) {
+                               send_flush_mdlog(s);
+                               ceph_put_mds_session(last_session);
+                               last_session = s;
+                       } else {
+                               ceph_put_mds_session(s);
+                       }
+                       dout("%s wait on %llu (want %llu)\n", __func__,
                             req->r_tid, want_tid);
                        wait_for_completion(&req->r_safe_completion);
+
                        mutex_lock(&mdsc->mutex);
                        ceph_mdsc_put_request(req);
                        if (!nextreq)
@@ -4738,7 +4806,8 @@ restart:
                req = nextreq;
        }
        mutex_unlock(&mdsc->mutex);
-       dout("wait_unsafe_requests done\n");
+       ceph_put_mds_session(last_session);
+       dout("%s done\n", __func__);
 }
 
 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
@@ -4767,7 +4836,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
        dout("sync want tid %lld flush_seq %lld\n",
             want_tid, want_flush);
 
-       wait_unsafe_requests(mdsc, want_tid);
+       flush_mdlog_and_wait_mdsc_unsafe_requests(mdsc, want_tid);
        wait_caps_flush(mdsc, want_flush);
 }
 
index 3349784..1140aec 100644 (file)
@@ -579,7 +579,7 @@ static inline int ceph_wait_on_async_create(struct inode *inode)
        struct ceph_inode_info *ci = ceph_inode(inode);
 
        return wait_on_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT,
-                          TASK_INTERRUPTIBLE);
+                          TASK_KILLABLE);
 }
 
 extern u64 ceph_get_deleg_ino(struct ceph_mds_session *session);
index a338a3e..64592ad 100644 (file)
@@ -195,9 +195,9 @@ void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc)
 
 /*
  * This function walks through the snaprealm for an inode and returns the
- * ceph_snap_realm for the first snaprealm that has quotas set (either max_files
- * or max_bytes).  If the root is reached, return the root ceph_snap_realm
- * instead.
+ * ceph_snap_realm for the first snaprealm that has quotas set (max_files,
+ * max_bytes, or any, depending on the 'which_quota' argument).  If the root is
+ * reached, return the root ceph_snap_realm instead.
  *
  * Note that the caller is responsible for calling ceph_put_snap_realm() on the
  * returned realm.
@@ -209,7 +209,9 @@ void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc)
  * will be restarted.
  */
 static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
-                                              struct inode *inode, bool retry)
+                                              struct inode *inode,
+                                              enum quota_get_realm which_quota,
+                                              bool retry)
 {
        struct ceph_inode_info *ci = NULL;
        struct ceph_snap_realm *realm, *next;
@@ -248,7 +250,7 @@ restart:
                }
 
                ci = ceph_inode(in);
-               has_quota = __ceph_has_any_quota(ci);
+               has_quota = __ceph_has_quota(ci, which_quota);
                iput(in);
 
                next = realm->parent;
@@ -279,8 +281,8 @@ restart:
         * dropped and we can then restart the whole operation.
         */
        down_read(&mdsc->snap_rwsem);
-       old_realm = get_quota_realm(mdsc, old, true);
-       new_realm = get_quota_realm(mdsc, new, false);
+       old_realm = get_quota_realm(mdsc, old, QUOTA_GET_ANY, true);
+       new_realm = get_quota_realm(mdsc, new, QUOTA_GET_ANY, false);
        if (PTR_ERR(new_realm) == -EAGAIN) {
                up_read(&mdsc->snap_rwsem);
                if (old_realm)
@@ -483,7 +485,8 @@ bool ceph_quota_update_statfs(struct ceph_fs_client *fsc, struct kstatfs *buf)
        bool is_updated = false;
 
        down_read(&mdsc->snap_rwsem);
-       realm = get_quota_realm(mdsc, d_inode(fsc->sb->s_root), true);
+       realm = get_quota_realm(mdsc, d_inode(fsc->sb->s_root),
+                               QUOTA_GET_MAX_BYTES, true);
        up_read(&mdsc->snap_rwsem);
        if (!realm)
                return false;
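
get_quota_realm() now takes a 'which_quota' argument, so callers such as ceph_quota_update_statfs() can ask specifically for a realm with max_bytes set. A userspace sketch of the walk it performs, mirroring the enum shape added in super.h (struct layout and names here are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

enum quota_get_realm { QUOTA_GET_MAX_FILES, QUOTA_GET_MAX_BYTES, QUOTA_GET_ANY };

struct realm { uint64_t max_bytes, max_files; struct realm *parent; };

static bool has_quota(const struct realm *r, enum quota_get_realm which)
{
        switch (which) {
        case QUOTA_GET_MAX_BYTES: return r->max_bytes;
        case QUOTA_GET_MAX_FILES: return r->max_files;
        default:                  return r->max_bytes || r->max_files;
        }
}

static const struct realm *find_quota_realm(const struct realm *r,
                                            enum quota_get_realm which)
{
        for (; r; r = r->parent)
                if (has_quota(r, which))
                        return r;       /* nearest realm with that quota set */
        return NULL;
}

int main(void)
{
        struct realm root  = { .max_bytes = 1 << 30 };
        struct realm child = { .max_files = 100, .parent = &root };

        printf("max_bytes realm is the root: %d\n",
               find_quota_realm(&child, QUOTA_GET_MAX_BYTES) == &root);
        return 0;
}
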
index e6987d2..b73b4f7 100644 (file)
@@ -1119,6 +1119,7 @@ static int ceph_set_super(struct super_block *s, struct fs_context *fc)
        s->s_time_gran = 1;
        s->s_time_min = 0;
        s->s_time_max = U32_MAX;
+       s->s_flags |= SB_NODIRATIME | SB_NOATIME;
 
        ret = set_anon_super_fc(s, fc);
        if (ret != 0)
index 20ceab7..dd7dac0 100644 (file)
@@ -1022,6 +1022,7 @@ static inline void ceph_queue_flush_snaps(struct inode *inode)
        ceph_queue_inode_work(inode, CEPH_I_WORK_FLUSH_SNAPS);
 }
 
+extern int ceph_try_to_choose_auth_mds(struct inode *inode, int mask);
 extern int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
                             int mask, bool force);
 static inline int ceph_do_getattr(struct inode *inode, int mask, bool force)
@@ -1278,9 +1279,29 @@ extern void ceph_fs_debugfs_init(struct ceph_fs_client *client);
 extern void ceph_fs_debugfs_cleanup(struct ceph_fs_client *client);
 
 /* quota.c */
-static inline bool __ceph_has_any_quota(struct ceph_inode_info *ci)
+
+enum quota_get_realm {
+       QUOTA_GET_MAX_FILES,
+       QUOTA_GET_MAX_BYTES,
+       QUOTA_GET_ANY
+};
+
+static inline bool __ceph_has_quota(struct ceph_inode_info *ci,
+                                   enum quota_get_realm which)
 {
-       return ci->i_max_files || ci->i_max_bytes;
+       bool has_quota = false;
+
+       switch (which) {
+       case QUOTA_GET_MAX_BYTES:
+               has_quota = !!ci->i_max_bytes;
+               break;
+       case QUOTA_GET_MAX_FILES:
+               has_quota = !!ci->i_max_files;
+               break;
+       default:
+               has_quota = !!(ci->i_max_files || ci->i_max_bytes);
+       }
+       return has_quota;
 }
 
 extern void ceph_adjust_quota_realms_count(struct inode *inode, bool inc);
@@ -1289,10 +1310,10 @@ static inline void __ceph_update_quota(struct ceph_inode_info *ci,
                                       u64 max_bytes, u64 max_files)
 {
        bool had_quota, has_quota;
-       had_quota = __ceph_has_any_quota(ci);
+       had_quota = __ceph_has_quota(ci, QUOTA_GET_ANY);
        ci->i_max_bytes = max_bytes;
        ci->i_max_files = max_files;
-       has_quota = __ceph_has_any_quota(ci);
+       has_quota = __ceph_has_quota(ci, QUOTA_GET_ANY);
 
        if (had_quota != has_quota)
                ceph_adjust_quota_realms_count(&ci->vfs_inode, has_quota);
index afec840..8c2dc2c 100644 (file)
@@ -366,6 +366,14 @@ static ssize_t ceph_vxattrcb_auth_mds(struct ceph_inode_info *ci,
        }
 #define XATTR_RSTAT_FIELD(_type, _name)                        \
        XATTR_NAME_CEPH(_type, _name, VXATTR_FLAG_RSTAT)
+#define XATTR_RSTAT_FIELD_UPDATABLE(_type, _name)                      \
+       {                                                               \
+               .name = CEPH_XATTR_NAME(_type, _name),                  \
+               .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)),    \
+               .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name,   \
+               .exists_cb = NULL,                                      \
+               .flags = VXATTR_FLAG_RSTAT,                             \
+       }
 #define XATTR_LAYOUT_FIELD(_type, _name, _field)                       \
        {                                                               \
                .name = CEPH_XATTR_NAME2(_type, _name, _field), \
@@ -404,7 +412,7 @@ static struct ceph_vxattr ceph_dir_vxattrs[] = {
        XATTR_RSTAT_FIELD(dir, rsubdirs),
        XATTR_RSTAT_FIELD(dir, rsnaps),
        XATTR_RSTAT_FIELD(dir, rbytes),
-       XATTR_RSTAT_FIELD(dir, rctime),
+       XATTR_RSTAT_FIELD_UPDATABLE(dir, rctime),
        {
                .name = "ceph.dir.pin",
                .name_size = sizeof("ceph.dir.pin"),
index cc8fdcb..8c9f2c0 100644 (file)
@@ -8,7 +8,7 @@ obj-$(CONFIG_CIFS) += cifs.o
 cifs-y := trace.o cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o \
          inode.o link.o misc.o netmisc.o smbencrypt.o transport.o \
          cifs_unicode.o nterr.o cifsencrypt.o \
-         readdir.o ioctl.o sess.o export.o smb1ops.o unc.o winucase.o \
+         readdir.o ioctl.o sess.o export.o unc.o winucase.o \
          smb2ops.o smb2maperror.o smb2transport.o \
          smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o \
          dns_resolve.o cifs_spnego_negtokeninit.asn1.o asn1.o
@@ -30,3 +30,5 @@ cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o
 cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o
 
 cifs-$(CONFIG_CIFS_ROOT) += cifsroot.o
+
+cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o
index 180c234..1e4c7cc 100644 (file)
@@ -465,7 +465,7 @@ static int cifs_swn_reconnect(struct cifs_tcon *tcon, struct sockaddr_storage *a
        int ret = 0;
 
        /* Store the reconnect address */
-       mutex_lock(&tcon->ses->server->srv_mutex);
+       cifs_server_lock(tcon->ses->server);
        if (cifs_sockaddr_equal(&tcon->ses->server->dstaddr, addr))
                goto unlock;
 
@@ -501,7 +501,7 @@ static int cifs_swn_reconnect(struct cifs_tcon *tcon, struct sockaddr_storage *a
        cifs_signal_cifsd_for_reconnect(tcon->ses->server, false);
 
 unlock:
-       mutex_unlock(&tcon->ses->server->srv_mutex);
+       cifs_server_unlock(tcon->ses->server);
 
        return ret;
 }
index 0912d8b..663cb9d 100644 (file)
@@ -236,9 +236,9 @@ int cifs_verify_signature(struct smb_rqst *rqst,
                                        cpu_to_le32(expected_sequence_number);
        cifs_pdu->Signature.Sequence.Reserved = 0;
 
-       mutex_lock(&server->srv_mutex);
+       cifs_server_lock(server);
        rc = cifs_calc_signature(rqst, server, what_we_think_sig_should_be);
-       mutex_unlock(&server->srv_mutex);
+       cifs_server_unlock(server);
 
        if (rc)
                return rc;
@@ -626,7 +626,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
 
        memcpy(ses->auth_key.response + baselen, tiblob, tilen);
 
-       mutex_lock(&ses->server->srv_mutex);
+       cifs_server_lock(ses->server);
 
        rc = cifs_alloc_hash("hmac(md5)",
                             &ses->server->secmech.hmacmd5,
@@ -678,7 +678,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
                cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
 
 unlock:
-       mutex_unlock(&ses->server->srv_mutex);
+       cifs_server_unlock(ses->server);
 setup_ntlmv2_rsp_ret:
        kfree(tiblob);
 
index f539a39..12c8728 100644 (file)
@@ -838,7 +838,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
              int flags, struct smb3_fs_context *old_ctx)
 {
        int rc;
-       struct super_block *sb;
+       struct super_block *sb = NULL;
        struct cifs_sb_info *cifs_sb = NULL;
        struct cifs_mnt_data mnt_data;
        struct dentry *root;
@@ -934,9 +934,11 @@ out_super:
        return root;
 out:
        if (cifs_sb) {
-               kfree(cifs_sb->prepath);
-               smb3_cleanup_fs_context(cifs_sb->ctx);
-               kfree(cifs_sb);
+               if (!sb || IS_ERR(sb)) {  /* otherwise kill_sb will handle */
+                       kfree(cifs_sb->prepath);
+                       smb3_cleanup_fs_context(cifs_sb->ctx);
+                       kfree(cifs_sb);
+               }
        }
        return root;
 }
index c0542bd..dd7e070 100644 (file)
@@ -152,6 +152,7 @@ extern struct dentry *cifs_smb3_do_mount(struct file_system_type *fs_type,
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define SMB3_PRODUCT_BUILD 35
-#define CIFS_VERSION   "2.36"
+/* when changing internal version - update following two lines at same time */
+#define SMB3_PRODUCT_BUILD 37
+#define CIFS_VERSION   "2.37"
 #endif                         /* _CIFSFS_H */
index 68da230..f873379 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
 #include <linux/utsname.h>
+#include <linux/sched/mm.h>
 #include <linux/netfs.h>
 #include "cifs_fs_sb.h"
 #include "cifsacl.h"
@@ -628,7 +629,8 @@ struct TCP_Server_Info {
        unsigned int in_flight;  /* number of requests on the wire to server */
        unsigned int max_in_flight; /* max number of requests that were on wire */
        spinlock_t req_lock;  /* protect the two values above */
-       struct mutex srv_mutex;
+       struct mutex _srv_mutex;
+       unsigned int nofs_flag;
        struct task_struct *tsk;
        char server_GUID[16];
        __u16 sec_mode;
@@ -743,6 +745,22 @@ struct TCP_Server_Info {
 #endif
 };
 
+static inline void cifs_server_lock(struct TCP_Server_Info *server)
+{
+       unsigned int nofs_flag = memalloc_nofs_save();
+
+       mutex_lock(&server->_srv_mutex);
+       server->nofs_flag = nofs_flag;
+}
+
+static inline void cifs_server_unlock(struct TCP_Server_Info *server)
+{
+       unsigned int nofs_flag = server->nofs_flag;
+
+       mutex_unlock(&server->_srv_mutex);
+       memalloc_nofs_restore(nofs_flag);
+}
+
 struct cifs_credits {
        unsigned int value;
        unsigned int instance;
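
cifs_server_lock()/cifs_server_unlock() wrap the old srv_mutex so every holder runs inside a memalloc_nofs scope, with the saved flag parked on the server struct for the unlock side. A rough userspace analogue of that save-around-lock shape (a pthread mutex and a plain integer stand in for the mutex and the task's allocation flags):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t srv_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread unsigned int alloc_flags;   /* stand-in for current->flags    */
static unsigned int saved_flags;            /* stand-in for server->nofs_flag */

static void server_lock(void)
{
        unsigned int old = alloc_flags;     /* memalloc_nofs_save() analogue  */

        alloc_flags |= 0x1;                 /* "no FS reclaim" while held     */
        pthread_mutex_lock(&srv_mutex);
        saved_flags = old;                  /* remembered for the unlock side */
}

static void server_unlock(void)
{
        unsigned int old = saved_flags;

        pthread_mutex_unlock(&srv_mutex);
        alloc_flags = old;                  /* memalloc_nofs_restore() analogue */
}

int main(void)
{
        server_lock();
        printf("held: flags=%#x\n", alloc_flags);
        server_unlock();
        printf("released: flags=%#x\n", alloc_flags);
        return 0;
}
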
@@ -1945,11 +1963,13 @@ extern mempool_t *cifs_mid_poolp;
 
 /* Operations for different SMB versions */
 #define SMB1_VERSION_STRING    "1.0"
+#define SMB20_VERSION_STRING    "2.0"
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
 extern struct smb_version_operations smb1_operations;
 extern struct smb_version_values smb1_values;
-#define SMB20_VERSION_STRING   "2.0"
 extern struct smb_version_operations smb20_operations;
 extern struct smb_version_values smb20_values;
+#endif /* CIFS_ALLOW_INSECURE_LEGACY */
 #define SMB21_VERSION_STRING   "2.1"
 extern struct smb_version_operations smb21_operations;
 extern struct smb_version_values smb21_values;
index 53373a3..d46702f 100644 (file)
@@ -148,7 +148,7 @@ static void cifs_resolve_server(struct work_struct *work)
        struct TCP_Server_Info *server = container_of(work,
                                        struct TCP_Server_Info, resolve.work);
 
-       mutex_lock(&server->srv_mutex);
+       cifs_server_lock(server);
 
        /*
         * Resolve the hostname again to make sure that IP address is up-to-date.
@@ -159,7 +159,7 @@ static void cifs_resolve_server(struct work_struct *work)
                                __func__, rc);
        }
 
-       mutex_unlock(&server->srv_mutex);
+       cifs_server_unlock(server);
 }
 
 /*
@@ -267,7 +267,7 @@ cifs_abort_connection(struct TCP_Server_Info *server)
 
        /* do not want to be sending data on a socket we are freeing */
        cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
-       mutex_lock(&server->srv_mutex);
+       cifs_server_lock(server);
        if (server->ssocket) {
                cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
                         server->ssocket->flags);
@@ -296,7 +296,7 @@ cifs_abort_connection(struct TCP_Server_Info *server)
                mid->mid_flags |= MID_DELETED;
        }
        spin_unlock(&GlobalMid_Lock);
-       mutex_unlock(&server->srv_mutex);
+       cifs_server_unlock(server);
 
        cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
        list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
@@ -306,9 +306,9 @@ cifs_abort_connection(struct TCP_Server_Info *server)
        }
 
        if (cifs_rdma_enabled(server)) {
-               mutex_lock(&server->srv_mutex);
+               cifs_server_lock(server);
                smbd_destroy(server);
-               mutex_unlock(&server->srv_mutex);
+               cifs_server_unlock(server);
        }
 }
 
@@ -359,7 +359,7 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
 
        do {
                try_to_freeze();
-               mutex_lock(&server->srv_mutex);
+               cifs_server_lock(server);
 
                if (!cifs_swn_set_server_dstaddr(server)) {
                        /* resolve the hostname again to make sure that IP address is up-to-date */
@@ -372,7 +372,7 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
                else
                        rc = generic_ip_connect(server);
                if (rc) {
-                       mutex_unlock(&server->srv_mutex);
+                       cifs_server_unlock(server);
                        cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
                        msleep(3000);
                } else {
@@ -383,7 +383,7 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
                                server->tcpStatus = CifsNeedNegotiate;
                        spin_unlock(&cifs_tcp_ses_lock);
                        cifs_swn_reset_server_dstaddr(server);
-                       mutex_unlock(&server->srv_mutex);
+                       cifs_server_unlock(server);
                        mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
                }
        } while (server->tcpStatus == CifsNeedReconnect);
@@ -488,12 +488,12 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
 
        do {
                try_to_freeze();
-               mutex_lock(&server->srv_mutex);
+               cifs_server_lock(server);
 
                rc = reconnect_target_unlocked(server, &tl, &target_hint);
                if (rc) {
                        /* Failed to reconnect socket */
-                       mutex_unlock(&server->srv_mutex);
+                       cifs_server_unlock(server);
                        cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
                        msleep(3000);
                        continue;
@@ -510,7 +510,7 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
                        server->tcpStatus = CifsNeedNegotiate;
                spin_unlock(&cifs_tcp_ses_lock);
                cifs_swn_reset_server_dstaddr(server);
-               mutex_unlock(&server->srv_mutex);
+               cifs_server_unlock(server);
                mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
        } while (server->tcpStatus == CifsNeedReconnect);
 
@@ -1565,7 +1565,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
        init_waitqueue_head(&tcp_ses->response_q);
        init_waitqueue_head(&tcp_ses->request_q);
        INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
-       mutex_init(&tcp_ses->srv_mutex);
+       mutex_init(&tcp_ses->_srv_mutex);
        memcpy(tcp_ses->workstation_RFC1001_name,
                ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
        memcpy(tcp_ses->server_RFC1001_name,
@@ -1845,7 +1845,6 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
        unsigned int rc, xid;
        unsigned int chan_count;
        struct TCP_Server_Info *server = ses->server;
-       cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
 
        spin_lock(&cifs_tcp_ses_lock);
        if (ses->ses_status == SES_EXITING) {
index c5dd6f7..34a8f3b 100644 (file)
@@ -1229,6 +1229,30 @@ void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
        kref_put(&mg->refcount, mount_group_release);
 }
 
+/* Extract share from DFS target and return a pointer to prefix path or ERR_PTR */
+static const char *parse_target_share(const char *target, char **share)
+{
+       const char *s, *seps = "/\\";
+       size_t len;
+
+       s = strpbrk(target + 1, seps);
+       if (!s)
+               return ERR_PTR(-EINVAL);
+
+       len = strcspn(s + 1, seps);
+       if (!len)
+               return ERR_PTR(-EINVAL);
+       s += len;
+
+       len = s - target + 1;
+       *share = kstrndup(target, len, GFP_KERNEL);
+       if (!*share)
+               return ERR_PTR(-ENOMEM);
+
+       s = target + len;
+       return s + strspn(s, seps);
+}
+
 /**
  * dfs_cache_get_tgt_share - parse a DFS target
  *
@@ -1242,56 +1266,46 @@ void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
 int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
                            char **prefix)
 {
-       char *s, sep, *p;
-       size_t len;
-       size_t plen1, plen2;
+       char sep;
+       char *target_share;
+       char *ppath = NULL;
+       const char *target_ppath, *dfsref_ppath;
+       size_t target_pplen, dfsref_pplen;
+       size_t len, c;
 
        if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
                return -EINVAL;
 
-       *share = NULL;
-       *prefix = NULL;
-
        sep = it->it_name[0];
        if (sep != '\\' && sep != '/')
                return -EINVAL;
 
-       s = strchr(it->it_name + 1, sep);
-       if (!s)
-               return -EINVAL;
+       target_ppath = parse_target_share(it->it_name, &target_share);
+       if (IS_ERR(target_ppath))
+               return PTR_ERR(target_ppath);
 
-       /* point to prefix in target node */
-       s = strchrnul(s + 1, sep);
+       /* point to prefix in DFS referral path */
+       dfsref_ppath = path + it->it_path_consumed;
+       dfsref_ppath += strspn(dfsref_ppath, "/\\");
 
-       /* extract target share */
-       *share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL);
-       if (!*share)
-               return -ENOMEM;
+       target_pplen = strlen(target_ppath);
+       dfsref_pplen = strlen(dfsref_ppath);
 
-       /* skip separator */
-       if (*s)
-               s++;
-       /* point to prefix in DFS path */
-       p = path + it->it_path_consumed;
-       if (*p == sep)
-               p++;
-
-       /* merge prefix paths from DFS path and target node */
-       plen1 = it->it_name + strlen(it->it_name) - s;
-       plen2 = path + strlen(path) - p;
-       if (plen1 || plen2) {
-               len = plen1 + plen2 + 2;
-               *prefix = kmalloc(len, GFP_KERNEL);
-               if (!*prefix) {
-                       kfree(*share);
-                       *share = NULL;
+       /* merge prefix paths from DFS referral path and target node */
+       if (target_pplen || dfsref_pplen) {
+               len = target_pplen + dfsref_pplen + 2;
+               ppath = kzalloc(len, GFP_KERNEL);
+               if (!ppath) {
+                       kfree(target_share);
                        return -ENOMEM;
                }
-               if (plen1)
-                       scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s, sep, (int)plen2, p);
-               else
-                       strscpy(*prefix, p, len);
+               c = strscpy(ppath, target_ppath, len);
+               if (c && dfsref_pplen)
+                       ppath[c] = sep;
+               strlcat(ppath, dfsref_ppath, len);
        }
+       *share = target_share;
+       *prefix = ppath;
        return 0;
 }
 
@@ -1327,9 +1341,9 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
                cifs_dbg(VFS, "%s: failed to convert address \'%s\'. skip address matching.\n",
                         __func__, ip);
        } else {
-               mutex_lock(&server->srv_mutex);
+               cifs_server_lock(server);
                match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, &sa);
-               mutex_unlock(&server->srv_mutex);
+               cifs_server_unlock(server);
        }
 
        kfree(ip);
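
For reference, the reworked dfs_cache_get_tgt_share()/parse_target_share() split a target such as \srv\fs\a\b into the share \srv\fs and the prefix a\b, then merge in whatever prefix is left over from the referral path. A userspace sketch of just the target split (strndup instead of kstrndup, simplified error handling):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *split_target(const char *target, char **share)
{
        const char *s, *seps = "/\\";
        size_t len;

        s = strpbrk(target + 1, seps);          /* separator after the server */
        if (!s)
                return NULL;
        len = strcspn(s + 1, seps);             /* length of the share name   */
        if (!len)
                return NULL;
        s += len;

        len = s - target + 1;
        *share = strndup(target, len);          /* "\\server\\share"          */
        s = target + len;
        return s + strspn(s, seps);             /* prefix path, seps skipped  */
}

int main(void)
{
        char *share = NULL;
        const char *prefix = split_target("\\srv\\fs\\a\\b", &share);

        printf("share=%s prefix=%s\n", share, prefix);  /* \srv\fs and a\b */
        free(share);
        return 0;
}
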
index c6214cf..3b7915a 100644 (file)
@@ -1120,14 +1120,14 @@ sess_establish_session(struct sess_data *sess_data)
        struct cifs_ses *ses = sess_data->ses;
        struct TCP_Server_Info *server = sess_data->server;
 
-       mutex_lock(&server->srv_mutex);
+       cifs_server_lock(server);
        if (!server->session_estab) {
                if (server->sign) {
                        server->session_key.response =
                                kmemdup(ses->auth_key.response,
                                ses->auth_key.len, GFP_KERNEL);
                        if (!server->session_key.response) {
-                               mutex_unlock(&server->srv_mutex);
+                               cifs_server_unlock(server);
                                return -ENOMEM;
                        }
                        server->session_key.len =
@@ -1136,7 +1136,7 @@ sess_establish_session(struct sess_data *sess_data)
                server->sequence_number = 0x2;
                server->session_estab = true;
        }
-       mutex_unlock(&server->srv_mutex);
+       cifs_server_unlock(server);
 
        cifs_dbg(FYI, "CIFS session established successfully\n");
        return 0;
index c71c9a4..2e20ee4 100644 (file)
@@ -38,10 +38,10 @@ send_nt_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
        in_buf->WordCount = 0;
        put_bcc(0, in_buf);
 
-       mutex_lock(&server->srv_mutex);
+       cifs_server_lock(server);
        rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
        if (rc) {
-               mutex_unlock(&server->srv_mutex);
+               cifs_server_unlock(server);
                return rc;
        }
 
@@ -55,7 +55,7 @@ send_nt_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
        if (rc < 0)
                server->sequence_number--;
 
-       mutex_unlock(&server->srv_mutex);
+       cifs_server_unlock(server);
 
        cifs_dbg(FYI, "issued NT_CANCEL for mid %u, rc = %d\n",
                 get_mid(in_buf), rc);
index d7ade73..98a76fa 100644 (file)
@@ -3859,7 +3859,7 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
                if (rc)
                        goto out;
 
-               if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0)
+               if (cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)
                        smb2_set_sparse(xid, tcon, cfile, inode, false);
 
                eof = cpu_to_le64(off + len);
@@ -4345,11 +4345,13 @@ smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
        }
 }
 
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
 static bool
 smb2_is_read_op(__u32 oplock)
 {
        return oplock == SMB2_OPLOCK_LEVEL_II;
 }
+#endif /* CIFS_ALLOW_INSECURE_LEGACY */
 
 static bool
 smb21_is_read_op(__u32 oplock)
@@ -5448,7 +5450,7 @@ out:
        return rc;
 }
 
-
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
 struct smb_version_operations smb20_operations = {
        .compare_fids = smb2_compare_fids,
        .setup_request = smb2_setup_request,
@@ -5547,6 +5549,7 @@ struct smb_version_operations smb20_operations = {
        .is_status_io_timeout = smb2_is_status_io_timeout,
        .is_network_name_deleted = smb2_is_network_name_deleted,
 };
+#endif /* CIFS_ALLOW_INSECURE_LEGACY */
 
 struct smb_version_operations smb21_operations = {
        .compare_fids = smb2_compare_fids,
@@ -5878,6 +5881,7 @@ struct smb_version_operations smb311_operations = {
        .is_network_name_deleted = smb2_is_network_name_deleted,
 };
 
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
 struct smb_version_values smb20_values = {
        .version_string = SMB20_VERSION_STRING,
        .protocol_id = SMB20_PROT_ID,
@@ -5898,6 +5902,7 @@ struct smb_version_values smb20_values = {
        .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
        .create_lease_size = sizeof(struct create_lease),
 };
+#endif /* ALLOW_INSECURE_LEGACY */
 
 struct smb_version_values smb21_values = {
        .version_string = SMB21_VERSION_STRING,
index 084be3a..0e8c852 100644 (file)
@@ -1369,13 +1369,13 @@ SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
        struct cifs_ses *ses = sess_data->ses;
        struct TCP_Server_Info *server = sess_data->server;
 
-       mutex_lock(&server->srv_mutex);
+       cifs_server_lock(server);
        if (server->ops->generate_signingkey) {
                rc = server->ops->generate_signingkey(ses, server);
                if (rc) {
                        cifs_dbg(FYI,
                                "SMB3 session key generation failed\n");
-                       mutex_unlock(&server->srv_mutex);
+                       cifs_server_unlock(server);
                        return rc;
                }
        }
@@ -1383,7 +1383,7 @@ SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
                server->sequence_number = 0x2;
                server->session_estab = true;
        }
-       mutex_unlock(&server->srv_mutex);
+       cifs_server_unlock(server);
 
        cifs_dbg(FYI, "SMB2/3 session established successfully\n");
        return rc;
index c3278db..5fbbec2 100644 (file)
@@ -1382,9 +1382,9 @@ void smbd_destroy(struct TCP_Server_Info *server)
        log_rdma_event(INFO, "freeing mr list\n");
        wake_up_interruptible_all(&info->wait_mr);
        while (atomic_read(&info->mr_used_count)) {
-               mutex_unlock(&server->srv_mutex);
+               cifs_server_unlock(server);
                msleep(1000);
-               mutex_lock(&server->srv_mutex);
+               cifs_server_lock(server);
        }
        destroy_mr_list(info);
 
index 05eca41..bfc9bd5 100644 (file)
@@ -822,7 +822,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
        } else
                instance = exist_credits->instance;
 
-       mutex_lock(&server->srv_mutex);
+       cifs_server_lock(server);
 
        /*
         * We can't use credits obtained from the previous session to send this
@@ -830,14 +830,14 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
-               mutex_unlock(&server->srv_mutex);
+               cifs_server_unlock(server);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }
 
        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
-               mutex_unlock(&server->srv_mutex);
+               cifs_server_unlock(server);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }
@@ -868,7 +868,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                cifs_delete_mid(mid);
        }
 
-       mutex_unlock(&server->srv_mutex);
+       cifs_server_unlock(server);
 
        if (rc == 0)
                return 0;
@@ -1109,7 +1109,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
         * of smb data.
         */
 
-       mutex_lock(&server->srv_mutex);
+       cifs_server_lock(server);
 
        /*
         * All the parts of the compound chain belong obtained credits from the
@@ -1119,7 +1119,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
         * handle it.
         */
        if (instance != server->reconnect_instance) {
-               mutex_unlock(&server->srv_mutex);
+               cifs_server_unlock(server);
                for (j = 0; j < num_rqst; j++)
                        add_credits(server, &credits[j], optype);
                return -EAGAIN;
@@ -1131,7 +1131,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                        revert_current_mid(server, i);
                        for (j = 0; j < i; j++)
                                cifs_delete_mid(midQ[j]);
-                       mutex_unlock(&server->srv_mutex);
+                       cifs_server_unlock(server);
 
                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
@@ -1163,7 +1163,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                server->sequence_number -= 2;
        }
 
-       mutex_unlock(&server->srv_mutex);
+       cifs_server_unlock(server);
 
        /*
         * If sending failed for some reason or it is an oplock break that we
@@ -1190,9 +1190,9 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
        if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
                spin_unlock(&cifs_tcp_ses_lock);
 
-               mutex_lock(&server->srv_mutex);
+               cifs_server_lock(server);
                smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
-               mutex_unlock(&server->srv_mutex);
+               cifs_server_unlock(server);
 
                spin_lock(&cifs_tcp_ses_lock);
        }
@@ -1266,9 +1266,9 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                        .iov_len = resp_iov[0].iov_len
                };
                spin_unlock(&cifs_tcp_ses_lock);
-               mutex_lock(&server->srv_mutex);
+               cifs_server_lock(server);
                smb311_update_preauth_hash(ses, server, &iov, 1);
-               mutex_unlock(&server->srv_mutex);
+               cifs_server_unlock(server);
                spin_lock(&cifs_tcp_ses_lock);
        }
        spin_unlock(&cifs_tcp_ses_lock);
@@ -1385,11 +1385,11 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */
 
-       mutex_lock(&server->srv_mutex);
+       cifs_server_lock(server);
 
        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
-               mutex_unlock(&server->srv_mutex);
+               cifs_server_unlock(server);
                /* Update # of requests on wire to server */
                add_credits(server, &credits, 0);
                return rc;
@@ -1397,7 +1397,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
 
        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
-               mutex_unlock(&server->srv_mutex);
+               cifs_server_unlock(server);
                goto out;
        }
 
@@ -1411,7 +1411,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
        if (rc < 0)
                server->sequence_number -= 2;
 
-       mutex_unlock(&server->srv_mutex);
+       cifs_server_unlock(server);
 
        if (rc < 0)
                goto out;
@@ -1530,18 +1530,18 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */
 
-       mutex_lock(&server->srv_mutex);
+       cifs_server_lock(server);
 
        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
-               mutex_unlock(&server->srv_mutex);
+               cifs_server_unlock(server);
                return rc;
        }
 
        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                cifs_delete_mid(midQ);
-               mutex_unlock(&server->srv_mutex);
+               cifs_server_unlock(server);
                return rc;
        }
 
@@ -1554,7 +1554,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
        if (rc < 0)
                server->sequence_number -= 2;
 
-       mutex_unlock(&server->srv_mutex);
+       cifs_server_unlock(server);
 
        if (rc < 0) {
                cifs_delete_mid(midQ);
index a5cc4ed..8e01d89 100644 (file)
@@ -17,6 +17,7 @@ static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space
        rreq->start     = start;
        rreq->len       = len;
        rreq->mapping   = mapping;
+       rreq->inode     = mapping->host;
        INIT_LIST_HEAD(&rreq->subrequests);
        refcount_set(&rreq->ref, 1);
        return rreq;
index bcc8335..95a4037 100644 (file)
@@ -288,7 +288,10 @@ static int erofs_fill_inode(struct inode *inode, int isdir)
        }
 
        if (erofs_inode_is_data_compressed(vi->datalayout)) {
-               err = z_erofs_fill_inode(inode);
+               if (!erofs_is_fscache_mode(inode->i_sb))
+                       err = z_erofs_fill_inode(inode);
+               else
+                       err = -EOPNOTSUPP;
                goto out_unlock;
        }
        inode->i_mapping->a_ops = &erofs_raw_access_aops;
index 95efc12..724bb57 100644 (file)
@@ -199,7 +199,6 @@ struct z_erofs_decompress_frontend {
        struct z_erofs_pagevec_ctor vector;
 
        struct z_erofs_pcluster *pcl, *tailpcl;
-       struct z_erofs_collection *cl;
        /* a pointer used to pick up inplace I/O pages */
        struct page **icpage_ptr;
        z_erofs_next_pcluster_t owned_head;
@@ -214,7 +213,7 @@ struct z_erofs_decompress_frontend {
 
 #define DECOMPRESS_FRONTEND_INIT(__i) { \
        .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
-       .mode = COLLECT_PRIMARY_FOLLOWED }
+       .mode = COLLECT_PRIMARY_FOLLOWED, .backmost = true }
 
 static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
 static DEFINE_MUTEX(z_pagemap_global_lock);
@@ -357,7 +356,7 @@ static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
        return false;
 }
 
-/* callers must be with collection lock held */
+/* callers must be with pcluster lock held */
 static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
                               struct page *page, enum z_erofs_page_type type,
                               bool pvec_safereuse)
@@ -372,7 +371,7 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
 
        ret = z_erofs_pagevec_enqueue(&fe->vector, page, type,
                                      pvec_safereuse);
-       fe->cl->vcnt += (unsigned int)ret;
+       fe->pcl->vcnt += (unsigned int)ret;
        return ret ? 0 : -EAGAIN;
 }
 
@@ -405,12 +404,11 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
        f->mode = COLLECT_PRIMARY;
 }
 
-static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe,
-                                    struct inode *inode,
-                                    struct erofs_map_blocks *map)
+static int z_erofs_lookup_pcluster(struct z_erofs_decompress_frontend *fe,
+                                  struct inode *inode,
+                                  struct erofs_map_blocks *map)
 {
        struct z_erofs_pcluster *pcl = fe->pcl;
-       struct z_erofs_collection *cl;
        unsigned int length;
 
        /* to avoid unexpected loop formed by corrupted images */
@@ -419,8 +417,7 @@ static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe,
                return -EFSCORRUPTED;
        }
 
-       cl = z_erofs_primarycollection(pcl);
-       if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
+       if (pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) {
                DBG_BUGON(1);
                return -EFSCORRUPTED;
        }
@@ -443,23 +440,21 @@ static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe,
                        length = READ_ONCE(pcl->length);
                }
        }
-       mutex_lock(&cl->lock);
+       mutex_lock(&pcl->lock);
        /* used to check tail merging loop due to corrupted images */
        if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
                fe->tailpcl = pcl;
 
        z_erofs_try_to_claim_pcluster(fe);
-       fe->cl = cl;
        return 0;
 }
 
-static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe,
-                                      struct inode *inode,
-                                      struct erofs_map_blocks *map)
+static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe,
+                                    struct inode *inode,
+                                    struct erofs_map_blocks *map)
 {
        bool ztailpacking = map->m_flags & EROFS_MAP_META;
        struct z_erofs_pcluster *pcl;
-       struct z_erofs_collection *cl;
        struct erofs_workgroup *grp;
        int err;
 
@@ -482,17 +477,15 @@ static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe,
 
        /* new pclusters should be claimed as type 1, primary and followed */
        pcl->next = fe->owned_head;
+       pcl->pageofs_out = map->m_la & ~PAGE_MASK;
        fe->mode = COLLECT_PRIMARY_FOLLOWED;
 
-       cl = z_erofs_primarycollection(pcl);
-       cl->pageofs = map->m_la & ~PAGE_MASK;
-
        /*
         * lock all primary followed works before visible to others
         * and mutex_trylock *never* fails for a new pcluster.
         */
-       mutex_init(&cl->lock);
-       DBG_BUGON(!mutex_trylock(&cl->lock));
+       mutex_init(&pcl->lock);
+       DBG_BUGON(!mutex_trylock(&pcl->lock));
 
        if (ztailpacking) {
                pcl->obj.index = 0;     /* which indicates ztailpacking */
@@ -519,11 +512,10 @@ static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe,
                fe->tailpcl = pcl;
        fe->owned_head = &pcl->next;
        fe->pcl = pcl;
-       fe->cl = cl;
        return 0;
 
 err_out:
-       mutex_unlock(&cl->lock);
+       mutex_unlock(&pcl->lock);
        z_erofs_free_pcluster(pcl);
        return err;
 }
@@ -535,9 +527,9 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe,
        struct erofs_workgroup *grp;
        int ret;
 
-       DBG_BUGON(fe->cl);
+       DBG_BUGON(fe->pcl);
 
-       /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */
+       /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
        DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
        DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
 
@@ -554,14 +546,14 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe,
                fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
        } else {
 tailpacking:
-               ret = z_erofs_register_collection(fe, inode, map);
+               ret = z_erofs_register_pcluster(fe, inode, map);
                if (!ret)
                        goto out;
                if (ret != -EEXIST)
                        return ret;
        }
 
-       ret = z_erofs_lookup_collection(fe, inode, map);
+       ret = z_erofs_lookup_pcluster(fe, inode, map);
        if (ret) {
                erofs_workgroup_put(&fe->pcl->obj);
                return ret;
@@ -569,7 +561,7 @@ tailpacking:
 
 out:
        z_erofs_pagevec_ctor_init(&fe->vector, Z_EROFS_NR_INLINE_PAGEVECS,
-                                 fe->cl->pagevec, fe->cl->vcnt);
+                                 fe->pcl->pagevec, fe->pcl->vcnt);
        /* since file-backed online pages are traversed in reverse order */
        fe->icpage_ptr = fe->pcl->compressed_pages +
                        z_erofs_pclusterpages(fe->pcl);
@@ -582,48 +574,36 @@ out:
  */
 static void z_erofs_rcu_callback(struct rcu_head *head)
 {
-       struct z_erofs_collection *const cl =
-               container_of(head, struct z_erofs_collection, rcu);
-
-       z_erofs_free_pcluster(container_of(cl, struct z_erofs_pcluster,
-                                          primary_collection));
+       z_erofs_free_pcluster(container_of(head,
+                       struct z_erofs_pcluster, rcu));
 }
 
 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
 {
        struct z_erofs_pcluster *const pcl =
                container_of(grp, struct z_erofs_pcluster, obj);
-       struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);
-
-       call_rcu(&cl->rcu, z_erofs_rcu_callback);
-}
 
-static void z_erofs_collection_put(struct z_erofs_collection *cl)
-{
-       struct z_erofs_pcluster *const pcl =
-               container_of(cl, struct z_erofs_pcluster, primary_collection);
-
-       erofs_workgroup_put(&pcl->obj);
+       call_rcu(&pcl->rcu, z_erofs_rcu_callback);
 }
 
 static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
 {
-       struct z_erofs_collection *cl = fe->cl;
+       struct z_erofs_pcluster *pcl = fe->pcl;
 
-       if (!cl)
+       if (!pcl)
                return false;
 
        z_erofs_pagevec_ctor_exit(&fe->vector, false);
-       mutex_unlock(&cl->lock);
+       mutex_unlock(&pcl->lock);
 
        /*
         * if all pending pages are added, don't hold its reference
         * any longer if the pcluster isn't hosted by ourselves.
         */
        if (fe->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
-               z_erofs_collection_put(cl);
+               erofs_workgroup_put(&pcl->obj);
 
-       fe->cl = NULL;
+       fe->pcl = NULL;
        return true;
 }
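
With the collection folded into the pcluster, the RCU free path above needs only one container_of() on the embedded rcu_head. A tiny userspace sketch of that lookup (local container_of and a dummy rcu_head, since this is illustrative only):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void *next; };
struct pcluster { int index; struct rcu_head rcu; };

static void rcu_callback(struct rcu_head *head)
{
        struct pcluster *pcl = container_of(head, struct pcluster, rcu);

        printf("freeing pcluster %d\n", pcl->index);
}

int main(void)
{
        struct pcluster pcl = { .index = 42 };

        rcu_callback(&pcl.rcu);
        return 0;
}
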
 
@@ -663,28 +643,23 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 repeat:
        cur = end - 1;
 
-       /* lucky, within the range of the current map_blocks */
-       if (offset + cur >= map->m_la &&
-           offset + cur < map->m_la + map->m_llen) {
-               /* didn't get a valid collection previously (very rare) */
-               if (!fe->cl)
-                       goto restart_now;
-               goto hitted;
-       }
-
-       /* go ahead the next map_blocks */
-       erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);
-
-       if (z_erofs_collector_end(fe))
-               fe->backmost = false;
+       if (offset + cur < map->m_la ||
+           offset + cur >= map->m_la + map->m_llen) {
+               erofs_dbg("out-of-range map @ pos %llu", offset + cur);
 
-       map->m_la = offset + cur;
-       map->m_llen = 0;
-       err = z_erofs_map_blocks_iter(inode, map, 0);
-       if (err)
-               goto err_out;
+               if (z_erofs_collector_end(fe))
+                       fe->backmost = false;
+               map->m_la = offset + cur;
+               map->m_llen = 0;
+               err = z_erofs_map_blocks_iter(inode, map, 0);
+               if (err)
+                       goto err_out;
+       } else {
+               if (fe->pcl)
+                       goto hitted;
+               /* didn't get a valid pcluster previously (very rare) */
+       }
 
-restart_now:
        if (!(map->m_flags & EROFS_MAP_MAPPED))
                goto hitted;
 
@@ -766,7 +741,7 @@ retry:
        /* bump up the number of spiltted parts of a page */
        ++spiltted;
        /* also update nr_pages */
-       fe->cl->nr_pages = max_t(pgoff_t, fe->cl->nr_pages, index + 1);
+       fe->pcl->nr_pages = max_t(pgoff_t, fe->pcl->nr_pages, index + 1);
 next_part:
        /* can be used for verification */
        map->m_llen = offset + cur - map->m_la;
@@ -821,15 +796,13 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
 
        enum z_erofs_page_type page_type;
        bool overlapped, partial;
-       struct z_erofs_collection *cl;
        int err;
 
        might_sleep();
-       cl = z_erofs_primarycollection(pcl);
-       DBG_BUGON(!READ_ONCE(cl->nr_pages));
+       DBG_BUGON(!READ_ONCE(pcl->nr_pages));
 
-       mutex_lock(&cl->lock);
-       nr_pages = cl->nr_pages;
+       mutex_lock(&pcl->lock);
+       nr_pages = pcl->nr_pages;
 
        if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES) {
                pages = pages_onstack;
@@ -857,9 +830,9 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
 
        err = 0;
        z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
-                                 cl->pagevec, 0);
+                                 pcl->pagevec, 0);
 
-       for (i = 0; i < cl->vcnt; ++i) {
+       for (i = 0; i < pcl->vcnt; ++i) {
                unsigned int pagenr;
 
                page = z_erofs_pagevec_dequeue(&ctor, &page_type);
@@ -945,11 +918,11 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
                goto out;
 
        llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
-       if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
+       if (nr_pages << PAGE_SHIFT >= pcl->pageofs_out + llen) {
                outputsize = llen;
                partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
        } else {
-               outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;
+               outputsize = (nr_pages << PAGE_SHIFT) - pcl->pageofs_out;
                partial = true;
        }
 
@@ -963,7 +936,7 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
                                        .in = compressed_pages,
                                        .out = pages,
                                        .pageofs_in = pcl->pageofs_in,
-                                       .pageofs_out = cl->pageofs,
+                                       .pageofs_out = pcl->pageofs_out,
                                        .inputsize = inputsize,
                                        .outputsize = outputsize,
                                        .alg = pcl->algorithmformat,
@@ -1012,16 +985,12 @@ out:
        else if (pages != pages_onstack)
                kvfree(pages);
 
-       cl->nr_pages = 0;
-       cl->vcnt = 0;
+       pcl->nr_pages = 0;
+       pcl->vcnt = 0;
 
-       /* all cl locks MUST be taken before the following line */
+       /* pcluster lock MUST be taken before the following line */
        WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
-
-       /* all cl locks SHOULD be released right now */
-       mutex_unlock(&cl->lock);
-
-       z_erofs_collection_put(cl);
+       mutex_unlock(&pcl->lock);
        return err;
 }
 
@@ -1043,6 +1012,7 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
                owned = READ_ONCE(pcl->next);
 
                z_erofs_decompress_pcluster(io->sb, pcl, pagepool);
+               erofs_workgroup_put(&pcl->obj);
        }
 }
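
After the collection is folded into the pcluster, a single reference on pcl->obj covers its lifetime: z_erofs_collector_end() releases it early only when the pcluster is not hosted by the current context, and z_erofs_decompress_queue() now releases it once decompression finishes, with the final free deferred through call_rcu() in erofs_workgroup_free_rcu(). Below is a minimal userspace sketch of that get/put discipline; C11 atomics stand in for the erofs workgroup helpers, the free() is immediate where the kernel defers it, and all names are illustrative only.

#include <stdatomic.h>
#include <stdlib.h>

/* stand-in for struct z_erofs_pcluster; only the lifetime part is modelled */
struct pcluster {
        atomic_int refcount;
        /* ... decompression state ... */
};

struct pcluster *pcluster_get(struct pcluster *pcl)
{
        atomic_fetch_add_explicit(&pcl->refcount, 1, memory_order_relaxed);
        return pcl;
}

void pcluster_put(struct pcluster *pcl)
{
        /* last reference frees the object; the kernel defers this via call_rcu() */
        if (atomic_fetch_sub_explicit(&pcl->refcount, 1,
                                      memory_order_acq_rel) == 1)
                free(pcl);
}

int main(void)
{
        struct pcluster *pcl = calloc(1, sizeof(*pcl));

        if (!pcl)
                return 1;
        atomic_init(&pcl->refcount, 1);   /* creator holds the first reference */
        pcluster_get(pcl);                /* another path takes a reference ... */
        pcluster_put(pcl);                /* ... and drops it when its work is done */
        pcluster_put(pcl);                /* creator's reference: last put frees */
        return 0;
}
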
 
@@ -1466,22 +1436,19 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
                struct page *page;
 
                page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
-               if (!page)
-                       goto skip;
-
-               if (PageUptodate(page)) {
-                       unlock_page(page);
+               if (page) {
+                       if (PageUptodate(page)) {
+                               unlock_page(page);
+                       } else {
+                               err = z_erofs_do_read_page(f, page, pagepool);
+                               if (err)
+                                       erofs_err(inode->i_sb,
+                                                 "readmore error at page %lu @ nid %llu",
+                                                 index, EROFS_I(inode)->nid);
+                       }
                        put_page(page);
-                       goto skip;
                }
 
-               err = z_erofs_do_read_page(f, page, pagepool);
-               if (err)
-                       erofs_err(inode->i_sb,
-                                 "readmore error at page %lu @ nid %llu",
-                                 index, EROFS_I(inode)->nid);
-               put_page(page);
-skip:
                if (cur < PAGE_SIZE)
                        break;
                cur = (index << PAGE_SHIFT) - 1;
index 800b11c..58053bb 100644 (file)
 #define Z_EROFS_PCLUSTER_MAX_PAGES     (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
 #define Z_EROFS_NR_INLINE_PAGEVECS      3
 
+#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
+#define Z_EROFS_PCLUSTER_LENGTH_BIT     1
+
+/*
+ * let's leave a type here in case of introducing
+ * another tagged pointer later.
+ */
+typedef void *z_erofs_next_pcluster_t;
+
 /*
  * Structure fields follow one of the following exclusion rules.
  *
  * I: Modifiable by initialization/destruction paths and read-only
  *    for everyone else;
  *
- * L: Field should be protected by pageset lock;
+ * L: Field should be protected by the pcluster lock;
  *
  * A: Field should be accessed / updated in atomic for parallelized code.
  */
-struct z_erofs_collection {
+struct z_erofs_pcluster {
+       struct erofs_workgroup obj;
        struct mutex lock;
 
+       /* A: point to next chained pcluster or TAILs */
+       z_erofs_next_pcluster_t next;
+
+       /* A: lower limit of decompressed length and if full length or not */
+       unsigned int length;
+
        /* I: page offset of start position of decompression */
-       unsigned short pageofs;
+       unsigned short pageofs_out;
+
+       /* I: page offset of inline compressed data */
+       unsigned short pageofs_in;
 
        /* L: maximum relative page index in pagevec[] */
        unsigned short nr_pages;
@@ -41,29 +60,6 @@ struct z_erofs_collection {
                /* I: can be used to free the pcluster by RCU. */
                struct rcu_head rcu;
        };
-};
-
-#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
-#define Z_EROFS_PCLUSTER_LENGTH_BIT     1
-
-/*
- * let's leave a type here in case of introducing
- * another tagged pointer later.
- */
-typedef void *z_erofs_next_pcluster_t;
-
-struct z_erofs_pcluster {
-       struct erofs_workgroup obj;
-       struct z_erofs_collection primary_collection;
-
-       /* A: point to next chained pcluster or TAILs */
-       z_erofs_next_pcluster_t next;
-
-       /* A: lower limit of decompressed length and if full length or not */
-       unsigned int length;
-
-       /* I: page offset of inline compressed data */
-       unsigned short pageofs_in;
 
        union {
                /* I: physical cluster size in pages */
@@ -80,8 +76,6 @@ struct z_erofs_pcluster {
        struct page *compressed_pages[];
 };
 
-#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)
-
 /* let's avoid the valid 32-bit kernel addresses */
 
 /* the chained workgroup hasn't submitted io (still open) */
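
For reference, the two macros moved to the top of this header encode pcl->length as the decompressed length shifted left by Z_EROFS_PCLUSTER_LENGTH_BIT, with bit 0 flagging whether the full length is already known, which is how the decompression hunk earlier reads it back (llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT). A standalone sketch of that encoding, reusing the macro values shown above; pack_length() itself is illustrative, not an erofs helper:

#include <assert.h>
#include <stdbool.h>

/* values copied from the header hunk above */
#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
#define Z_EROFS_PCLUSTER_LENGTH_BIT     1

/* pack a decompressed length plus the "full length known" flag into one word */
unsigned int pack_length(unsigned int llen, bool full)
{
        return (llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
               (full ? Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
}

int main(void)
{
        unsigned int v = pack_length(4096, true);

        /* matches the unpacking done in z_erofs_decompress_pcluster() above */
        assert((v >> Z_EROFS_PCLUSTER_LENGTH_BIT) == 4096);
        assert(v & Z_EROFS_PCLUSTER_FULL_LENGTH);
        return 0;
}
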
index 14b4b37..0989fb8 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1312,9 +1312,7 @@ int begin_new_exec(struct linux_binprm * bprm)
        if (retval)
                goto out_unlock;
 
-       if (me->flags & PF_KTHREAD)
-               free_kthread_struct(me);
-       me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
+       me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC |
                                        PF_NOFREEZE | PF_NO_SETAFFINITY);
        flush_thread();
        me->personality &= ~bprm->per_clear;
@@ -1959,6 +1957,10 @@ int kernel_execve(const char *kernel_filename,
        int fd = AT_FDCWD;
        int retval;
 
+       /* It is non-sense for kernel threads to call execve */
+       if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
+               return -EINVAL;
+
        filename = getname_kernel(kernel_filename);
        if (IS_ERR(filename))
                return PTR_ERR(filename);
index 0106eba..3ef80d0 100644 (file)
@@ -145,7 +145,7 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
        if (err)
                goto out_err;
        dprintk("%s: found name: %s\n", __func__, nbuf);
-       tmp = lookup_one_len_unlocked(nbuf, parent, strlen(nbuf));
+       tmp = lookup_one_unlocked(mnt_user_ns(mnt), nbuf, parent, strlen(nbuf));
        if (IS_ERR(tmp)) {
                dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp));
                err = PTR_ERR(tmp);
@@ -525,7 +525,8 @@ exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len,
                }
 
                inode_lock(target_dir->d_inode);
-               nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
+               nresult = lookup_one(mnt_user_ns(mnt), nbuf,
+                                    target_dir, strlen(nbuf));
                if (!IS_ERR(nresult)) {
                        if (unlikely(nresult->d_inode != result->d_inode)) {
                                dput(nresult);
index 456c1e8..6d8b2bf 100644 (file)
@@ -98,13 +98,7 @@ repeat:
        }
 
        if (unlikely(!PageUptodate(page))) {
-               if (page->index == sbi->metapage_eio_ofs) {
-                       if (sbi->metapage_eio_cnt++ == MAX_RETRY_META_PAGE_EIO)
-                               set_ckpt_flags(sbi, CP_ERROR_FLAG);
-               } else {
-                       sbi->metapage_eio_ofs = page->index;
-                       sbi->metapage_eio_cnt = 0;
-               }
+               f2fs_handle_page_eio(sbi, page->index, META);
                f2fs_put_page(page, 1);
                return ERR_PTR(-EIO);
        }
@@ -158,7 +152,7 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
                f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
                         blkaddr, exist);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
-               WARN_ON(1);
+               dump_stack();
        }
        return exist;
 }
@@ -196,7 +190,7 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
                        f2fs_warn(sbi, "access invalid blkaddr:%u",
                                  blkaddr);
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
-                       WARN_ON(1);
+                       dump_stack();
                        return false;
                } else {
                        return __is_bitmap_valid(sbi, blkaddr, type);
@@ -1010,9 +1004,7 @@ static void __add_dirty_inode(struct inode *inode, enum inode_type type)
                return;
 
        set_inode_flag(inode, flag);
-       if (!f2fs_is_volatile_file(inode))
-               list_add_tail(&F2FS_I(inode)->dirty_list,
-                                               &sbi->inode_list[type]);
+       list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
        stat_inc_dirty_inode(sbi, type);
 }
 
index 8f38c26..7fcbcf9 100644 (file)
@@ -69,8 +69,7 @@ static bool __is_cp_guaranteed(struct page *page)
 
        if (f2fs_is_compressed_page(page))
                return false;
-       if ((S_ISREG(inode->i_mode) &&
-                       (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
+       if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
                        page_private_gcing(page))
                return true;
        return false;
@@ -585,6 +584,34 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
        return false;
 }
 
+int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi)
+{
+       int i;
+
+       for (i = 0; i < NR_PAGE_TYPE; i++) {
+               int n = (i == META) ? 1 : NR_TEMP_TYPE;
+               int j;
+
+               sbi->write_io[i] = f2fs_kmalloc(sbi,
+                               array_size(n, sizeof(struct f2fs_bio_info)),
+                               GFP_KERNEL);
+               if (!sbi->write_io[i])
+                       return -ENOMEM;
+
+               for (j = HOT; j < n; j++) {
+                       init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
+                       sbi->write_io[i][j].sbi = sbi;
+                       sbi->write_io[i][j].bio = NULL;
+                       spin_lock_init(&sbi->write_io[i][j].io_lock);
+                       INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
+                       INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
+                       init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
+               }
+       }
+
+       return 0;
+}
+
 static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
                                enum page_type type, enum temp_type temp)
 {
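
f2fs_init_write_merge_io() in the hunk above allocates a jagged two-level array of f2fs_bio_info: a single entry for META and one per temperature for DATA and NODE. A standalone sketch of just that allocation shape, with calloc() standing in for f2fs_kmalloc()/array_size(), plain ints standing in for struct f2fs_bio_info, and NR_TEMP_TYPE assumed here to be 3 (HOT/WARM/COLD):

#include <stdio.h>
#include <stdlib.h>

enum page_type { DATA = 0, NODE = 1, META, NR_PAGE_TYPE }; /* matches the enum changed later in this diff */
#define NR_TEMP_TYPE 3                                     /* assumption: HOT/WARM/COLD */

int main(void)
{
        int *write_io[NR_PAGE_TYPE] = { NULL };

        for (int i = 0; i < NR_PAGE_TYPE; i++) {
                /* META keeps one merge queue, DATA/NODE one per temperature */
                int n = (i == META) ? 1 : NR_TEMP_TYPE;

                write_io[i] = calloc(n, sizeof(*write_io[i]));
                if (!write_io[i]) {
                        for (int j = 0; j < i; j++)
                                free(write_io[j]);
                        return 1;
                }
                printf("page type %d: %d bio queue(s)\n", i, n);
        }

        for (int i = 0; i < NR_PAGE_TYPE; i++)
                free(write_io[i]);
        return 0;
}
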
@@ -2564,7 +2591,12 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
        bool ipu_force = false;
        int err = 0;
 
-       set_new_dnode(&dn, inode, NULL, NULL, 0);
+       /* Use COW inode to make dnode_of_data for atomic write */
+       if (f2fs_is_atomic_file(inode))
+               set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
+       else
+               set_new_dnode(&dn, inode, NULL, NULL, 0);
+
        if (need_inplace_update(fio) &&
                        f2fs_lookup_extent_cache(inode, page->index, &ei)) {
                fio->old_blkaddr = ei.blk + page->index - ei.fofs;
@@ -2601,6 +2633,7 @@ got_it:
                err = -EFSCORRUPTED;
                goto out_writepage;
        }
+
        /*
         * If current allocation needs SSR,
         * it had better in-place writes for updated data.
@@ -2737,11 +2770,6 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
 write:
        if (f2fs_is_drop_cache(inode))
                goto out;
-       /* we should not write 0'th page having journal header */
-       if (f2fs_is_volatile_file(inode) && (!page->index ||
-                       (!wbc->for_reclaim &&
-                       f2fs_available_free_memory(sbi, BASE_CHECK))))
-               goto redirty_out;
 
        /* Dentry/quota blocks are controlled by checkpoint */
        if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
@@ -3314,6 +3342,100 @@ unlock_out:
        return err;
 }
 
+static int __find_data_block(struct inode *inode, pgoff_t index,
+                               block_t *blk_addr)
+{
+       struct dnode_of_data dn;
+       struct page *ipage;
+       struct extent_info ei = {0, };
+       int err = 0;
+
+       ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
+       if (IS_ERR(ipage))
+               return PTR_ERR(ipage);
+
+       set_new_dnode(&dn, inode, ipage, ipage, 0);
+
+       if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+               dn.data_blkaddr = ei.blk + index - ei.fofs;
+       } else {
+               /* hole case */
+               err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
+               if (err) {
+                       dn.data_blkaddr = NULL_ADDR;
+                       err = 0;
+               }
+       }
+       *blk_addr = dn.data_blkaddr;
+       f2fs_put_dnode(&dn);
+       return err;
+}
+
+static int __reserve_data_block(struct inode *inode, pgoff_t index,
+                               block_t *blk_addr, bool *node_changed)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct dnode_of_data dn;
+       struct page *ipage;
+       int err = 0;
+
+       f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+
+       ipage = f2fs_get_node_page(sbi, inode->i_ino);
+       if (IS_ERR(ipage)) {
+               err = PTR_ERR(ipage);
+               goto unlock_out;
+       }
+       set_new_dnode(&dn, inode, ipage, ipage, 0);
+
+       err = f2fs_get_block(&dn, index);
+
+       *blk_addr = dn.data_blkaddr;
+       *node_changed = dn.node_changed;
+       f2fs_put_dnode(&dn);
+
+unlock_out:
+       f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+       return err;
+}
+
+static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
+                       struct page *page, loff_t pos, unsigned int len,
+                       block_t *blk_addr, bool *node_changed)
+{
+       struct inode *inode = page->mapping->host;
+       struct inode *cow_inode = F2FS_I(inode)->cow_inode;
+       pgoff_t index = page->index;
+       int err = 0;
+       block_t ori_blk_addr;
+
+       /* If pos is beyond the end of file, reserve a new block in COW inode */
+       if ((pos & PAGE_MASK) >= i_size_read(inode))
+               return __reserve_data_block(cow_inode, index, blk_addr,
+                                       node_changed);
+
+       /* Look for the block in COW inode first */
+       err = __find_data_block(cow_inode, index, blk_addr);
+       if (err)
+               return err;
+       else if (*blk_addr != NULL_ADDR)
+               return 0;
+
+       /* Look for the block in the original inode */
+       err = __find_data_block(inode, index, &ori_blk_addr);
+       if (err)
+               return err;
+
+       /* Finally, we should reserve a new block in COW inode for the update */
+       err = __reserve_data_block(cow_inode, index, blk_addr, node_changed);
+       if (err)
+               return err;
+
+       if (ori_blk_addr != NULL_ADDR)
+               *blk_addr = ori_blk_addr;
+       return 0;
+}
+
 static int f2fs_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, struct page **pagep, void **fsdata)
 {
@@ -3321,7 +3443,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct page *page = NULL;
        pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
-       bool need_balance = false, drop_atomic = false;
+       bool need_balance = false;
        block_t blkaddr = NULL_ADDR;
        int err = 0;
 
@@ -3332,14 +3454,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
                goto fail;
        }
 
-       if ((f2fs_is_atomic_file(inode) &&
-                       !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
-                       is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
-               err = -ENOMEM;
-               drop_atomic = true;
-               goto fail;
-       }
-
        /*
         * We should check this at this moment to avoid deadlock on inode page
         * and #0 page. The locking rule for inline_data conversion should be:
@@ -3387,7 +3501,11 @@ repeat:
 
        *pagep = page;
 
-       err = prepare_write_begin(sbi, page, pos, len,
+       if (f2fs_is_atomic_file(inode))
+               err = prepare_atomic_write_begin(sbi, page, pos, len,
+                                       &blkaddr, &need_balance);
+       else
+               err = prepare_write_begin(sbi, page, pos, len,
                                        &blkaddr, &need_balance);
        if (err)
                goto fail;
@@ -3443,8 +3561,6 @@ repeat:
 fail:
        f2fs_put_page(page, 1);
        f2fs_write_failed(inode, pos + len);
-       if (drop_atomic)
-               f2fs_drop_inmem_pages_all(sbi, false);
        return err;
 }
 
@@ -3488,8 +3604,12 @@ static int f2fs_write_end(struct file *file,
        set_page_dirty(page);
 
        if (pos + copied > i_size_read(inode) &&
-           !f2fs_verity_in_progress(inode))
+           !f2fs_verity_in_progress(inode)) {
                f2fs_i_size_write(inode, pos + copied);
+               if (f2fs_is_atomic_file(inode))
+                       f2fs_i_size_write(F2FS_I(inode)->cow_inode,
+                                       pos + copied);
+       }
 unlock_out:
        f2fs_put_page(page, 1);
        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
@@ -3522,9 +3642,6 @@ void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
                        inode->i_ino == F2FS_COMPRESS_INO(sbi))
                clear_page_private_data(&folio->page);
 
-       if (page_private_atomic(&folio->page))
-               return f2fs_drop_inmem_page(inode, &folio->page);
-
        folio_detach_private(folio);
 }
 
@@ -3536,10 +3653,6 @@ bool f2fs_release_folio(struct folio *folio, gfp_t wait)
        if (folio_test_dirty(folio))
                return false;
 
-       /* This is atomic written page, keep Private */
-       if (page_private_atomic(&folio->page))
-               return false;
-
        sbi = F2FS_M_SB(folio->mapping);
        if (test_opt(sbi, COMPRESS_CACHE)) {
                struct inode *inode = folio->mapping->host;
@@ -3565,18 +3678,6 @@ static bool f2fs_dirty_data_folio(struct address_space *mapping,
                folio_mark_uptodate(folio);
        BUG_ON(folio_test_swapcache(folio));
 
-       if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
-               if (!page_private_atomic(&folio->page)) {
-                       f2fs_register_inmem_page(inode, &folio->page);
-                       return true;
-               }
-               /*
-                * Previously, this page has been registered, we just
-                * return here.
-                */
-               return false;
-       }
-
        if (!folio_test_dirty(folio)) {
                filemap_dirty_folio(mapping, folio);
                f2fs_update_dirty_folio(inode, folio);
@@ -3656,42 +3757,14 @@ out:
 int f2fs_migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page, enum migrate_mode mode)
 {
-       int rc, extra_count;
-       struct f2fs_inode_info *fi = F2FS_I(mapping->host);
-       bool atomic_written = page_private_atomic(page);
+       int rc, extra_count = 0;
 
        BUG_ON(PageWriteback(page));
 
-       /* migrating an atomic written page is safe with the inmem_lock hold */
-       if (atomic_written) {
-               if (mode != MIGRATE_SYNC)
-                       return -EBUSY;
-               if (!mutex_trylock(&fi->inmem_lock))
-                       return -EAGAIN;
-       }
-
-       /* one extra reference was held for atomic_write page */
-       extra_count = atomic_written ? 1 : 0;
        rc = migrate_page_move_mapping(mapping, newpage,
                                page, extra_count);
-       if (rc != MIGRATEPAGE_SUCCESS) {
-               if (atomic_written)
-                       mutex_unlock(&fi->inmem_lock);
+       if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
-       }
-
-       if (atomic_written) {
-               struct inmem_pages *cur;
-
-               list_for_each_entry(cur, &fi->inmem_pages, list)
-                       if (cur->page == page) {
-                               cur->page = newpage;
-                               break;
-                       }
-               mutex_unlock(&fi->inmem_lock);
-               put_page(page);
-               get_page(newpage);
-       }
 
        /* guarantee to start from no stale private field */
        set_page_private(newpage, 0);
index fcdf253..c92625e 100644 (file)
@@ -91,11 +91,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
        si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
        si->nquota_files = sbi->nquota_files;
        si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
-       si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
        si->aw_cnt = sbi->atomic_files;
-       si->vw_cnt = atomic_read(&sbi->vw_cnt);
        si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);
-       si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);
        si->nr_dio_read = get_pages(sbi, F2FS_DIO_READ);
        si->nr_dio_write = get_pages(sbi, F2FS_DIO_WRITE);
        si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
@@ -167,8 +164,6 @@ static void update_general_status(struct f2fs_sb_info *sbi)
        si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID];
        si->io_skip_bggc = sbi->io_skip_bggc;
        si->other_skip_bggc = sbi->other_skip_bggc;
-       si->skipped_atomic_files[BG_GC] = sbi->skipped_atomic_files[BG_GC];
-       si->skipped_atomic_files[FG_GC] = sbi->skipped_atomic_files[FG_GC];
        si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
                * 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
                / 2;
@@ -296,7 +291,6 @@ get_cache:
                                sizeof(struct nat_entry);
        si->cache_mem += NM_I(sbi)->nat_cnt[DIRTY_NAT] *
                                sizeof(struct nat_entry_set);
-       si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
        for (i = 0; i < MAX_INO_ENTRY; i++)
                si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
        si->cache_mem += atomic_read(&sbi->total_ext_tree) *
@@ -491,10 +485,6 @@ static int stat_show(struct seq_file *s, void *v)
                                si->bg_data_blks);
                seq_printf(s, "  - node blocks : %d (%d)\n", si->node_blks,
                                si->bg_node_blks);
-               seq_printf(s, "Skipped : atomic write %llu (%llu)\n",
-                               si->skipped_atomic_files[BG_GC] +
-                               si->skipped_atomic_files[FG_GC],
-                               si->skipped_atomic_files[BG_GC]);
                seq_printf(s, "BG skip : IO: %u, Other: %u\n",
                                si->io_skip_bggc, si->other_skip_bggc);
                seq_puts(s, "\nExtent Cache:\n");
@@ -519,10 +509,8 @@ static int stat_show(struct seq_file *s, void *v)
                           si->flush_list_empty,
                           si->nr_discarding, si->nr_discarded,
                           si->nr_discard_cmd, si->undiscard_blks);
-               seq_printf(s, "  - inmem: %4d, atomic IO: %4d (Max. %4d), "
-                       "volatile IO: %4d (Max. %4d)\n",
-                          si->inmem_pages, si->aw_cnt, si->max_aw_cnt,
-                          si->vw_cnt, si->max_vw_cnt);
+               seq_printf(s, "  - atomic IO: %4d (Max. %4d)\n",
+                          si->aw_cnt, si->max_aw_cnt);
                seq_printf(s, "  - compress: %4d, hit:%8d\n", si->compress_pages, si->compress_page_hit);
                seq_printf(s, "  - nodes: %4d in %4d\n",
                           si->ndirty_node, si->node_pages);
@@ -623,9 +611,7 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
        for (i = META_CP; i < META_MAX; i++)
                atomic_set(&sbi->meta_count[i], 0);
 
-       atomic_set(&sbi->vw_cnt, 0);
        atomic_set(&sbi->max_aw_cnt, 0);
-       atomic_set(&sbi->max_vw_cnt, 0);
 
        raw_spin_lock_irqsave(&f2fs_stat_lock, flags);
        list_add_tail(&si->stat_list, &f2fs_stat_list);
index a0e5193..d5bd793 100644 (file)
@@ -82,7 +82,8 @@ int f2fs_init_casefolded_name(const struct inode *dir,
 #if IS_ENABLED(CONFIG_UNICODE)
        struct super_block *sb = dir->i_sb;
 
-       if (IS_CASEFOLDED(dir)) {
+       if (IS_CASEFOLDED(dir) &&
+           !is_dot_dotdot(fname->usr_fname->name, fname->usr_fname->len)) {
                fname->cf_name.name = f2fs_kmem_cache_alloc(f2fs_cf_name_slab,
                                        GFP_NOFS, false, F2FS_SB(sb));
                if (!fname->cf_name.name)
index 10d1f13..d9bbecd 100644 (file)
@@ -509,11 +509,11 @@ struct f2fs_filename {
 #if IS_ENABLED(CONFIG_UNICODE)
        /*
         * For casefolded directories: the casefolded name, but it's left NULL
-        * if the original name is not valid Unicode, if the directory is both
-        * casefolded and encrypted and its encryption key is unavailable, or if
-        * the filesystem is doing an internal operation where usr_fname is also
-        * NULL.  In all these cases we fall back to treating the name as an
-        * opaque byte sequence.
+        * if the original name is not valid Unicode, if the original name is
+        * "." or "..", if the directory is both casefolded and encrypted and
+        * its encryption key is unavailable, or if the filesystem is doing an
+        * internal operation where usr_fname is also NULL.  In all these cases
+        * we fall back to treating the name as an opaque byte sequence.
         */
        struct fscrypt_str cf_name;
 #endif
@@ -579,8 +579,8 @@ enum {
 /* maximum retry quota flush count */
 #define DEFAULT_RETRY_QUOTA_FLUSH_COUNT                8
 
-/* maximum retry of EIO'ed meta page */
-#define MAX_RETRY_META_PAGE_EIO                        100
+/* maximum retry of EIO'ed page */
+#define MAX_RETRY_PAGE_EIO                     100
 
 #define F2FS_LINK_MAX  0xffffffff      /* maximum link count per file */
 
@@ -717,7 +717,6 @@ enum {
 
 enum {
        GC_FAILURE_PIN,
-       GC_FAILURE_ATOMIC,
        MAX_GC_FAILURE
 };
 
@@ -739,8 +738,6 @@ enum {
        FI_UPDATE_WRITE,        /* inode has in-place-update data */
        FI_NEED_IPU,            /* used for ipu per file */
        FI_ATOMIC_FILE,         /* indicate atomic file */
-       FI_ATOMIC_COMMIT,       /* indicate the state of atomical committing */
-       FI_VOLATILE_FILE,       /* indicate volatile file */
        FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */
        FI_DROP_CACHE,          /* drop dirty page cache */
        FI_DATA_EXIST,          /* indicate data exists */
@@ -753,7 +750,6 @@ enum {
        FI_EXTRA_ATTR,          /* indicate file has extra attribute */
        FI_PROJ_INHERIT,        /* indicate file inherits projectid */
        FI_PIN_FILE,            /* indicate file should not be gced */
-       FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
        FI_VERITY_IN_PROGRESS,  /* building fs-verity Merkle tree */
        FI_COMPRESSED_FILE,     /* indicate file's data can be compressed */
        FI_COMPRESS_CORRUPT,    /* indicate compressed cluster is corrupted */
@@ -795,11 +791,9 @@ struct f2fs_inode_info {
 #endif
        struct list_head dirty_list;    /* dirty list for dirs and files */
        struct list_head gdirty_list;   /* linked in global dirty list */
-       struct list_head inmem_ilist;   /* list for inmem inodes */
-       struct list_head inmem_pages;   /* inmemory pages managed by f2fs */
-       struct task_struct *inmem_task; /* store inmemory task */
-       struct mutex inmem_lock;        /* lock for inmemory pages */
+       struct task_struct *atomic_write_task;  /* store atomic write task */
        struct extent_tree *extent_tree;        /* cached extent_tree entry */
+       struct inode *cow_inode;        /* copy-on-write inode for atomic write */
 
        /* avoid racing between foreground op and gc */
        struct f2fs_rwsem i_gc_rwsem[2];
@@ -1093,7 +1087,6 @@ enum count_type {
        F2FS_DIRTY_QDATA,
        F2FS_DIRTY_NODES,
        F2FS_DIRTY_META,
-       F2FS_INMEM_PAGES,
        F2FS_DIRTY_IMETA,
        F2FS_WB_CP_DATA,
        F2FS_WB_DATA,
@@ -1118,16 +1111,12 @@ enum count_type {
  */
 #define PAGE_TYPE_OF_BIO(type) ((type) > META ? META : (type))
 enum page_type {
-       DATA,
-       NODE,
+       DATA = 0,
+       NODE = 1,       /* should not change this */
        META,
        NR_PAGE_TYPE,
        META_FLUSH,
-       INMEM,          /* the below types are used by tracepoints only. */
-       INMEM_DROP,
-       INMEM_INVALIDATE,
-       INMEM_REVOKE,
-       IPU,
+       IPU,            /* the below types are used by tracepoints only. */
        OPU,
 };
 
@@ -1277,6 +1266,15 @@ struct atgc_management {
        unsigned long long age_threshold;       /* age threshold */
 };
 
+struct f2fs_gc_control {
+       unsigned int victim_segno;      /* target victim segment number */
+       int init_gc_type;               /* FG_GC or BG_GC */
+       bool no_bg_gc;                  /* check the space and stop bg_gc */
+       bool should_migrate_blocks;     /* should migrate blocks */
+       bool err_gc_skipped;            /* return EAGAIN if GC skipped */
+       unsigned int nr_free_secs;      /* # of free sections to do GC */
+};
+
 /* For s_flag in struct f2fs_sb_info */
 enum {
        SBI_IS_DIRTY,                           /* dirty flag for checkpoint */
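
The new f2fs_gc_control structure replaces the four positional arguments of f2fs_gc(); the converted call sites later in this diff (expand_inode_data(), f2fs_ioc_gc(), __f2fs_ioc_gc_range(), f2fs_ioc_flush_device()) all build it with designated initializers. A standalone sketch of that calling shape, with a local copy of the structure, a stub in place of f2fs_gc(), and the NULL_SEGNO/FG_GC/BG_GC values assumed here rather than taken from the f2fs headers:

#include <stdbool.h>
#include <stdio.h>

/* local copy of the control structure, for illustration only */
struct f2fs_gc_control {
        unsigned int victim_segno;
        int init_gc_type;
        bool no_bg_gc;
        bool should_migrate_blocks;
        bool err_gc_skipped;
        unsigned int nr_free_secs;
};

enum { BG_GC = 0, FG_GC = 1 };          /* assumption: mirrors f2fs's GC types */
#define NULL_SEGNO  ((unsigned int)~0)  /* assumption: "no specific victim" */

/* stub standing in for f2fs_gc(sbi, &gc_control) */
static int do_gc(const struct f2fs_gc_control *gc)
{
        printf("gc: type=%d victim=%u err_gc_skipped=%d\n",
               gc->init_gc_type, gc->victim_segno, gc->err_gc_skipped);
        return 0;
}

int main(void)
{
        /* mirrors the foreground-GC call site in expand_inode_data() below */
        struct f2fs_gc_control gc_control = {
                .victim_segno = NULL_SEGNO,
                .init_gc_type = FG_GC,
                .should_migrate_blocks = false,
                .err_gc_skipped = true,
                .nr_free_secs = 0,
        };

        return do_gc(&gc_control);
}
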
@@ -1615,8 +1613,8 @@ struct f2fs_sb_info {
        /* keep migration IO order for LFS mode */
        struct f2fs_rwsem io_order_lock;
        mempool_t *write_io_dummy;              /* Dummy pages */
-       pgoff_t metapage_eio_ofs;               /* EIO page offset */
-       int metapage_eio_cnt;                   /* EIO count */
+       pgoff_t page_eio_ofs[NR_PAGE_TYPE];     /* EIO page offset */
+       int page_eio_cnt[NR_PAGE_TYPE];         /* EIO count */
 
        /* for checkpoint */
        struct f2fs_checkpoint *ckpt;           /* raw checkpoint pointer */
@@ -1719,7 +1717,6 @@ struct f2fs_sb_info {
 
        /* for skip statistic */
        unsigned int atomic_files;              /* # of opened atomic file */
-       unsigned long long skipped_atomic_files[2];     /* FG_GC and BG_GC */
        unsigned long long skipped_gc_rwsem;            /* FG_GC only */
 
        /* threshold for gc trials on pinned files */
@@ -1750,9 +1747,7 @@ struct f2fs_sb_info {
        atomic_t inline_dir;                    /* # of inline_dentry inodes */
        atomic_t compr_inode;                   /* # of compressed inodes */
        atomic64_t compr_blocks;                /* # of compressed blocks */
-       atomic_t vw_cnt;                        /* # of volatile writes */
        atomic_t max_aw_cnt;                    /* max # of atomic writes */
-       atomic_t max_vw_cnt;                    /* max # of volatile writes */
        unsigned int io_skip_bggc;              /* skip background gc for in-flight IO */
        unsigned int other_skip_bggc;           /* skip background gc for other reasons */
        unsigned int ndirty_inode[NR_INODE_TYPE];       /* # of dirty inodes */
@@ -1763,7 +1758,7 @@ struct f2fs_sb_info {
        unsigned int data_io_flag;
        unsigned int node_io_flag;
 
-       /* For sysfs suppport */
+       /* For sysfs support */
        struct kobject s_kobj;                  /* /sys/fs/f2fs/<devname> */
        struct completion s_kobj_unregister;
 
@@ -2606,11 +2601,17 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
 {
        spin_lock(&sbi->stat_lock);
 
-       f2fs_bug_on(sbi, !sbi->total_valid_block_count);
-       f2fs_bug_on(sbi, !sbi->total_valid_node_count);
+       if (unlikely(!sbi->total_valid_block_count ||
+                       !sbi->total_valid_node_count)) {
+               f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u",
+                         sbi->total_valid_block_count,
+                         sbi->total_valid_node_count);
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+       } else {
+               sbi->total_valid_block_count--;
+               sbi->total_valid_node_count--;
+       }
 
-       sbi->total_valid_node_count--;
-       sbi->total_valid_block_count--;
        if (sbi->reserved_blocks &&
                sbi->current_reserved_blocks < sbi->reserved_blocks)
                sbi->current_reserved_blocks++;
@@ -3173,6 +3174,10 @@ static inline int inline_xattr_size(struct inode *inode)
        return 0;
 }
 
+/*
+ * Notice: checking the inline_data flag without the inode page lock is unsafe.
+ * It could be changed at any time by f2fs_convert_inline_page().
+ */
 static inline int f2fs_has_inline_data(struct inode *inode)
 {
        return is_inode_flag_set(inode, FI_INLINE_DATA);
@@ -3203,16 +3208,6 @@ static inline bool f2fs_is_atomic_file(struct inode *inode)
        return is_inode_flag_set(inode, FI_ATOMIC_FILE);
 }
 
-static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
-{
-       return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
-}
-
-static inline bool f2fs_is_volatile_file(struct inode *inode)
-{
-       return is_inode_flag_set(inode, FI_VOLATILE_FILE);
-}
-
 static inline bool f2fs_is_first_block_written(struct inode *inode)
 {
        return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
@@ -3445,6 +3440,8 @@ void f2fs_handle_failed_inode(struct inode *inode);
 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
                                                        bool hot, bool set);
 struct dentry *f2fs_get_parent(struct dentry *child);
+int f2fs_get_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+                    struct inode **new_inode);
 
 /*
  * dir.c
@@ -3580,11 +3577,8 @@ void f2fs_destroy_node_manager_caches(void);
  * segment.c
  */
 bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
-void f2fs_register_inmem_page(struct inode *inode, struct page *page);
-void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
-void f2fs_drop_inmem_pages(struct inode *inode);
-void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
-int f2fs_commit_inmem_pages(struct inode *inode);
+int f2fs_commit_atomic_write(struct inode *inode);
+void f2fs_abort_atomic_write(struct inode *inode, bool clean);
 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
@@ -3726,6 +3720,7 @@ int f2fs_init_bio_entry_cache(void);
 void f2fs_destroy_bio_entry_cache(void);
 void f2fs_submit_bio(struct f2fs_sb_info *sbi,
                                struct bio *bio, enum page_type type);
+int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi);
 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
                                struct inode *inode, struct page *page,
@@ -3787,8 +3782,7 @@ extern const struct iomap_ops f2fs_iomap_ops;
 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
-int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
-                       unsigned int segno);
+int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control);
 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
 int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
 int __init f2fs_create_garbage_collection_cache(void);
@@ -3816,7 +3810,6 @@ struct f2fs_stat_info {
        int ext_tree, zombie_tree, ext_node;
        int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
        int ndirty_data, ndirty_qdata;
-       int inmem_pages;
        unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
        int nats, dirty_nats, sits, dirty_sits;
        int free_nids, avail_nids, alloc_nids;
@@ -3834,7 +3827,7 @@ struct f2fs_stat_info {
        int inline_xattr, inline_inode, inline_dir, append, update, orphans;
        int compr_inode;
        unsigned long long compr_blocks;
-       int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
+       int aw_cnt, max_aw_cnt;
        unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
        unsigned int bimodal, avg_vblocks;
        int util_free, util_valid, util_invalid;
@@ -3846,7 +3839,6 @@ struct f2fs_stat_info {
        int bg_node_segs, bg_data_segs;
        int tot_blks, data_blks, node_blks;
        int bg_data_blks, bg_node_blks;
-       unsigned long long skipped_atomic_files[2];
        int curseg[NR_CURSEG_TYPE];
        int cursec[NR_CURSEG_TYPE];
        int curzone[NR_CURSEG_TYPE];
@@ -3946,17 +3938,6 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
                if (cur > max)                                          \
                        atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
        } while (0)
-#define stat_inc_volatile_write(inode)                                 \
-               (atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
-#define stat_dec_volatile_write(inode)                                 \
-               (atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
-#define stat_update_max_volatile_write(inode)                          \
-       do {                                                            \
-               int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);       \
-               int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);   \
-               if (cur > max)                                          \
-                       atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur); \
-       } while (0)
 #define stat_inc_seg_count(sbi, type, gc_type)                         \
        do {                                                            \
                struct f2fs_stat_info *si = F2FS_STAT(sbi);             \
@@ -4018,9 +3999,6 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
 #define stat_add_compr_blocks(inode, blocks)           do { } while (0)
 #define stat_sub_compr_blocks(inode, blocks)           do { } while (0)
 #define stat_update_max_atomic_write(inode)            do { } while (0)
-#define stat_inc_volatile_write(inode)                 do { } while (0)
-#define stat_dec_volatile_write(inode)                 do { } while (0)
-#define stat_update_max_volatile_write(inode)          do { } while (0)
 #define stat_inc_meta_count(sbi, blkaddr)              do { } while (0)
 #define stat_inc_seg_type(sbi, curseg)                 do { } while (0)
 #define stat_inc_block_count(sbi, curseg)              do { } while (0)
@@ -4053,6 +4031,7 @@ extern struct kmem_cache *f2fs_inode_entry_slab;
  * inline.c
  */
 bool f2fs_may_inline_data(struct inode *inode);
+bool f2fs_sanity_check_inline_data(struct inode *inode);
 bool f2fs_may_inline_dentry(struct inode *inode);
 void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
 void f2fs_truncate_inline_inode(struct inode *inode,
@@ -4422,8 +4401,7 @@ static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
 static inline bool f2fs_may_compress(struct inode *inode)
 {
        if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
-                               f2fs_is_atomic_file(inode) ||
-                               f2fs_is_volatile_file(inode))
+                               f2fs_is_atomic_file(inode))
                return false;
        return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
 }
@@ -4431,8 +4409,8 @@ static inline bool f2fs_may_compress(struct inode *inode)
 static inline void f2fs_i_compr_blocks_update(struct inode *inode,
                                                u64 blocks, bool add)
 {
-       int diff = F2FS_I(inode)->i_cluster_size - blocks;
        struct f2fs_inode_info *fi = F2FS_I(inode);
+       int diff = fi->i_cluster_size - blocks;
 
        /* don't update i_compr_blocks if saved blocks were released */
        if (!add && !atomic_read(&fi->i_compr_blocks))
@@ -4540,6 +4518,21 @@ static inline void f2fs_io_schedule_timeout(long timeout)
        io_schedule_timeout(timeout);
 }
 
+static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi, pgoff_t ofs,
+                                       enum page_type type)
+{
+       if (unlikely(f2fs_cp_error(sbi)))
+               return;
+
+       if (ofs == sbi->page_eio_ofs[type]) {
+               if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO)
+                       set_ckpt_flags(sbi, CP_ERROR_FLAG);
+       } else {
+               sbi->page_eio_ofs[type] = ofs;
+               sbi->page_eio_cnt[type] = 0;
+       }
+}
+
 #define EFSBADCRC      EBADMSG         /* Bad CRC detected */
 #define EFSCORRUPTED   EUCLEAN         /* Filesystem is corrupted */
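
f2fs_handle_page_eio() above only escalates to a checkpoint error once the same offset of the same page type has failed MAX_RETRY_PAGE_EIO times in a row; an EIO at a different offset resets the counter. A standalone sketch of that counter logic, with the f2fs_sb_info fields turned into plain globals and the CP_ERROR_FLAG escalation reduced to a boolean return (the early f2fs_cp_error() bail-out is omitted):

#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRY_PAGE_EIO 100
enum page_type { DATA, NODE, META, NR_PAGE_TYPE };

static unsigned long page_eio_ofs[NR_PAGE_TYPE];
static int page_eio_cnt[NR_PAGE_TYPE];

/* returns true once repeated EIO at one offset should stop the filesystem */
bool handle_page_eio(unsigned long ofs, enum page_type type)
{
        if (ofs == page_eio_ofs[type]) {
                if (page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO)
                        return true;    /* kernel: set_ckpt_flags(sbi, CP_ERROR_FLAG) */
        } else {
                page_eio_ofs[type] = ofs;
                page_eio_cnt[type] = 0;
        }
        return false;
}

int main(void)
{
        bool stop = false;

        for (int i = 0; !stop && i < MAX_RETRY_PAGE_EIO + 2; i++)
                stop = handle_page_eio(42, META);
        printf("escalated after repeated EIO: %s\n", stop ? "yes" : "no");
        return 0;
}
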
 
index 100637b..bd14cef 100644 (file)
@@ -372,7 +372,8 @@ sync_nodes:
        f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
        clear_inode_flag(inode, FI_APPEND_WRITE);
 flush_out:
-       if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
+       if ((!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ||
+           (atomic && !test_opt(sbi, NOBARRIER) && f2fs_sb_has_blkzoned(sbi)))
                ret = f2fs_issue_flush(sbi, inode->i_ino);
        if (!ret) {
                f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
@@ -1437,11 +1438,19 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
                        ret = -ENOSPC;
                        break;
                }
-               if (dn->data_blkaddr != NEW_ADDR) {
-                       f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
-                       dn->data_blkaddr = NEW_ADDR;
-                       f2fs_set_data_blkaddr(dn);
+
+               if (dn->data_blkaddr == NEW_ADDR)
+                       continue;
+
+               if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
+                                       DATA_GENERIC_ENHANCE)) {
+                       ret = -EFSCORRUPTED;
+                       break;
                }
+
+               f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
+               dn->data_blkaddr = NEW_ADDR;
+               f2fs_set_data_blkaddr(dn);
        }
 
        f2fs_update_extent_cache_range(dn, start, 0, index - start);
@@ -1638,6 +1647,11 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
        struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
                        .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
                        .m_may_create = true };
+       struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
+                       .init_gc_type = FG_GC,
+                       .should_migrate_blocks = false,
+                       .err_gc_skipped = true,
+                       .nr_free_secs = 0 };
        pgoff_t pg_start, pg_end;
        loff_t new_size = i_size_read(inode);
        loff_t off_end;
@@ -1675,8 +1689,8 @@ next_alloc:
                if (has_not_enough_free_secs(sbi, 0,
                        GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
                        f2fs_down_write(&sbi->gc_lock);
-                       err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
-                       if (err && err != -ENODATA && err != -EAGAIN)
+                       err = f2fs_gc(sbi, &gc_control);
+                       if (err && err != -ENODATA)
                                goto out_err;
                }
 
@@ -1766,6 +1780,10 @@ static long f2fs_fallocate(struct file *file, int mode,
 
        inode_lock(inode);
 
+       ret = file_modified(file);
+       if (ret)
+               goto out;
+
        if (mode & FALLOC_FL_PUNCH_HOLE) {
                if (offset >= inode->i_size)
                        goto out;
@@ -1804,16 +1822,8 @@ static int f2fs_release_file(struct inode *inode, struct file *filp)
                        atomic_read(&inode->i_writecount) != 1)
                return 0;
 
-       /* some remained atomic pages should discarded */
        if (f2fs_is_atomic_file(inode))
-               f2fs_drop_inmem_pages(inode);
-       if (f2fs_is_volatile_file(inode)) {
-               set_inode_flag(inode, FI_DROP_CACHE);
-               filemap_fdatawrite(inode->i_mapping);
-               clear_inode_flag(inode, FI_DROP_CACHE);
-               clear_inode_flag(inode, FI_VOLATILE_FILE);
-               stat_dec_volatile_write(inode);
-       }
+               f2fs_abort_atomic_write(inode, true);
        return 0;
 }
 
@@ -1828,8 +1838,8 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
         * before dropping file lock, it needs to do in ->flush.
         */
        if (f2fs_is_atomic_file(inode) &&
-                       F2FS_I(inode)->inmem_task == current)
-               f2fs_drop_inmem_pages(inode);
+                       F2FS_I(inode)->atomic_write_task == current)
+               f2fs_abort_atomic_write(inode, true);
        return 0;
 }
 
@@ -1992,6 +2002,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
        struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct inode *pinode;
        int ret;
 
        if (!inode_owner_or_capable(mnt_userns, inode))
@@ -2014,44 +2025,55 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
                goto out;
        }
 
-       if (f2fs_is_atomic_file(inode)) {
-               if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
-                       ret = -EINVAL;
+       if (f2fs_is_atomic_file(inode))
                goto out;
-       }
 
        ret = f2fs_convert_inline_inode(inode);
        if (ret)
                goto out;
 
-       f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+       f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
 
        /*
         * Should wait end_io to count F2FS_WB_CP_DATA correctly by
         * f2fs_is_atomic_file.
         */
        if (get_dirty_pages(inode))
-               f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
+               f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u",
                          inode->i_ino, get_dirty_pages(inode));
        ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
        if (ret) {
-               f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+               f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+               goto out;
+       }
+
+       /* Create a COW inode for atomic write */
+       pinode = f2fs_iget(inode->i_sb, fi->i_pino);
+       if (IS_ERR(pinode)) {
+               f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+               ret = PTR_ERR(pinode);
+               goto out;
+       }
+
+       ret = f2fs_get_tmpfile(mnt_userns, pinode, &fi->cow_inode);
+       iput(pinode);
+       if (ret) {
+               f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
                goto out;
        }
+       f2fs_i_size_write(fi->cow_inode, i_size_read(inode));
 
        spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
-       if (list_empty(&fi->inmem_ilist))
-               list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
        sbi->atomic_files++;
        spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
 
-       /* add inode in inmem_list first and set atomic_file */
        set_inode_flag(inode, FI_ATOMIC_FILE);
-       clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
-       f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+       set_inode_flag(fi->cow_inode, FI_ATOMIC_FILE);
+       clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
+       f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
 
-       f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
-       F2FS_I(inode)->inmem_task = current;
+       f2fs_update_time(sbi, REQ_TIME);
+       fi->atomic_write_task = current;
        stat_update_max_atomic_write(inode);
 out:
        inode_unlock(inode);
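
Conceptually, the atomic-write rework stages updates in a separate unlinked inode created beside the original file (the f2fs_get_tmpfile() call above) and merges them back only on commit. A userspace analogue of just the staging step, using the generic O_TMPFILE interface rather than anything f2fs-specific; the commit and abort halves are out of scope here and error handling is trimmed:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* unlinked scratch file in the same directory as the data being updated */
        int cow_fd = open(".", O_TMPFILE | O_RDWR, 0600);

        if (cow_fd < 0) {
                perror("O_TMPFILE");
                return 1;
        }

        /* stage the new contents here; nothing is visible under any name yet */
        if (write(cow_fd, "staged update\n", 14) != 14)
                perror("write");

        /* on abort, closing simply discards the staged data */
        close(cow_fd);
        return 0;
}
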
@@ -2076,127 +2098,20 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
 
        inode_lock(inode);
 
-       if (f2fs_is_volatile_file(inode)) {
-               ret = -EINVAL;
-               goto err_out;
-       }
-
        if (f2fs_is_atomic_file(inode)) {
-               ret = f2fs_commit_inmem_pages(inode);
+               ret = f2fs_commit_atomic_write(inode);
                if (ret)
-                       goto err_out;
+                       goto unlock_out;
 
                ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
                if (!ret)
-                       f2fs_drop_inmem_pages(inode);
+                       f2fs_abort_atomic_write(inode, false);
        } else {
                ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
        }
-err_out:
-       if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
-               clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
-               ret = -EINVAL;
-       }
-       inode_unlock(inode);
-       mnt_drop_write_file(filp);
-       return ret;
-}
-
-static int f2fs_ioc_start_volatile_write(struct file *filp)
-{
-       struct inode *inode = file_inode(filp);
-       struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
-       int ret;
-
-       if (!inode_owner_or_capable(mnt_userns, inode))
-               return -EACCES;
-
-       if (!S_ISREG(inode->i_mode))
-               return -EINVAL;
-
-       ret = mnt_want_write_file(filp);
-       if (ret)
-               return ret;
-
-       inode_lock(inode);
-
-       if (f2fs_is_volatile_file(inode))
-               goto out;
-
-       ret = f2fs_convert_inline_inode(inode);
-       if (ret)
-               goto out;
-
-       stat_inc_volatile_write(inode);
-       stat_update_max_volatile_write(inode);
-
-       set_inode_flag(inode, FI_VOLATILE_FILE);
-       f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
-out:
-       inode_unlock(inode);
-       mnt_drop_write_file(filp);
-       return ret;
-}
-
-static int f2fs_ioc_release_volatile_write(struct file *filp)
-{
-       struct inode *inode = file_inode(filp);
-       struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
-       int ret;
-
-       if (!inode_owner_or_capable(mnt_userns, inode))
-               return -EACCES;
-
-       ret = mnt_want_write_file(filp);
-       if (ret)
-               return ret;
-
-       inode_lock(inode);
-
-       if (!f2fs_is_volatile_file(inode))
-               goto out;
-
-       if (!f2fs_is_first_block_written(inode)) {
-               ret = truncate_partial_data_page(inode, 0, true);
-               goto out;
-       }
-
-       ret = punch_hole(inode, 0, F2FS_BLKSIZE);
-out:
-       inode_unlock(inode);
-       mnt_drop_write_file(filp);
-       return ret;
-}
-
-static int f2fs_ioc_abort_volatile_write(struct file *filp)
-{
-       struct inode *inode = file_inode(filp);
-       struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
-       int ret;
-
-       if (!inode_owner_or_capable(mnt_userns, inode))
-               return -EACCES;
-
-       ret = mnt_want_write_file(filp);
-       if (ret)
-               return ret;
-
-       inode_lock(inode);
-
-       if (f2fs_is_atomic_file(inode))
-               f2fs_drop_inmem_pages(inode);
-       if (f2fs_is_volatile_file(inode)) {
-               clear_inode_flag(inode, FI_VOLATILE_FILE);
-               stat_dec_volatile_write(inode);
-               ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
-       }
-
-       clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
-
+unlock_out:
        inode_unlock(inode);
-
        mnt_drop_write_file(filp);
-       f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
        return ret;
 }
 
@@ -2437,6 +2352,10 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
 {
        struct inode *inode = file_inode(filp);
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
+                       .no_bg_gc = false,
+                       .should_migrate_blocks = false,
+                       .nr_free_secs = 0 };
        __u32 sync;
        int ret;
 
@@ -2462,7 +2381,9 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
                f2fs_down_write(&sbi->gc_lock);
        }
 
-       ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
+       gc_control.init_gc_type = sync ? FG_GC : BG_GC;
+       gc_control.err_gc_skipped = sync;
+       ret = f2fs_gc(sbi, &gc_control);
 out:
        mnt_drop_write_file(filp);
        return ret;
@@ -2471,6 +2392,12 @@ out:
 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
+       struct f2fs_gc_control gc_control = {
+                       .init_gc_type = range->sync ? FG_GC : BG_GC,
+                       .no_bg_gc = false,
+                       .should_migrate_blocks = false,
+                       .err_gc_skipped = range->sync,
+                       .nr_free_secs = 0 };
        u64 end;
        int ret;
 
@@ -2498,8 +2425,8 @@ do_more:
                f2fs_down_write(&sbi->gc_lock);
        }
 
-       ret = f2fs_gc(sbi, range->sync, true, false,
-                               GET_SEGNO(sbi, range->start));
+       gc_control.victim_segno = GET_SEGNO(sbi, range->start);
+       ret = f2fs_gc(sbi, &gc_control);
        if (ret) {
                if (ret == -EBUSY)
                        ret = -EAGAIN;
@@ -2674,6 +2601,7 @@ do_map:
                        }
 
                        set_page_dirty(page);
+                       set_page_private_gcing(page);
                        f2fs_put_page(page, 1);
 
                        idx++;
@@ -2913,6 +2841,11 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
        unsigned int start_segno = 0, end_segno = 0;
        unsigned int dev_start_segno = 0, dev_end_segno = 0;
        struct f2fs_flush_device range;
+       struct f2fs_gc_control gc_control = {
+                       .init_gc_type = FG_GC,
+                       .should_migrate_blocks = true,
+                       .err_gc_skipped = true,
+                       .nr_free_secs = 0 };
        int ret;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -2956,7 +2889,9 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
                sm->last_victim[GC_CB] = end_segno + 1;
                sm->last_victim[GC_GREEDY] = end_segno + 1;
                sm->last_victim[ALLOC_NEXT] = end_segno + 1;
-               ret = f2fs_gc(sbi, true, true, true, start_segno);
+
+               gc_control.victim_segno = start_segno;
+               ret = f2fs_gc(sbi, &gc_control);
                if (ret == -EAGAIN)
                        ret = 0;
                else if (ret < 0)
@@ -3017,7 +2952,7 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
 
        kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
 
-       if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
+       if (projid_eq(kprojid, fi->i_projid))
                return 0;
 
        err = -EPERM;
@@ -3037,7 +2972,7 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
        if (err)
                goto out_unlock;
 
-       F2FS_I(inode)->i_projid = kprojid;
+       fi->i_projid = kprojid;
        inode->i_ctime = current_time(inode);
        f2fs_mark_inode_dirty_sync(inode, true);
 out_unlock:
@@ -3987,7 +3922,7 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
        struct f2fs_inode_info *fi = F2FS_I(inode);
        pgoff_t page_idx = 0, last_idx;
        unsigned int blk_per_seg = sbi->blocks_per_seg;
-       int cluster_size = F2FS_I(inode)->i_cluster_size;
+       int cluster_size = fi->i_cluster_size;
        int count, ret;
 
        if (!f2fs_sb_has_compression(sbi) ||
@@ -4010,11 +3945,6 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
                goto out;
        }
 
-       if (f2fs_is_mmap_file(inode)) {
-               ret = -EBUSY;
-               goto out;
-       }
-
        ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
        if (ret)
                goto out;
@@ -4082,11 +4012,6 @@ static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
                goto out;
        }
 
-       if (f2fs_is_mmap_file(inode)) {
-               ret = -EBUSY;
-               goto out;
-       }
-
        ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
        if (ret)
                goto out;
@@ -4136,11 +4061,9 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        case F2FS_IOC_COMMIT_ATOMIC_WRITE:
                return f2fs_ioc_commit_atomic_write(filp);
        case F2FS_IOC_START_VOLATILE_WRITE:
-               return f2fs_ioc_start_volatile_write(filp);
        case F2FS_IOC_RELEASE_VOLATILE_WRITE:
-               return f2fs_ioc_release_volatile_write(filp);
        case F2FS_IOC_ABORT_VOLATILE_WRITE:
-               return f2fs_ioc_abort_volatile_write(filp);
+               return -EOPNOTSUPP;
        case F2FS_IOC_SHUTDOWN:
                return f2fs_ioc_shutdown(filp, arg);
        case FITRIM:
@@ -4328,17 +4251,39 @@ out:
 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
        struct inode *inode = file_inode(iocb->ki_filp);
+       const loff_t pos = iocb->ki_pos;
        ssize_t ret;
 
        if (!f2fs_is_compress_backend_ready(inode))
                return -EOPNOTSUPP;
 
-       if (f2fs_should_use_dio(inode, iocb, to))
-               return f2fs_dio_read_iter(iocb, to);
+       if (trace_f2fs_dataread_start_enabled()) {
+               char *p = f2fs_kmalloc(F2FS_I_SB(inode), PATH_MAX, GFP_KERNEL);
+               char *path;
+
+               if (!p)
+                       goto skip_read_trace;
+
+               path = dentry_path_raw(file_dentry(iocb->ki_filp), p, PATH_MAX);
+               if (IS_ERR(path)) {
+                       kfree(p);
+                       goto skip_read_trace;
+               }
 
-       ret = filemap_read(iocb, to, 0);
-       if (ret > 0)
-               f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_READ_IO, ret);
+               trace_f2fs_dataread_start(inode, pos, iov_iter_count(to),
+                                       current->pid, path, current->comm);
+               kfree(p);
+       }
+skip_read_trace:
+       if (f2fs_should_use_dio(inode, iocb, to)) {
+               ret = f2fs_dio_read_iter(iocb, to);
+       } else {
+               ret = filemap_read(iocb, to, 0);
+               if (ret > 0)
+                       f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_READ_IO, ret);
+       }
+       if (trace_f2fs_dataread_end_enabled())
+               trace_f2fs_dataread_end(inode, pos, ret);
        return ret;
 }
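
The hunk above builds the path string only when the dataread tracepoint is enabled, so the PATH_MAX allocation and the dentry_path_raw() walk cost nothing on untraced I/O; the write path further down follows the same pattern. A condensed sketch of that guard, reusing only helpers that already appear in this diff; the wrapper function itself is a hypothetical example, not code from the tree:

/*
 * Illustrative sketch of the tracing guard used by the read/write paths in
 * this diff.  The function name is hypothetical; the helpers are the ones
 * the hunks above use.
 */
static void example_trace_read_start(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	char *buf, *path;

	if (!trace_f2fs_dataread_start_enabled())
		return;		/* tracepoint off: no allocation, no path walk */

	buf = f2fs_kmalloc(F2FS_I_SB(inode), PATH_MAX, GFP_KERNEL);
	if (!buf)
		return;		/* tracing is best effort, so just skip */

	path = dentry_path_raw(file_dentry(iocb->ki_filp), buf, PATH_MAX);
	if (!IS_ERR(path))
		trace_f2fs_dataread_start(inode, iocb->ki_pos, iov_iter_count(to),
					  current->pid, path, current->comm);
	kfree(buf);
}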
 
@@ -4630,14 +4575,36 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        /* Possibly preallocate the blocks for the write. */
        target_size = iocb->ki_pos + iov_iter_count(from);
        preallocated = f2fs_preallocate_blocks(iocb, from, dio);
-       if (preallocated < 0)
+       if (preallocated < 0) {
                ret = preallocated;
-       else
+       } else {
+               if (trace_f2fs_datawrite_start_enabled()) {
+                       char *p = f2fs_kmalloc(F2FS_I_SB(inode),
+                                               PATH_MAX, GFP_KERNEL);
+                       char *path;
+
+                       if (!p)
+                               goto skip_write_trace;
+                       path = dentry_path_raw(file_dentry(iocb->ki_filp),
+                                                               p, PATH_MAX);
+                       if (IS_ERR(path)) {
+                               kfree(p);
+                               goto skip_write_trace;
+                       }
+                       trace_f2fs_datawrite_start(inode, orig_pos, orig_count,
+                                       current->pid, path, current->comm);
+                       kfree(p);
+               }
+skip_write_trace:
                /* Do the actual write. */
                ret = dio ?
                        f2fs_dio_write_iter(iocb, from, &may_need_sync):
                        f2fs_buffered_write_iter(iocb, from);
 
+               if (trace_f2fs_datawrite_end_enabled())
+                       trace_f2fs_datawrite_end(inode, orig_pos, ret);
+       }
+
        /* Don't leave any preallocated blocks around past i_size. */
        if (preallocated && i_size_read(inode) < target_size) {
                f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
index ea5b93b..d5fb426 100644 (file)
@@ -35,6 +35,10 @@ static int gc_thread_func(void *data)
        wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
        wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
        unsigned int wait_ms;
+       struct f2fs_gc_control gc_control = {
+               .victim_segno = NULL_SEGNO,
+               .should_migrate_blocks = false,
+               .err_gc_skipped = false };
 
        wait_ms = gc_th->min_sleep_time;
 
@@ -141,8 +145,12 @@ do_gc:
                if (foreground)
                        sync_mode = false;
 
+               gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
+               gc_control.no_bg_gc = foreground;
+               gc_control.nr_free_secs = foreground ? 1 : 0;
+
                /* if return value is not zero, no victim was selected */
-               if (f2fs_gc(sbi, sync_mode, !foreground, false, NULL_SEGNO))
+               if (f2fs_gc(sbi, &gc_control))
                        wait_ms = gc_th->no_gc_sleep_time;
 
                if (foreground)
@@ -646,6 +654,54 @@ static void release_victim_entry(struct f2fs_sb_info *sbi)
        f2fs_bug_on(sbi, !list_empty(&am->victim_list));
 }
 
+static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
+{
+       struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+       unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+
+       if (!dirty_i->enable_pin_section)
+               return false;
+       if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
+               dirty_i->pinned_secmap_cnt++;
+       return true;
+}
+
+static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
+{
+       return dirty_i->pinned_secmap_cnt;
+}
+
+static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
+                                               unsigned int secno)
+{
+       return dirty_i->enable_pin_section &&
+               f2fs_pinned_section_exists(dirty_i) &&
+               test_bit(secno, dirty_i->pinned_secmap);
+}
+
+static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
+{
+       unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
+
+       if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
+               memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
+               DIRTY_I(sbi)->pinned_secmap_cnt = 0;
+       }
+       DIRTY_I(sbi)->enable_pin_section = enable;
+}
+
+static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
+                                                       unsigned int segno)
+{
+       if (!f2fs_is_pinned_file(inode))
+               return 0;
+       if (gc_type != FG_GC)
+               return -EBUSY;
+       if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
+               f2fs_pin_file_control(inode, true);
+       return -EAGAIN;
+}
+
 /*
  * This function is called from two paths.
  * One is garbage collection and the other is SSR segment selection.
@@ -787,6 +843,9 @@ retry:
                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
                        goto next;
 
+               if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
+                       goto next;
+
                if (is_atgc) {
                        add_victim_entry(sbi, &p, segno);
                        goto next;
@@ -1194,18 +1253,9 @@ static int move_data_block(struct inode *inode, block_t bidx,
                goto out;
        }
 
-       if (f2fs_is_atomic_file(inode)) {
-               F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
-               F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
-               err = -EAGAIN;
-               goto out;
-       }
-
-       if (f2fs_is_pinned_file(inode)) {
-               f2fs_pin_file_control(inode, true);
-               err = -EAGAIN;
+       err = f2fs_gc_pinned_control(inode, gc_type, segno);
+       if (err)
                goto out;
-       }
 
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
@@ -1344,18 +1394,9 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
                goto out;
        }
 
-       if (f2fs_is_atomic_file(inode)) {
-               F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
-               F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
-               err = -EAGAIN;
-               goto out;
-       }
-       if (f2fs_is_pinned_file(inode)) {
-               if (gc_type == FG_GC)
-                       f2fs_pin_file_control(inode, true);
-               err = -EAGAIN;
+       err = f2fs_gc_pinned_control(inode, gc_type, segno);
+       if (err)
                goto out;
-       }
 
        if (gc_type == BG_GC) {
                if (PageWriteback(page)) {
@@ -1475,11 +1516,19 @@ next_step:
                ofs_in_node = le16_to_cpu(entry->ofs_in_node);
 
                if (phase == 3) {
+                       int err;
+
                        inode = f2fs_iget(sb, dni.ino);
                        if (IS_ERR(inode) || is_bad_inode(inode) ||
                                        special_file(inode->i_mode))
                                continue;
 
+                       err = f2fs_gc_pinned_control(inode, gc_type, segno);
+                       if (err == -EAGAIN) {
+                               iput(inode);
+                               return submitted;
+                       }
+
                        if (!f2fs_down_write_trylock(
                                &F2FS_I(inode)->i_gc_rwsem[WRITE])) {
                                iput(inode);
@@ -1699,23 +1748,21 @@ skip:
        return seg_freed;
 }
 
-int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
-                       bool background, bool force, unsigned int segno)
+int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
 {
-       int gc_type = sync ? FG_GC : BG_GC;
+       int gc_type = gc_control->init_gc_type;
+       unsigned int segno = gc_control->victim_segno;
        int sec_freed = 0, seg_freed = 0, total_freed = 0;
        int ret = 0;
        struct cp_control cpc;
-       unsigned int init_segno = segno;
        struct gc_inode_list gc_list = {
                .ilist = LIST_HEAD_INIT(gc_list.ilist),
                .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
        };
-       unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
-       unsigned long long first_skipped;
        unsigned int skipped_round = 0, round = 0;
 
-       trace_f2fs_gc_begin(sbi->sb, sync, background,
+       trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
+                               gc_control->nr_free_secs,
                                get_pages(sbi, F2FS_DIRTY_NODES),
                                get_pages(sbi, F2FS_DIRTY_DENTS),
                                get_pages(sbi, F2FS_DIRTY_IMETA),
@@ -1726,7 +1773,6 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
 
        cpc.reason = __get_cp_reason(sbi);
        sbi->skipped_gc_rwsem = 0;
-       first_skipped = last_skipped;
 gc_more:
        if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
                ret = -EINVAL;
@@ -1743,8 +1789,7 @@ gc_more:
                 * threshold, we can make them free by checkpoint. Then, we
                 * secure free segments which doesn't need fggc any more.
                 */
-               if (prefree_segments(sbi) &&
-                               !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
+               if (prefree_segments(sbi)) {
                        ret = f2fs_write_checkpoint(sbi, &cpc);
                        if (ret)
                                goto stop;
@@ -1754,54 +1799,69 @@ gc_more:
        }
 
        /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
-       if (gc_type == BG_GC && !background) {
+       if (gc_type == BG_GC && gc_control->no_bg_gc) {
                ret = -EINVAL;
                goto stop;
        }
+retry:
        ret = __get_victim(sbi, &segno, gc_type);
-       if (ret)
+       if (ret) {
+               /* allow searching for victims in sections that have pinned data */
+               if (ret == -ENODATA && gc_type == FG_GC &&
+                               f2fs_pinned_section_exists(DIRTY_I(sbi))) {
+                       f2fs_unpin_all_sections(sbi, false);
+                       goto retry;
+               }
                goto stop;
+       }
 
-       seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
-       if (gc_type == FG_GC &&
-               seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
-               sec_freed++;
+       seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
+                               gc_control->should_migrate_blocks);
        total_freed += seg_freed;
 
-       if (gc_type == FG_GC) {
-               if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
-                                               sbi->skipped_gc_rwsem)
-                       skipped_round++;
-               last_skipped = sbi->skipped_atomic_files[FG_GC];
-               round++;
-       }
+       if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
+               sec_freed++;
 
        if (gc_type == FG_GC)
                sbi->cur_victim_sec = NULL_SEGNO;
 
-       if (sync)
+       if (gc_control->init_gc_type == FG_GC ||
+           !has_not_enough_free_secs(sbi,
+                               (gc_type == FG_GC) ? sec_freed : 0, 0)) {
+               if (gc_type == FG_GC && sec_freed < gc_control->nr_free_secs)
+                       goto go_gc_more;
                goto stop;
+       }
 
-       if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
-               if (skipped_round <= MAX_SKIP_GC_COUNT ||
-                                       skipped_round * 2 < round) {
-                       segno = NULL_SEGNO;
-                       goto gc_more;
+       /* FG_GC stops GC by skip_count */
+       if (gc_type == FG_GC) {
+               if (sbi->skipped_gc_rwsem)
+                       skipped_round++;
+               round++;
+               if (skipped_round > MAX_SKIP_GC_COUNT &&
+                               skipped_round * 2 >= round) {
+                       ret = f2fs_write_checkpoint(sbi, &cpc);
+                       goto stop;
                }
+       }
 
-               if (first_skipped < last_skipped &&
-                               (last_skipped - first_skipped) >
-                                               sbi->skipped_gc_rwsem) {
-                       f2fs_drop_inmem_pages_all(sbi, true);
-                       segno = NULL_SEGNO;
-                       goto gc_more;
-               }
-               if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
-                       ret = f2fs_write_checkpoint(sbi, &cpc);
+       /* Write checkpoint to reclaim prefree segments */
+       if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE &&
+                               prefree_segments(sbi)) {
+               ret = f2fs_write_checkpoint(sbi, &cpc);
+               if (ret)
+                       goto stop;
        }
+go_gc_more:
+       segno = NULL_SEGNO;
+       goto gc_more;
+
 stop:
        SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
-       SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
+       SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;
+
+       if (gc_type == FG_GC)
+               f2fs_unpin_all_sections(sbi, true);
 
        trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
                                get_pages(sbi, F2FS_DIRTY_NODES),
@@ -1816,7 +1876,7 @@ stop:
 
        put_gc_inode(&gc_list);
 
-       if (sync && !ret)
+       if (gc_control->err_gc_skipped && !ret)
                ret = sec_freed ? 0 : -EAGAIN;
        return ret;
 }
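
Taken together, these gc.c hunks replace f2fs_gc()'s sync/background/force flags plus victim segno with a single struct f2fs_gc_control, so each caller now states its policy through named fields. A minimal sketch of the new calling convention, using only fields that appear in this diff; the wrapper function and its particular field choices are illustrative assumptions, not code from the tree:

/*
 * Illustrative only: one round of foreground GC after this change.
 * Field names come from the hunks above; the wrapper is hypothetical.
 */
static int example_request_fg_gc(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,	/* let GC pick the victim */
		.init_gc_type = FG_GC,
		.no_bg_gc = false,
		.should_migrate_blocks = false,
		.err_gc_skipped = true,		/* -EAGAIN if nothing was freed */
		.nr_free_secs = 1,		/* keep going until a section is free */
	};

	/* gc_lock handling is left out here; see f2fs_ioc_gc() above */
	return f2fs_gc(sbi, &gc_control);
}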
index 3cb1e7a..049ce50 100644 (file)
@@ -91,7 +91,7 @@ static u32 TEA_hash_name(const u8 *p, size_t len)
 /*
  * Compute @fname->hash.  For all directories, @fname->disk_name must be set.
  * For casefolded directories, @fname->usr_fname must be set, and also
- * @fname->cf_name if the filename is valid Unicode.
+ * @fname->cf_name if the filename is valid Unicode and is not "." or "..".
  */
 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname)
 {
@@ -110,10 +110,11 @@ void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname)
                /*
                 * If the casefolded name is provided, hash it instead of the
                 * on-disk name.  If the casefolded name is *not* provided, that
-                * should only be because the name wasn't valid Unicode, so fall
-                * back to treating the name as an opaque byte sequence.  Note
-                * that to handle encrypted directories, the fallback must use
-                * usr_fname (plaintext) rather than disk_name (ciphertext).
+                * should only be because the name wasn't valid Unicode or was
+                * "." or "..", so fall back to treating the name as an opaque
+                * byte sequence.  Note that to handle encrypted directories,
+                * the fallback must use usr_fname (plaintext) rather than
+                * disk_name (ciphertext).
                 */
                WARN_ON_ONCE(!fname->usr_fname->name);
                if (fname->cf_name.name) {
index a578bf8..bf46a7d 100644 (file)
 #include "node.h"
 #include <trace/events/f2fs.h>
 
-bool f2fs_may_inline_data(struct inode *inode)
+static bool support_inline_data(struct inode *inode)
 {
        if (f2fs_is_atomic_file(inode))
                return false;
-
        if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
                return false;
-
        if (i_size_read(inode) > MAX_INLINE_DATA(inode))
                return false;
+       return true;
+}
 
-       if (f2fs_post_read_required(inode))
+bool f2fs_may_inline_data(struct inode *inode)
+{
+       if (!support_inline_data(inode))
                return false;
 
-       return true;
+       return !f2fs_post_read_required(inode);
+}
+
+bool f2fs_sanity_check_inline_data(struct inode *inode)
+{
+       if (!f2fs_has_inline_data(inode))
+               return false;
+
+       if (!support_inline_data(inode))
+               return true;
+
+       /*
+        * used by sanity_check_inode(), when disk layout fields has not
+        * been synchronized to inmem fields.
+        */
+       return (S_ISREG(inode->i_mode) &&
+               (file_is_encrypt(inode) || file_is_verity(inode) ||
+               (F2FS_I(inode)->i_flags & F2FS_COMPR_FL)));
 }
 
 bool f2fs_may_inline_dentry(struct inode *inode)
index 8363923..fc55f5b 100644 (file)
@@ -260,8 +260,8 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
                return false;
        }
 
-       if (F2FS_I(inode)->extent_tree) {
-               struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
+       if (fi->extent_tree) {
+               struct extent_info *ei = &fi->extent_tree->largest;
 
                if (ei->len &&
                        (!f2fs_is_valid_blkaddr(sbi, ei->blk,
@@ -276,8 +276,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
                }
        }
 
-       if (f2fs_has_inline_data(inode) &&
-                       (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
+       if (f2fs_sanity_check_inline_data(inode)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
                          __func__, inode->i_ino, inode->i_mode);
@@ -466,10 +465,10 @@ static int do_read_inode(struct inode *inode)
                }
        }
 
-       F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
-       F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
-       F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
-       F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
+       fi->i_disk_time[0] = inode->i_atime;
+       fi->i_disk_time[1] = inode->i_ctime;
+       fi->i_disk_time[2] = inode->i_mtime;
+       fi->i_disk_time[3] = fi->i_crtime;
        f2fs_put_page(node_page, 1);
 
        stat_inc_inline_xattr(inode);
@@ -745,9 +744,8 @@ void f2fs_evict_inode(struct inode *inode)
        nid_t xnid = F2FS_I(inode)->i_xattr_nid;
        int err = 0;
 
-       /* some remained atomic pages should discarded */
        if (f2fs_is_atomic_file(inode))
-               f2fs_drop_inmem_pages(inode);
+               f2fs_abort_atomic_write(inode, true);
 
        trace_f2fs_evict_inode(inode);
        truncate_inode_pages_final(&inode->i_data);
@@ -796,8 +794,22 @@ retry:
                f2fs_lock_op(sbi);
                err = f2fs_remove_inode_page(inode);
                f2fs_unlock_op(sbi);
-               if (err == -ENOENT)
+               if (err == -ENOENT) {
                        err = 0;
+
+                       /*
+                        * in a fuzzed image, another node may have the same
+                        * block address as the inode's; if it was truncated
+                        * previously, truncation of the inode node will fail.
+                        */
+                       if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
+                               f2fs_warn(F2FS_I_SB(inode),
+                                       "f2fs_evict_inode: inconsistent node id, ino:%lu",
+                                       inode->i_ino);
+                               f2fs_inode_synced(inode);
+                               set_sbi_flag(sbi, SBI_NEED_FSCK);
+                       }
+               }
        }
 
        /* give more chances, if ENOMEM case */
index 5ed79b2..c549acb 100644 (file)
@@ -37,13 +37,10 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
        if (!inode)
                return ERR_PTR(-ENOMEM);
 
-       f2fs_lock_op(sbi);
        if (!f2fs_alloc_nid(sbi, &ino)) {
-               f2fs_unlock_op(sbi);
                err = -ENOSPC;
                goto fail;
        }
-       f2fs_unlock_op(sbi);
 
        nid_free = true;
 
@@ -461,6 +458,13 @@ static int __recover_dot_dentries(struct inode *dir, nid_t pino)
                return 0;
        }
 
+       if (!S_ISDIR(dir->i_mode)) {
+               f2fs_err(sbi, "inconsistent inode status, skip recovering inline_dots inode (ino:%lu, i_mode:%u, pino:%u)",
+                         dir->i_ino, dir->i_mode, pino);
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+               return -ENOTDIR;
+       }
+
        err = f2fs_dquot_initialize(dir);
        if (err)
                return err;
@@ -836,8 +840,8 @@ out:
 }
 
 static int __f2fs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
-                                       struct dentry *dentry, umode_t mode,
-                                       struct inode **whiteout)
+                         struct dentry *dentry, umode_t mode, bool is_whiteout,
+                         struct inode **new_inode)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
        struct inode *inode;
@@ -851,7 +855,7 @@ static int __f2fs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
        if (IS_ERR(inode))
                return PTR_ERR(inode);
 
-       if (whiteout) {
+       if (is_whiteout) {
                init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
                inode->i_op = &f2fs_special_inode_operations;
        } else {
@@ -876,21 +880,25 @@ static int __f2fs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
        f2fs_add_orphan_inode(inode);
        f2fs_alloc_nid_done(sbi, inode->i_ino);
 
-       if (whiteout) {
+       if (is_whiteout) {
                f2fs_i_links_write(inode, false);
 
                spin_lock(&inode->i_lock);
                inode->i_state |= I_LINKABLE;
                spin_unlock(&inode->i_lock);
-
-               *whiteout = inode;
        } else {
-               d_tmpfile(dentry, inode);
+               if (dentry)
+                       d_tmpfile(dentry, inode);
+               else
+                       f2fs_i_links_write(inode, false);
        }
        /* link_count was changed by d_tmpfile as well. */
        f2fs_unlock_op(sbi);
        unlock_new_inode(inode);
 
+       if (new_inode)
+               *new_inode = inode;
+
        f2fs_balance_fs(sbi, true);
        return 0;
 
@@ -911,7 +919,7 @@ static int f2fs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
        if (!f2fs_is_checkpoint_ready(sbi))
                return -ENOSPC;
 
-       return __f2fs_tmpfile(mnt_userns, dir, dentry, mode, NULL);
+       return __f2fs_tmpfile(mnt_userns, dir, dentry, mode, false, NULL);
 }
 
 static int f2fs_create_whiteout(struct user_namespace *mnt_userns,
@@ -921,7 +929,13 @@ static int f2fs_create_whiteout(struct user_namespace *mnt_userns,
                return -EIO;
 
        return __f2fs_tmpfile(mnt_userns, dir, NULL,
-                               S_IFCHR | WHITEOUT_MODE, whiteout);
+                               S_IFCHR | WHITEOUT_MODE, true, whiteout);
+}
+
+int f2fs_get_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+                    struct inode **new_inode)
+{
+       return __f2fs_tmpfile(mnt_userns, dir, NULL, S_IFREG, false, new_inode);
 }
 
 static int f2fs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
index 8ccff18..836c79a 100644 (file)
@@ -90,10 +90,6 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
                                atomic_read(&sbi->total_ext_node) *
                                sizeof(struct extent_node)) >> PAGE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
-       } else if (type == INMEM_PAGES) {
-               /* it allows 20% / total_ram for inmemory pages */
-               mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
-               res = mem_size < (val.totalram / 5);
        } else if (type == DISCARD_CACHE) {
                mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
                                sizeof(struct discard_cmd)) >> PAGE_SHIFT;
@@ -1416,8 +1412,7 @@ repeat:
 
        err = read_node_page(page, 0);
        if (err < 0) {
-               f2fs_put_page(page, 1);
-               return ERR_PTR(err);
+               goto out_put_err;
        } else if (err == LOCKED_PAGE) {
                err = 0;
                goto page_hit;
@@ -1443,19 +1438,21 @@ repeat:
                goto out_err;
        }
 page_hit:
-       if (unlikely(nid != nid_of_node(page))) {
-               f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
+       if (likely(nid == nid_of_node(page)))
+               return page;
+
+       f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
                          nid, nid_of_node(page), ino_of_node(page),
                          ofs_of_node(page), cpver_of_node(page),
                          next_blkaddr_of_node(page));
-               set_sbi_flag(sbi, SBI_NEED_FSCK);
-               err = -EINVAL;
+       set_sbi_flag(sbi, SBI_NEED_FSCK);
+       err = -EINVAL;
 out_err:
-               ClearPageUptodate(page);
-               f2fs_put_page(page, 1);
-               return ERR_PTR(err);
-       }
-       return page;
+       ClearPageUptodate(page);
+out_put_err:
+       f2fs_handle_page_eio(sbi, page->index, NODE);
+       f2fs_put_page(page, 1);
+       return ERR_PTR(err);
 }
 
 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
@@ -1631,7 +1628,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
                goto redirty_out;
        }
 
-       if (atomic && !test_opt(sbi, NOBARRIER))
+       if (atomic && !test_opt(sbi, NOBARRIER) && !f2fs_sb_has_blkzoned(sbi))
                fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
 
        /* should add to global list before clearing PAGECACHE status */
index 4c1d34b..3c09cae 100644 (file)
@@ -147,7 +147,6 @@ enum mem_type {
        DIRTY_DENTS,    /* indicates dirty dentry pages */
        INO_ENTRIES,    /* indicates inode entries */
        EXTENT_CACHE,   /* indicates extent cache */
-       INMEM_PAGES,    /* indicates inmemory pages */
        DISCARD_CACHE,  /* indicates memory of cached discard cmds */
        COMPRESS_PAGE,  /* indicates memory of cached compressed pages */
        BASE_CHECK,     /* check kernel status */
index 7225ce0..874c1b9 100644 (file)
@@ -30,7 +30,7 @@
 static struct kmem_cache *discard_entry_slab;
 static struct kmem_cache *discard_cmd_slab;
 static struct kmem_cache *sit_entry_set_slab;
-static struct kmem_cache *inmem_entry_slab;
+static struct kmem_cache *revoke_entry_slab;
 
 static unsigned long __reverse_ulong(unsigned char *str)
 {
@@ -185,301 +185,175 @@ bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
                        SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
 }
 
-void f2fs_register_inmem_page(struct inode *inode, struct page *page)
+void f2fs_abort_atomic_write(struct inode *inode, bool clean)
 {
-       struct inmem_pages *new;
-
-       set_page_private_atomic(page);
-
-       new = f2fs_kmem_cache_alloc(inmem_entry_slab,
-                                       GFP_NOFS, true, NULL);
-
-       /* add atomic page indices to the list */
-       new->page = page;
-       INIT_LIST_HEAD(&new->list);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct f2fs_inode_info *fi = F2FS_I(inode);
 
-       /* increase reference count with clean state */
-       get_page(page);
-       mutex_lock(&F2FS_I(inode)->inmem_lock);
-       list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages);
-       inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
-       mutex_unlock(&F2FS_I(inode)->inmem_lock);
+       if (f2fs_is_atomic_file(inode)) {
+               if (clean)
+                       truncate_inode_pages_final(inode->i_mapping);
+               clear_inode_flag(fi->cow_inode, FI_ATOMIC_FILE);
+               iput(fi->cow_inode);
+               fi->cow_inode = NULL;
+               clear_inode_flag(inode, FI_ATOMIC_FILE);
 
-       trace_f2fs_register_inmem_page(page, INMEM);
+               spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+               sbi->atomic_files--;
+               spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+       }
 }
 
-static int __revoke_inmem_pages(struct inode *inode,
-                               struct list_head *head, bool drop, bool recover,
-                               bool trylock)
+static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
+                       block_t new_addr, block_t *old_addr, bool recover)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-       struct inmem_pages *cur, *tmp;
-       int err = 0;
-
-       list_for_each_entry_safe(cur, tmp, head, list) {
-               struct page *page = cur->page;
-
-               if (drop)
-                       trace_f2fs_commit_inmem_page(page, INMEM_DROP);
-
-               if (trylock) {
-                       /*
-                        * to avoid deadlock in between page lock and
-                        * inmem_lock.
-                        */
-                       if (!trylock_page(page))
-                               continue;
-               } else {
-                       lock_page(page);
-               }
-
-               f2fs_wait_on_page_writeback(page, DATA, true, true);
-
-               if (recover) {
-                       struct dnode_of_data dn;
-                       struct node_info ni;
+       struct dnode_of_data dn;
+       struct node_info ni;
+       int err;
 
-                       trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
 retry:
-                       set_new_dnode(&dn, inode, NULL, NULL, 0);
-                       err = f2fs_get_dnode_of_data(&dn, page->index,
-                                                               LOOKUP_NODE);
-                       if (err) {
-                               if (err == -ENOMEM) {
-                                       memalloc_retry_wait(GFP_NOFS);
-                                       goto retry;
-                               }
-                               err = -EAGAIN;
-                               goto next;
-                       }
-
-                       err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
-                       if (err) {
-                               f2fs_put_dnode(&dn);
-                               return err;
-                       }
-
-                       if (cur->old_addr == NEW_ADDR) {
-                               f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
-                               f2fs_update_data_blkaddr(&dn, NEW_ADDR);
-                       } else
-                               f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
-                                       cur->old_addr, ni.version, true, true);
-                       f2fs_put_dnode(&dn);
-               }
-next:
-               /* we don't need to invalidate this in the sccessful status */
-               if (drop || recover) {
-                       ClearPageUptodate(page);
-                       clear_page_private_gcing(page);
+       set_new_dnode(&dn, inode, NULL, NULL, 0);
+       err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE_RA);
+       if (err) {
+               if (err == -ENOMEM) {
+                       f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+                       goto retry;
                }
-               detach_page_private(page);
-               set_page_private(page, 0);
-               f2fs_put_page(page, 1);
-
-               list_del(&cur->list);
-               kmem_cache_free(inmem_entry_slab, cur);
-               dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
+               return err;
        }
-       return err;
-}
 
-void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
-{
-       struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
-       struct inode *inode;
-       struct f2fs_inode_info *fi;
-       unsigned int count = sbi->atomic_files;
-       unsigned int looped = 0;
-next:
-       spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
-       if (list_empty(head)) {
-               spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
-               return;
+       err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
+       if (err) {
+               f2fs_put_dnode(&dn);
+               return err;
        }
-       fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
-       inode = igrab(&fi->vfs_inode);
-       if (inode)
-               list_move_tail(&fi->inmem_ilist, head);
-       spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
 
-       if (inode) {
-               if (gc_failure) {
-                       if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
-                               goto skip;
+       if (recover) {
+               /* dn.data_blkaddr is always valid */
+               if (!__is_valid_data_blkaddr(new_addr)) {
+                       if (new_addr == NULL_ADDR)
+                               dec_valid_block_count(sbi, inode, 1);
+                       f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
+                       f2fs_update_data_blkaddr(&dn, new_addr);
+               } else {
+                       f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
+                               new_addr, ni.version, true, true);
                }
-               set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
-               f2fs_drop_inmem_pages(inode);
-skip:
-               iput(inode);
-       }
-       f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
-       if (gc_failure) {
-               if (++looped >= count)
-                       return;
-       }
-       goto next;
-}
-
-void f2fs_drop_inmem_pages(struct inode *inode)
-{
-       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-       struct f2fs_inode_info *fi = F2FS_I(inode);
+       } else {
+               blkcnt_t count = 1;
 
-       do {
-               mutex_lock(&fi->inmem_lock);
-               if (list_empty(&fi->inmem_pages)) {
-                       fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
-
-                       spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
-                       if (!list_empty(&fi->inmem_ilist))
-                               list_del_init(&fi->inmem_ilist);
-                       if (f2fs_is_atomic_file(inode)) {
-                               clear_inode_flag(inode, FI_ATOMIC_FILE);
-                               sbi->atomic_files--;
-                       }
-                       spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+               *old_addr = dn.data_blkaddr;
+               f2fs_truncate_data_blocks_range(&dn, 1);
+               dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);
+               inc_valid_block_count(sbi, inode, &count);
+               f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
+                                       ni.version, true, false);
+       }
 
-                       mutex_unlock(&fi->inmem_lock);
-                       break;
-               }
-               __revoke_inmem_pages(inode, &fi->inmem_pages,
-                                               true, false, true);
-               mutex_unlock(&fi->inmem_lock);
-       } while (1);
+       f2fs_put_dnode(&dn);
+       return 0;
 }
 
-void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
+static void __complete_revoke_list(struct inode *inode, struct list_head *head,
+                                       bool revoke)
 {
-       struct f2fs_inode_info *fi = F2FS_I(inode);
-       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-       struct list_head *head = &fi->inmem_pages;
-       struct inmem_pages *cur = NULL;
-
-       f2fs_bug_on(sbi, !page_private_atomic(page));
+       struct revoke_entry *cur, *tmp;
 
-       mutex_lock(&fi->inmem_lock);
-       list_for_each_entry(cur, head, list) {
-               if (cur->page == page)
-                       break;
+       list_for_each_entry_safe(cur, tmp, head, list) {
+               if (revoke)
+                       __replace_atomic_write_block(inode, cur->index,
+                                               cur->old_addr, NULL, true);
+               list_del(&cur->list);
+               kmem_cache_free(revoke_entry_slab, cur);
        }
-
-       f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
-       list_del(&cur->list);
-       mutex_unlock(&fi->inmem_lock);
-
-       dec_page_count(sbi, F2FS_INMEM_PAGES);
-       kmem_cache_free(inmem_entry_slab, cur);
-
-       ClearPageUptodate(page);
-       clear_page_private_atomic(page);
-       f2fs_put_page(page, 0);
-
-       detach_page_private(page);
-       set_page_private(page, 0);
-
-       trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
 }
 
-static int __f2fs_commit_inmem_pages(struct inode *inode)
+static int __f2fs_commit_atomic_write(struct inode *inode)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode_info *fi = F2FS_I(inode);
-       struct inmem_pages *cur, *tmp;
-       struct f2fs_io_info fio = {
-               .sbi = sbi,
-               .ino = inode->i_ino,
-               .type = DATA,
-               .op = REQ_OP_WRITE,
-               .op_flags = REQ_SYNC | REQ_PRIO,
-               .io_type = FS_DATA_IO,
-       };
+       struct inode *cow_inode = fi->cow_inode;
+       struct revoke_entry *new;
        struct list_head revoke_list;
-       bool submit_bio = false;
-       int err = 0;
+       block_t blkaddr;
+       struct dnode_of_data dn;
+       pgoff_t len = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+       pgoff_t off = 0, blen, index;
+       int ret = 0, i;
 
        INIT_LIST_HEAD(&revoke_list);
 
-       list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
-               struct page *page = cur->page;
+       while (len) {
+               blen = min_t(pgoff_t, ADDRS_PER_BLOCK(cow_inode), len);
 
-               lock_page(page);
-               if (page->mapping == inode->i_mapping) {
-                       trace_f2fs_commit_inmem_page(page, INMEM);
+               set_new_dnode(&dn, cow_inode, NULL, NULL, 0);
+               ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
+               if (ret && ret != -ENOENT) {
+                       goto out;
+               } else if (ret == -ENOENT) {
+                       ret = 0;
+                       if (dn.max_level == 0)
+                               goto out;
+                       goto next;
+               }
 
-                       f2fs_wait_on_page_writeback(page, DATA, true, true);
+               blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, cow_inode),
+                               len);
+               index = off;
+               for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) {
+                       blkaddr = f2fs_data_blkaddr(&dn);
 
-                       set_page_dirty(page);
-                       if (clear_page_dirty_for_io(page)) {
-                               inode_dec_dirty_pages(inode);
-                               f2fs_remove_dirty_inode(inode);
-                       }
-retry:
-                       fio.page = page;
-                       fio.old_blkaddr = NULL_ADDR;
-                       fio.encrypted_page = NULL;
-                       fio.need_lock = LOCK_DONE;
-                       err = f2fs_do_write_data_page(&fio);
-                       if (err) {
-                               if (err == -ENOMEM) {
-                                       memalloc_retry_wait(GFP_NOFS);
-                                       goto retry;
-                               }
-                               unlock_page(page);
-                               break;
+                       if (!__is_valid_data_blkaddr(blkaddr)) {
+                               continue;
+                       } else if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
+                                       DATA_GENERIC_ENHANCE)) {
+                               f2fs_put_dnode(&dn);
+                               ret = -EFSCORRUPTED;
+                               goto out;
                        }
-                       /* record old blkaddr for revoking */
-                       cur->old_addr = fio.old_blkaddr;
-                       submit_bio = true;
-               }
-               unlock_page(page);
-               list_move_tail(&cur->list, &revoke_list);
-       }
 
-       if (submit_bio)
-               f2fs_submit_merged_write_cond(sbi, inode, NULL, 0, DATA);
+                       new = f2fs_kmem_cache_alloc(revoke_entry_slab, GFP_NOFS,
+                                                       true, NULL);
 
-       if (err) {
-               /*
-                * try to revoke all committed pages, but still we could fail
-                * due to no memory or other reason, if that happened, EAGAIN
-                * will be returned, which means in such case, transaction is
-                * already not integrity, caller should use journal to do the
-                * recovery or rewrite & commit last transaction. For other
-                * error number, revoking was done by filesystem itself.
-                */
-               err = __revoke_inmem_pages(inode, &revoke_list,
-                                               false, true, false);
+                       ret = __replace_atomic_write_block(inode, index, blkaddr,
+                                                       &new->old_addr, false);
+                       if (ret) {
+                               f2fs_put_dnode(&dn);
+                               kmem_cache_free(revoke_entry_slab, new);
+                               goto out;
+                       }
 
-               /* drop all uncommitted pages */
-               __revoke_inmem_pages(inode, &fi->inmem_pages,
-                                               true, false, false);
-       } else {
-               __revoke_inmem_pages(inode, &revoke_list,
-                                               false, false, false);
+                       f2fs_update_data_blkaddr(&dn, NULL_ADDR);
+                       new->index = index;
+                       list_add_tail(&new->list, &revoke_list);
+               }
+               f2fs_put_dnode(&dn);
+next:
+               off += blen;
+               len -= blen;
        }
 
-       return err;
+out:
+       __complete_revoke_list(inode, &revoke_list, ret ? true : false);
+
+       return ret;
 }
 
-int f2fs_commit_inmem_pages(struct inode *inode)
+int f2fs_commit_atomic_write(struct inode *inode)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        int err;
 
-       f2fs_balance_fs(sbi, true);
+       err = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
+       if (err)
+               return err;
 
        f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
-
        f2fs_lock_op(sbi);
-       set_inode_flag(inode, FI_ATOMIC_COMMIT);
-
-       mutex_lock(&fi->inmem_lock);
-       err = __f2fs_commit_inmem_pages(inode);
-       mutex_unlock(&fi->inmem_lock);
 
-       clear_inode_flag(inode, FI_ATOMIC_COMMIT);
+       err = __f2fs_commit_atomic_write(inode);
 
        f2fs_unlock_op(sbi);
        f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
@@ -520,8 +394,15 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
                        io_schedule();
                        finish_wait(&sbi->gc_thread->fggc_wq, &wait);
                } else {
+                       struct f2fs_gc_control gc_control = {
+                               .victim_segno = NULL_SEGNO,
+                               .init_gc_type = BG_GC,
+                               .no_bg_gc = true,
+                               .should_migrate_blocks = false,
+                               .err_gc_skipped = false,
+                               .nr_free_secs = 1 };
                        f2fs_down_write(&sbi->gc_lock);
-                       f2fs_gc(sbi, false, false, false, NULL_SEGNO);
+                       f2fs_gc(sbi, &gc_control);
                }
        }
 }
@@ -1664,33 +1545,32 @@ static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
        struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
                                        &(dcc->fstrim_list) : &(dcc->wait_list);
-       struct discard_cmd *dc, *tmp;
-       bool need_wait;
+       struct discard_cmd *dc = NULL, *iter, *tmp;
        unsigned int trimmed = 0;
 
 next:
-       need_wait = false;
+       dc = NULL;
 
        mutex_lock(&dcc->cmd_lock);
-       list_for_each_entry_safe(dc, tmp, wait_list, list) {
-               if (dc->lstart + dc->len <= start || end <= dc->lstart)
+       list_for_each_entry_safe(iter, tmp, wait_list, list) {
+               if (iter->lstart + iter->len <= start || end <= iter->lstart)
                        continue;
-               if (dc->len < dpolicy->granularity)
+               if (iter->len < dpolicy->granularity)
                        continue;
-               if (dc->state == D_DONE && !dc->ref) {
-                       wait_for_completion_io(&dc->wait);
-                       if (!dc->error)
-                               trimmed += dc->len;
-                       __remove_discard_cmd(sbi, dc);
+               if (iter->state == D_DONE && !iter->ref) {
+                       wait_for_completion_io(&iter->wait);
+                       if (!iter->error)
+                               trimmed += iter->len;
+                       __remove_discard_cmd(sbi, iter);
                } else {
-                       dc->ref++;
-                       need_wait = true;
+                       iter->ref++;
+                       dc = iter;
                        break;
                }
        }
        mutex_unlock(&dcc->cmd_lock);
 
-       if (need_wait) {
+       if (dc) {
                trimmed += __wait_one_discard_bio(sbi, dc);
                goto next;
        }
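
This hunk renames the loop cursor to iter and only publishes it through dc when a command is actually found, because the list_for_each_entry_safe() cursor no longer points at a real entry once the loop runs to completion. A generic sketch of that idiom with a hypothetical structure; only the iteration pattern is taken from the diff:

/* Hypothetical example of the "separate result pointer" idiom used above. */
struct example_item {
	struct list_head list;
	bool wanted;
};

static struct example_item *example_pick(struct list_head *head)
{
	struct example_item *found = NULL, *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->wanted) {
			found = iter;	/* remember the match explicitly */
			break;
		}
	}
	/* if the loop completed, 'iter' is not a valid entry; use 'found' */
	return found;
}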
@@ -3286,8 +3166,7 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
                        return CURSEG_COLD_DATA;
                if (file_is_hot(inode) ||
                                is_inode_flag_set(inode, FI_HOT_DATA) ||
-                               f2fs_is_atomic_file(inode) ||
-                               f2fs_is_volatile_file(inode))
+                               f2fs_is_atomic_file(inode))
                        return CURSEG_HOT_DATA;
                return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
        } else {
@@ -4084,10 +3963,12 @@ static void adjust_sit_entry_set(struct sit_entry_set *ses,
                return;
 
        list_for_each_entry_continue(next, head, set_list)
-               if (ses->entry_cnt <= next->entry_cnt)
-                       break;
+               if (ses->entry_cnt <= next->entry_cnt) {
+                       list_move_tail(&ses->set_list, &next->set_list);
+                       return;
+               }
 
-       list_move_tail(&ses->set_list, &next->set_list);
+       list_move_tail(&ses->set_list, head);
 }
 
 static void add_sit_entry(unsigned int segno, struct list_head *head)
@@ -4455,7 +4336,7 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
        unsigned int i, start, end;
        unsigned int readed, start_blk = 0;
        int err = 0;
-       block_t total_node_blocks = 0;
+       block_t sit_valid_blocks[2] = {0, 0};
 
        do {
                readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
@@ -4480,8 +4361,8 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
                        if (err)
                                return err;
                        seg_info_from_raw_sit(se, &sit);
-                       if (IS_NODESEG(se->type))
-                               total_node_blocks += se->valid_blocks;
+
+                       sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
 
                        if (f2fs_block_unit_discard(sbi)) {
                                /* build discard map only one time */
@@ -4521,15 +4402,15 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
                sit = sit_in_journal(journal, i);
 
                old_valid_blocks = se->valid_blocks;
-               if (IS_NODESEG(se->type))
-                       total_node_blocks -= old_valid_blocks;
+
+               sit_valid_blocks[SE_PAGETYPE(se)] -= old_valid_blocks;
 
                err = check_block_count(sbi, start, &sit);
                if (err)
                        break;
                seg_info_from_raw_sit(se, &sit);
-               if (IS_NODESEG(se->type))
-                       total_node_blocks += se->valid_blocks;
+
+               sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
 
                if (f2fs_block_unit_discard(sbi)) {
                        if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
@@ -4551,13 +4432,24 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
        }
        up_read(&curseg->journal_rwsem);
 
-       if (!err && total_node_blocks != valid_node_count(sbi)) {
+       if (err)
+               return err;
+
+       if (sit_valid_blocks[NODE] != valid_node_count(sbi)) {
                f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
-                        total_node_blocks, valid_node_count(sbi));
-               err = -EFSCORRUPTED;
+                        sit_valid_blocks[NODE], valid_node_count(sbi));
+               return -EFSCORRUPTED;
        }
 
-       return err;
+       if (sit_valid_blocks[DATA] + sit_valid_blocks[NODE] >
+                               valid_user_blocks(sbi)) {
+               f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u",
+                        sit_valid_blocks[DATA], sit_valid_blocks[NODE],
+                        valid_user_blocks(sbi));
+               return -EFSCORRUPTED;
+       }
+
+       return 0;
 }
 
 static void init_free_segmap(struct f2fs_sb_info *sbi)
@@ -4637,6 +4529,13 @@ static int init_victim_secmap(struct f2fs_sb_info *sbi)
        dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
        if (!dirty_i->victim_secmap)
                return -ENOMEM;
+
+       dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
+       if (!dirty_i->pinned_secmap)
+               return -ENOMEM;
+
+       dirty_i->pinned_secmap_cnt = 0;
+       dirty_i->enable_pin_section = true;
        return 0;
 }
 
@@ -5225,6 +5124,7 @@ static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
 {
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 
+       kvfree(dirty_i->pinned_secmap);
        kvfree(dirty_i->victim_secmap);
 }
 
@@ -5335,9 +5235,9 @@ int __init f2fs_create_segment_manager_caches(void)
        if (!sit_entry_set_slab)
                goto destroy_discard_cmd;
 
-       inmem_entry_slab = f2fs_kmem_cache_create("f2fs_inmem_page_entry",
-                       sizeof(struct inmem_pages));
-       if (!inmem_entry_slab)
+       revoke_entry_slab = f2fs_kmem_cache_create("f2fs_revoke_entry",
+                       sizeof(struct revoke_entry));
+       if (!revoke_entry_slab)
                goto destroy_sit_entry_set;
        return 0;
 
@@ -5356,5 +5256,5 @@ void f2fs_destroy_segment_manager_caches(void)
        kmem_cache_destroy(sit_entry_set_slab);
        kmem_cache_destroy(discard_cmd_slab);
        kmem_cache_destroy(discard_entry_slab);
-       kmem_cache_destroy(inmem_entry_slab);
+       kmem_cache_destroy(revoke_entry_slab);
 }
index 5c94caf..3f277df 100644 (file)
@@ -24,6 +24,7 @@
 
 #define IS_DATASEG(t)  ((t) <= CURSEG_COLD_DATA)
 #define IS_NODESEG(t)  ((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE)
+#define SE_PAGETYPE(se)        ((IS_NODESEG((se)->type) ? NODE : DATA))
 
 static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
                                                unsigned short seg_type)
@@ -224,10 +225,10 @@ struct segment_allocation {
 
 #define MAX_SKIP_GC_COUNT                      16
 
-struct inmem_pages {
+struct revoke_entry {
        struct list_head list;
-       struct page *page;
        block_t old_addr;               /* for revoking when fail to commit */
+       pgoff_t index;
 };
 
 struct sit_info {
@@ -294,6 +295,9 @@ struct dirty_seglist_info {
        struct mutex seglist_lock;              /* lock for segment bitmaps */
        int nr_dirty[NR_DIRTY_TYPE];            /* # of dirty segments */
        unsigned long *victim_secmap;           /* background GC victims */
+       unsigned long *pinned_secmap;           /* pinned victims from foreground GC */
+       unsigned int pinned_secmap_cnt;         /* count of victims which has pinned data */
+       bool enable_pin_section;                /* enable pinning section */
 };
 
 /* victim selection function for cleaning and SSR */
@@ -572,11 +576,10 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi)
        return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi));
 }
 
-static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
+static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
+                       unsigned int node_blocks, unsigned int dent_blocks)
 {
-       unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
-                                       get_pages(sbi, F2FS_DIRTY_DENTS);
-       unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
+
        unsigned int segno, left_blocks;
        int i;
 
@@ -602,19 +605,28 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
 static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
                                        int freed, int needed)
 {
-       int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
-       int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
-       int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
+       unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
+                                       get_pages(sbi, F2FS_DIRTY_DENTS) +
+                                       get_pages(sbi, F2FS_DIRTY_IMETA);
+       unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
+       unsigned int node_secs = total_node_blocks / BLKS_PER_SEC(sbi);
+       unsigned int dent_secs = total_dent_blocks / BLKS_PER_SEC(sbi);
+       unsigned int node_blocks = total_node_blocks % BLKS_PER_SEC(sbi);
+       unsigned int dent_blocks = total_dent_blocks % BLKS_PER_SEC(sbi);
+       unsigned int free, need_lower, need_upper;
 
        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                return false;
 
-       if (free_sections(sbi) + freed == reserved_sections(sbi) + needed &&
-                       has_curseg_enough_space(sbi))
+       free = free_sections(sbi) + freed;
+       need_lower = node_secs + dent_secs + reserved_sections(sbi) + needed;
+       need_upper = need_lower + (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0);
+
+       if (free > need_upper)
                return false;
-       return (free_sections(sbi) + freed) <=
-               (node_secs + 2 * dent_secs + imeta_secs +
-               reserved_sections(sbi) + needed);
+       else if (free <= need_lower)
+               return true;
+       return !has_curseg_enough_space(sbi, node_blocks, dent_blocks);
 }
 
 static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
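
The has_not_enough_free_secs() rework above stops rounding every dirty node/dentry page up to a whole section: full sections and the partial-section remainders are counted separately, a clear pass/fail band is derived from them, and only the borderline case falls back to the current-segment space check. The user-space sketch below mirrors that structure; BLKS_PER_SEC and the curseg check are stand-ins, not the f2fs helpers.

#include <stdbool.h>
#include <stdio.h>

#define BLKS_PER_SEC 512u

/* placeholder for has_curseg_enough_space(): room left in the open segments */
static bool curseg_has_room(unsigned int node_blocks, unsigned int dent_blocks)
{
        return node_blocks + dent_blocks <= BLKS_PER_SEC / 2;
}

static bool not_enough_free_secs(unsigned int free_secs,
                                 unsigned int reserved_secs,
                                 unsigned int dirty_node_blocks,
                                 unsigned int dirty_dent_blocks)
{
        unsigned int node_secs = dirty_node_blocks / BLKS_PER_SEC;
        unsigned int dent_secs = dirty_dent_blocks / BLKS_PER_SEC;
        unsigned int node_rem  = dirty_node_blocks % BLKS_PER_SEC;
        unsigned int dent_rem  = dirty_dent_blocks % BLKS_PER_SEC;
        unsigned int need_lower = node_secs + dent_secs + reserved_secs;
        unsigned int need_upper = need_lower + !!node_rem + !!dent_rem;

        if (free_secs > need_upper)
                return false;           /* clearly enough free sections */
        if (free_secs <= need_lower)
                return true;            /* clearly not enough */
        /* borderline band: depends on the space left in the open segments */
        return !curseg_has_room(node_rem, dent_rem);
}

int main(void)
{
        printf("%d\n", not_enough_free_secs(9, 8, 700, 100));
        return 0;
}
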
index ed3e8b7..37221e9 100644 (file)
@@ -525,10 +525,11 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
                return -EINVAL;
        }
        f2fs_warn(sbi, "Test dummy encryption mode enabled");
+       return 0;
 #else
-       f2fs_warn(sbi, "Test dummy encryption mount option ignored");
+       f2fs_warn(sbi, "test_dummy_encryption option not supported");
+       return -EINVAL;
 #endif
-       return 0;
 }
 
 #ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -1339,9 +1340,6 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
        spin_lock_init(&fi->i_size_lock);
        INIT_LIST_HEAD(&fi->dirty_list);
        INIT_LIST_HEAD(&fi->gdirty_list);
-       INIT_LIST_HEAD(&fi->inmem_ilist);
-       INIT_LIST_HEAD(&fi->inmem_pages);
-       mutex_init(&fi->inmem_lock);
        init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
        init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
        init_f2fs_rwsem(&fi->i_xattr_sem);
@@ -1382,9 +1380,8 @@ static int f2fs_drop_inode(struct inode *inode)
                        atomic_inc(&inode->i_count);
                        spin_unlock(&inode->i_lock);
 
-                       /* some remained atomic pages should discarded */
                        if (f2fs_is_atomic_file(inode))
-                               f2fs_drop_inmem_pages(inode);
+                               f2fs_abort_atomic_write(inode, true);
 
                        /* should remain fi->extent_tree for writepage */
                        f2fs_destroy_extent_node(inode);
@@ -1707,18 +1704,23 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
        block_t total_count, user_block_count, start_count;
        u64 avail_node_count;
+       unsigned int total_valid_node_count;
 
        total_count = le64_to_cpu(sbi->raw_super->block_count);
-       user_block_count = sbi->user_block_count;
        start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
        buf->f_type = F2FS_SUPER_MAGIC;
        buf->f_bsize = sbi->blocksize;
 
        buf->f_blocks = total_count - start_count;
+
+       spin_lock(&sbi->stat_lock);
+
+       user_block_count = sbi->user_block_count;
+       total_valid_node_count = valid_node_count(sbi);
+       avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
        buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
                                                sbi->current_reserved_blocks;
 
-       spin_lock(&sbi->stat_lock);
        if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
                buf->f_bfree = 0;
        else
@@ -1731,14 +1733,12 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
        else
                buf->f_bavail = 0;
 
-       avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
-
        if (avail_node_count > user_block_count) {
                buf->f_files = user_block_count;
                buf->f_ffree = buf->f_bavail;
        } else {
                buf->f_files = avail_node_count;
-               buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
+               buf->f_ffree = min(avail_node_count - total_valid_node_count,
                                        buf->f_bavail);
        }
 
@@ -2055,7 +2055,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 {
        unsigned int s_flags = sbi->sb->s_flags;
        struct cp_control cpc;
-       unsigned int gc_mode;
+       unsigned int gc_mode = sbi->gc_mode;
        int err = 0;
        int ret;
        block_t unusable;
@@ -2066,14 +2066,25 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
        }
        sbi->sb->s_flags |= SB_ACTIVE;
 
+       /* check if we need more GC first */
+       unusable = f2fs_get_unusable_blocks(sbi);
+       if (!f2fs_disable_cp_again(sbi, unusable))
+               goto skip_gc;
+
        f2fs_update_time(sbi, DISABLE_TIME);
 
-       gc_mode = sbi->gc_mode;
        sbi->gc_mode = GC_URGENT_HIGH;
 
        while (!f2fs_time_over(sbi, DISABLE_TIME)) {
+               struct f2fs_gc_control gc_control = {
+                       .victim_segno = NULL_SEGNO,
+                       .init_gc_type = FG_GC,
+                       .should_migrate_blocks = false,
+                       .err_gc_skipped = true,
+                       .nr_free_secs = 1 };
+
                f2fs_down_write(&sbi->gc_lock);
-               err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
+               err = f2fs_gc(sbi, &gc_control);
                if (err == -ENODATA) {
                        err = 0;
                        break;
@@ -2094,6 +2105,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
                goto restore_flag;
        }
 
+skip_gc:
        f2fs_down_write(&sbi->gc_lock);
        cpc.reason = CP_PAUSE;
        set_sbi_flag(sbi, SBI_CP_DISABLED);
@@ -2684,7 +2696,8 @@ int f2fs_quota_sync(struct super_block *sb, int type)
                if (!sb_has_quota_active(sb, cnt))
                        continue;
 
-               inode_lock(dqopt->files[cnt]);
+               if (!f2fs_sb_has_quota_ino(sbi))
+                       inode_lock(dqopt->files[cnt]);
 
                /*
                 * do_quotactl
@@ -2703,7 +2716,8 @@ int f2fs_quota_sync(struct super_block *sb, int type)
                f2fs_up_read(&sbi->quota_sem);
                f2fs_unlock_op(sbi);
 
-               inode_unlock(dqopt->files[cnt]);
+               if (!f2fs_sb_has_quota_ino(sbi))
+                       inode_unlock(dqopt->files[cnt]);
 
                if (ret)
                        break;
@@ -3648,22 +3662,29 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
        struct block_device *bdev = FDEV(devi).bdev;
        sector_t nr_sectors = bdev_nr_sectors(bdev);
        struct f2fs_report_zones_args rep_zone_arg;
+       u64 zone_sectors;
        int ret;
 
        if (!f2fs_sb_has_blkzoned(sbi))
                return 0;
 
+       zone_sectors = bdev_zone_sectors(bdev);
+       if (!is_power_of_2(zone_sectors)) {
+               f2fs_err(sbi, "F2FS does not support non power of 2 zone sizes\n");
+               return -EINVAL;
+       }
+
        if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
-                               SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
+                               SECTOR_TO_BLOCK(zone_sectors))
                return -EINVAL;
-       sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
+       sbi->blocks_per_blkz = SECTOR_TO_BLOCK(zone_sectors);
        if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
                                __ilog2_u32(sbi->blocks_per_blkz))
                return -EINVAL;
        sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
        FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
                                        sbi->log_blocks_per_blkz;
-       if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
+       if (nr_sectors & (zone_sectors - 1))
                FDEV(devi).nr_blkz++;
 
        FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
@@ -4070,30 +4091,9 @@ try_onemore:
        set_sbi_flag(sbi, SBI_POR_DOING);
        spin_lock_init(&sbi->stat_lock);
 
-       for (i = 0; i < NR_PAGE_TYPE; i++) {
-               int n = (i == META) ? 1 : NR_TEMP_TYPE;
-               int j;
-
-               sbi->write_io[i] =
-                       f2fs_kmalloc(sbi,
-                                    array_size(n,
-                                               sizeof(struct f2fs_bio_info)),
-                                    GFP_KERNEL);
-               if (!sbi->write_io[i]) {
-                       err = -ENOMEM;
-                       goto free_bio_info;
-               }
-
-               for (j = HOT; j < n; j++) {
-                       init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
-                       sbi->write_io[i][j].sbi = sbi;
-                       sbi->write_io[i][j].bio = NULL;
-                       spin_lock_init(&sbi->write_io[i][j].io_lock);
-                       INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
-                       INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
-                       init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
-               }
-       }
+       err = f2fs_init_write_merge_io(sbi);
+       if (err)
+               goto free_bio_info;
 
        init_f2fs_rwsem(&sbi->cp_rwsem);
        init_f2fs_rwsem(&sbi->quota_sem);
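
In the super.c hunks above, f2fs_statfs() now reads user_block_count, valid_node_count() and the reserved-node figure inside one stat_lock section so the reported numbers come from a single consistent snapshot; checkpoint disabling checks whether more GC is needed before throttling, quota sync skips the per-file inode lock when quota inodes are in use, and zoned setup rejects devices whose zone size is not a power of two. A toy model of the snapshot-under-one-lock idea, with pthread primitives standing in for the kernel spinlock and invented counter names:

#include <pthread.h>
#include <stdio.h>

/* toy filesystem counters, protected by one lock as in the statfs hunk */
struct counters {
        pthread_mutex_t lock;
        unsigned long long user_blocks;
        unsigned long long valid_blocks;
        unsigned long long reserved_blocks;
};

static unsigned long long snapshot_bfree(struct counters *c)
{
        unsigned long long user, valid, resv;

        pthread_mutex_lock(&c->lock);
        /* every input to the reported value is read under the same lock */
        user  = c->user_blocks;
        valid = c->valid_blocks;
        resv  = c->reserved_blocks;
        pthread_mutex_unlock(&c->lock);

        return user > valid + resv ? user - valid - resv : 0;
}

int main(void)
{
        struct counters c = { PTHREAD_MUTEX_INITIALIZER, 1000, 600, 50 };

        printf("bfree=%llu\n", snapshot_bfree(&c));
        return 0;
}
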
index 65395ae..7b8f2b4 100644 (file)
@@ -129,7 +129,7 @@ static int f2fs_begin_enable_verity(struct file *filp)
        if (f2fs_verity_in_progress(inode))
                return -EBUSY;
 
-       if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
+       if (f2fs_is_atomic_file(inode))
                return -EOPNOTSUPP;
 
        /*
index ee93173..3bcc1ec 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -630,32 +630,23 @@ EXPORT_SYMBOL(fd_install);
  * @files: file struct to retrieve file from
  * @fd: file descriptor to retrieve file for
  *
- * If this functions returns an EINVAL error pointer the fd was beyond the
- * current maximum number of file descriptors for that fdtable.
+ * Context: files_lock must be held.
  *
- * Returns: The file associated with @fd, on error returns an error pointer.
+ * Returns: The file associated with @fd (NULL if @fd is not open)
  */
 static struct file *pick_file(struct files_struct *files, unsigned fd)
 {
+       struct fdtable *fdt = files_fdtable(files);
        struct file *file;
-       struct fdtable *fdt;
 
-       spin_lock(&files->file_lock);
-       fdt = files_fdtable(files);
-       if (fd >= fdt->max_fds) {
-               file = ERR_PTR(-EINVAL);
-               goto out_unlock;
-       }
+       if (fd >= fdt->max_fds)
+               return NULL;
+
        file = fdt->fd[fd];
-       if (!file) {
-               file = ERR_PTR(-EBADF);
-               goto out_unlock;
+       if (file) {
+               rcu_assign_pointer(fdt->fd[fd], NULL);
+               __put_unused_fd(files, fd);
        }
-       rcu_assign_pointer(fdt->fd[fd], NULL);
-       __put_unused_fd(files, fd);
-
-out_unlock:
-       spin_unlock(&files->file_lock);
        return file;
 }
 
@@ -664,8 +655,10 @@ int close_fd(unsigned fd)
        struct files_struct *files = current->files;
        struct file *file;
 
+       spin_lock(&files->file_lock);
        file = pick_file(files, fd);
-       if (IS_ERR(file))
+       spin_unlock(&files->file_lock);
+       if (!file)
                return -EBADF;
 
        return filp_close(file, files);
@@ -702,20 +695,25 @@ static inline void __range_cloexec(struct files_struct *cur_fds,
 static inline void __range_close(struct files_struct *cur_fds, unsigned int fd,
                                 unsigned int max_fd)
 {
+       unsigned n;
+
+       rcu_read_lock();
+       n = last_fd(files_fdtable(cur_fds));
+       rcu_read_unlock();
+       max_fd = min(max_fd, n);
+
        while (fd <= max_fd) {
                struct file *file;
 
+               spin_lock(&cur_fds->file_lock);
                file = pick_file(cur_fds, fd++);
-               if (!IS_ERR(file)) {
+               spin_unlock(&cur_fds->file_lock);
+
+               if (file) {
                        /* found a valid file to close */
                        filp_close(file, cur_fds);
                        cond_resched();
-                       continue;
                }
-
-               /* beyond the last fd in that table */
-               if (PTR_ERR(file) == -EINVAL)
-                       return;
        }
 }
 
@@ -795,43 +793,25 @@ int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
  * See close_fd_get_file() below, this variant assumes current->files->file_lock
  * is held.
  */
-int __close_fd_get_file(unsigned int fd, struct file **res)
+struct file *__close_fd_get_file(unsigned int fd)
 {
-       struct files_struct *files = current->files;
-       struct file *file;
-       struct fdtable *fdt;
-
-       fdt = files_fdtable(files);
-       if (fd >= fdt->max_fds)
-               goto out_err;
-       file = fdt->fd[fd];
-       if (!file)
-               goto out_err;
-       rcu_assign_pointer(fdt->fd[fd], NULL);
-       __put_unused_fd(files, fd);
-       get_file(file);
-       *res = file;
-       return 0;
-out_err:
-       *res = NULL;
-       return -ENOENT;
+       return pick_file(current->files, fd);
 }
 
 /*
  * variant of close_fd that gets a ref on the file for later fput.
- * The caller must ensure that filp_close() called on the file, and then
- * an fput().
+ * The caller must ensure that filp_close() called on the file.
  */
-int close_fd_get_file(unsigned int fd, struct file **res)
+struct file *close_fd_get_file(unsigned int fd)
 {
        struct files_struct *files = current->files;
-       int ret;
+       struct file *file;
 
        spin_lock(&files->file_lock);
-       ret = __close_fd_get_file(fd, res);
+       file = pick_file(files, fd);
        spin_unlock(&files->file_lock);
 
-       return ret;
+       return file;
 }
 
 void do_close_on_exec(struct files_struct *files)
@@ -871,7 +851,7 @@ void do_close_on_exec(struct files_struct *files)
 }
 
 static inline struct file *__fget_files_rcu(struct files_struct *files,
-       unsigned int fd, fmode_t mask, unsigned int refs)
+       unsigned int fd, fmode_t mask)
 {
        for (;;) {
                struct file *file;
@@ -897,10 +877,9 @@ static inline struct file *__fget_files_rcu(struct files_struct *files,
                 * Such a race can take two forms:
                 *
                 *  (a) the file ref already went down to zero,
-                *      and get_file_rcu_many() fails. Just try
-                *      again:
+                *      and get_file_rcu() fails. Just try again:
                 */
-               if (unlikely(!get_file_rcu_many(file, refs)))
+               if (unlikely(!get_file_rcu(file)))
                        continue;
 
                /*
@@ -909,11 +888,11 @@ static inline struct file *__fget_files_rcu(struct files_struct *files,
                 *       pointer having changed, because it always goes
                 *       hand-in-hand with 'fdt'.
                 *
-                * If so, we need to put our refs and try again.
+                * If so, we need to put our ref and try again.
                 */
                if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
                    unlikely(rcu_dereference_raw(*fdentry) != file)) {
-                       fput_many(file, refs);
+                       fput(file);
                        continue;
                }
 
@@ -926,37 +905,31 @@ static inline struct file *__fget_files_rcu(struct files_struct *files,
 }
 
 static struct file *__fget_files(struct files_struct *files, unsigned int fd,
-                                fmode_t mask, unsigned int refs)
+                                fmode_t mask)
 {
        struct file *file;
 
        rcu_read_lock();
-       file = __fget_files_rcu(files, fd, mask, refs);
+       file = __fget_files_rcu(files, fd, mask);
        rcu_read_unlock();
 
        return file;
 }
 
-static inline struct file *__fget(unsigned int fd, fmode_t mask,
-                                 unsigned int refs)
-{
-       return __fget_files(current->files, fd, mask, refs);
-}
-
-struct file *fget_many(unsigned int fd, unsigned int refs)
+static inline struct file *__fget(unsigned int fd, fmode_t mask)
 {
-       return __fget(fd, FMODE_PATH, refs);
+       return __fget_files(current->files, fd, mask);
 }
 
 struct file *fget(unsigned int fd)
 {
-       return __fget(fd, FMODE_PATH, 1);
+       return __fget(fd, FMODE_PATH);
 }
 EXPORT_SYMBOL(fget);
 
 struct file *fget_raw(unsigned int fd)
 {
-       return __fget(fd, 0, 1);
+       return __fget(fd, 0);
 }
 EXPORT_SYMBOL(fget_raw);
 
@@ -966,7 +939,7 @@ struct file *fget_task(struct task_struct *task, unsigned int fd)
 
        task_lock(task);
        if (task->files)
-               file = __fget_files(task->files, fd, 0, 1);
+               file = __fget_files(task->files, fd, 0);
        task_unlock(task);
 
        return file;
@@ -1035,7 +1008,7 @@ static unsigned long __fget_light(unsigned int fd, fmode_t mask)
                        return 0;
                return (unsigned long)file;
        } else {
-               file = __fget(fd, mask, 1);
+               file = __fget(fd, mask);
                if (!file)
                        return 0;
                return FDPUT_FPUT | (unsigned long)file;
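
The fs/file.c changes above invert the locking contract of pick_file(): it now assumes files->file_lock is already held and signals a missing descriptor with NULL instead of ERR_PTR(), while close_fd(), __range_close() and close_fd_get_file() take the lock around it; the refs parameter also disappears from the fget() path. A minimal sketch of the lock-held helper plus locked wrapper pattern, with a plain array in place of struct fdtable:

#include <pthread.h>
#include <stdio.h>

#define MAX_FDS 16

struct toy_fdtable {
        pthread_mutex_t lock;
        void *fd[MAX_FDS];
};

/* caller must hold t->lock; NULL means "fd not open" */
static void *pick_entry(struct toy_fdtable *t, unsigned int fd)
{
        void *f;

        if (fd >= MAX_FDS)
                return NULL;
        f = t->fd[fd];
        if (f)
                t->fd[fd] = NULL;       /* detach the entry under the lock */
        return f;
}

/* public entry point: takes and drops the lock around the helper */
static void *close_entry(struct toy_fdtable *t, unsigned int fd)
{
        void *f;

        pthread_mutex_lock(&t->lock);
        f = pick_entry(t, fd);
        pthread_mutex_unlock(&t->lock);
        return f;                       /* NULL maps to -EBADF in the caller */
}

int main(void)
{
        static struct toy_fdtable t = { .lock = PTHREAD_MUTEX_INITIALIZER };
        int payload;

        t.fd[3] = &payload;
        printf("%s\n", close_entry(&t, 3) ? "closed" : "EBADF");
        printf("%s\n", close_entry(&t, 3) ? "closed" : "EBADF");
        return 0;
}
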
index ada8fe8..5424e3a 100644 (file)
@@ -368,9 +368,9 @@ EXPORT_SYMBOL_GPL(flush_delayed_fput);
 
 static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
 
-void fput_many(struct file *file, unsigned int refs)
+void fput(struct file *file)
 {
-       if (atomic_long_sub_and_test(refs, &file->f_count)) {
+       if (atomic_long_dec_and_test(&file->f_count)) {
                struct task_struct *task = current;
 
                if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
@@ -389,11 +389,6 @@ void fput_many(struct file *file, unsigned int refs)
        }
 }
 
-void fput(struct file *file)
-{
-       fput_many(file, 1);
-}
-
 /*
  * synchronous analog of fput(); for kernel threads that might be needed
  * in some umount() (and thus can't use flush_delayed_fput() without
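
With the last multi-reference user gone, the file_table.c hunk above removes fput_many() and fput() returns to dropping exactly one reference via atomic_long_dec_and_test(). The toy model below shows only that drop-one-and-free-on-last shape; stdatomic stands in for the kernel's atomic_long_t and the deferred task-work path is left out entirely.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
        atomic_long count;
        /* ... payload ... */
};

static void object_put(struct object *obj)
{
        /* fetch_sub returns the previous value: 1 means this was the last ref */
        if (atomic_fetch_sub(&obj->count, 1) == 1) {
                printf("last reference dropped, freeing\n");
                free(obj);
        }
}

int main(void)
{
        struct object *obj = malloc(sizeof(*obj));

        if (!obj)
                return 1;
        atomic_init(&obj->count, 2);
        object_put(obj);        /* still referenced */
        object_put(obj);        /* last put frees it */
        return 0;
}
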
index a41ea0b..bffd156 100644 (file)
@@ -1,32 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2000-2001 Christoph Hellwig.
  * Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
  */
 #ifndef _VXFS_SUPER_H_
 #define _VXFS_SUPER_H_
index 1fd41cf..de2a5bc 100644 (file)
@@ -1,30 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
  */
 
 /*
index acc5477..fbcd603 100644 (file)
@@ -1,31 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
  */
 #ifndef _VXFS_DIR_H_
 #define _VXFS_DIR_H_
index f5c428e..3a2180c 100644 (file)
@@ -1,31 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
  */
 #ifndef _VXFS_EXTERN_H_
 #define _VXFS_EXTERN_H_
index a4610a7..c1174a3 100644 (file)
@@ -1,31 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2000-2001 Christoph Hellwig.
  * Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
  */
 
 /*
index e026f0c..dfd2147 100644 (file)
@@ -1,32 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2000-2001 Christoph Hellwig.
  * Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
  */
 #ifndef _VXFS_FSHEAD_H_
 #define _VXFS_FSHEAD_H_
index a37431e..c2ef9f0 100644 (file)
@@ -1,30 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
  */
 
 /*
index 1f41b25..ceb6a12 100644 (file)
@@ -1,31 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2000-2001 Christoph Hellwig.
  * Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
  */
 
 /*
index f012abe..1e9e138 100644 (file)
@@ -1,32 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2000-2001 Christoph Hellwig.
  * Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
  */
 #ifndef _VXFS_INODE_H_
 #define _VXFS_INODE_H_
index a514256..f04ba2e 100644 (file)
@@ -1,31 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2000-2001 Christoph Hellwig.
  * Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
  */
 
 /*
index 813da66..23f3518 100644 (file)
@@ -1,30 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
  */
 
 /* 
index 0c0b0c9..53afba0 100644 (file)
@@ -1,31 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
  */
 #ifndef _VXFS_OLT_H_
 #define _VXFS_OLT_H_
index 6143eba..0e633d2 100644 (file)
@@ -1,30 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2000-2001 Christoph Hellwig.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
  */
 
 /*
index 22eed5a..c3b82f7 100644 (file)
@@ -1,31 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2000-2001 Christoph Hellwig.
  * Copyright (c) 2016 Krzysztof Blaszkowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL").
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
  */
 
 /*
index 27a890a..fc9d2d9 100644 (file)
@@ -119,7 +119,7 @@ SYSCALL_DEFINE2(fsopen, const char __user *, _fs_name, unsigned int, flags)
        const char *fs_name;
        int ret;
 
-       if (!ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN))
+       if (!may_mount())
                return -EPERM;
 
        if (flags & ~FSOPEN_CLOEXEC)
@@ -162,7 +162,7 @@ SYSCALL_DEFINE3(fspick, int, dfd, const char __user *, path, unsigned int, flags
        unsigned int lookup_flags;
        int ret;
 
-       if (!ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN))
+       if (!may_mount())
                return -EPERM;
 
        if ((flags & ~(FSPICK_CLOEXEC |
index 9a6c233..87e96b9 100644 (file)
@@ -84,6 +84,7 @@ extern int __mnt_want_write_file(struct file *);
 extern void __mnt_drop_write_file(struct file *);
 
 extern void dissolve_on_fput(struct vfsmount *);
+extern bool may_mount(void);
 
 int path_mount(const char *dev_name, struct path *path,
                const char *type_page, unsigned long flags, void *data_page);
@@ -125,7 +126,7 @@ extern struct file *do_file_open_root(const struct path *,
                const char *, const struct open_flags *);
 extern struct open_how build_open_how(int flags, umode_t mode);
 extern int build_open_flags(const struct open_how *how, struct open_flags *op);
-extern int __close_fd_get_file(unsigned int fd, struct file **res);
+extern struct file *__close_fd_get_file(unsigned int fd);
 
 long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
 int chmod_common(const struct path *path, umode_t mode);
index 9f1c682..3aab418 100644 (file)
                        IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
 
 #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
-                               REQ_F_POLLED | REQ_F_CREDS | REQ_F_ASYNC_DATA)
+                               REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
+                               REQ_F_ASYNC_DATA)
 
 #define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
                                 IO_REQ_CLEAN_FLAGS)
@@ -540,6 +541,7 @@ struct io_uring_task {
        const struct io_ring_ctx *last;
        struct io_wq            *io_wq;
        struct percpu_counter   inflight;
+       atomic_t                inflight_tracked;
        atomic_t                in_idle;
 
        spinlock_t              task_lock;
@@ -574,6 +576,7 @@ struct io_close {
        struct file                     *file;
        int                             fd;
        u32                             file_slot;
+       u32                             flags;
 };
 
 struct io_timeout_data {
@@ -1355,8 +1358,6 @@ static void io_clean_op(struct io_kiocb *req);
 static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
                                             unsigned issue_flags);
 static struct file *io_file_get_normal(struct io_kiocb *req, int fd);
-static void io_drop_inflight_file(struct io_kiocb *req);
-static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags);
 static void io_queue_sqe(struct io_kiocb *req);
 static void io_rsrc_put_work(struct work_struct *work);
 
@@ -1366,7 +1367,9 @@ static int io_req_prep_async(struct io_kiocb *req);
 
 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
                                 unsigned int issue_flags, u32 slot_index);
-static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
+static int __io_close_fixed(struct io_kiocb *req, unsigned int issue_flags,
+                           unsigned int offset);
+static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
 
 static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
 static void io_eventfd_signal(struct io_ring_ctx *ctx);
@@ -1757,9 +1760,29 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
                          bool cancel_all)
        __must_hold(&req->ctx->timeout_lock)
 {
+       struct io_kiocb *req;
+
        if (task && head->task != task)
                return false;
-       return cancel_all;
+       if (cancel_all)
+               return true;
+
+       io_for_each_link(req, head) {
+               if (req->flags & REQ_F_INFLIGHT)
+                       return true;
+       }
+       return false;
+}
+
+static bool io_match_linked(struct io_kiocb *head)
+{
+       struct io_kiocb *req;
+
+       io_for_each_link(req, head) {
+               if (req->flags & REQ_F_INFLIGHT)
+                       return true;
+       }
+       return false;
 }
 
 /*
@@ -1769,9 +1792,24 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
 static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
                               bool cancel_all)
 {
+       bool matched;
+
        if (task && head->task != task)
                return false;
-       return cancel_all;
+       if (cancel_all)
+               return true;
+
+       if (head->flags & REQ_F_LINK_TIMEOUT) {
+               struct io_ring_ctx *ctx = head->ctx;
+
+               /* protect against races with linked timeouts */
+               spin_lock_irq(&ctx->timeout_lock);
+               matched = io_match_linked(head);
+               spin_unlock_irq(&ctx->timeout_lock);
+       } else {
+               matched = io_match_linked(head);
+       }
+       return matched;
 }
 
 static inline bool req_has_async_data(struct io_kiocb *req)
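
The io_match_task()/io_match_task_safe() hunks above stop treating every request as cancellable: a chain now matches either when cancel_all is set or when some request reached via io_for_each_link() carries REQ_F_INFLIGHT, with the safe variant taking timeout_lock when a linked timeout could race. A plain-C sketch of the chain walk only (the list and flag are modelled directly; no locking is shown and the names are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define REQ_F_INFLIGHT  (1u << 0)

struct toy_req {
        unsigned int flags;
        struct toy_req *link;   /* next request in the submission chain */
};

static bool chain_has_inflight(const struct toy_req *head)
{
        for (const struct toy_req *req = head; req; req = req->link)
                if (req->flags & REQ_F_INFLIGHT)
                        return true;
        return false;
}

static bool match_for_cancel(const struct toy_req *head, bool cancel_all)
{
        if (cancel_all)
                return true;
        return chain_has_inflight(head);
}

int main(void)
{
        struct toy_req tail = { .flags = 0,              .link = NULL  };
        struct toy_req head = { .flags = 0,              .link = &tail };
        struct toy_req busy = { .flags = REQ_F_INFLIGHT, .link = NULL  };

        printf("%d %d %d\n",
               match_for_cancel(&head, false),   /* 0: nothing in flight */
               match_for_cancel(&busy, false),   /* 1: in-flight request */
               match_for_cancel(&head, true));   /* 1: cancel_all        */
        return 0;
}
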
@@ -1927,6 +1965,14 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
        return req->flags & REQ_F_FIXED_FILE;
 }
 
+static inline void io_req_track_inflight(struct io_kiocb *req)
+{
+       if (!(req->flags & REQ_F_INFLIGHT)) {
+               req->flags |= REQ_F_INFLIGHT;
+               atomic_inc(&current->io_uring->inflight_tracked);
+       }
+}
+
 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 {
        if (WARN_ON_ONCE(!req->link))
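
The io_req_track_inflight() helper added above sets REQ_F_INFLIGHT at most once per request and mirrors it in the new inflight_tracked counter on the task context. Minimal model of that flag-plus-counter pairing, with a C11 atomic standing in for the kernel atomic_t:

#include <stdatomic.h>
#include <stdio.h>

#define REQ_F_INFLIGHT  (1u << 0)

static atomic_int inflight_tracked;     /* stands in for tctx->inflight_tracked */

struct toy_req {
        unsigned int flags;
};

static void track_inflight(struct toy_req *req)
{
        /* flag and counter move together, and only on the first call */
        if (!(req->flags & REQ_F_INFLIGHT)) {
                req->flags |= REQ_F_INFLIGHT;
                atomic_fetch_add(&inflight_tracked, 1);
        }
}

int main(void)
{
        struct toy_req req = { 0 };

        track_inflight(&req);
        track_inflight(&req);   /* second call is a no-op */
        printf("tracked=%d\n", atomic_load(&inflight_tracked));
        return 0;
}
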
@@ -2988,8 +3034,6 @@ static void __io_req_task_work_add(struct io_kiocb *req,
        unsigned long flags;
        bool running;
 
-       io_drop_inflight_file(req);
-
        spin_lock_irqsave(&tctx->task_lock, flags);
        wq_list_add_tail(&req->io_task_work.node, list);
        running = tctx->task_running;
@@ -4176,6 +4220,16 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
        return 0;
 }
 
+static int io_readv_prep_async(struct io_kiocb *req)
+{
+       return io_rw_prep_async(req, READ);
+}
+
+static int io_writev_prep_async(struct io_kiocb *req)
+{
+       return io_rw_prep_async(req, WRITE);
+}
+
 /*
  * This is our waitqueue callback handler, registered through __folio_lock_async()
  * when we initially tried to do the IO with the iocb armed our waitqueue.
@@ -5103,42 +5157,6 @@ static int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
        return 0;
 }
 
-static int io_shutdown_prep(struct io_kiocb *req,
-                           const struct io_uring_sqe *sqe)
-{
-#if defined(CONFIG_NET)
-       if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
-                    sqe->buf_index || sqe->splice_fd_in))
-               return -EINVAL;
-
-       req->shutdown.how = READ_ONCE(sqe->len);
-       return 0;
-#else
-       return -EOPNOTSUPP;
-#endif
-}
-
-static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
-{
-#if defined(CONFIG_NET)
-       struct socket *sock;
-       int ret;
-
-       if (issue_flags & IO_URING_F_NONBLOCK)
-               return -EAGAIN;
-
-       sock = sock_from_file(req->file);
-       if (unlikely(!sock))
-               return -ENOTSOCK;
-
-       ret = __sys_shutdown_sock(sock, req->shutdown.how);
-       io_req_complete(req, ret);
-       return 0;
-#else
-       return -EOPNOTSUPP;
-#endif
-}
-
 static int __io_splice_prep(struct io_kiocb *req,
                            const struct io_uring_sqe *sqe)
 {
@@ -5445,15 +5463,11 @@ static int io_file_bitmap_get(struct io_ring_ctx *ctx)
        unsigned long nr = ctx->nr_user_files;
        int ret;
 
-       if (table->alloc_hint >= nr)
-               table->alloc_hint = 0;
-
        do {
                ret = find_next_zero_bit(table->bitmap, nr, table->alloc_hint);
-               if (ret != nr) {
-                       table->alloc_hint = ret + 1;
+               if (ret != nr)
                        return ret;
-               }
+
                if (!table->alloc_hint)
                        break;
 
@@ -5464,6 +5478,10 @@ static int io_file_bitmap_get(struct io_ring_ctx *ctx)
        return -ENFILE;
 }
 
+/*
+ * Note when io_fixed_fd_install() returns error value, it will ensure
+ * fput() is called correspondingly.
+ */
 static int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
                               struct file *file, unsigned int file_slot)
 {
@@ -5471,26 +5489,24 @@ static int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
+       io_ring_submit_lock(ctx, issue_flags);
+
        if (alloc_slot) {
-               io_ring_submit_lock(ctx, issue_flags);
                ret = io_file_bitmap_get(ctx);
-               if (unlikely(ret < 0)) {
-                       io_ring_submit_unlock(ctx, issue_flags);
-                       return ret;
-               }
-
+               if (unlikely(ret < 0))
+                       goto err;
                file_slot = ret;
        } else {
                file_slot--;
        }
 
        ret = io_install_fixed_file(req, file, issue_flags, file_slot);
-       if (alloc_slot) {
-               io_ring_submit_unlock(ctx, issue_flags);
-               if (!ret)
-                       return file_slot;
-       }
-
+       if (!ret && alloc_slot)
+               ret = file_slot;
+err:
+       io_ring_submit_unlock(ctx, issue_flags);
+       if (unlikely(ret < 0))
+               fput(file);
        return ret;
 }
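
The reworked io_fixed_fd_install() above takes the submit lock once, funnels every failure through a single unlock, and, as the new comment notes, drops the file reference itself on error so callers such as io_socket() need no cleanup of their own. A sketch of that ownership rule with stand-in helpers (nothing here is the io_uring API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-ins for the bitmap allocator and the slot installer */
static int alloc_slot(void)               { return 4; }
static int install(void *file, int slot)  { (void)file; return slot >= 0 ? 0 : -1; }
static void put_file(void *file)          { free(file); }

static int fixed_install(void *file, int want_alloc, int slot)
{
        int ret;

        pthread_mutex_lock(&table_lock);
        if (want_alloc) {
                ret = alloc_slot();
                if (ret < 0)
                        goto out;
                slot = ret;
        }
        ret = install(file, slot);
        if (!ret && want_alloc)
                ret = slot;             /* report which slot was picked */
out:
        pthread_mutex_unlock(&table_lock);
        if (ret < 0)
                put_file(file);         /* on error this function owns the cleanup */
        return ret;
}

int main(void)
{
        printf("slot=%d\n", fixed_install(malloc(16), 1, 0));
        return 0;
}
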
 
@@ -5972,14 +5988,18 @@ static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
 
 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
+       if (sqe->off || sqe->addr || sqe->len || sqe->buf_index)
                return -EINVAL;
        if (req->flags & REQ_F_FIXED_FILE)
                return -EBADF;
 
        req->close.fd = READ_ONCE(sqe->fd);
        req->close.file_slot = READ_ONCE(sqe->file_index);
-       if (req->close.file_slot && req->close.fd)
+       req->close.flags = READ_ONCE(sqe->close_flags);
+       if (req->close.flags & ~IORING_CLOSE_FD_AND_FILE_SLOT)
+               return -EINVAL;
+       if (!(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT) &&
+           req->close.file_slot && req->close.fd)
                return -EINVAL;
 
        return 0;
@@ -5990,12 +6010,13 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags)
        struct files_struct *files = current->files;
        struct io_close *close = &req->close;
        struct fdtable *fdt;
-       struct file *file = NULL;
+       struct file *file;
        int ret = -EBADF;
 
        if (req->close.file_slot) {
                ret = io_close_fixed(req, issue_flags);
-               goto err;
+               if (ret || !(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT))
+                       goto err;
        }
 
        spin_lock(&files->file_lock);
@@ -6008,7 +6029,6 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags)
                        lockdep_is_held(&files->file_lock));
        if (!file || file->f_op == &io_uring_fops) {
                spin_unlock(&files->file_lock);
-               file = NULL;
                goto err;
        }
 
@@ -6018,21 +6038,16 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags)
                return -EAGAIN;
        }
 
-       ret = __close_fd_get_file(close->fd, &file);
+       file = __close_fd_get_file(close->fd);
        spin_unlock(&files->file_lock);
-       if (ret < 0) {
-               if (ret == -ENOENT)
-                       ret = -EBADF;
+       if (!file)
                goto err;
-       }
 
        /* No ->flush() or already async, safely close from here */
        ret = filp_close(file, current->files);
 err:
        if (ret < 0)
                req_set_fail(req);
-       if (file)
-               fput(file);
        __io_req_complete(req, issue_flags, ret, 0);
        return 0;
 }
@@ -6063,6 +6078,34 @@ static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
 }
 
 #if defined(CONFIG_NET)
+static int io_shutdown_prep(struct io_kiocb *req,
+                           const struct io_uring_sqe *sqe)
+{
+       if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
+                    sqe->buf_index || sqe->splice_fd_in))
+               return -EINVAL;
+
+       req->shutdown.how = READ_ONCE(sqe->len);
+       return 0;
+}
+
+static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
+{
+       struct socket *sock;
+       int ret;
+
+       if (issue_flags & IO_URING_F_NONBLOCK)
+               return -EAGAIN;
+
+       sock = sock_from_file(req->file);
+       if (unlikely(!sock))
+               return -ENOTSOCK;
+
+       ret = __sys_shutdown_sock(sock, req->shutdown.how);
+       io_req_complete(req, ret);
+       return 0;
+}
+
 static bool io_net_retry(struct socket *sock, int flags)
 {
        if (!(flags & MSG_WAITALL))
@@ -6674,8 +6717,8 @@ static int io_socket(struct io_kiocb *req, unsigned int issue_flags)
                fd_install(fd, file);
                ret = fd;
        } else {
-               ret = io_install_fixed_file(req, file, issue_flags,
-                                           sock->file_slot - 1);
+               ret = io_fixed_fd_install(req, issue_flags, file,
+                                           sock->file_slot);
        }
        __io_req_complete(req, issue_flags, ret, 0);
        return 0;
@@ -6767,6 +6810,7 @@ IO_NETOP_PREP_ASYNC(recvmsg);
 IO_NETOP_PREP_ASYNC(connect);
 IO_NETOP_PREP(accept);
 IO_NETOP_PREP(socket);
+IO_NETOP_PREP(shutdown);
 IO_NETOP_FN(send);
 IO_NETOP_FN(recv);
 #endif /* CONFIG_NET */
@@ -6905,10 +6949,6 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 
                if (!req->cqe.res) {
                        struct poll_table_struct pt = { ._key = req->apoll_events };
-                       unsigned flags = locked ? 0 : IO_URING_F_UNLOCKED;
-
-                       if (unlikely(!io_assign_file(req, flags)))
-                               return -EBADF;
                        req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
                }
 
@@ -7390,7 +7430,7 @@ static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
        return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
 }
 
-static int io_poll_update_prep(struct io_kiocb *req,
+static int io_poll_remove_prep(struct io_kiocb *req,
                               const struct io_uring_sqe *sqe)
 {
        struct io_poll_update *upd = &req->poll_update;
@@ -7454,7 +7494,7 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
        return 0;
 }
 
-static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
+static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_cancel_data cd = { .data = req->poll_update.old_user_data, };
        struct io_ring_ctx *ctx = req->ctx;
@@ -7698,8 +7738,9 @@ static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
        return 0;
 }
 
-static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-                          bool is_timeout_link)
+static int __io_timeout_prep(struct io_kiocb *req,
+                            const struct io_uring_sqe *sqe,
+                            bool is_timeout_link)
 {
        struct io_timeout_data *data;
        unsigned flags;
@@ -7754,6 +7795,18 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        return 0;
 }
 
+static int io_timeout_prep(struct io_kiocb *req,
+                          const struct io_uring_sqe *sqe)
+{
+       return __io_timeout_prep(req, sqe, false);
+}
+
+static int io_link_timeout_prep(struct io_kiocb *req,
+                               const struct io_uring_sqe *sqe)
+{
+       return __io_timeout_prep(req, sqe, true);
+}
+
 static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
@@ -7970,7 +8023,7 @@ done:
        return 0;
 }
 
-static int io_rsrc_update_prep(struct io_kiocb *req,
+static int io_files_update_prep(struct io_kiocb *req,
                                const struct io_uring_sqe *sqe)
 {
        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
@@ -7986,6 +8039,41 @@ static int io_rsrc_update_prep(struct io_kiocb *req,
        return 0;
 }
 
+static int io_files_update_with_index_alloc(struct io_kiocb *req,
+                                           unsigned int issue_flags)
+{
+       __s32 __user *fds = u64_to_user_ptr(req->rsrc_update.arg);
+       unsigned int done;
+       struct file *file;
+       int ret, fd;
+
+       for (done = 0; done < req->rsrc_update.nr_args; done++) {
+               if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               file = fget(fd);
+               if (!file) {
+                       ret = -EBADF;
+                       break;
+               }
+               ret = io_fixed_fd_install(req, issue_flags, file,
+                                         IORING_FILE_INDEX_ALLOC);
+               if (ret < 0)
+                       break;
+               if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
+                       ret = -EFAULT;
+                       __io_close_fixed(req, issue_flags, ret);
+                       break;
+               }
+       }
+
+       if (done)
+               return done;
+       return ret;
+}
+
 static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
@@ -7999,10 +8087,14 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
        up.resv = 0;
        up.resv2 = 0;
 
-       io_ring_submit_lock(ctx, issue_flags);
-       ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
-                                       &up, req->rsrc_update.nr_args);
-       io_ring_submit_unlock(ctx, issue_flags);
+       if (req->rsrc_update.offset == IORING_FILE_INDEX_ALLOC) {
+               ret = io_files_update_with_index_alloc(req, issue_flags);
+       } else {
+               io_ring_submit_lock(ctx, issue_flags);
+               ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
+                               &up, req->rsrc_update.nr_args);
+               io_ring_submit_unlock(ctx, issue_flags);
+       }
 
        if (ret < 0)
                req_set_fail(req);
@@ -8025,7 +8117,7 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        case IORING_OP_POLL_ADD:
                return io_poll_add_prep(req, sqe);
        case IORING_OP_POLL_REMOVE:
-               return io_poll_update_prep(req, sqe);
+               return io_poll_remove_prep(req, sqe);
        case IORING_OP_FSYNC:
                return io_fsync_prep(req, sqe);
        case IORING_OP_SYNC_FILE_RANGE:
@@ -8039,13 +8131,13 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        case IORING_OP_CONNECT:
                return io_connect_prep(req, sqe);
        case IORING_OP_TIMEOUT:
-               return io_timeout_prep(req, sqe, false);
+               return io_timeout_prep(req, sqe);
        case IORING_OP_TIMEOUT_REMOVE:
                return io_timeout_remove_prep(req, sqe);
        case IORING_OP_ASYNC_CANCEL:
                return io_async_cancel_prep(req, sqe);
        case IORING_OP_LINK_TIMEOUT:
-               return io_timeout_prep(req, sqe, true);
+               return io_link_timeout_prep(req, sqe);
        case IORING_OP_ACCEPT:
                return io_accept_prep(req, sqe);
        case IORING_OP_FALLOCATE:
@@ -8055,7 +8147,7 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        case IORING_OP_CLOSE:
                return io_close_prep(req, sqe);
        case IORING_OP_FILES_UPDATE:
-               return io_rsrc_update_prep(req, sqe);
+               return io_files_update_prep(req, sqe);
        case IORING_OP_STATX:
                return io_statx_prep(req, sqe);
        case IORING_OP_FADVISE:
@@ -8123,9 +8215,9 @@ static int io_req_prep_async(struct io_kiocb *req)
 
        switch (req->opcode) {
        case IORING_OP_READV:
-               return io_rw_prep_async(req, READ);
+               return io_readv_prep_async(req);
        case IORING_OP_WRITEV:
-               return io_rw_prep_async(req, WRITE);
+               return io_writev_prep_async(req);
        case IORING_OP_SENDMSG:
                return io_sendmsg_prep_async(req);
        case IORING_OP_RECVMSG:
@@ -8264,6 +8356,11 @@ static void io_clean_op(struct io_kiocb *req)
                kfree(req->apoll);
                req->apoll = NULL;
        }
+       if (req->flags & REQ_F_INFLIGHT) {
+               struct io_uring_task *tctx = req->task->io_uring;
+
+               atomic_dec(&tctx->inflight_tracked);
+       }
        if (req->flags & REQ_F_CREDS)
                put_cred(req->creds);
        if (req->flags & REQ_F_ASYNC_DATA) {
@@ -8288,6 +8385,7 @@ static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
 
 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 {
+       const struct io_op_def *def = &io_op_defs[req->opcode];
        const struct cred *creds = NULL;
        int ret;
 
@@ -8297,7 +8395,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
        if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
                creds = override_creds(req->creds);
 
-       if (!io_op_defs[req->opcode].audit_skip)
+       if (!def->audit_skip)
                audit_uring_entry(req->opcode);
 
        switch (req->opcode) {
@@ -8321,7 +8419,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
                ret = io_poll_add(req, issue_flags);
                break;
        case IORING_OP_POLL_REMOVE:
-               ret = io_poll_update(req, issue_flags);
+               ret = io_poll_remove(req, issue_flags);
                break;
        case IORING_OP_SYNC_FILE_RANGE:
                ret = io_sync_file_range(req, issue_flags);
@@ -8436,7 +8534,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
                break;
        }
 
-       if (!io_op_defs[req->opcode].audit_skip)
+       if (!def->audit_skip)
                audit_uring_exit(!ret, ret);
 
        if (creds)
@@ -8569,19 +8667,6 @@ out:
        return file;
 }
 
-/*
- * Drop the file for requeue operations. Only used of req->file is the
- * io_uring descriptor itself.
- */
-static void io_drop_inflight_file(struct io_kiocb *req)
-{
-       if (unlikely(req->flags & REQ_F_INFLIGHT)) {
-               fput(req->file);
-               req->file = NULL;
-               req->flags &= ~REQ_F_INFLIGHT;
-       }
-}
-
 static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
 {
        struct file *file = fget(fd);
@@ -8590,7 +8675,7 @@ static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
 
        /* we don't allow fixed io_uring files */
        if (file && file->f_op == &io_uring_fops)
-               req->flags |= REQ_F_INFLIGHT;
+               io_req_track_inflight(req);
        return file;
 }
 
@@ -8788,6 +8873,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
                       const struct io_uring_sqe *sqe)
        __must_hold(&ctx->uring_lock)
 {
+       const struct io_op_def *def;
        unsigned int sqe_flags;
        int personality;
        u8 opcode;
@@ -8805,12 +8891,13 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
                req->opcode = 0;
                return -EINVAL;
        }
+       def = &io_op_defs[opcode];
        if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
                /* enforce forwards compatibility on users */
                if (sqe_flags & ~SQE_VALID_FLAGS)
                        return -EINVAL;
                if (sqe_flags & IOSQE_BUFFER_SELECT) {
-                       if (!io_op_defs[opcode].buffer_select)
+                       if (!def->buffer_select)
                                return -EOPNOTSUPP;
                        req->buf_index = READ_ONCE(sqe->buf_group);
                }
@@ -8836,12 +8923,12 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
                }
        }
 
-       if (!io_op_defs[opcode].ioprio && sqe->ioprio)
+       if (!def->ioprio && sqe->ioprio)
                return -EINVAL;
-       if (!io_op_defs[opcode].iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
+       if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
 
-       if (io_op_defs[opcode].needs_file) {
+       if (def->needs_file) {
                struct io_submit_state *state = &ctx->submit_state;
 
                req->cqe.fd = READ_ONCE(sqe->fd);
@@ -8850,7 +8937,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
                 * Plug now if we have more than 2 IO left after this, and the
                 * target is potentially a read/write to block based storage.
                 */
-               if (state->need_plug && io_op_defs[opcode].plug) {
+               if (state->need_plug && def->plug) {
                        state->plug_started = true;
                        state->need_plug = false;
                        blk_start_plug_nr_ios(&state->plug, state->submit_nr);
@@ -9658,8 +9745,7 @@ static inline void io_file_bitmap_set(struct io_file_table *table, int bit)
 {
        WARN_ON_ONCE(test_bit(bit, table->bitmap));
        __set_bit(bit, table->bitmap);
-       if (bit == table->alloc_hint)
-               table->alloc_hint++;
+       table->alloc_hint = bit + 1;
 }
 
 static inline void io_file_bitmap_clear(struct io_file_table *table, int bit)
@@ -10113,21 +10199,19 @@ static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
 
 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
                                 unsigned int issue_flags, u32 slot_index)
+       __must_hold(&req->ctx->uring_lock)
 {
        struct io_ring_ctx *ctx = req->ctx;
        bool needs_switch = false;
        struct io_fixed_file *file_slot;
-       int ret = -EBADF;
+       int ret;
 
-       io_ring_submit_lock(ctx, issue_flags);
        if (file->f_op == &io_uring_fops)
-               goto err;
-       ret = -ENXIO;
+               return -EBADF;
        if (!ctx->file_data)
-               goto err;
-       ret = -EINVAL;
+               return -ENXIO;
        if (slot_index >= ctx->nr_user_files)
-               goto err;
+               return -EINVAL;
 
        slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
        file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
@@ -10158,15 +10242,14 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
 err:
        if (needs_switch)
                io_rsrc_node_switch(ctx, ctx->file_data);
-       io_ring_submit_unlock(ctx, issue_flags);
        if (ret)
                fput(file);
        return ret;
 }
 
-static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
+static int __io_close_fixed(struct io_kiocb *req, unsigned int issue_flags,
+                           unsigned int offset)
 {
-       unsigned int offset = req->close.file_slot - 1;
        struct io_ring_ctx *ctx = req->ctx;
        struct io_fixed_file *file_slot;
        struct file *file;
@@ -10203,6 +10286,11 @@ out:
        return ret;
 }
 
+static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
+{
+       return __io_close_fixed(req, issue_flags, req->close.file_slot - 1);
+}
+
 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                                 struct io_uring_rsrc_update2 *up,
                                 unsigned nr_args)
@@ -10351,6 +10439,7 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task,
        xa_init(&tctx->xa);
        init_waitqueue_head(&tctx->wait);
        atomic_set(&tctx->in_idle, 0);
+       atomic_set(&tctx->inflight_tracked, 0);
        task->io_uring = tctx;
        spin_lock_init(&tctx->task_lock);
        INIT_WQ_LIST(&tctx->task_list);
@@ -11046,6 +11135,7 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
        xa_for_each(&ctx->io_bl_xa, index, bl) {
                xa_erase(&ctx->io_bl_xa, bl->bgid);
                __io_remove_buffers(ctx, bl, -1U);
+               kfree(bl);
        }
 
        while (!list_empty(&ctx->io_buffers_pages)) {
@@ -11581,7 +11671,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
 {
        if (tracked)
-               return 0;
+               return atomic_read(&tctx->inflight_tracked);
        return percpu_counter_sum(&tctx->inflight);
 }
 
@@ -11957,14 +12047,14 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                        return -EINVAL;
                fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
                f.file = tctx->registered_rings[fd];
-               if (unlikely(!f.file))
-                       return -EBADF;
+               f.flags = 0;
        } else {
                f = fdget(fd);
-               if (unlikely(!f.file))
-                       return -EBADF;
        }
 
+       if (unlikely(!f.file))
+               return -EBADF;
+
        ret = -EOPNOTSUPP;
        if (unlikely(f.file->f_op != &io_uring_fops))
                goto out_fput;
@@ -12062,8 +12152,7 @@ iopoll_locked:
 out:
        percpu_ref_put(&ctx->refs);
 out_fput:
-       if (!(flags & IORING_ENTER_REGISTERED_RING))
-               fdput(f);
+       fdput(f);
        return ret;
 }
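As a usage note for io_files_update_with_index_alloc() above: when the FILES_UPDATE offset is IORING_FILE_INDEX_ALLOC, the kernel picks a free fixed-file slot for each fd and writes the chosen slot index back into the user-supplied array, while cqe->res reports how many fds were installed. A rough liburing sketch, assuming headers new enough to define IORING_FILE_INDEX_ALLOC (illustrative only, not taken from this patch):

/* Illustrative sketch, not part of the patch above: install 'fd' into a
 * kernel-chosen fixed-file slot via IORING_OP_FILES_UPDATE with
 * offset == IORING_FILE_INDEX_ALLOC.  Returns the allocated slot index
 * or a negative errno.
 */
#include <liburing.h>

static int install_fd_into_alloc_slot(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int fds[1] = { fd };
	int ret;

	if (!sqe)
		return -EBUSY;

	/* the allocated slot is written back into fds[0] by the kernel */
	io_uring_prep_files_update(sqe, fds, 1, IORING_FILE_INDEX_ALLOC);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;			/* number of fds installed, or -errno */
	io_uring_cqe_seen(ring, cqe);

	return ret < 0 ? ret : fds[0];
}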
 
index 7e9abdb..acd32f0 100644 (file)
@@ -43,9 +43,9 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
        jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n",
                  __func__,
                  jeb->offset, jeb->offset, jeb->offset + c->sector_size);
-       instr = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
+       instr = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
        if (!instr) {
-               pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
+               pr_warn("kzalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
                mutex_lock(&c->erase_free_sem);
                spin_lock(&c->erase_completion_lock);
                list_move(&jeb->list, &c->erase_pending_list);
@@ -57,8 +57,6 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
                return;
        }
 
-       memset(instr, 0, sizeof(*instr));
-
        instr->addr = jeb->offset;
        instr->len = c->sector_size;
 
index 00a110f..39cec28 100644 (file)
@@ -604,6 +604,7 @@ out_root:
        jffs2_free_raw_node_refs(c);
        kvfree(c->blocks);
        jffs2_clear_xattr_subsystem(c);
+       jffs2_sum_exit(c);
  out_inohash:
        kfree(c->inocache_list);
  out_wbuf:
index e205fde..6eca72c 100644 (file)
 #include "kernfs-internal.h"
 
 static DEFINE_SPINLOCK(kernfs_rename_lock);    /* kn->parent and ->name */
-static char kernfs_pr_cont_buf[PATH_MAX];      /* protected by rename_lock */
+/*
+ * Don't piggyback on rename_lock to protect pr_cont_buf: we don't want
+ * to call pr_cont() while holding rename_lock, because pr_cont() may
+ * perform wakeups when releasing console_sem, and holding rename_lock
+ * there would deadlock if the scheduler reads kernfs_name in the wakeup
+ * path.
+ */
+static DEFINE_SPINLOCK(kernfs_pr_cont_lock);
+static char kernfs_pr_cont_buf[PATH_MAX];      /* protected by pr_cont_lock */
 static DEFINE_SPINLOCK(kernfs_idr_lock);       /* root->ino_idr */
 
 #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
@@ -229,12 +237,12 @@ void pr_cont_kernfs_name(struct kernfs_node *kn)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&kernfs_rename_lock, flags);
+       spin_lock_irqsave(&kernfs_pr_cont_lock, flags);
 
-       kernfs_name_locked(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
+       kernfs_name(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
        pr_cont("%s", kernfs_pr_cont_buf);
 
-       spin_unlock_irqrestore(&kernfs_rename_lock, flags);
+       spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
 }
 
 /**
@@ -248,10 +256,10 @@ void pr_cont_kernfs_path(struct kernfs_node *kn)
        unsigned long flags;
        int sz;
 
-       spin_lock_irqsave(&kernfs_rename_lock, flags);
+       spin_lock_irqsave(&kernfs_pr_cont_lock, flags);
 
-       sz = kernfs_path_from_node_locked(kn, NULL, kernfs_pr_cont_buf,
-                                         sizeof(kernfs_pr_cont_buf));
+       sz = kernfs_path_from_node(kn, NULL, kernfs_pr_cont_buf,
+                                  sizeof(kernfs_pr_cont_buf));
        if (sz < 0) {
                pr_cont("(error)");
                goto out;
@@ -265,7 +273,7 @@ void pr_cont_kernfs_path(struct kernfs_node *kn)
        pr_cont("%s", kernfs_pr_cont_buf);
 
 out:
-       spin_unlock_irqrestore(&kernfs_rename_lock, flags);
+       spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
 }
 
 /**
@@ -823,13 +831,12 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
 
        lockdep_assert_held_read(&kernfs_root(parent)->kernfs_rwsem);
 
-       /* grab kernfs_rename_lock to piggy back on kernfs_pr_cont_buf */
-       spin_lock_irq(&kernfs_rename_lock);
+       spin_lock_irq(&kernfs_pr_cont_lock);
 
        len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));
 
        if (len >= sizeof(kernfs_pr_cont_buf)) {
-               spin_unlock_irq(&kernfs_rename_lock);
+               spin_unlock_irq(&kernfs_pr_cont_lock);
                return NULL;
        }
 
@@ -841,7 +848,7 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
                parent = kernfs_find_ns(parent, name, ns);
        }
 
-       spin_unlock_irq(&kernfs_rename_lock);
+       spin_unlock_irq(&kernfs_pr_cont_lock);
 
        return parent;
 }
index 8842306..e3abfa8 100644 (file)
@@ -33,7 +33,6 @@ static DEFINE_SPINLOCK(kernfs_open_node_lock);
 static DEFINE_MUTEX(kernfs_open_file_mutex);
 
 struct kernfs_open_node {
-       atomic_t                refcnt;
        atomic_t                event;
        wait_queue_head_t       poll;
        struct list_head        files; /* goes through kernfs_open_file.list */
@@ -530,10 +529,8 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
        }
 
        on = kn->attr.open;
-       if (on) {
-               atomic_inc(&on->refcnt);
+       if (on)
                list_add_tail(&of->list, &on->files);
-       }
 
        spin_unlock_irq(&kernfs_open_node_lock);
        mutex_unlock(&kernfs_open_file_mutex);
@@ -548,7 +545,6 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
        if (!new_on)
                return -ENOMEM;
 
-       atomic_set(&new_on->refcnt, 0);
        atomic_set(&new_on->event, 1);
        init_waitqueue_head(&new_on->poll);
        INIT_LIST_HEAD(&new_on->files);
@@ -556,17 +552,19 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
 }
 
 /**
- *     kernfs_put_open_node - put kernfs_open_node
- *     @kn: target kernfs_nodet
+ *     kernfs_unlink_open_file - Unlink @of from @kn.
+ *
+ *     @kn: target kernfs_node
  *     @of: associated kernfs_open_file
  *
- *     Put @kn->attr.open and unlink @of from the files list.  If
- *     reference count reaches zero, disassociate and free it.
+ *     Unlink @of from the list of @kn's associated open files. If that
+ *     list becomes empty, disassociate and free the kernfs_open_node.
  *
  *     LOCKING:
  *     None.
  */
-static void kernfs_put_open_node(struct kernfs_node *kn,
+static void kernfs_unlink_open_file(struct kernfs_node *kn,
                                 struct kernfs_open_file *of)
 {
        struct kernfs_open_node *on = kn->attr.open;
@@ -578,7 +576,7 @@ static void kernfs_put_open_node(struct kernfs_node *kn,
        if (of)
                list_del(&of->list);
 
-       if (atomic_dec_and_test(&on->refcnt))
+       if (list_empty(&on->files))
                kn->attr.open = NULL;
        else
                on = NULL;
@@ -706,7 +704,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
        return 0;
 
 err_put_node:
-       kernfs_put_open_node(kn, of);
+       kernfs_unlink_open_file(kn, of);
 err_seq_release:
        seq_release(inode, file);
 err_free:
@@ -752,7 +750,7 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
                mutex_unlock(&kernfs_open_file_mutex);
        }
 
-       kernfs_put_open_node(kn, of);
+       kernfs_unlink_open_file(kn, of);
        seq_release(inode, filp);
        kfree(of->prealloc_buf);
        kfree(of);
@@ -768,15 +766,24 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
        if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
                return;
 
-       spin_lock_irq(&kernfs_open_node_lock);
-       on = kn->attr.open;
-       if (on)
-               atomic_inc(&on->refcnt);
-       spin_unlock_irq(&kernfs_open_node_lock);
-       if (!on)
+       /*
+        * The lockless check below is safe because nothing can be adding to
+        * ->attr.open at this point; it only lets us bail out early when
+        * ->attr.open is already NULL. kernfs_unlink_open_file() clears
+        * ->attr.open only while holding kernfs_open_file_mutex, so the
+        * recheck under that mutex catches the case where ->attr.open
+        * became NULL while we were waiting for it.
+        */
+       if (!kn->attr.open)
                return;
 
        mutex_lock(&kernfs_open_file_mutex);
+       if (!kn->attr.open) {
+               mutex_unlock(&kernfs_open_file_mutex);
+               return;
+       }
+
+       on = kn->attr.open;
 
        list_for_each_entry(of, &on->files, list) {
                struct inode *inode = file_inode(of->file);
@@ -789,8 +796,6 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
        }
 
        mutex_unlock(&kernfs_open_file_mutex);
-
-       kernfs_put_open_node(kn, NULL);
 }
 
 /*
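The kernfs_drain_open_files() change above is an instance of the familiar "opportunistic lockless check, then recheck under the lock" bail-out pattern. A simplified userspace analogue, with invented names and pthread/stdatomic primitives standing in for the kernel ones:

/* Illustrative analogue, not kernel code: bail out early if no state is
 * attached, then recheck under the mutex before touching it, mirroring
 * the shape of kernfs_drain_open_files() above.
 */
#include <pthread.h>
#include <stdatomic.h>

struct open_state;

struct node {
	_Atomic(struct open_state *) open;	/* cleared only under 'lock' */
	pthread_mutex_t lock;
};

static void drain(struct node *n)
{
	struct open_state *on;

	/* cheap lockless peek: nothing to do if nobody ever opened it */
	if (!atomic_load_explicit(&n->open, memory_order_relaxed))
		return;

	pthread_mutex_lock(&n->lock);
	on = atomic_load_explicit(&n->open, memory_order_relaxed);
	if (!on) {
		/* raced with the last closer; nothing left to drain */
		pthread_mutex_unlock(&n->lock);
		return;
	}

	/* ... walk the open files hanging off 'on' and drain them ... */

	pthread_mutex_unlock(&n->lock);
}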
index 208d2cf..e8f476c 100644 (file)
@@ -62,7 +62,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
        atomic_set(&conn->req_running, 0);
        atomic_set(&conn->r_count, 0);
        conn->total_credits = 1;
-       conn->outstanding_credits = 1;
+       conn->outstanding_credits = 0;
 
        init_waitqueue_head(&conn->req_running_q);
        INIT_LIST_HEAD(&conn->conns_list);
@@ -205,31 +205,31 @@ int ksmbd_conn_write(struct ksmbd_work *work)
        return 0;
 }
 
-int ksmbd_conn_rdma_read(struct ksmbd_conn *conn, void *buf,
-                        unsigned int buflen, u32 remote_key, u64 remote_offset,
-                        u32 remote_len)
+int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
+                        void *buf, unsigned int buflen,
+                        struct smb2_buffer_desc_v1 *desc,
+                        unsigned int desc_len)
 {
        int ret = -EINVAL;
 
        if (conn->transport->ops->rdma_read)
                ret = conn->transport->ops->rdma_read(conn->transport,
                                                      buf, buflen,
-                                                     remote_key, remote_offset,
-                                                     remote_len);
+                                                     desc, desc_len);
        return ret;
 }
 
-int ksmbd_conn_rdma_write(struct ksmbd_conn *conn, void *buf,
-                         unsigned int buflen, u32 remote_key,
-                         u64 remote_offset, u32 remote_len)
+int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
+                         void *buf, unsigned int buflen,
+                         struct smb2_buffer_desc_v1 *desc,
+                         unsigned int desc_len)
 {
        int ret = -EINVAL;
 
        if (conn->transport->ops->rdma_write)
                ret = conn->transport->ops->rdma_write(conn->transport,
                                                       buf, buflen,
-                                                      remote_key, remote_offset,
-                                                      remote_len);
+                                                      desc, desc_len);
        return ret;
 }
 
index 7a59aac..98c1cbe 100644 (file)
@@ -122,11 +122,14 @@ struct ksmbd_transport_ops {
        int (*writev)(struct ksmbd_transport *t, struct kvec *iovs, int niov,
                      int size, bool need_invalidate_rkey,
                      unsigned int remote_key);
-       int (*rdma_read)(struct ksmbd_transport *t, void *buf, unsigned int len,
-                        u32 remote_key, u64 remote_offset, u32 remote_len);
-       int (*rdma_write)(struct ksmbd_transport *t, void *buf,
-                         unsigned int len, u32 remote_key, u64 remote_offset,
-                         u32 remote_len);
+       int (*rdma_read)(struct ksmbd_transport *t,
+                        void *buf, unsigned int len,
+                        struct smb2_buffer_desc_v1 *desc,
+                        unsigned int desc_len);
+       int (*rdma_write)(struct ksmbd_transport *t,
+                         void *buf, unsigned int len,
+                         struct smb2_buffer_desc_v1 *desc,
+                         unsigned int desc_len);
 };
 
 struct ksmbd_transport {
@@ -148,12 +151,14 @@ struct ksmbd_conn *ksmbd_conn_alloc(void);
 void ksmbd_conn_free(struct ksmbd_conn *conn);
 bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
 int ksmbd_conn_write(struct ksmbd_work *work);
-int ksmbd_conn_rdma_read(struct ksmbd_conn *conn, void *buf,
-                        unsigned int buflen, u32 remote_key, u64 remote_offset,
-                        u32 remote_len);
-int ksmbd_conn_rdma_write(struct ksmbd_conn *conn, void *buf,
-                         unsigned int buflen, u32 remote_key, u64 remote_offset,
-                         u32 remote_len);
+int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
+                        void *buf, unsigned int buflen,
+                        struct smb2_buffer_desc_v1 *desc,
+                        unsigned int desc_len);
+int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
+                         void *buf, unsigned int buflen,
+                         struct smb2_buffer_desc_v1 *desc,
+                         unsigned int desc_len);
 void ksmbd_conn_enqueue_request(struct ksmbd_work *work);
 int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
 void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops);
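With ksmbd_conn_rdma_read()/ksmbd_conn_rdma_write() now taking a descriptor array rather than a single (remote_key, offset, length) triple, callers hand the transport the request's channel-info region directly, which may describe several remote buffers. The sketch below shows the Buffer Descriptor V1 layout that region carries and how a total transfer length falls out of it; the layout follows MS-SMBD, the struct and helper names are invented for illustration, and the real definition lives in the SMB2 headers rather than in this patch:

/* Illustrative sketch, not taken from this patch: the wire layout of one
 * SMB Direct Buffer Descriptor V1 entry and a helper that sums an array
 * of them.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct smbd_buffer_desc_v1_sketch {
	__le64 offset;	/* remote offset to read from / write to */
	__le32 token;	/* remote memory registration token (rkey) */
	__le32 length;	/* length of this fragment */
} __attribute__((packed));

static inline unsigned int desc_total_length(struct smbd_buffer_desc_v1_sketch *desc,
					     unsigned int desc_len)
{
	unsigned int i, total = 0;

	for (i = 0; i < desc_len / sizeof(*desc); i++)
		total += le32_to_cpu(desc[i].length);
	return total;
}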
index ebe6ca0..52aa0ad 100644 (file)
@@ -104,7 +104,8 @@ struct ksmbd_startup_request {
                                         */
        __u32   sub_auth[3];            /* Subauth value for Security ID */
        __u32   smb2_max_credits;       /* MAX credits */
-       __u32   reserved[128];          /* Reserved room */
+       __u32   smbd_max_io_size;       /* smbd read write size */
+       __u32   reserved[127];          /* Reserved room */
        __u32   ifc_list_sz;            /* interfaces list size */
        __s8    ____payload[];
 };
index 1e2076a..df99110 100644 (file)
@@ -20,7 +20,7 @@
  * wildcard '*' and '?'
  * TODO : implement consideration about DOS_DOT, DOS_QM and DOS_STAR
  *
- * @string:    string to compare with a pattern
+ * @str:       string to compare with a pattern
  * @len:       string length
  * @pattern:   pattern string which might include wildcard '*' and '?'
  *
@@ -152,8 +152,8 @@ out:
 /**
  * convert_to_nt_pathname() - extract and return windows path string
  *      whose share directory prefix was removed from file path
- * @filename : unix filename
- * @sharepath: share path string
+ * @share: ksmbd_share_config pointer
+ * @path: path to report
  *
  * Return : windows path string or error
  */
@@ -250,8 +250,8 @@ char *ksmbd_extract_sharename(char *treename)
 
 /**
  * convert_to_unix_name() - convert windows name to unix format
- * @path:      name to be converted
- * @tid:       tree id of mathing share
+ * @share:     ksmbd_share_config pointer
+ * @name:      file name that is relative to share
  *
  * Return:     converted name on success, otherwise NULL
  */
index 4a94601..f8f4563 100644 (file)
@@ -338,7 +338,7 @@ static int smb2_validate_credit_charge(struct ksmbd_conn *conn,
                ret = 1;
        }
 
-       if ((u64)conn->outstanding_credits + credit_charge > conn->vals->max_credits) {
+       if ((u64)conn->outstanding_credits + credit_charge > conn->total_credits) {
                ksmbd_debug(SMB, "Limits exceeding the maximum allowable outstanding requests, given : %u, pending : %u\n",
                            credit_charge, conn->outstanding_credits);
                ret = 1;
index 16c803a..e6f4ccc 100644 (file)
@@ -3938,6 +3938,12 @@ int smb2_query_dir(struct ksmbd_work *work)
        set_ctx_actor(&dir_fp->readdir_data.ctx, __query_dir);
 
        rc = iterate_dir(dir_fp->filp, &dir_fp->readdir_data.ctx);
+       /*
+        * If req->OutputBufferLength is too small to contain even one
+        * entry, immediately return an OutputBufferLength of 0 to the
+        * client.
+        */
+       if (!d_info.out_buf_len && !d_info.num_entry)
+               goto no_buf_len;
        if (rc == 0)
                restart_ctx(&dir_fp->readdir_data.ctx);
        if (rc == -ENOSPC)
@@ -3964,10 +3970,12 @@ int smb2_query_dir(struct ksmbd_work *work)
                rsp->Buffer[0] = 0;
                inc_rfc1001_len(work->response_buf, 9);
        } else {
+no_buf_len:
                ((struct file_directory_info *)
                ((char *)rsp->Buffer + d_info.last_entry_offset))
                ->NextEntryOffset = 0;
-               d_info.data_count -= d_info.last_entry_off_align;
+               if (d_info.data_count >= d_info.last_entry_off_align)
+                       d_info.data_count -= d_info.last_entry_off_align;
 
                rsp->StructureSize = cpu_to_le16(9);
                rsp->OutputBufferOffset = cpu_to_le16(72);
@@ -6116,7 +6124,6 @@ out:
 static int smb2_set_remote_key_for_rdma(struct ksmbd_work *work,
                                        struct smb2_buffer_desc_v1 *desc,
                                        __le32 Channel,
-                                       __le16 ChannelInfoOffset,
                                        __le16 ChannelInfoLength)
 {
        unsigned int i, ch_count;
@@ -6134,15 +6141,13 @@ static int smb2_set_remote_key_for_rdma(struct ksmbd_work *work,
                                le32_to_cpu(desc[i].length));
                }
        }
-       if (ch_count != 1) {
-               ksmbd_debug(RDMA, "RDMA multiple buffer descriptors %d are not supported yet\n",
-                           ch_count);
+       if (!ch_count)
                return -EINVAL;
-       }
 
        work->need_invalidate_rkey =
                (Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE);
-       work->remote_key = le32_to_cpu(desc->token);
+       if (Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE)
+               work->remote_key = le32_to_cpu(desc->token);
        return 0;
 }
 
@@ -6150,14 +6155,12 @@ static ssize_t smb2_read_rdma_channel(struct ksmbd_work *work,
                                      struct smb2_read_req *req, void *data_buf,
                                      size_t length)
 {
-       struct smb2_buffer_desc_v1 *desc =
-               (struct smb2_buffer_desc_v1 *)&req->Buffer[0];
        int err;
 
        err = ksmbd_conn_rdma_write(work->conn, data_buf, length,
-                                   le32_to_cpu(desc->token),
-                                   le64_to_cpu(desc->offset),
-                                   le32_to_cpu(desc->length));
+                                   (struct smb2_buffer_desc_v1 *)
+                                   ((char *)req + le16_to_cpu(req->ReadChannelInfoOffset)),
+                                   le16_to_cpu(req->ReadChannelInfoLength));
        if (err)
                return err;
 
@@ -6180,6 +6183,8 @@ int smb2_read(struct ksmbd_work *work)
        size_t length, mincount;
        ssize_t nbytes = 0, remain_bytes = 0;
        int err = 0;
+       bool is_rdma_channel = false;
+       unsigned int max_read_size = conn->vals->max_read_size;
 
        WORK_BUFFERS(work, req, rsp);
 
@@ -6191,6 +6196,11 @@ int smb2_read(struct ksmbd_work *work)
 
        if (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE ||
            req->Channel == SMB2_CHANNEL_RDMA_V1) {
+               is_rdma_channel = true;
+               max_read_size = get_smbd_max_read_write_size();
+       }
+
+       if (is_rdma_channel == true) {
                unsigned int ch_offset = le16_to_cpu(req->ReadChannelInfoOffset);
 
                if (ch_offset < offsetof(struct smb2_read_req, Buffer)) {
@@ -6201,7 +6211,6 @@ int smb2_read(struct ksmbd_work *work)
                                                   (struct smb2_buffer_desc_v1 *)
                                                   ((char *)req + ch_offset),
                                                   req->Channel,
-                                                  req->ReadChannelInfoOffset,
                                                   req->ReadChannelInfoLength);
                if (err)
                        goto out;
@@ -6223,9 +6232,9 @@ int smb2_read(struct ksmbd_work *work)
        length = le32_to_cpu(req->Length);
        mincount = le32_to_cpu(req->MinimumCount);
 
-       if (length > conn->vals->max_read_size) {
+       if (length > max_read_size) {
                ksmbd_debug(SMB, "limiting read size to max size(%u)\n",
-                           conn->vals->max_read_size);
+                           max_read_size);
                err = -EINVAL;
                goto out;
        }
@@ -6257,8 +6266,7 @@ int smb2_read(struct ksmbd_work *work)
        ksmbd_debug(SMB, "nbytes %zu, offset %lld mincount %zu\n",
                    nbytes, offset, mincount);
 
-       if (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE ||
-           req->Channel == SMB2_CHANNEL_RDMA_V1) {
+       if (is_rdma_channel == true) {
                /* write data to the client using rdma channel */
                remain_bytes = smb2_read_rdma_channel(work, req,
                                                      work->aux_payload_buf,
@@ -6328,23 +6336,18 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work)
        length = le32_to_cpu(req->Length);
        id = req->VolatileFileId;
 
-       if (le16_to_cpu(req->DataOffset) ==
-           offsetof(struct smb2_write_req, Buffer)) {
-               data_buf = (char *)&req->Buffer[0];
-       } else {
-               if ((u64)le16_to_cpu(req->DataOffset) + length >
-                   get_rfc1002_len(work->request_buf)) {
-                       pr_err("invalid write data offset %u, smb_len %u\n",
-                              le16_to_cpu(req->DataOffset),
-                              get_rfc1002_len(work->request_buf));
-                       err = -EINVAL;
-                       goto out;
-               }
-
-               data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
-                               le16_to_cpu(req->DataOffset));
+       if ((u64)le16_to_cpu(req->DataOffset) + length >
+           get_rfc1002_len(work->request_buf)) {
+               pr_err("invalid write data offset %u, smb_len %u\n",
+                      le16_to_cpu(req->DataOffset),
+                      get_rfc1002_len(work->request_buf));
+               err = -EINVAL;
+               goto out;
        }
 
+       data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
+                          le16_to_cpu(req->DataOffset));
+
        rpc_resp = ksmbd_rpc_write(work->sess, id, data_buf, length);
        if (rpc_resp) {
                if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
@@ -6384,21 +6387,18 @@ static ssize_t smb2_write_rdma_channel(struct ksmbd_work *work,
                                       struct ksmbd_file *fp,
                                       loff_t offset, size_t length, bool sync)
 {
-       struct smb2_buffer_desc_v1 *desc;
        char *data_buf;
        int ret;
        ssize_t nbytes;
 
-       desc = (struct smb2_buffer_desc_v1 *)&req->Buffer[0];
-
        data_buf = kvmalloc(length, GFP_KERNEL | __GFP_ZERO);
        if (!data_buf)
                return -ENOMEM;
 
        ret = ksmbd_conn_rdma_read(work->conn, data_buf, length,
-                                  le32_to_cpu(desc->token),
-                                  le64_to_cpu(desc->offset),
-                                  le32_to_cpu(desc->length));
+                                  (struct smb2_buffer_desc_v1 *)
+                                  ((char *)req + le16_to_cpu(req->WriteChannelInfoOffset)),
+                                  le16_to_cpu(req->WriteChannelInfoLength));
        if (ret < 0) {
                kvfree(data_buf);
                return ret;
@@ -6427,8 +6427,9 @@ int smb2_write(struct ksmbd_work *work)
        size_t length;
        ssize_t nbytes;
        char *data_buf;
-       bool writethrough = false;
+       bool writethrough = false, is_rdma_channel = false;
        int err = 0;
+       unsigned int max_write_size = work->conn->vals->max_write_size;
 
        WORK_BUFFERS(work, req, rsp);
 
@@ -6437,8 +6438,17 @@ int smb2_write(struct ksmbd_work *work)
                return smb2_write_pipe(work);
        }
 
+       offset = le64_to_cpu(req->Offset);
+       length = le32_to_cpu(req->Length);
+
        if (req->Channel == SMB2_CHANNEL_RDMA_V1 ||
            req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE) {
+               is_rdma_channel = true;
+               max_write_size = get_smbd_max_read_write_size();
+               length = le32_to_cpu(req->RemainingBytes);
+       }
+
+       if (is_rdma_channel == true) {
                unsigned int ch_offset = le16_to_cpu(req->WriteChannelInfoOffset);
 
                if (req->Length != 0 || req->DataOffset != 0 ||
@@ -6450,7 +6460,6 @@ int smb2_write(struct ksmbd_work *work)
                                                   (struct smb2_buffer_desc_v1 *)
                                                   ((char *)req + ch_offset),
                                                   req->Channel,
-                                                  req->WriteChannelInfoOffset,
                                                   req->WriteChannelInfoLength);
                if (err)
                        goto out;
@@ -6474,12 +6483,9 @@ int smb2_write(struct ksmbd_work *work)
                goto out;
        }
 
-       offset = le64_to_cpu(req->Offset);
-       length = le32_to_cpu(req->Length);
-
-       if (length > work->conn->vals->max_write_size) {
+       if (length > max_write_size) {
                ksmbd_debug(SMB, "limiting write size to max size(%u)\n",
-                           work->conn->vals->max_write_size);
+                           max_write_size);
                err = -EINVAL;
                goto out;
        }
@@ -6487,24 +6493,17 @@ int smb2_write(struct ksmbd_work *work)
        if (le32_to_cpu(req->Flags) & SMB2_WRITEFLAG_WRITE_THROUGH)
                writethrough = true;
 
-       if (req->Channel != SMB2_CHANNEL_RDMA_V1 &&
-           req->Channel != SMB2_CHANNEL_RDMA_V1_INVALIDATE) {
-               if (le16_to_cpu(req->DataOffset) ==
-                   offsetof(struct smb2_write_req, Buffer)) {
-                       data_buf = (char *)&req->Buffer[0];
-               } else {
-                       if ((u64)le16_to_cpu(req->DataOffset) + length >
-                           get_rfc1002_len(work->request_buf)) {
-                               pr_err("invalid write data offset %u, smb_len %u\n",
-                                      le16_to_cpu(req->DataOffset),
-                                      get_rfc1002_len(work->request_buf));
-                               err = -EINVAL;
-                               goto out;
-                       }
-
-                       data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
-                                       le16_to_cpu(req->DataOffset));
+       if (is_rdma_channel == false) {
+               if ((u64)le16_to_cpu(req->DataOffset) + length >
+                   get_rfc1002_len(work->request_buf)) {
+                       pr_err("invalid write data offset %u, smb_len %u\n",
+                              le16_to_cpu(req->DataOffset),
+                              get_rfc1002_len(work->request_buf));
+                       err = -EINVAL;
+                       goto out;
                }
+               data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
+                                   le16_to_cpu(req->DataOffset));
 
                ksmbd_debug(SMB, "flags %u\n", le32_to_cpu(req->Flags));
                if (le32_to_cpu(req->Flags) & SMB2_WRITEFLAG_WRITE_THROUGH)
@@ -6520,8 +6519,7 @@ int smb2_write(struct ksmbd_work *work)
                /* read data from the client using rdma channel, and
                 * write the data.
                 */
-               nbytes = smb2_write_rdma_channel(work, req, fp, offset,
-                                                le32_to_cpu(req->RemainingBytes),
+               nbytes = smb2_write_rdma_channel(work, req, fp, offset, length,
                                                 writethrough);
                if (nbytes < 0) {
                        err = (int)nbytes;
index 9a7e211..7f8ab14 100644 (file)
@@ -140,8 +140,10 @@ int ksmbd_verify_smb_message(struct ksmbd_work *work)
 
        hdr = work->request_buf;
        if (*(__le32 *)hdr->Protocol == SMB1_PROTO_NUMBER &&
-           hdr->Command == SMB_COM_NEGOTIATE)
+           hdr->Command == SMB_COM_NEGOTIATE) {
+               work->conn->outstanding_credits++;
                return 0;
+       }
 
        return -EINVAL;
 }
index 6ecf55e..38f23bf 100644 (file)
@@ -1261,6 +1261,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
                                        if (!access_bits)
                                                access_bits =
                                                        SET_MINIMUM_RIGHTS;
+                                       posix_acl_release(posix_acls);
                                        goto check_access_bits;
                                }
                        }
index 3ad6881..7cb0eeb 100644 (file)
@@ -26,6 +26,7 @@
 #include "mgmt/ksmbd_ida.h"
 #include "connection.h"
 #include "transport_tcp.h"
+#include "transport_rdma.h"
 
 #define IPC_WAIT_TIMEOUT       (2 * HZ)
 
@@ -303,6 +304,8 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
                init_smb2_max_trans_size(req->smb2_max_trans);
        if (req->smb2_max_credits)
                init_smb2_max_credits(req->smb2_max_credits);
+       if (req->smbd_max_io_size)
+               init_smbd_max_io_size(req->smbd_max_io_size);
 
        ret = ksmbd_set_netbios_name(req->netbios_name);
        ret |= ksmbd_set_server_string(req->server_string);
index e646d79..d035e06 100644 (file)
@@ -80,9 +80,7 @@ static int smb_direct_max_fragmented_recv_size = 1024 * 1024;
 /*  The maximum single-message size which can be received */
 static int smb_direct_max_receive_size = 8192;
 
-static int smb_direct_max_read_write_size = 524224;
-
-static int smb_direct_max_outstanding_rw_ops = 8;
+static int smb_direct_max_read_write_size = SMBD_DEFAULT_IOSIZE;
 
 static LIST_HEAD(smb_direct_device_list);
 static DEFINE_RWLOCK(smb_direct_device_lock);
@@ -147,18 +145,18 @@ struct smb_direct_transport {
        atomic_t                send_credits;
        spinlock_t              lock_new_recv_credits;
        int                     new_recv_credits;
-       atomic_t                rw_avail_ops;
+       int                     max_rw_credits;
+       int                     pages_per_rw_credit;
+       atomic_t                rw_credits;
 
        wait_queue_head_t       wait_send_credits;
-       wait_queue_head_t       wait_rw_avail_ops;
+       wait_queue_head_t       wait_rw_credits;
 
        mempool_t               *sendmsg_mempool;
        struct kmem_cache       *sendmsg_cache;
        mempool_t               *recvmsg_mempool;
        struct kmem_cache       *recvmsg_cache;
 
-       wait_queue_head_t       wait_send_payload_pending;
-       atomic_t                send_payload_pending;
        wait_queue_head_t       wait_send_pending;
        atomic_t                send_pending;
 
@@ -208,12 +206,25 @@ struct smb_direct_recvmsg {
 struct smb_direct_rdma_rw_msg {
        struct smb_direct_transport     *t;
        struct ib_cqe           cqe;
+       int                     status;
        struct completion       *completion;
+       struct list_head        list;
        struct rdma_rw_ctx      rw_ctx;
        struct sg_table         sgt;
        struct scatterlist      sg_list[];
 };
 
+void init_smbd_max_io_size(unsigned int sz)
+{
+       sz = clamp_val(sz, SMBD_MIN_IOSIZE, SMBD_MAX_IOSIZE);
+       smb_direct_max_read_write_size = sz;
+}
+
+unsigned int get_smbd_max_read_write_size(void)
+{
+       return smb_direct_max_read_write_size;
+}
+
 static inline int get_buf_page_count(void *buf, int size)
 {
        return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
@@ -377,7 +388,7 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
        t->reassembly_queue_length = 0;
        init_waitqueue_head(&t->wait_reassembly_queue);
        init_waitqueue_head(&t->wait_send_credits);
-       init_waitqueue_head(&t->wait_rw_avail_ops);
+       init_waitqueue_head(&t->wait_rw_credits);
 
        spin_lock_init(&t->receive_credit_lock);
        spin_lock_init(&t->recvmsg_queue_lock);
@@ -386,8 +397,6 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
        spin_lock_init(&t->empty_recvmsg_queue_lock);
        INIT_LIST_HEAD(&t->empty_recvmsg_queue);
 
-       init_waitqueue_head(&t->wait_send_payload_pending);
-       atomic_set(&t->send_payload_pending, 0);
        init_waitqueue_head(&t->wait_send_pending);
        atomic_set(&t->send_pending, 0);
 
@@ -417,8 +426,6 @@ static void free_transport(struct smb_direct_transport *t)
        wake_up_interruptible(&t->wait_send_credits);
 
        ksmbd_debug(RDMA, "wait for all send posted to IB to finish\n");
-       wait_event(t->wait_send_payload_pending,
-                  atomic_read(&t->send_payload_pending) == 0);
        wait_event(t->wait_send_pending,
                   atomic_read(&t->send_pending) == 0);
 
@@ -569,6 +576,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
                }
                t->negotiation_requested = true;
                t->full_packet_received = true;
+               t->status = SMB_DIRECT_CS_CONNECTED;
                enqueue_reassembly(t, recvmsg, 0);
                wake_up_interruptible(&t->wait_status);
                break;
@@ -873,13 +881,8 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
                smb_direct_disconnect_rdma_connection(t);
        }
 
-       if (sendmsg->num_sge > 1) {
-               if (atomic_dec_and_test(&t->send_payload_pending))
-                       wake_up(&t->wait_send_payload_pending);
-       } else {
-               if (atomic_dec_and_test(&t->send_pending))
-                       wake_up(&t->wait_send_pending);
-       }
+       if (atomic_dec_and_test(&t->send_pending))
+               wake_up(&t->wait_send_pending);
 
        /* iterate and free the list of messages in reverse. the list's head
         * is invalid.
@@ -911,21 +914,12 @@ static int smb_direct_post_send(struct smb_direct_transport *t,
 {
        int ret;
 
-       if (wr->num_sge > 1)
-               atomic_inc(&t->send_payload_pending);
-       else
-               atomic_inc(&t->send_pending);
-
+       atomic_inc(&t->send_pending);
        ret = ib_post_send(t->qp, wr, NULL);
        if (ret) {
                pr_err("failed to post send: %d\n", ret);
-               if (wr->num_sge > 1) {
-                       if (atomic_dec_and_test(&t->send_payload_pending))
-                               wake_up(&t->wait_send_payload_pending);
-               } else {
-                       if (atomic_dec_and_test(&t->send_pending))
-                               wake_up(&t->wait_send_pending);
-               }
+               if (atomic_dec_and_test(&t->send_pending))
+                       wake_up(&t->wait_send_pending);
                smb_direct_disconnect_rdma_connection(t);
        }
        return ret;
@@ -983,18 +977,19 @@ static int smb_direct_flush_send_list(struct smb_direct_transport *t,
 }
 
 static int wait_for_credits(struct smb_direct_transport *t,
-                           wait_queue_head_t *waitq, atomic_t *credits)
+                           wait_queue_head_t *waitq, atomic_t *total_credits,
+                           int needed)
 {
        int ret;
 
        do {
-               if (atomic_dec_return(credits) >= 0)
+               if (atomic_sub_return(needed, total_credits) >= 0)
                        return 0;
 
-               atomic_inc(credits);
+               atomic_add(needed, total_credits);
                ret = wait_event_interruptible(*waitq,
-                                              atomic_read(credits) > 0 ||
-                                               t->status != SMB_DIRECT_CS_CONNECTED);
+                                              atomic_read(total_credits) >= needed ||
+                                              t->status != SMB_DIRECT_CS_CONNECTED);
 
                if (t->status != SMB_DIRECT_CS_CONNECTED)
                        return -ENOTCONN;
@@ -1015,7 +1010,19 @@ static int wait_for_send_credits(struct smb_direct_transport *t,
                        return ret;
        }
 
-       return wait_for_credits(t, &t->wait_send_credits, &t->send_credits);
+       return wait_for_credits(t, &t->wait_send_credits, &t->send_credits, 1);
+}
+
+static int wait_for_rw_credits(struct smb_direct_transport *t, int credits)
+{
+       return wait_for_credits(t, &t->wait_rw_credits, &t->rw_credits, credits);
+}
+
+static int calc_rw_credits(struct smb_direct_transport *t,
+                          char *buf, unsigned int len)
+{
+       return DIV_ROUND_UP(get_buf_page_count(buf, len),
+                           t->pages_per_rw_credit);
 }
 
 static int smb_direct_create_header(struct smb_direct_transport *t,
@@ -1086,7 +1093,7 @@ static int get_sg_list(void *buf, int size, struct scatterlist *sg_list, int nen
        int offset, len;
        int i = 0;
 
-       if (nentries < get_buf_page_count(buf, size))
+       if (size <= 0 || nentries < get_buf_page_count(buf, size))
                return -EINVAL;
 
        offset = offset_in_page(buf);
@@ -1118,7 +1125,7 @@ static int get_mapped_sg_list(struct ib_device *device, void *buf, int size,
        int npages;
 
        npages = get_sg_list(buf, size, sg_list, nentries);
-       if (npages <= 0)
+       if (npages < 0)
                return -EINVAL;
        return ib_dma_map_sg(device, sg_list, npages, dir);
 }
@@ -1313,11 +1320,21 @@ done:
         * that means all the I/Os have been out and we are good to return
         */
 
-       wait_event(st->wait_send_payload_pending,
-                  atomic_read(&st->send_payload_pending) == 0);
+       wait_event(st->wait_send_pending,
+                  atomic_read(&st->send_pending) == 0);
        return ret;
 }
 
+static void smb_direct_free_rdma_rw_msg(struct smb_direct_transport *t,
+                                       struct smb_direct_rdma_rw_msg *msg,
+                                       enum dma_data_direction dir)
+{
+       rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
+                           msg->sgt.sgl, msg->sgt.nents, dir);
+       sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
+       kfree(msg);
+}
+
 static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
                            enum dma_data_direction dir)
 {
@@ -1326,19 +1343,14 @@ static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
        struct smb_direct_transport *t = msg->t;
 
        if (wc->status != IB_WC_SUCCESS) {
+               msg->status = -EIO;
                pr_err("read/write error. opcode = %d, status = %s(%d)\n",
                       wc->opcode, ib_wc_status_msg(wc->status), wc->status);
-               smb_direct_disconnect_rdma_connection(t);
+               if (wc->status != IB_WC_WR_FLUSH_ERR)
+                       smb_direct_disconnect_rdma_connection(t);
        }
 
-       if (atomic_inc_return(&t->rw_avail_ops) > 0)
-               wake_up(&t->wait_rw_avail_ops);
-
-       rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
-                           msg->sg_list, msg->sgt.nents, dir);
-       sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
        complete(msg->completion);
-       kfree(msg);
 }
 
 static void read_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1351,94 +1363,141 @@ static void write_done(struct ib_cq *cq, struct ib_wc *wc)
        read_write_done(cq, wc, DMA_TO_DEVICE);
 }
 
-static int smb_direct_rdma_xmit(struct smb_direct_transport *t, void *buf,
-                               int buf_len, u32 remote_key, u64 remote_offset,
-                               u32 remote_len, bool is_read)
+static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
+                               void *buf, int buf_len,
+                               struct smb2_buffer_desc_v1 *desc,
+                               unsigned int desc_len,
+                               bool is_read)
 {
-       struct smb_direct_rdma_rw_msg *msg;
-       int ret;
+       struct smb_direct_rdma_rw_msg *msg, *next_msg;
+       int i, ret;
        DECLARE_COMPLETION_ONSTACK(completion);
-       struct ib_send_wr *first_wr = NULL;
+       struct ib_send_wr *first_wr;
+       LIST_HEAD(msg_list);
+       char *desc_buf;
+       int credits_needed;
+       unsigned int desc_buf_len;
+       size_t total_length = 0;
+
+       if (t->status != SMB_DIRECT_CS_CONNECTED)
+               return -ENOTCONN;
 
-       ret = wait_for_credits(t, &t->wait_rw_avail_ops, &t->rw_avail_ops);
+       /* calculate needed credits */
+       credits_needed = 0;
+       desc_buf = buf;
+       for (i = 0; i < desc_len / sizeof(*desc); i++) {
+               desc_buf_len = le32_to_cpu(desc[i].length);
+
+               credits_needed += calc_rw_credits(t, desc_buf, desc_buf_len);
+               desc_buf += desc_buf_len;
+               total_length += desc_buf_len;
+               if (desc_buf_len == 0 || total_length > buf_len ||
+                   total_length > t->max_rdma_rw_size)
+                       return -EINVAL;
+       }
+
+       ksmbd_debug(RDMA, "RDMA %s, len %#x, needed credits %#x\n",
+                   is_read ? "read" : "write", buf_len, credits_needed);
+
+       ret = wait_for_rw_credits(t, credits_needed);
        if (ret < 0)
                return ret;
 
-       /* TODO: mempool */
-       msg = kmalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
-                     sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
-       if (!msg) {
-               atomic_inc(&t->rw_avail_ops);
-               return -ENOMEM;
-       }
+       /* build rdma_rw_ctx for each descriptor */
+       desc_buf = buf;
+       for (i = 0; i < desc_len / sizeof(*desc); i++) {
+               msg = kzalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
+                             sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
+               if (!msg) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
 
-       msg->sgt.sgl = &msg->sg_list[0];
-       ret = sg_alloc_table_chained(&msg->sgt,
-                                    get_buf_page_count(buf, buf_len),
-                                    msg->sg_list, SG_CHUNK_SIZE);
-       if (ret) {
-               atomic_inc(&t->rw_avail_ops);
-               kfree(msg);
-               return -ENOMEM;
-       }
+               desc_buf_len = le32_to_cpu(desc[i].length);
 
-       ret = get_sg_list(buf, buf_len, msg->sgt.sgl, msg->sgt.orig_nents);
-       if (ret <= 0) {
-               pr_err("failed to get pages\n");
-               goto err;
-       }
+               msg->t = t;
+               msg->cqe.done = is_read ? read_done : write_done;
+               msg->completion = &completion;
 
-       ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
-                              msg->sg_list, get_buf_page_count(buf, buf_len),
-                              0, remote_offset, remote_key,
-                              is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
-       if (ret < 0) {
-               pr_err("failed to init rdma_rw_ctx: %d\n", ret);
-               goto err;
+               msg->sgt.sgl = &msg->sg_list[0];
+               ret = sg_alloc_table_chained(&msg->sgt,
+                                            get_buf_page_count(desc_buf, desc_buf_len),
+                                            msg->sg_list, SG_CHUNK_SIZE);
+               if (ret) {
+                       kfree(msg);
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               ret = get_sg_list(desc_buf, desc_buf_len,
+                                 msg->sgt.sgl, msg->sgt.orig_nents);
+               if (ret < 0) {
+                       sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
+                       kfree(msg);
+                       goto out;
+               }
+
+               ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
+                                      msg->sgt.sgl,
+                                      get_buf_page_count(desc_buf, desc_buf_len),
+                                      0,
+                                      le64_to_cpu(desc[i].offset),
+                                      le32_to_cpu(desc[i].token),
+                                      is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+               if (ret < 0) {
+                       pr_err("failed to init rdma_rw_ctx: %d\n", ret);
+                       sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
+                       kfree(msg);
+                       goto out;
+               }
+
+               list_add_tail(&msg->list, &msg_list);
+               desc_buf += desc_buf_len;
        }
 
-       msg->t = t;
-       msg->cqe.done = is_read ? read_done : write_done;
-       msg->completion = &completion;
-       first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
-                                  &msg->cqe, NULL);
+       /* concatenate work requests of rdma_rw_ctxs */
+       first_wr = NULL;
+       list_for_each_entry_reverse(msg, &msg_list, list) {
+               first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
+                                          &msg->cqe, first_wr);
+       }
 
        ret = ib_post_send(t->qp, first_wr, NULL);
        if (ret) {
-               pr_err("failed to post send wr: %d\n", ret);
-               goto err;
+               pr_err("failed to post send wr for RDMA R/W: %d\n", ret);
+               goto out;
        }
 
+       msg = list_last_entry(&msg_list, struct smb_direct_rdma_rw_msg, list);
        wait_for_completion(&completion);
-       return 0;
-
-err:
-       atomic_inc(&t->rw_avail_ops);
-       if (first_wr)
-               rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
-                                   msg->sg_list, msg->sgt.nents,
-                                   is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
-       sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
-       kfree(msg);
+       ret = msg->status;
+out:
+       list_for_each_entry_safe(msg, next_msg, &msg_list, list) {
+               list_del(&msg->list);
+               smb_direct_free_rdma_rw_msg(t, msg,
+                                           is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+       }
+       atomic_add(credits_needed, &t->rw_credits);
+       wake_up(&t->wait_rw_credits);
        return ret;
 }
 
-static int smb_direct_rdma_write(struct ksmbd_transport *t, void *buf,
-                                unsigned int buflen, u32 remote_key,
-                                u64 remote_offset, u32 remote_len)
+static int smb_direct_rdma_write(struct ksmbd_transport *t,
+                                void *buf, unsigned int buflen,
+                                struct smb2_buffer_desc_v1 *desc,
+                                unsigned int desc_len)
 {
        return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
-                                   remote_key, remote_offset,
-                                   remote_len, false);
+                                   desc, desc_len, false);
 }
 
-static int smb_direct_rdma_read(struct ksmbd_transport *t, void *buf,
-                               unsigned int buflen, u32 remote_key,
-                               u64 remote_offset, u32 remote_len)
+static int smb_direct_rdma_read(struct ksmbd_transport *t,
+                               void *buf, unsigned int buflen,
+                               struct smb2_buffer_desc_v1 *desc,
+                               unsigned int desc_len)
 {
        return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
-                                   remote_key, remote_offset,
-                                   remote_len, true);
+                                   desc, desc_len, true);
 }
 
 static void smb_direct_disconnect(struct ksmbd_transport *t)
@@ -1638,41 +1697,57 @@ out_err:
        return ret;
 }
 
+static unsigned int smb_direct_get_max_fr_pages(struct smb_direct_transport *t)
+{
+       return min_t(unsigned int,
+                    t->cm_id->device->attrs.max_fast_reg_page_list_len,
+                    256);
+}
+
 static int smb_direct_init_params(struct smb_direct_transport *t,
                                  struct ib_qp_cap *cap)
 {
        struct ib_device *device = t->cm_id->device;
-       int max_send_sges, max_pages, max_rw_wrs, max_send_wrs;
+       int max_send_sges, max_rw_wrs, max_send_wrs;
+       unsigned int max_sge_per_wr, wrs_per_credit;
 
-       /* need 2 more sge. because a SMB_DIRECT header will be mapped,
-        * and maybe a send buffer could be not page aligned.
+       /* need 3 more sges, because an SMB_DIRECT header, an SMB2 header
+        * and an SMB2 response could each be mapped.
         */
        t->max_send_size = smb_direct_max_send_size;
-       max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 2;
+       max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 3;
        if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) {
                pr_err("max_send_size %d is too large\n", t->max_send_size);
                return -EINVAL;
        }
 
-       /*
-        * allow smb_direct_max_outstanding_rw_ops of in-flight RDMA
-        * read/writes. HCA guarantees at least max_send_sge of sges for
-        * a RDMA read/write work request, and if memory registration is used,
-        * we need reg_mr, local_inv wrs for each read/write.
+       /* Calculate the number of work requests for RDMA R/W.
+        * The maximum number of pages which can be registered with
+        * one memory region can be transferred with one R/W credit.
+        * At least 4 work requests per credit are needed for MR
+        * registration, RDMA R/W, and local & remote MR
+        * invalidation.
         */
        t->max_rdma_rw_size = smb_direct_max_read_write_size;
-       max_pages = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
-       max_rw_wrs = DIV_ROUND_UP(max_pages, SMB_DIRECT_MAX_SEND_SGES);
-       max_rw_wrs += rdma_rw_mr_factor(device, t->cm_id->port_num,
-                       max_pages) * 2;
-       max_rw_wrs *= smb_direct_max_outstanding_rw_ops;
+       t->pages_per_rw_credit = smb_direct_get_max_fr_pages(t);
+       t->max_rw_credits = DIV_ROUND_UP(t->max_rdma_rw_size,
+                                        (t->pages_per_rw_credit - 1) *
+                                        PAGE_SIZE);
+
+       max_sge_per_wr = min_t(unsigned int, device->attrs.max_send_sge,
+                              device->attrs.max_sge_rd);
+       max_sge_per_wr = max_t(unsigned int, max_sge_per_wr,
+                              max_send_sges);
+       wrs_per_credit = max_t(unsigned int, 4,
+                              DIV_ROUND_UP(t->pages_per_rw_credit,
+                                           max_sge_per_wr) + 1);
+       max_rw_wrs = t->max_rw_credits * wrs_per_credit;
 
        max_send_wrs = smb_direct_send_credit_target + max_rw_wrs;
        if (max_send_wrs > device->attrs.max_cqe ||
            max_send_wrs > device->attrs.max_qp_wr) {
-               pr_err("consider lowering send_credit_target = %d, or max_outstanding_rw_ops = %d\n",
-                      smb_direct_send_credit_target,
-                      smb_direct_max_outstanding_rw_ops);
+               pr_err("consider lowering send_credit_target = %d\n",
+                      smb_direct_send_credit_target);
                pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
                       device->attrs.max_cqe, device->attrs.max_qp_wr);
                return -EINVAL;
@@ -1687,11 +1762,6 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
                return -EINVAL;
        }
 
-       if (device->attrs.max_send_sge < SMB_DIRECT_MAX_SEND_SGES) {
-               pr_err("warning: device max_send_sge = %d too small\n",
-                      device->attrs.max_send_sge);
-               return -EINVAL;
-       }
        if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) {
                pr_err("warning: device max_recv_sge = %d too small\n",
                       device->attrs.max_recv_sge);
@@ -1707,7 +1777,7 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
 
        t->send_credit_target = smb_direct_send_credit_target;
        atomic_set(&t->send_credits, 0);
-       atomic_set(&t->rw_avail_ops, smb_direct_max_outstanding_rw_ops);
+       atomic_set(&t->rw_credits, t->max_rw_credits);
 
        t->max_send_size = smb_direct_max_send_size;
        t->max_recv_size = smb_direct_max_receive_size;
@@ -1715,12 +1785,10 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
 
        cap->max_send_wr = max_send_wrs;
        cap->max_recv_wr = t->recv_credit_max;
-       cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
+       cap->max_send_sge = max_sge_per_wr;
        cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
        cap->max_inline_data = 0;
-       cap->max_rdma_ctxs =
-               rdma_rw_mr_factor(device, t->cm_id->port_num, max_pages) *
-               smb_direct_max_outstanding_rw_ops;
+       cap->max_rdma_ctxs = t->max_rw_credits;
        return 0;
 }
 
@@ -1813,7 +1881,8 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
        }
 
        t->send_cq = ib_alloc_cq(t->cm_id->device, t,
-                                t->send_credit_target, 0, IB_POLL_WORKQUEUE);
+                                smb_direct_send_credit_target + cap->max_rdma_ctxs,
+                                0, IB_POLL_WORKQUEUE);
        if (IS_ERR(t->send_cq)) {
                pr_err("Can't create RDMA send CQ\n");
                ret = PTR_ERR(t->send_cq);
@@ -1822,8 +1891,7 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
        }
 
        t->recv_cq = ib_alloc_cq(t->cm_id->device, t,
-                                cap->max_send_wr + cap->max_rdma_ctxs,
-                                0, IB_POLL_WORKQUEUE);
+                                t->recv_credit_max, 0, IB_POLL_WORKQUEUE);
        if (IS_ERR(t->recv_cq)) {
                pr_err("Can't create RDMA recv CQ\n");
                ret = PTR_ERR(t->recv_cq);
@@ -1852,17 +1920,12 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
 
        pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
        if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) {
-               int pages_per_mr, mr_count;
-
-               pages_per_mr = min_t(int, pages_per_rw,
-                                    t->cm_id->device->attrs.max_fast_reg_page_list_len);
-               mr_count = DIV_ROUND_UP(pages_per_rw, pages_per_mr) *
-                       atomic_read(&t->rw_avail_ops);
-               ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs, mr_count,
-                                     IB_MR_TYPE_MEM_REG, pages_per_mr, 0);
+               ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs,
+                                     t->max_rw_credits, IB_MR_TYPE_MEM_REG,
+                                     t->pages_per_rw_credit, 0);
                if (ret) {
                        pr_err("failed to init mr pool count %d pages %d\n",
-                              mr_count, pages_per_mr);
+                              t->max_rw_credits, t->pages_per_rw_credit);
                        goto err;
                }
        }
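
As a quick check of the new credit arithmetic, here is a minimal user-space sketch of the same calculations. It assumes 4 KiB pages and the 256-page cap from smb_direct_get_max_fr_pages(); pages_for() and div_round_up() are hypothetical stand-ins for the kernel's get_buf_page_count() and DIV_ROUND_UP().

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed page size */

static unsigned long div_round_up(unsigned long n, unsigned long d)
{
	return (n + d - 1) / d;
}

/* number of pages a buffer touches, like get_buf_page_count() */
static unsigned long pages_for(unsigned long addr, unsigned long len)
{
	unsigned long first = addr / PAGE_SIZE;
	unsigned long last = (addr + len - 1) / PAGE_SIZE;

	return last - first + 1;
}

int main(void)
{
	unsigned long pages_per_rw_credit = 256;	/* capped max_fast_reg_page_list_len */
	unsigned long max_rdma_rw_size = 8UL * 1024 * 1024;

	/* mirrors t->max_rw_credits in smb_direct_init_params() */
	unsigned long max_rw_credits =
		div_round_up(max_rdma_rw_size,
			     (pages_per_rw_credit - 1) * PAGE_SIZE);

	/* mirrors calc_rw_credits() for one 1 MiB descriptor at offset 0x64 */
	unsigned long credits =
		div_round_up(pages_for(0x64, 1024 * 1024), pages_per_rw_credit);

	printf("max_rw_credits=%lu, credits for one 1 MiB descriptor=%lu\n",
	       max_rw_credits, credits);
	return 0;
}
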
index 5567d93..77aee4e 100644
@@ -7,6 +7,10 @@
 #ifndef __KSMBD_TRANSPORT_RDMA_H__
 #define __KSMBD_TRANSPORT_RDMA_H__
 
+#define SMBD_DEFAULT_IOSIZE (8 * 1024 * 1024)
+#define SMBD_MIN_IOSIZE (512 * 1024)
+#define SMBD_MAX_IOSIZE (16 * 1024 * 1024)
+
 /* SMB DIRECT negotiation request packet [MS-SMBD] 2.2.1 */
 struct smb_direct_negotiate_req {
        __le16 min_version;
@@ -52,10 +56,14 @@ struct smb_direct_data_transfer {
 int ksmbd_rdma_init(void);
 void ksmbd_rdma_destroy(void);
 bool ksmbd_rdma_capable_netdev(struct net_device *netdev);
+void init_smbd_max_io_size(unsigned int sz);
+unsigned int get_smbd_max_read_write_size(void);
 #else
 static inline int ksmbd_rdma_init(void) { return 0; }
 static inline int ksmbd_rdma_destroy(void) { return 0; }
 static inline bool ksmbd_rdma_capable_netdev(struct net_device *netdev) { return false; }
+static inline void init_smbd_max_io_size(unsigned int sz) { }
+static inline unsigned int get_smbd_max_read_write_size(void) { return 0; }
 #endif
 
 #endif /* __KSMBD_TRANSPORT_RDMA_H__ */
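
The .c side of init_smbd_max_io_size() is not shown in this hunk. A plausible minimal sketch, assuming the setter simply clamps the configured size into the new [SMBD_MIN_IOSIZE, SMBD_MAX_IOSIZE] window; the clamp_uint() helper and the static variable are assumptions for illustration only.

#include <stdio.h>

#define SMBD_DEFAULT_IOSIZE (8 * 1024 * 1024)
#define SMBD_MIN_IOSIZE (512 * 1024)
#define SMBD_MAX_IOSIZE (16 * 1024 * 1024)

/* assumed module-wide setting; the real variable lives in transport_rdma.c */
static unsigned int smb_direct_max_read_write_size = SMBD_DEFAULT_IOSIZE;

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* assumed behaviour: clamp a configured size into the allowed window */
void init_smbd_max_io_size(unsigned int sz)
{
	if (sz)
		smb_direct_max_read_write_size =
			clamp_uint(sz, SMBD_MIN_IOSIZE, SMBD_MAX_IOSIZE);
}

unsigned int get_smbd_max_read_write_size(void)
{
	return smb_direct_max_read_write_size;
}

int main(void)
{
	init_smbd_max_io_size(64 * 1024 * 1024);	/* too large: clamped to 16 MiB */
	printf("max r/w size = %u bytes\n", get_smbd_max_read_write_size());
	return 0;
}
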
index 3dc0db2..1f28d3f 100644
@@ -730,13 +730,6 @@ static bool legitimize_links(struct nameidata *nd)
 
 static bool legitimize_root(struct nameidata *nd)
 {
-       /*
-        * For scoped-lookups (where nd->root has been zeroed), we need to
-        * restart the whole lookup from scratch -- because set_root() is wrong
-        * for these lookups (nd->dfd is the root, not the filesystem root).
-        */
-       if (!nd->root.mnt && (nd->flags & LOOKUP_IS_SCOPED))
-               return false;
        /* Nothing to do if nd->root is zero or is managed by the VFS user. */
        if (!nd->root.mnt || (nd->state & ND_ROOT_PRESET))
                return true;
@@ -798,7 +791,7 @@ out:
  * @seq: seq number to check @dentry against
  * Returns: true on success, false on failure
  *
- * Similar to to try_to_unlazy(), but here we have the next dentry already
+ * Similar to try_to_unlazy(), but here we have the next dentry already
  * picked by rcu-walk and want to legitimize that in addition to the current
  * nd->path and nd->root for ref-walk mode.  Must be called from rcu-walk context.
  * Nothing should touch nameidata between try_to_unlazy_next() failure and
@@ -1755,7 +1748,7 @@ static int reserve_stack(struct nameidata *nd, struct path *link, unsigned seq)
                // unlazy even if we fail to grab the link - cleanup needs it
                bool grabbed_link = legitimize_path(nd, link, seq);
 
-               if (!try_to_unlazy(nd) != 0 || !grabbed_link)
+               if (!try_to_unlazy(nd) || !grabbed_link)
                        return -ECHILD;
 
                if (nd_alloc_stack(nd))
@@ -2769,7 +2762,8 @@ struct dentry *lookup_one(struct user_namespace *mnt_userns, const char *name,
 EXPORT_SYMBOL(lookup_one);
 
 /**
- * lookup_one_len_unlocked - filesystem helper to lookup single pathname component
+ * lookup_one_unlocked - filesystem helper to lookup single pathname component
+ * @mnt_userns:        idmapping of the mount the lookup is performed from
  * @name:      pathname component to lookup
  * @base:      base directory to lookup from
  * @len:       maximum length @len should be interpreted to
@@ -2780,14 +2774,15 @@ EXPORT_SYMBOL(lookup_one);
  * Unlike lookup_one_len, it should be called without the parent
  * i_mutex held, and will take the i_mutex itself if necessary.
  */
-struct dentry *lookup_one_len_unlocked(const char *name,
-                                      struct dentry *base, int len)
+struct dentry *lookup_one_unlocked(struct user_namespace *mnt_userns,
+                                  const char *name, struct dentry *base,
+                                  int len)
 {
        struct qstr this;
        int err;
        struct dentry *ret;
 
-       err = lookup_one_common(&init_user_ns, name, base, len, &this);
+       err = lookup_one_common(mnt_userns, name, base, len, &this);
        if (err)
                return ERR_PTR(err);
 
@@ -2796,6 +2791,59 @@ struct dentry *lookup_one_len_unlocked(const char *name,
                ret = lookup_slow(&this, base, 0);
        return ret;
 }
+EXPORT_SYMBOL(lookup_one_unlocked);
+
+/**
+ * lookup_one_positive_unlocked - filesystem helper to lookup single
+ *                               pathname component
+ * @mnt_userns:        idmapping of the mount the lookup is performed from
+ * @name:      pathname component to lookup
+ * @base:      base directory to lookup from
+ * @len:       maximum length @len should be interpreted to
+ *
+ * This helper will yield ERR_PTR(-ENOENT) on negatives. The helper returns
+ * known positive or ERR_PTR(). This is what most of the users want.
+ *
+ * Note that pinned negative with unlocked parent _can_ become positive at any
+ * time, so callers of lookup_one_unlocked() need to be very careful; pinned
+ * positives have ->d_inode stable, so this one avoids such problems.
+ *
+ * Note that this routine is purely a helper for filesystem usage and should
+ * not be called by generic code.
+ *
+ * The helper should be called without i_mutex held.
+ */
+struct dentry *lookup_one_positive_unlocked(struct user_namespace *mnt_userns,
+                                           const char *name,
+                                           struct dentry *base, int len)
+{
+       struct dentry *ret = lookup_one_unlocked(mnt_userns, name, base, len);
+
+       if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
+               dput(ret);
+               ret = ERR_PTR(-ENOENT);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(lookup_one_positive_unlocked);
+
+/**
+ * lookup_one_len_unlocked - filesystem helper to lookup single pathname component
+ * @name:      pathname component to lookup
+ * @base:      base directory to lookup from
+ * @len:       maximum length @len should be interpreted to
+ *
+ * Note that this routine is purely a helper for filesystem usage and should
+ * not be called by generic code.
+ *
+ * Unlike lookup_one_len, it should be called without the parent
+ * i_mutex held, and will take the i_mutex itself if necessary.
+ */
+struct dentry *lookup_one_len_unlocked(const char *name,
+                                      struct dentry *base, int len)
+{
+       return lookup_one_unlocked(&init_user_ns, name, base, len);
+}
 EXPORT_SYMBOL(lookup_one_len_unlocked);
 
 /*
@@ -2809,12 +2857,7 @@ EXPORT_SYMBOL(lookup_one_len_unlocked);
 struct dentry *lookup_positive_unlocked(const char *name,
                                       struct dentry *base, int len)
 {
-       struct dentry *ret = lookup_one_len_unlocked(name, base, len);
-       if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
-               dput(ret);
-               ret = ERR_PTR(-ENOENT);
-       }
-       return ret;
+       return lookup_one_positive_unlocked(&init_user_ns, name, base, len);
 }
 EXPORT_SYMBOL(lookup_positive_unlocked);
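
A hedged usage sketch of the new idmapping-aware helper from a filesystem's point of view; the example_lookup_child() wrapper and its arguments are hypothetical, only the lookup_one_positive_unlocked() signature comes from the hunk above.

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/namei.h>
#include <linux/string.h>

/* hypothetical wrapper: resolve a child name under an idmapped mount */
static struct dentry *example_lookup_child(struct user_namespace *mnt_userns,
					   struct dentry *parent,
					   const char *name)
{
	/* negatives are turned into ERR_PTR(-ENOENT) by the helper */
	struct dentry *child = lookup_one_positive_unlocked(mnt_userns, name,
							    parent,
							    strlen(name));

	if (IS_ERR(child))
		return child;	/* -ENOENT, -ECHILD, ... */

	/* child->d_inode is stable here; the caller must dput(child) when done */
	return child;
}
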
 
index 41461f5..e6a7e76 100644
@@ -1760,7 +1760,7 @@ out_unlock:
 /*
  * Is the caller allowed to modify his namespace?
  */
-static inline bool may_mount(void)
+bool may_mount(void)
 {
        return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
 }
index 6f5425e..2d72b1b 100644
@@ -206,15 +206,16 @@ static int
 nfs_file_fsync_commit(struct file *file, int datasync)
 {
        struct inode *inode = file_inode(file);
-       int ret;
+       int ret, ret2;
 
        dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync);
 
        nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
        ret = nfs_commit_inode(inode, FLUSH_SYNC);
-       if (ret < 0)
-               return ret;
-       return file_check_and_advance_wb_err(file);
+       ret2 = file_check_and_advance_wb_err(file);
+       if (ret2 < 0)
+               return ret2;
+       return ret;
 }
 
 int
@@ -387,11 +388,8 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
                return status;
        NFS_I(mapping->host)->write_io += copied;
 
-       if (nfs_ctx_key_to_expire(ctx, mapping->host)) {
-               status = nfs_wb_all(mapping->host);
-               if (status < 0)
-                       return status;
-       }
+       if (nfs_ctx_key_to_expire(ctx, mapping->host))
+               nfs_wb_all(mapping->host);
 
        return copied;
 }
@@ -606,18 +604,6 @@ static const struct vm_operations_struct nfs_file_vm_ops = {
        .page_mkwrite = nfs_vm_page_mkwrite,
 };
 
-static int nfs_need_check_write(struct file *filp, struct inode *inode,
-                               int error)
-{
-       struct nfs_open_context *ctx;
-
-       ctx = nfs_file_open_context(filp);
-       if (nfs_error_is_fatal_on_server(error) ||
-           nfs_ctx_key_to_expire(ctx, inode))
-               return 1;
-       return 0;
-}
-
 ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
@@ -645,7 +631,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
        if (iocb->ki_flags & IOCB_APPEND || iocb->ki_pos > i_size_read(inode)) {
                result = nfs_revalidate_file_size(inode, file);
                if (result)
-                       goto out;
+                       return result;
        }
 
        nfs_clear_invalid_mapping(file->f_mapping);
@@ -664,6 +650,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
 
        written = result;
        iocb->ki_pos += written;
+       nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
 
        if (mntflags & NFS_MOUNT_WRITE_EAGER) {
                result = filemap_fdatawrite_range(file->f_mapping,
@@ -681,17 +668,22 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
        }
        result = generic_write_sync(iocb, written);
        if (result < 0)
-               goto out;
+               return result;
 
+out:
        /* Return error values */
        error = filemap_check_wb_err(file->f_mapping, since);
-       if (nfs_need_check_write(file, inode, error)) {
-               int err = nfs_wb_all(inode);
-               if (err < 0)
-                       result = err;
+       switch (error) {
+       default:
+               break;
+       case -EDQUOT:
+       case -EFBIG:
+       case -ENOSPC:
+               nfs_wb_all(inode);
+               error = file_check_and_advance_wb_err(file);
+               if (error < 0)
+                       result = error;
        }
-       nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
-out:
        return result;
 
 out_swapfile:
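
The subtle point in these hunks is the new reporting order: an error already latched in the mapping's wb_err takes priority over the commit result, and only space/quota errors force a synchronous flush from the write path. A tiny user-space model of that priority, with hypothetical helper names:

#include <stdio.h>
#include <errno.h>

/* models nfs_file_fsync_commit(): a latched wb_err, if set, wins */
static int fsync_result(int commit_ret, int wb_err)
{
	if (wb_err < 0)
		return wb_err;
	return commit_ret;
}

/* models the new switch in nfs_file_write(): only these force a flush */
static int needs_flush(int wb_err)
{
	switch (wb_err) {
	case -EDQUOT:
	case -EFBIG:
	case -ENOSPC:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	printf("commit=-EIO, wb_err=0       -> %d\n", fsync_result(-EIO, 0));
	printf("commit=0,    wb_err=-ENOSPC -> %d\n", fsync_result(0, -ENOSPC));
	printf("flush on -ENOSPC? %d, on -EIO? %d\n",
	       needs_flush(-ENOSPC), needs_flush(-EIO));
	return 0;
}
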
index 76dedda..2b26615 100644
@@ -839,7 +839,12 @@ fl_pnfs_update_layout(struct inode *ino,
 
        lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode,
                                  gfp_flags);
-       if (IS_ERR_OR_NULL(lseg))
+       if (IS_ERR(lseg)) {
+               /* Fall back to MDS on recoverable errors */
+               if (!nfs_error_is_fatal_on_server(PTR_ERR(lseg)))
+                       lseg = NULL;
+               goto out;
+       } else if (!lseg)
                goto out;
 
        lo = NFS_I(ino)->layout;
index f73c09a..e861d7b 100644
@@ -231,11 +231,10 @@ void nfs_fscache_release_file(struct inode *inode, struct file *filp)
 {
        struct nfs_fscache_inode_auxdata auxdata;
        struct fscache_cookie *cookie = nfs_i_fscache(inode);
+       loff_t i_size = i_size_read(inode);
 
-       if (fscache_cookie_valid(cookie)) {
-               nfs_fscache_update_auxdata(&auxdata, inode);
-               fscache_unuse_cookie(cookie, &auxdata, NULL);
-       }
+       nfs_fscache_update_auxdata(&auxdata, inode);
+       fscache_unuse_cookie(cookie, &auxdata, &i_size);
 }
 
 /*
index 7eefa16..8f8cd6e 100644
@@ -841,6 +841,7 @@ static inline bool nfs_error_is_fatal_on_server(int err)
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
+       case -ENOMEM:
                return false;
        }
        return nfs_error_is_fatal(err);
index 7b861e4..03d3a27 100644
@@ -328,7 +328,7 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
        char *read_name = NULL;
        int len, status = 0;
 
-       server = NFS_SERVER(ss_mnt->mnt_root->d_inode);
+       server = NFS_SB(ss_mnt->mnt_sb);
 
        if (!fattr)
                return ERR_PTR(-ENOMEM);
@@ -346,7 +346,7 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
                goto out;
        snprintf(read_name, len, SSC_READ_NAME_BODY, read_name_gen++);
 
-       r_ino = nfs_fhget(ss_mnt->mnt_root->d_inode->i_sb, src_fh, fattr);
+       r_ino = nfs_fhget(ss_mnt->mnt_sb, src_fh, fattr);
        if (IS_ERR(r_ino)) {
                res = ERR_CAST(r_ino);
                goto out_free_name;
index 3680c8d..f2dbf90 100644
@@ -417,6 +417,9 @@ static int nfs_do_refmount(struct fs_context *fc, struct rpc_clnt *client)
        fs_locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
        if (!fs_locations)
                goto out_free;
+       fs_locations->fattr = nfs_alloc_fattr();
+       if (!fs_locations->fattr)
+               goto out_free_2;
 
        /* Get locations */
        dentry = ctx->clone_data.dentry;
@@ -427,14 +430,16 @@ static int nfs_do_refmount(struct fs_context *fc, struct rpc_clnt *client)
        err = nfs4_proc_fs_locations(client, d_inode(parent), &dentry->d_name, fs_locations, page);
        dput(parent);
        if (err != 0)
-               goto out_free_2;
+               goto out_free_3;
 
        err = -ENOENT;
        if (fs_locations->nlocations <= 0 ||
            fs_locations->fs_path.ncomponents <= 0)
-               goto out_free_2;
+               goto out_free_3;
 
        err = nfs_follow_referral(fc, fs_locations);
+out_free_3:
+       kfree(fs_locations->fattr);
 out_free_2:
        kfree(fs_locations);
 out_free:
index a79f664..c0fdcf8 100644
@@ -1162,7 +1162,7 @@ static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
 {
        unsigned short task_flags = 0;
 
-       if (server->nfs_client->cl_minorversion)
+       if (server->caps & NFS_CAP_MOVEABLE)
                task_flags = RPC_TASK_MOVEABLE;
        return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
 }
@@ -2568,7 +2568,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data,
        };
        int status;
 
-       if (server->nfs_client->cl_minorversion)
+       if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
                task_setup_data.flags |= RPC_TASK_MOVEABLE;
 
        kref_get(&data->kref);
@@ -3098,6 +3098,10 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
        }
 
 out:
+       if (opendata->lgp) {
+               nfs4_lgopen_release(opendata->lgp);
+               opendata->lgp = NULL;
+       }
        if (!opendata->cancelled)
                nfs4_sequence_free_slot(&opendata->o_res.seq_res);
        return ret;
@@ -3733,7 +3737,7 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
        };
        int status = -ENOMEM;
 
-       if (server->nfs_client->cl_minorversion)
+       if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
                task_setup_data.flags |= RPC_TASK_MOVEABLE;
 
        nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
@@ -4243,6 +4247,8 @@ static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
        if (locations == NULL)
                goto out;
 
+       locations->fattr = fattr;
+
        status = nfs4_proc_fs_locations(client, dir, name, locations, page);
        if (status != 0)
                goto out;
@@ -4252,17 +4258,14 @@ static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
         * referral.  Cause us to drop into the exception handler, which
         * will kick off migration recovery.
         */
-       if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
+       if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) {
                dprintk("%s: server did not return a different fsid for"
                        " a referral at %s\n", __func__, name->name);
                status = -NFS4ERR_MOVED;
                goto out;
        }
        /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
-       nfs_fixup_referral_attributes(&locations->fattr);
-
-       /* replace the lookup nfs_fattr with the locations nfs_fattr */
-       memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
+       nfs_fixup_referral_attributes(fattr);
        memset(fhandle, 0, sizeof(struct nfs_fh));
 out:
        if (page)
@@ -4404,7 +4407,7 @@ static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
        };
        unsigned short task_flags = 0;
 
-       if (server->nfs_client->cl_minorversion)
+       if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
                task_flags = RPC_TASK_MOVEABLE;
 
        /* Is this is an attribute revalidation, subject to softreval? */
@@ -5768,9 +5771,17 @@ static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
        return 0;
 }
 
-static inline int nfs4_server_supports_acls(struct nfs_server *server)
+static bool nfs4_server_supports_acls(const struct nfs_server *server,
+                                     enum nfs4_acl_type type)
 {
-       return server->caps & NFS_CAP_ACLS;
+       switch (type) {
+       default:
+               return server->attr_bitmask[0] & FATTR4_WORD0_ACL;
+       case NFS4ACL_DACL:
+               return server->attr_bitmask[1] & FATTR4_WORD1_DACL;
+       case NFS4ACL_SACL:
+               return server->attr_bitmask[1] & FATTR4_WORD1_SACL;
+       }
 }
 
 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
@@ -5809,6 +5820,7 @@ unwind:
 }
 
 struct nfs4_cached_acl {
+       enum nfs4_acl_type type;
        int cached;
        size_t len;
        char data[];
@@ -5829,7 +5841,8 @@ static void nfs4_zap_acl_attr(struct inode *inode)
        nfs4_set_cached_acl(inode, NULL);
 }
 
-static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
+static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf,
+                                   size_t buflen, enum nfs4_acl_type type)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_cached_acl *acl;
@@ -5839,6 +5852,8 @@ static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_
        acl = nfsi->nfs4_acl;
        if (acl == NULL)
                goto out;
+       if (acl->type != type)
+               goto out;
        if (buf == NULL) /* user is just asking for length */
                goto out_len;
        if (acl->cached == 0)
@@ -5854,7 +5869,9 @@ out:
        return ret;
 }
 
-static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
+static void nfs4_write_cached_acl(struct inode *inode, struct page **pages,
+                                 size_t pgbase, size_t acl_len,
+                                 enum nfs4_acl_type type)
 {
        struct nfs4_cached_acl *acl;
        size_t buflen = sizeof(*acl) + acl_len;
@@ -5871,6 +5888,7 @@ static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size
                        goto out;
                acl->cached = 0;
        }
+       acl->type = type;
        acl->len = acl_len;
 out:
        nfs4_set_cached_acl(inode, acl);
@@ -5886,14 +5904,17 @@ out:
  * length. The next getxattr call will then produce another round trip to
  * the server, this time with the input buf of the required size.
  */
-static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf,
+                                      size_t buflen, enum nfs4_acl_type type)
 {
        struct page **pages;
        struct nfs_getaclargs args = {
                .fh = NFS_FH(inode),
+               .acl_type = type,
                .acl_len = buflen,
        };
        struct nfs_getaclres res = {
+               .acl_type = type,
                .acl_len = buflen,
        };
        struct rpc_message msg = {
@@ -5943,7 +5964,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
                ret = -ERANGE;
                goto out_free;
        }
-       nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
+       nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len,
+                             type);
        if (buf) {
                if (res.acl_len > buflen) {
                        ret = -ERANGE;
@@ -5963,14 +5985,15 @@ out_free:
        return ret;
 }
 
-static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf,
+                                    size_t buflen, enum nfs4_acl_type type)
 {
        struct nfs4_exception exception = {
                .interruptible = true,
        };
        ssize_t ret;
        do {
-               ret = __nfs4_get_acl_uncached(inode, buf, buflen);
+               ret = __nfs4_get_acl_uncached(inode, buf, buflen, type);
                trace_nfs4_get_acl(inode, ret);
                if (ret >= 0)
                        break;
@@ -5979,34 +6002,37 @@ static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bufl
        return ret;
 }
 
-static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
+static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen,
+                                enum nfs4_acl_type type)
 {
        struct nfs_server *server = NFS_SERVER(inode);
        int ret;
 
-       if (!nfs4_server_supports_acls(server))
+       if (!nfs4_server_supports_acls(server, type))
                return -EOPNOTSUPP;
        ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
        if (ret < 0)
                return ret;
        if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
                nfs_zap_acl_cache(inode);
-       ret = nfs4_read_cached_acl(inode, buf, buflen);
+       ret = nfs4_read_cached_acl(inode, buf, buflen, type);
        if (ret != -ENOENT)
                /* -ENOENT is returned if there is no ACL or if there is an ACL
                 * but no cached acl data, just the acl length */
                return ret;
-       return nfs4_get_acl_uncached(inode, buf, buflen);
+       return nfs4_get_acl_uncached(inode, buf, buflen, type);
 }
 
-static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
+static int __nfs4_proc_set_acl(struct inode *inode, const void *buf,
+                              size_t buflen, enum nfs4_acl_type type)
 {
        struct nfs_server *server = NFS_SERVER(inode);
        struct page *pages[NFS4ACL_MAXPAGES];
        struct nfs_setaclargs arg = {
-               .fh             = NFS_FH(inode),
-               .acl_pages      = pages,
-               .acl_len        = buflen,
+               .fh = NFS_FH(inode),
+               .acl_type = type,
+               .acl_len = buflen,
+               .acl_pages = pages,
        };
        struct nfs_setaclres res;
        struct rpc_message msg = {
@@ -6020,7 +6046,7 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
        /* You can't remove system.nfs4_acl: */
        if (buflen == 0)
                return -EINVAL;
-       if (!nfs4_server_supports_acls(server))
+       if (!nfs4_server_supports_acls(server, type))
                return -EOPNOTSUPP;
        if (npages > ARRAY_SIZE(pages))
                return -ERANGE;
@@ -6051,12 +6077,13 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
        return ret;
 }
 
-static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
+static int nfs4_proc_set_acl(struct inode *inode, const void *buf,
+                            size_t buflen, enum nfs4_acl_type type)
 {
        struct nfs4_exception exception = { };
        int err;
        do {
-               err = __nfs4_proc_set_acl(inode, buf, buflen);
+               err = __nfs4_proc_set_acl(inode, buf, buflen, type);
                trace_nfs4_set_acl(inode, err);
                if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
                        /*
@@ -6612,10 +6639,13 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
                .rpc_client = server->client,
                .rpc_message = &msg,
                .callback_ops = &nfs4_delegreturn_ops,
-               .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE,
+               .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
        };
        int status = 0;
 
+       if (nfs_server_capable(inode, NFS_CAP_MOVEABLE))
+               task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;
@@ -6929,10 +6959,8 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC,
        };
-       struct nfs_client *client =
-               NFS_SERVER(lsp->ls_state->inode)->nfs_client;
 
-       if (client->cl_minorversion)
+       if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE))
                task_setup_data.flags |= RPC_TASK_MOVEABLE;
 
        nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
@@ -7203,9 +7231,8 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
                .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
        };
        int ret;
-       struct nfs_client *client = NFS_SERVER(state->inode)->nfs_client;
 
-       if (client->cl_minorversion)
+       if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
                task_setup_data.flags |= RPC_TASK_MOVEABLE;
 
        data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
@@ -7655,21 +7682,70 @@ static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
                                   const char *key, const void *buf,
                                   size_t buflen, int flags)
 {
-       return nfs4_proc_set_acl(inode, buf, buflen);
+       return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL);
 }
 
 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
                                   struct dentry *unused, struct inode *inode,
                                   const char *key, void *buf, size_t buflen)
 {
-       return nfs4_proc_get_acl(inode, buf, buflen);
+       return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL);
 }
 
 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
 {
-       return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)));
+       return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+#define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl"
+
+static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler,
+                                   struct user_namespace *mnt_userns,
+                                   struct dentry *unused, struct inode *inode,
+                                   const char *key, const void *buf,
+                                   size_t buflen, int flags)
+{
+       return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL);
+}
+
+static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler,
+                                   struct dentry *unused, struct inode *inode,
+                                   const char *key, void *buf, size_t buflen)
+{
+       return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL);
+}
+
+static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry)
+{
+       return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL);
+}
+
+#define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl"
+
+static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler,
+                                   struct user_namespace *mnt_userns,
+                                   struct dentry *unused, struct inode *inode,
+                                   const char *key, const void *buf,
+                                   size_t buflen, int flags)
+{
+       return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL);
+}
+
+static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler,
+                                   struct dentry *unused, struct inode *inode,
+                                   const char *key, void *buf, size_t buflen)
+{
+       return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL);
 }
 
+static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry)
+{
+       return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL);
+}
+
+#endif
+
 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
 
 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
@@ -7902,7 +7978,7 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
        else
                bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
 
-       nfs_fattr_init(&fs_locations->fattr);
+       nfs_fattr_init(fs_locations->fattr);
        fs_locations->server = server;
        fs_locations->nlocations = 0;
        status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
@@ -7967,7 +8043,7 @@ static int _nfs40_proc_get_locations(struct nfs_server *server,
        unsigned long now = jiffies;
        int status;
 
-       nfs_fattr_init(&locations->fattr);
+       nfs_fattr_init(locations->fattr);
        locations->server = server;
        locations->nlocations = 0;
 
@@ -8032,7 +8108,7 @@ static int _nfs41_proc_get_locations(struct nfs_server *server,
        };
        int status;
 
-       nfs_fattr_init(&locations->fattr);
+       nfs_fattr_init(locations->fattr);
        locations->server = server;
        locations->nlocations = 0;
 
@@ -10391,7 +10467,8 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
                | NFS_CAP_POSIX_LOCK
                | NFS_CAP_STATEID_NFSV41
                | NFS_CAP_ATOMIC_OPEN_V1
-               | NFS_CAP_LGOPEN,
+               | NFS_CAP_LGOPEN
+               | NFS_CAP_MOVEABLE,
        .init_client = nfs41_init_client,
        .shutdown_client = nfs41_shutdown_client,
        .match_stateid = nfs41_match_stateid,
@@ -10426,7 +10503,8 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
                | NFS_CAP_LAYOUTSTATS
                | NFS_CAP_CLONE
                | NFS_CAP_LAYOUTERROR
-               | NFS_CAP_READ_PLUS,
+               | NFS_CAP_READ_PLUS
+               | NFS_CAP_MOVEABLE,
        .init_client = nfs41_init_client,
        .shutdown_client = nfs41_shutdown_client,
        .match_stateid = nfs41_match_stateid,
@@ -10587,6 +10665,22 @@ static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
        .set    = nfs4_xattr_set_nfs4_acl,
 };
 
+#if defined(CONFIG_NFS_V4_1)
+static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = {
+       .name   = XATTR_NAME_NFSV4_DACL,
+       .list   = nfs4_xattr_list_nfs4_dacl,
+       .get    = nfs4_xattr_get_nfs4_dacl,
+       .set    = nfs4_xattr_set_nfs4_dacl,
+};
+
+static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = {
+       .name   = XATTR_NAME_NFSV4_SACL,
+       .list   = nfs4_xattr_list_nfs4_sacl,
+       .get    = nfs4_xattr_get_nfs4_sacl,
+       .set    = nfs4_xattr_set_nfs4_sacl,
+};
+#endif
+
 #ifdef CONFIG_NFS_V4_2
 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
        .prefix = XATTR_USER_PREFIX,
@@ -10597,6 +10691,10 @@ static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
 
 const struct xattr_handler *nfs4_xattr_handlers[] = {
        &nfs4_xattr_nfs4_acl_handler,
+#if defined(CONFIG_NFS_V4_1)
+       &nfs4_xattr_nfs4_dacl_handler,
+       &nfs4_xattr_nfs4_sacl_handler,
+#endif
 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
        &nfs4_xattr_nfs4_label_handler,
 #endif
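
With the handlers above, an NFSv4.1+ mount exposes system.nfs4_dacl and system.nfs4_sacl alongside system.nfs4_acl. A small user-space illustration; the mount path is an assumption, the xattr names come from the hunk:

#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(void)
{
	char buf[4096];
	/* "/mnt/nfs/file" is a placeholder path on an NFSv4.1+ mount */
	ssize_t len = getxattr("/mnt/nfs/file", "system.nfs4_dacl",
			       buf, sizeof(buf));

	if (len < 0)
		perror("getxattr(system.nfs4_dacl)");	/* EOPNOTSUPP if the server lacks DACL support */
	else
		printf("DACL is %zd bytes of XDR-encoded ACL data\n", len);
	return 0;
}
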
index 9e1c987..2540b35 100644
@@ -1602,7 +1602,8 @@ static inline void nfs42_complete_copies(struct nfs4_state_owner *sp,
 #endif /* CONFIG_NFS_V4_2 */
 
 static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_state *state,
-                                    const struct nfs4_state_recovery_ops *ops)
+                                    const struct nfs4_state_recovery_ops *ops,
+                                    int *lost_locks)
 {
        struct nfs4_lock_state *lock;
        int status;
@@ -1620,7 +1621,7 @@ static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_st
                list_for_each_entry(lock, &state->lock_states, ls_locks) {
                        trace_nfs4_state_lock_reclaim(state, lock);
                        if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags))
-                               pr_warn_ratelimited("NFS: %s: Lock reclaim failed!\n", __func__);
+                               *lost_locks += 1;
                }
                spin_unlock(&state->state_lock);
        }
@@ -1630,7 +1631,9 @@ static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_st
        return status;
 }
 
-static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
+static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp,
+                                  const struct nfs4_state_recovery_ops *ops,
+                                  int *lost_locks)
 {
        struct nfs4_state *state;
        unsigned int loop = 0;
@@ -1666,7 +1669,7 @@ restart:
 #endif /* CONFIG_NFS_V4_2 */
                refcount_inc(&state->count);
                spin_unlock(&sp->so_lock);
-               status = __nfs4_reclaim_open_state(sp, state, ops);
+               status = __nfs4_reclaim_open_state(sp, state, ops, lost_locks);
 
                switch (status) {
                default:
@@ -1909,6 +1912,7 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
        struct rb_node *pos;
        LIST_HEAD(freeme);
        int status = 0;
+       int lost_locks = 0;
 
 restart:
        rcu_read_lock();
@@ -1928,8 +1932,11 @@ restart:
                        spin_unlock(&clp->cl_lock);
                        rcu_read_unlock();
 
-                       status = nfs4_reclaim_open_state(sp, ops);
+                       status = nfs4_reclaim_open_state(sp, ops, &lost_locks);
                        if (status < 0) {
+                               if (lost_locks)
+                                       pr_warn("NFS: %s: lost %d locks\n",
+                                               clp->cl_hostname, lost_locks);
                                set_bit(ops->owner_flag_bit, &sp->so_flags);
                                nfs4_put_state_owner(sp);
                                status = nfs4_recovery_handle_error(clp, status);
@@ -1943,6 +1950,9 @@ restart:
        }
        rcu_read_unlock();
        nfs4_free_state_owners(&freeme);
+       if (lost_locks)
+               pr_warn("NFS: %s: lost %d locks\n",
+                       clp->cl_hostname, lost_locks);
        return 0;
 }
 
@@ -2106,6 +2116,11 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
                dprintk("<-- %s: no memory\n", __func__);
                goto out;
        }
+       locations->fattr = nfs_alloc_fattr();
+       if (locations->fattr == NULL) {
+               dprintk("<-- %s: no memory\n", __func__);
+               goto out;
+       }
 
        inode = d_inode(server->super->s_root);
        result = nfs4_proc_get_locations(server, NFS_FH(inode), locations,
@@ -2120,7 +2135,7 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
        if (!locations->nlocations)
                goto out;
 
-       if (!(locations->fattr.valid & NFS_ATTR_FATTR_V4_LOCATIONS)) {
+       if (!(locations->fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)) {
                dprintk("<-- %s: No fs_locations data, migration skipped\n",
                        __func__);
                goto out;
@@ -2145,6 +2160,8 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
 out:
        if (page != NULL)
                __free_page(page);
+       if (locations != NULL)
+               kfree(locations->fattr);
        kfree(locations);
        if (result) {
                pr_err("NFS: migration recovery failed (server %s)\n",
index 86a5f65..acfe5f4 100644
@@ -1680,19 +1680,35 @@ encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
        encode_op_hdr(xdr, OP_RESTOREFH, decode_restorefh_maxsz, hdr);
 }
 
-static void
-encode_setacl(struct xdr_stream *xdr, const struct nfs_setaclargs *arg,
-               struct compound_hdr *hdr)
+static void nfs4_acltype_to_bitmap(enum nfs4_acl_type type, __u32 bitmap[2])
 {
-       __be32 *p;
+       switch (type) {
+       default:
+               bitmap[0] = FATTR4_WORD0_ACL;
+               bitmap[1] = 0;
+               break;
+       case NFS4ACL_DACL:
+               bitmap[0] = 0;
+               bitmap[1] = FATTR4_WORD1_DACL;
+               break;
+       case NFS4ACL_SACL:
+               bitmap[0] = 0;
+               bitmap[1] = FATTR4_WORD1_SACL;
+       }
+}
+
+static void encode_setacl(struct xdr_stream *xdr,
+                         const struct nfs_setaclargs *arg,
+                         struct compound_hdr *hdr)
+{
+       __u32 bitmap[2];
+
+       nfs4_acltype_to_bitmap(arg->acl_type, bitmap);
 
        encode_op_hdr(xdr, OP_SETATTR, decode_setacl_maxsz, hdr);
        encode_nfs4_stateid(xdr, &zero_stateid);
-       p = reserve_space(xdr, 2*4);
-       *p++ = cpu_to_be32(1);
-       *p = cpu_to_be32(FATTR4_WORD0_ACL);
-       p = reserve_space(xdr, 4);
-       *p = cpu_to_be32(arg->acl_len);
+       xdr_encode_bitmap4(xdr, bitmap, ARRAY_SIZE(bitmap));
+       encode_uint32(xdr, arg->acl_len);
        xdr_write_pages(xdr, arg->acl_pages, 0, arg->acl_len);
 }
 
@@ -2587,11 +2603,11 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
        struct compound_hdr hdr = {
                .minorversion = nfs4_xdr_minorversion(&args->seq_args),
        };
-       const __u32 nfs4_acl_bitmap[1] = {
-               [0] = FATTR4_WORD0_ACL,
-       };
+       __u32 nfs4_acl_bitmap[2];
        uint32_t replen;
 
+       nfs4_acltype_to_bitmap(args->acl_type, nfs4_acl_bitmap);
+
        encode_compound_hdr(xdr, req, &hdr);
        encode_sequence(xdr, &args->seq_args, &hdr);
        encode_putfh(xdr, args->fh, &hdr);
@@ -5386,7 +5402,7 @@ decode_restorefh(struct xdr_stream *xdr)
 }
 
 static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
-                        struct nfs_getaclres *res)
+                        struct nfs_getaclres *res, enum nfs4_acl_type type)
 {
        unsigned int savep;
        uint32_t attrlen,
@@ -5404,26 +5420,39 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
        if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
                goto out;
 
-       if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U)))
-               return -EIO;
-       if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
-
-               /* The bitmap (xdr len + bitmaps) and the attr xdr len words
-                * are stored with the acl data to handle the problem of
-                * variable length bitmaps.*/
-               res->acl_data_offset = xdr_page_pos(xdr);
-               res->acl_len = attrlen;
-
-               /* Check for receive buffer overflow */
-               if (res->acl_len > xdr_stream_remaining(xdr) ||
-                   res->acl_len + res->acl_data_offset > xdr->buf->page_len) {
-                       res->acl_flags |= NFS4_ACL_TRUNC;
-                       dprintk("NFS: acl reply: attrlen %u > page_len %zu\n",
-                               attrlen, xdr_stream_remaining(xdr));
-               }
-       } else
-               status = -EOPNOTSUPP;
+       switch (type) {
+       default:
+               if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U)))
+                       return -EIO;
+               if (!(bitmap[0] & FATTR4_WORD0_ACL))
+                       return -EOPNOTSUPP;
+               break;
+       case NFS4ACL_DACL:
+               if (unlikely(bitmap[0] || bitmap[1] & (FATTR4_WORD1_DACL - 1U)))
+                       return -EIO;
+               if (!(bitmap[1] & FATTR4_WORD1_DACL))
+                       return -EOPNOTSUPP;
+               break;
+       case NFS4ACL_SACL:
+               if (unlikely(bitmap[0] || bitmap[1] & (FATTR4_WORD1_SACL - 1U)))
+                       return -EIO;
+               if (!(bitmap[1] & FATTR4_WORD1_SACL))
+                       return -EOPNOTSUPP;
+       }
 
+       /* The bitmap (xdr len + bitmaps) and the attr xdr len words
+        * are stored with the acl data to handle the problem of
+        * variable length bitmaps.*/
+       res->acl_data_offset = xdr_page_pos(xdr);
+       res->acl_len = attrlen;
+
+       /* Check for receive buffer overflow */
+       if (res->acl_len > xdr_stream_remaining(xdr) ||
+           res->acl_len + res->acl_data_offset > xdr->buf->page_len) {
+               res->acl_flags |= NFS4_ACL_TRUNC;
+               dprintk("NFS: acl reply: attrlen %u > page_len %zu\n",
+                       attrlen, xdr_stream_remaining(xdr));
+       }
 out:
        return status;
 }
@@ -6486,7 +6515,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        status = decode_putfh(xdr);
        if (status)
                goto out;
-       status = decode_getacl(xdr, rqstp, res);
+       status = decode_getacl(xdr, rqstp, res, res->acl_type);
 
 out:
        return status;
@@ -7051,7 +7080,7 @@ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
        if (res->migration) {
                xdr_enter_page(xdr, PAGE_SIZE);
                status = decode_getfattr_generic(xdr,
-                                       &res->fs_locations->fattr,
+                                       res->fs_locations->fattr,
                                         NULL, res->fs_locations,
                                         res->fs_locations->server);
                if (status)
@@ -7064,7 +7093,7 @@ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
                        goto out;
                xdr_enter_page(xdr, PAGE_SIZE);
                status = decode_getfattr_generic(xdr,
-                                       &res->fs_locations->fattr,
+                                       res->fs_locations->fattr,
                                         NULL, res->fs_locations,
                                         res->fs_locations->server);
        }
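
As a rough aid to reading the GETACL/SETACL hunks above: the new acl_type argument selects which attribute-bitmap word carries the request, and nfs4_acltype_to_bitmap() (defined earlier in this patch, not shown here) fills a two-word bitmap accordingly. A minimal standalone sketch of that mapping follows; the function name, enum layout and bit positions below are assumptions based on RFC 5661 attribute numbering (acl = 12, dacl = 58, sacl = 59), not copied from the kernel headers.

/* Illustrative sketch only, not the kernel helper itself. */
#include <stdint.h>
#include <stdio.h>

enum nfs4_acl_type { NFS4ACL_NONE, NFS4ACL_ACL, NFS4ACL_DACL, NFS4ACL_SACL };

#define FATTR4_WORD0_ACL	(1U << 12)		/* attribute 12 -> word 0 */
#define FATTR4_WORD1_DACL	(1U << (58 - 32))	/* attribute 58 -> word 1 */
#define FATTR4_WORD1_SACL	(1U << (59 - 32))	/* attribute 59 -> word 1 */

static void acltype_to_bitmap(enum nfs4_acl_type type, uint32_t bitmap[2])
{
	bitmap[0] = 0;
	bitmap[1] = 0;
	switch (type) {
	case NFS4ACL_DACL:
		bitmap[1] = FATTR4_WORD1_DACL;
		break;
	case NFS4ACL_SACL:
		bitmap[1] = FATTR4_WORD1_SACL;
		break;
	default:			/* plain NFSv4 ACL */
		bitmap[0] = FATTR4_WORD0_ACL;
		break;
	}
}

int main(void)
{
	uint32_t b[2];

	acltype_to_bitmap(NFS4ACL_SACL, b);
	printf("word0=%#x word1=%#x\n", (unsigned)b[0], (unsigned)b[1]);
	return 0;
}

This mirrors the checks added to decode_getacl() above, which rejects a reply whose bitmap sets anything below the requested bit (-EIO) or lacks the requested bit entirely (-EOPNOTSUPP).
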
index 9157dd1..317cedf 100644 (file)
@@ -767,6 +767,9 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
                .flags = RPC_TASK_ASYNC | flags,
        };
 
+       if (nfs_server_capable(hdr->inode, NFS_CAP_MOVEABLE))
+               task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
        hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);
 
        dprintk("NFS: initiated pgio call "
index 856c962..68a87be 100644 (file)
@@ -2000,6 +2000,7 @@ lookup_again:
        lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
        if (lo == NULL) {
                spin_unlock(&ino->i_lock);
+               lseg = ERR_PTR(-ENOMEM);
                trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
                                 PNFS_UPDATE_LAYOUT_NOMEM);
                goto out;
@@ -2128,6 +2129,7 @@ lookup_again:
 
        lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &stateid, &arg, gfp_flags);
        if (!lgp) {
+               lseg = ERR_PTR(-ENOMEM);
                trace_pnfs_update_layout(ino, pos, count, iomode, lo, NULL,
                                         PNFS_UPDATE_LAYOUT_NOMEM);
                nfs_layoutget_end(lo);
index 6f325e1..9697cd5 100644 (file)
@@ -102,6 +102,10 @@ static void nfs_do_call_unlink(struct inode *inode, struct nfs_unlinkdata *data)
        };
        struct rpc_task *task;
        struct inode *dir = d_inode(data->dentry->d_parent);
+
+       if (nfs_server_capable(inode, NFS_CAP_MOVEABLE))
+               task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
        nfs_sb_active(dir->i_sb);
        data->args.fh = NFS_FH(dir);
        nfs_fattr_init(data->res.dir_attr);
@@ -344,6 +348,10 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
                .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
        };
 
+       if (nfs_server_capable(old_dir, NFS_CAP_MOVEABLE) &&
+           nfs_server_capable(new_dir, NFS_CAP_MOVEABLE))
+               task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (data == NULL)
                return ERR_PTR(-ENOMEM);
index f00d45c..1c70646 100644 (file)
@@ -603,8 +603,9 @@ static void nfs_write_error(struct nfs_page *req, int error)
  * Find an associated nfs write request, and prepare to flush it out
  * May return an error if the user signalled nfs_wait_on_request().
  */
-static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
-                               struct page *page)
+static int nfs_page_async_flush(struct page *page,
+                               struct writeback_control *wbc,
+                               struct nfs_pageio_descriptor *pgio)
 {
        struct nfs_page *req;
        int ret = 0;
@@ -630,11 +631,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
                /*
                 * Remove the problematic req upon fatal errors on the server
                 */
-               if (nfs_error_is_fatal(ret)) {
-                       if (nfs_error_is_fatal_on_server(ret))
-                               goto out_launder;
-               } else
-                       ret = -EAGAIN;
+               if (nfs_error_is_fatal_on_server(ret))
+                       goto out_launder;
+               if (wbc->sync_mode == WB_SYNC_NONE)
+                       ret = AOP_WRITEPAGE_ACTIVATE;
+               redirty_page_for_writepage(wbc, page);
                nfs_redirty_request(req);
                pgio->pg_error = 0;
        } else
@@ -650,15 +651,8 @@ out_launder:
 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
                            struct nfs_pageio_descriptor *pgio)
 {
-       int ret;
-
        nfs_pageio_cond_complete(pgio, page_index(page));
-       ret = nfs_page_async_flush(pgio, page);
-       if (ret == -EAGAIN) {
-               redirty_page_for_writepage(wbc, page);
-               ret = AOP_WRITEPAGE_ACTIVATE;
-       }
-       return ret;
+       return nfs_page_async_flush(page, wbc, pgio);
 }
 
 /*
@@ -681,11 +675,7 @@ static int nfs_writepage_locked(struct page *page,
        err = nfs_do_writepage(page, wbc, &pgio);
        pgio.pg_error = 0;
        nfs_pageio_complete(&pgio);
-       if (err < 0)
-               return err;
-       if (nfs_error_is_fatal(pgio.pg_error))
-               return pgio.pg_error;
-       return 0;
+       return err;
 }
 
 int nfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -737,19 +727,19 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
                priority = wb_priority(wbc);
        }
 
-       nfs_pageio_init_write(&pgio, inode, priority, false,
-                               &nfs_async_write_completion_ops);
-       pgio.pg_io_completion = ioc;
-       err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
-       pgio.pg_error = 0;
-       nfs_pageio_complete(&pgio);
+       do {
+               nfs_pageio_init_write(&pgio, inode, priority, false,
+                                     &nfs_async_write_completion_ops);
+               pgio.pg_io_completion = ioc;
+               err = write_cache_pages(mapping, wbc, nfs_writepages_callback,
+                                       &pgio);
+               pgio.pg_error = 0;
+               nfs_pageio_complete(&pgio);
+       } while (err < 0 && !nfs_error_is_fatal(err));
        nfs_io_completion_put(ioc);
 
        if (err < 0)
                goto out_err;
-       err = pgio.pg_error;
-       if (nfs_error_is_fatal(err))
-               goto out_err;
        return 0;
 out_err:
        return err;
@@ -1444,7 +1434,7 @@ static void nfs_async_write_error(struct list_head *head, int error)
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
-               if (nfs_error_is_fatal(error))
+               if (nfs_error_is_fatal_on_server(error))
                        nfs_write_error(req, error);
                else
                        nfs_redirty_request(req);
@@ -1719,6 +1709,10 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
                .flags = RPC_TASK_ASYNC | flags,
                .priority = priority,
        };
+
+       if (nfs_server_capable(data->inode, NFS_CAP_MOVEABLE))
+               task_setup_data.flags |= RPC_TASK_MOVEABLE;
+
        /* Set up the initial task struct.  */
        nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client);
        trace_nfs_initiate_commit(data);
index a4fcdc7..8e9d2b3 100644 (file)
@@ -492,7 +492,7 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
 
        down_write(&ni->file.run_lock);
        err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
-                           &new_valid, true, NULL);
+                           &new_valid, ni->mi.sbi->options->prealloc, NULL);
        up_write(&ni->file.run_lock);
 
        if (new_valid < ni->i_valid)
@@ -659,7 +659,13 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
                /*
                 * Normal file: Allocate clusters, do not change 'valid' size.
                 */
-               err = ntfs_set_size(inode, max(end, i_size));
+               loff_t new_size = max(end, i_size);
+
+               err = inode_newsize_ok(inode, new_size);
+               if (err)
+                       goto out;
+
+               err = ntfs_set_size(inode, new_size);
                if (err)
                        goto out;
 
@@ -759,7 +765,7 @@ int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                }
                inode_dio_wait(inode);
 
-               if (attr->ia_size < oldsize)
+               if (attr->ia_size <= oldsize)
                        err = ntfs_truncate(inode, attr->ia_size);
                else if (attr->ia_size > oldsize)
                        err = ntfs_extend(inode, attr->ia_size, 0, NULL);
index 6f47a9c..1884299 100644 (file)
@@ -1964,10 +1964,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
 
                vcn += clen;
 
-               if (vbo + bytes >= end) {
+               if (vbo + bytes >= end)
                        bytes = end - vbo;
-                       flags |= FIEMAP_EXTENT_LAST;
-               }
 
                if (vbo + bytes <= valid) {
                        ;
@@ -1977,6 +1975,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
                        /* vbo < valid && valid < vbo + bytes */
                        u64 dlen = valid - vbo;
 
+                       if (vbo + dlen >= end)
+                               flags |= FIEMAP_EXTENT_LAST;
+
                        err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen,
                                                      flags);
                        if (err < 0)
@@ -1995,6 +1996,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
                        flags |= FIEMAP_EXTENT_UNWRITTEN;
                }
 
+               if (vbo + bytes >= end)
+                       flags |= FIEMAP_EXTENT_LAST;
+
                err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags);
                if (err < 0)
                        break;
index 06492f0..49b7df6 100644 (file)
@@ -1185,8 +1185,6 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
        if (!r_page)
                return -ENOMEM;
 
-       memset(info, 0, sizeof(struct restart_info));
-
        /* Determine which restart area we are looking for. */
        if (first) {
                vbo = 0;
@@ -3791,10 +3789,11 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
        if (!log)
                return -ENOMEM;
 
+       memset(&rst_info, 0, sizeof(struct restart_info));
+
        log->ni = ni;
        log->l_size = l_size;
        log->one_page_buf = kmalloc(page_size, GFP_NOFS);
-
        if (!log->one_page_buf) {
                err = -ENOMEM;
                goto out;
@@ -3842,6 +3841,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
        if (rst_info.vbo)
                goto check_restart_area;
 
+       memset(&rst_info2, 0, sizeof(struct restart_info));
        err = log_read_rst(log, l_size, false, &rst_info2);
 
        /* Determine which restart area to use. */
@@ -4085,8 +4085,10 @@ process_log:
                if (client == LFS_NO_CLIENT_LE) {
                        /* Insert "NTFS" client LogFile. */
                        client = ra->client_idx[0];
-                       if (client == LFS_NO_CLIENT_LE)
-                               return -EINVAL;
+                       if (client == LFS_NO_CLIENT_LE) {
+                               err = -EINVAL;
+                               goto out;
+                       }
 
                        t16 = le16_to_cpu(client);
                        cr = ca + t16;
index 74f60c4..be4ebdd 100644 (file)
@@ -758,6 +758,7 @@ static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
        loff_t vbo = iocb->ki_pos;
        loff_t end;
        int wr = iov_iter_rw(iter) & WRITE;
+       size_t iter_count = iov_iter_count(iter);
        loff_t valid;
        ssize_t ret;
 
@@ -771,10 +772,13 @@ static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
                                 wr ? ntfs_get_block_direct_IO_W
                                    : ntfs_get_block_direct_IO_R);
 
-       if (ret <= 0)
+       if (ret > 0)
+               end = vbo + ret;
+       else if (wr && ret == -EIOCBQUEUED)
+               end = vbo + iter_count;
+       else
                goto out;
 
-       end = vbo + ret;
        valid = ni->i_valid;
        if (wr) {
                if (end > valid && !S_ISBLK(inode->i_mode)) {
@@ -1950,6 +1954,7 @@ const struct address_space_operations ntfs_aops = {
        .direct_IO      = ntfs_direct_IO,
        .bmap           = ntfs_bmap,
        .dirty_folio    = block_dirty_folio,
+       .invalidate_folio = block_invalidate_folio,
 };
 
 const struct address_space_operations ntfs_aops_cmpr = {
index afd0dda..5e0e028 100644 (file)
@@ -112,7 +112,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
                return -ENOMEM;
 
        if (!size) {
-               ;
+               /* EA info persists, but xattr is empty. Looks like EA problem. */
        } else if (attr_ea->non_res) {
                struct runs_tree run;
 
@@ -259,7 +259,7 @@ out:
 
 static noinline int ntfs_set_ea(struct inode *inode, const char *name,
                                size_t name_len, const void *value,
-                               size_t val_size, int flags)
+                               size_t val_size, int flags, bool locked)
 {
        struct ntfs_inode *ni = ntfs_i(inode);
        struct ntfs_sb_info *sbi = ni->mi.sbi;
@@ -278,7 +278,8 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
        u64 new_sz;
        void *p;
 
-       ni_lock(ni);
+       if (!locked)
+               ni_lock(ni);
 
        run_init(&ea_run);
 
@@ -467,7 +468,8 @@ update_ea:
        mark_inode_dirty(&ni->vfs_inode);
 
 out:
-       ni_unlock(ni);
+       if (!locked)
+               ni_unlock(ni);
 
        run_close(&ea_run);
        kfree(ea_all);
@@ -541,7 +543,7 @@ struct posix_acl *ntfs_get_acl(struct inode *inode, int type, bool rcu)
 
 static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
                                    struct inode *inode, struct posix_acl *acl,
-                                   int type)
+                                   int type, bool init_acl)
 {
        const char *name;
        size_t size, name_len;
@@ -554,8 +556,9 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
 
        switch (type) {
        case ACL_TYPE_ACCESS:
-               if (acl) {
-                       umode_t mode = inode->i_mode;
+               /* Do not change i_mode if we are in init_acl */
+               if (acl && !init_acl) {
+                       umode_t mode;
 
                        err = posix_acl_update_mode(mnt_userns, inode, &mode,
                                                    &acl);
@@ -598,7 +601,7 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
                flags = 0;
        }
 
-       err = ntfs_set_ea(inode, name, name_len, value, size, flags);
+       err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
        if (err == -ENODATA && !size)
                err = 0; /* Removing non existed xattr. */
        if (!err)
@@ -616,7 +619,68 @@ out:
 int ntfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
                 struct posix_acl *acl, int type)
 {
-       return ntfs_set_acl_ex(mnt_userns, inode, acl, type);
+       return ntfs_set_acl_ex(mnt_userns, inode, acl, type, false);
+}
+
+static int ntfs_xattr_get_acl(struct user_namespace *mnt_userns,
+                             struct inode *inode, int type, void *buffer,
+                             size_t size)
+{
+       struct posix_acl *acl;
+       int err;
+
+       if (!(inode->i_sb->s_flags & SB_POSIXACL)) {
+               ntfs_inode_warn(inode, "add mount option \"acl\" to use acl");
+               return -EOPNOTSUPP;
+       }
+
+       acl = ntfs_get_acl(inode, type, false);
+       if (IS_ERR(acl))
+               return PTR_ERR(acl);
+
+       if (!acl)
+               return -ENODATA;
+
+       err = posix_acl_to_xattr(mnt_userns, acl, buffer, size);
+       posix_acl_release(acl);
+
+       return err;
+}
+
+static int ntfs_xattr_set_acl(struct user_namespace *mnt_userns,
+                             struct inode *inode, int type, const void *value,
+                             size_t size)
+{
+       struct posix_acl *acl;
+       int err;
+
+       if (!(inode->i_sb->s_flags & SB_POSIXACL)) {
+               ntfs_inode_warn(inode, "add mount option \"acl\" to use acl");
+               return -EOPNOTSUPP;
+       }
+
+       if (!inode_owner_or_capable(mnt_userns, inode))
+               return -EPERM;
+
+       if (!value) {
+               acl = NULL;
+       } else {
+               acl = posix_acl_from_xattr(mnt_userns, value, size);
+               if (IS_ERR(acl))
+                       return PTR_ERR(acl);
+
+               if (acl) {
+                       err = posix_acl_valid(mnt_userns, acl);
+                       if (err)
+                               goto release_and_out;
+               }
+       }
+
+       err = ntfs_set_acl(mnt_userns, inode, acl, type);
+
+release_and_out:
+       posix_acl_release(acl);
+       return err;
 }
 
 /*
@@ -636,7 +700,7 @@ int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
 
        if (default_acl) {
                err = ntfs_set_acl_ex(mnt_userns, inode, default_acl,
-                                     ACL_TYPE_DEFAULT);
+                                     ACL_TYPE_DEFAULT, true);
                posix_acl_release(default_acl);
        } else {
                inode->i_default_acl = NULL;
@@ -647,7 +711,7 @@ int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
        else {
                if (!err)
                        err = ntfs_set_acl_ex(mnt_userns, inode, acl,
-                                             ACL_TYPE_ACCESS);
+                                             ACL_TYPE_ACCESS, true);
                posix_acl_release(acl);
        }
 
@@ -785,6 +849,23 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
                goto out;
        }
 
+#ifdef CONFIG_NTFS3_FS_POSIX_ACL
+       if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
+            !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
+                    sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
+           (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
+            !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
+                    sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
+               /* TODO: init_user_ns? */
+               err = ntfs_xattr_get_acl(
+                       &init_user_ns, inode,
+                       name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
+                               ? ACL_TYPE_ACCESS
+                               : ACL_TYPE_DEFAULT,
+                       buffer, size);
+               goto out;
+       }
+#endif
        /* Deal with NTFS extended attribute. */
        err = ntfs_get_ea(inode, name, name_len, buffer, size, NULL);
 
@@ -897,10 +978,29 @@ set_new_fa:
                goto out;
        }
 
+#ifdef CONFIG_NTFS3_FS_POSIX_ACL
+       if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
+            !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
+                    sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
+           (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
+            !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
+                    sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
+               err = ntfs_xattr_set_acl(
+                       mnt_userns, inode,
+                       name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
+                               ? ACL_TYPE_ACCESS
+                               : ACL_TYPE_DEFAULT,
+                       value, size);
+               goto out;
+       }
+#endif
        /* Deal with NTFS extended attribute. */
-       err = ntfs_set_ea(inode, name, name_len, value, size, flags);
+       err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
 
 out:
+       inode->i_ctime = current_time(inode);
+       mark_inode_dirty(inode);
+
        return err;
 }
 
@@ -913,35 +1013,37 @@ int ntfs_save_wsl_perm(struct inode *inode)
 {
        int err;
        __le32 value;
+       struct ntfs_inode *ni = ntfs_i(inode);
 
-       /* TODO: refactor this, so we don't lock 4 times in ntfs_set_ea */
+       ni_lock(ni);
        value = cpu_to_le32(i_uid_read(inode));
        err = ntfs_set_ea(inode, "$LXUID", sizeof("$LXUID") - 1, &value,
-                         sizeof(value), 0);
+                         sizeof(value), 0, true); /* true == already locked. */
        if (err)
                goto out;
 
        value = cpu_to_le32(i_gid_read(inode));
        err = ntfs_set_ea(inode, "$LXGID", sizeof("$LXGID") - 1, &value,
-                         sizeof(value), 0);
+                         sizeof(value), 0, true);
        if (err)
                goto out;
 
        value = cpu_to_le32(inode->i_mode);
        err = ntfs_set_ea(inode, "$LXMOD", sizeof("$LXMOD") - 1, &value,
-                         sizeof(value), 0);
+                         sizeof(value), 0, true);
        if (err)
                goto out;
 
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                value = cpu_to_le32(inode->i_rdev);
                err = ntfs_set_ea(inode, "$LXDEV", sizeof("$LXDEV") - 1, &value,
-                                 sizeof(value), 0);
+                                 sizeof(value), 0, true);
                if (err)
                        goto out;
        }
 
 out:
+       ni_unlock(ni);
        /* In case of error should we delete all WSL xattr? */
        return err;
 }
index be849dc..1d57fbd 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -224,6 +224,21 @@ SYSCALL_DEFINE2(ftruncate64, unsigned int, fd, loff_t, length)
 }
 #endif /* BITS_PER_LONG == 32 */
 
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_TRUNCATE64)
+COMPAT_SYSCALL_DEFINE3(truncate64, const char __user *, pathname,
+                      compat_arg_u64_dual(length))
+{
+       return ksys_truncate(pathname, compat_arg_u64_glue(length));
+}
+#endif
+
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FTRUNCATE64)
+COMPAT_SYSCALL_DEFINE3(ftruncate64, unsigned int, fd,
+                      compat_arg_u64_dual(length))
+{
+       return ksys_ftruncate(fd, compat_arg_u64_glue(length));
+}
+#endif
 
 int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 {
@@ -339,6 +354,15 @@ SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len)
        return ksys_fallocate(fd, mode, offset, len);
 }
 
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FALLOCATE)
+COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode, compat_arg_u64_dual(offset),
+                      compat_arg_u64_dual(len))
+{
+       return ksys_fallocate(fd, mode, compat_arg_u64_glue(offset),
+                             compat_arg_u64_glue(len));
+}
+#endif
+
 /*
  * access() needs to use the real uid/gid, not the effective uid/gid.
  * We do this by temporarily clearing all FS-related capabilities and
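
The compat wrappers added above exist because a 32-bit task cannot pass a 64-bit length or offset in a single register: userspace hands the kernel two 32-bit halves, and compat_arg_u64_dual()/compat_arg_u64_glue() declare and reassemble them. Below is a hedged userspace-style model of that reassembly (helper names are illustrative); the real macros additionally handle register-pair alignment and ABIs that pass the high half first, which this sketch ignores.

#include <stdint.h>
#include <stdio.h>

/* Reassemble a 64-bit value from its low and high 32-bit halves. */
static uint64_t glue_u64(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

/* Split a 64-bit value the way a 32-bit caller would pass it. */
static void split_u64(uint64_t v, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)v;
	*hi = (uint32_t)(v >> 32);
}

int main(void)
{
	uint32_t lo, hi;
	uint64_t len = 5ULL * 1024 * 1024 * 1024;	/* 5 GiB, needs > 32 bits */

	split_u64(len, &lo, &hi);
	printf("lo=%#x hi=%#x glued=%llu\n", (unsigned)lo, (unsigned)hi,
	       (unsigned long long)glue_u64(lo, hi));
	return 0;
}
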
index e040970..714ec56 100644 (file)
@@ -44,9 +44,9 @@ static bool ovl_must_copy_xattr(const char *name)
               !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN);
 }
 
-int ovl_copy_xattr(struct super_block *sb, struct dentry *old,
-                  struct dentry *new)
+int ovl_copy_xattr(struct super_block *sb, struct path *oldpath, struct dentry *new)
 {
+       struct dentry *old = oldpath->dentry;
        ssize_t list_size, size, value_size = 0;
        char *buf, *name, *value = NULL;
        int error = 0;
@@ -94,9 +94,9 @@ int ovl_copy_xattr(struct super_block *sb, struct dentry *old,
                        continue; /* Discard */
                }
 retry:
-               size = vfs_getxattr(&init_user_ns, old, name, value, value_size);
+               size = ovl_do_getxattr(oldpath, name, value, value_size);
                if (size == -ERANGE)
-                       size = vfs_getxattr(&init_user_ns, old, name, NULL, 0);
+                       size = ovl_do_getxattr(oldpath, name, NULL, 0);
 
                if (size < 0) {
                        error = size;
@@ -117,7 +117,7 @@ retry:
                        goto retry;
                }
 
-               error = vfs_setxattr(&init_user_ns, new, name, value, size, 0);
+               error = ovl_do_setxattr(OVL_FS(sb), new, name, value, size, 0);
                if (error) {
                        if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name))
                                break;
@@ -292,17 +292,19 @@ out_fput:
        return error;
 }
 
-static int ovl_set_size(struct dentry *upperdentry, struct kstat *stat)
+static int ovl_set_size(struct ovl_fs *ofs,
+                       struct dentry *upperdentry, struct kstat *stat)
 {
        struct iattr attr = {
                .ia_valid = ATTR_SIZE,
                .ia_size = stat->size,
        };
 
-       return notify_change(&init_user_ns, upperdentry, &attr, NULL);
+       return ovl_do_notify_change(ofs, upperdentry, &attr);
 }
 
-static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
+static int ovl_set_timestamps(struct ovl_fs *ofs, struct dentry *upperdentry,
+                             struct kstat *stat)
 {
        struct iattr attr = {
                .ia_valid =
@@ -311,10 +313,11 @@ static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
                .ia_mtime = stat->mtime,
        };
 
-       return notify_change(&init_user_ns, upperdentry, &attr, NULL);
+       return ovl_do_notify_change(ofs, upperdentry, &attr);
 }
 
-int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
+int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upperdentry,
+                struct kstat *stat)
 {
        int err = 0;
 
@@ -323,7 +326,7 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
                        .ia_valid = ATTR_MODE,
                        .ia_mode = stat->mode,
                };
-               err = notify_change(&init_user_ns, upperdentry, &attr, NULL);
+               err = ovl_do_notify_change(ofs, upperdentry, &attr);
        }
        if (!err) {
                struct iattr attr = {
@@ -331,10 +334,10 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
                        .ia_uid = stat->uid,
                        .ia_gid = stat->gid,
                };
-               err = notify_change(&init_user_ns, upperdentry, &attr, NULL);
+               err = ovl_do_notify_change(ofs, upperdentry, &attr);
        }
        if (!err)
-               ovl_set_timestamps(upperdentry, stat);
+               ovl_set_timestamps(ofs, upperdentry, stat);
 
        return err;
 }
@@ -433,7 +436,7 @@ static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper,
        if (IS_ERR(fh))
                return PTR_ERR(fh);
 
-       err = ovl_do_setxattr(ofs, index, OVL_XATTR_UPPER, fh->buf, fh->fb.len);
+       err = ovl_setxattr(ofs, index, OVL_XATTR_UPPER, fh->buf, fh->fb.len);
 
        kfree(fh);
        return err;
@@ -474,7 +477,7 @@ static int ovl_create_index(struct dentry *dentry, struct dentry *origin,
        if (err)
                return err;
 
-       temp = ovl_create_temp(indexdir, OVL_CATTR(S_IFDIR | 0));
+       temp = ovl_create_temp(ofs, indexdir, OVL_CATTR(S_IFDIR | 0));
        err = PTR_ERR(temp);
        if (IS_ERR(temp))
                goto free_name;
@@ -483,16 +486,16 @@ static int ovl_create_index(struct dentry *dentry, struct dentry *origin,
        if (err)
                goto out;
 
-       index = lookup_one_len(name.name, indexdir, name.len);
+       index = ovl_lookup_upper(ofs, name.name, indexdir, name.len);
        if (IS_ERR(index)) {
                err = PTR_ERR(index);
        } else {
-               err = ovl_do_rename(dir, temp, dir, index, 0);
+               err = ovl_do_rename(ofs, dir, temp, dir, index, 0);
                dput(index);
        }
 out:
        if (err)
-               ovl_cleanup(dir, temp);
+               ovl_cleanup(ofs, dir, temp);
        dput(temp);
 free_name:
        kfree(name.name);
@@ -519,6 +522,7 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
        int err;
        struct dentry *upper;
        struct dentry *upperdir = ovl_dentry_upper(c->parent);
+       struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
        struct inode *udir = d_inode(upperdir);
 
        /* Mark parent "impure" because it may now contain non-pure upper */
@@ -531,16 +535,16 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
                return err;
 
        inode_lock_nested(udir, I_MUTEX_PARENT);
-       upper = lookup_one_len(c->dentry->d_name.name, upperdir,
-                              c->dentry->d_name.len);
+       upper = ovl_lookup_upper(ofs, c->dentry->d_name.name, upperdir,
+                                c->dentry->d_name.len);
        err = PTR_ERR(upper);
        if (!IS_ERR(upper)) {
-               err = ovl_do_link(ovl_dentry_upper(c->dentry), udir, upper);
+               err = ovl_do_link(ofs, ovl_dentry_upper(c->dentry), udir, upper);
                dput(upper);
 
                if (!err) {
                        /* Restore timestamps on parent (best effort) */
-                       ovl_set_timestamps(upperdir, &c->pstat);
+                       ovl_set_timestamps(ofs, upperdir, &c->pstat);
                        ovl_dentry_set_upper_alias(c->dentry);
                }
        }
@@ -578,7 +582,7 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
                        return err;
        }
 
-       err = ovl_copy_xattr(c->dentry->d_sb, c->lowerpath.dentry, temp);
+       err = ovl_copy_xattr(c->dentry->d_sb, &c->lowerpath, temp);
        if (err)
                return err;
 
@@ -614,9 +618,9 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
 
        inode_lock(temp->d_inode);
        if (S_ISREG(c->stat.mode))
-               err = ovl_set_size(temp, &c->stat);
+               err = ovl_set_size(ofs, temp, &c->stat);
        if (!err)
-               err = ovl_set_attr(temp, &c->stat);
+               err = ovl_set_attr(ofs, temp, &c->stat);
        inode_unlock(temp->d_inode);
 
        return err;
@@ -656,6 +660,7 @@ static void ovl_revert_cu_creds(struct ovl_cu_creds *cc)
  */
 static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
 {
+       struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
        struct inode *inode;
        struct inode *udir = d_inode(c->destdir), *wdir = d_inode(c->workdir);
        struct dentry *temp, *upper;
@@ -677,7 +682,7 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
        if (err)
                goto unlock;
 
-       temp = ovl_create_temp(c->workdir, &cattr);
+       temp = ovl_create_temp(ofs, c->workdir, &cattr);
        ovl_revert_cu_creds(&cc);
 
        err = PTR_ERR(temp);
@@ -694,12 +699,13 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
                        goto cleanup;
        }
 
-       upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len);
+       upper = ovl_lookup_upper(ofs, c->destname.name, c->destdir,
+                                c->destname.len);
        err = PTR_ERR(upper);
        if (IS_ERR(upper))
                goto cleanup;
 
-       err = ovl_do_rename(wdir, temp, udir, upper, 0);
+       err = ovl_do_rename(ofs, wdir, temp, udir, upper, 0);
        dput(upper);
        if (err)
                goto cleanup;
@@ -716,7 +722,7 @@ unlock:
        return err;
 
 cleanup:
-       ovl_cleanup(wdir, temp);
+       ovl_cleanup(ofs, wdir, temp);
        dput(temp);
        goto unlock;
 }
@@ -724,6 +730,7 @@ cleanup:
 /* Copyup using O_TMPFILE which does not require cross dir locking */
 static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
 {
+       struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
        struct inode *udir = d_inode(c->destdir);
        struct dentry *temp, *upper;
        struct ovl_cu_creds cc;
@@ -733,7 +740,7 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
        if (err)
                return err;
 
-       temp = ovl_do_tmpfile(c->workdir, c->stat.mode);
+       temp = ovl_do_tmpfile(ofs, c->workdir, c->stat.mode);
        ovl_revert_cu_creds(&cc);
 
        if (IS_ERR(temp))
@@ -745,10 +752,11 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
 
        inode_lock_nested(udir, I_MUTEX_PARENT);
 
-       upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len);
+       upper = ovl_lookup_upper(ofs, c->destname.name, c->destdir,
+                                c->destname.len);
        err = PTR_ERR(upper);
        if (!IS_ERR(upper)) {
-               err = ovl_do_link(temp, udir, upper);
+               err = ovl_do_link(ofs, temp, udir, upper);
                dput(upper);
        }
        inode_unlock(udir);
@@ -836,7 +844,7 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
 
                /* Restore timestamps on parent (best effort) */
                inode_lock(udir);
-               ovl_set_timestamps(c->destdir, &c->pstat);
+               ovl_set_timestamps(ofs, c->destdir, &c->pstat);
                inode_unlock(udir);
 
                ovl_dentry_set_upper_alias(c->dentry);
@@ -865,12 +873,12 @@ static bool ovl_need_meta_copy_up(struct dentry *dentry, umode_t mode,
        return true;
 }
 
-static ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value)
+static ssize_t ovl_getxattr_value(struct path *path, char *name, char **value)
 {
        ssize_t res;
        char *buf;
 
-       res = vfs_getxattr(&init_user_ns, dentry, name, NULL, 0);
+       res = ovl_do_getxattr(path, name, NULL, 0);
        if (res == -ENODATA || res == -EOPNOTSUPP)
                res = 0;
 
@@ -879,7 +887,7 @@ static ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value)
                if (!buf)
                        return -ENOMEM;
 
-               res = vfs_getxattr(&init_user_ns, dentry, name, buf, res);
+               res = ovl_do_getxattr(path, name, buf, res);
                if (res < 0)
                        kfree(buf);
                else
@@ -906,8 +914,8 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
                return -EIO;
 
        if (c->stat.size) {
-               err = cap_size = ovl_getxattr(upperpath.dentry, XATTR_NAME_CAPS,
-                                             &capability);
+               err = cap_size = ovl_getxattr_value(&upperpath, XATTR_NAME_CAPS,
+                                                   &capability);
                if (cap_size < 0)
                        goto out;
        }
@@ -921,14 +929,14 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
         * don't want that to happen for normal copy-up operation.
         */
        if (capability) {
-               err = vfs_setxattr(&init_user_ns, upperpath.dentry,
-                                  XATTR_NAME_CAPS, capability, cap_size, 0);
+               err = ovl_do_setxattr(ofs, upperpath.dentry, XATTR_NAME_CAPS,
+                                     capability, cap_size, 0);
                if (err)
                        goto out_free;
        }
 
 
-       err = ovl_do_removexattr(ofs, upperpath.dentry, OVL_XATTR_METACOPY);
+       err = ovl_removexattr(ofs, upperpath.dentry, OVL_XATTR_METACOPY);
        if (err)
                goto out_free;
 
index f184908..6b03457 100644 (file)
@@ -23,15 +23,15 @@ MODULE_PARM_DESC(redirect_max,
 
 static int ovl_set_redirect(struct dentry *dentry, bool samedir);
 
-int ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
+int ovl_cleanup(struct ovl_fs *ofs, struct inode *wdir, struct dentry *wdentry)
 {
        int err;
 
        dget(wdentry);
        if (d_is_dir(wdentry))
-               err = ovl_do_rmdir(wdir, wdentry);
+               err = ovl_do_rmdir(ofs, wdir, wdentry);
        else
-               err = ovl_do_unlink(wdir, wdentry);
+               err = ovl_do_unlink(ofs, wdir, wdentry);
        dput(wdentry);
 
        if (err) {
@@ -42,7 +42,7 @@ int ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
        return err;
 }
 
-struct dentry *ovl_lookup_temp(struct dentry *workdir)
+struct dentry *ovl_lookup_temp(struct ovl_fs *ofs, struct dentry *workdir)
 {
        struct dentry *temp;
        char name[20];
@@ -51,7 +51,7 @@ struct dentry *ovl_lookup_temp(struct dentry *workdir)
        /* counter is allowed to wrap, since temp dentries are ephemeral */
        snprintf(name, sizeof(name), "#%x", atomic_inc_return(&temp_id));
 
-       temp = lookup_one_len(name, workdir, strlen(name));
+       temp = ovl_lookup_upper(ofs, name, workdir, strlen(name));
        if (!IS_ERR(temp) && temp->d_inode) {
                pr_err("workdir/%s already exists\n", name);
                dput(temp);
@@ -70,11 +70,11 @@ static struct dentry *ovl_whiteout(struct ovl_fs *ofs)
        struct inode *wdir = workdir->d_inode;
 
        if (!ofs->whiteout) {
-               whiteout = ovl_lookup_temp(workdir);
+               whiteout = ovl_lookup_temp(ofs, workdir);
                if (IS_ERR(whiteout))
                        goto out;
 
-               err = ovl_do_whiteout(wdir, whiteout);
+               err = ovl_do_whiteout(ofs, wdir, whiteout);
                if (err) {
                        dput(whiteout);
                        whiteout = ERR_PTR(err);
@@ -84,11 +84,11 @@ static struct dentry *ovl_whiteout(struct ovl_fs *ofs)
        }
 
        if (ofs->share_whiteout) {
-               whiteout = ovl_lookup_temp(workdir);
+               whiteout = ovl_lookup_temp(ofs, workdir);
                if (IS_ERR(whiteout))
                        goto out;
 
-               err = ovl_do_link(ofs->whiteout, wdir, whiteout);
+               err = ovl_do_link(ofs, ofs->whiteout, wdir, whiteout);
                if (!err)
                        goto out;
 
@@ -122,27 +122,28 @@ int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct inode *dir,
        if (d_is_dir(dentry))
                flags = RENAME_EXCHANGE;
 
-       err = ovl_do_rename(wdir, whiteout, dir, dentry, flags);
+       err = ovl_do_rename(ofs, wdir, whiteout, dir, dentry, flags);
        if (err)
                goto kill_whiteout;
        if (flags)
-               ovl_cleanup(wdir, dentry);
+               ovl_cleanup(ofs, wdir, dentry);
 
 out:
        dput(whiteout);
        return err;
 
 kill_whiteout:
-       ovl_cleanup(wdir, whiteout);
+       ovl_cleanup(ofs, wdir, whiteout);
        goto out;
 }
 
-int ovl_mkdir_real(struct inode *dir, struct dentry **newdentry, umode_t mode)
+int ovl_mkdir_real(struct ovl_fs *ofs, struct inode *dir,
+                  struct dentry **newdentry, umode_t mode)
 {
        int err;
        struct dentry *d, *dentry = *newdentry;
 
-       err = ovl_do_mkdir(dir, dentry, mode);
+       err = ovl_do_mkdir(ofs, dir, dentry, mode);
        if (err)
                return err;
 
@@ -154,8 +155,8 @@ int ovl_mkdir_real(struct inode *dir, struct dentry **newdentry, umode_t mode)
         * to it unhashed and negative. If that happens, try to
         * lookup a new hashed and positive dentry.
         */
-       d = lookup_one_len(dentry->d_name.name, dentry->d_parent,
-                          dentry->d_name.len);
+       d = ovl_lookup_upper(ofs, dentry->d_name.name, dentry->d_parent,
+                            dentry->d_name.len);
        if (IS_ERR(d)) {
                pr_warn("failed lookup after mkdir (%pd2, err=%i).\n",
                        dentry, err);
@@ -167,8 +168,8 @@ int ovl_mkdir_real(struct inode *dir, struct dentry **newdentry, umode_t mode)
        return 0;
 }
 
-struct dentry *ovl_create_real(struct inode *dir, struct dentry *newdentry,
-                              struct ovl_cattr *attr)
+struct dentry *ovl_create_real(struct ovl_fs *ofs, struct inode *dir,
+                              struct dentry *newdentry, struct ovl_cattr *attr)
 {
        int err;
 
@@ -180,28 +181,28 @@ struct dentry *ovl_create_real(struct inode *dir, struct dentry *newdentry,
                goto out;
 
        if (attr->hardlink) {
-               err = ovl_do_link(attr->hardlink, dir, newdentry);
+               err = ovl_do_link(ofs, attr->hardlink, dir, newdentry);
        } else {
                switch (attr->mode & S_IFMT) {
                case S_IFREG:
-                       err = ovl_do_create(dir, newdentry, attr->mode);
+                       err = ovl_do_create(ofs, dir, newdentry, attr->mode);
                        break;
 
                case S_IFDIR:
                        /* mkdir is special... */
-                       err =  ovl_mkdir_real(dir, &newdentry, attr->mode);
+                       err =  ovl_mkdir_real(ofs, dir, &newdentry, attr->mode);
                        break;
 
                case S_IFCHR:
                case S_IFBLK:
                case S_IFIFO:
                case S_IFSOCK:
-                       err = ovl_do_mknod(dir, newdentry, attr->mode,
+                       err = ovl_do_mknod(ofs, dir, newdentry, attr->mode,
                                           attr->rdev);
                        break;
 
                case S_IFLNK:
-                       err = ovl_do_symlink(dir, newdentry, attr->link);
+                       err = ovl_do_symlink(ofs, dir, newdentry, attr->link);
                        break;
 
                default:
@@ -223,10 +224,11 @@ out:
        return newdentry;
 }
 
-struct dentry *ovl_create_temp(struct dentry *workdir, struct ovl_cattr *attr)
+struct dentry *ovl_create_temp(struct ovl_fs *ofs, struct dentry *workdir,
+                              struct ovl_cattr *attr)
 {
-       return ovl_create_real(d_inode(workdir), ovl_lookup_temp(workdir),
-                              attr);
+       return ovl_create_real(ofs, d_inode(workdir),
+                              ovl_lookup_temp(ofs, workdir), attr);
 }
 
 static int ovl_set_opaque_xerr(struct dentry *dentry, struct dentry *upper,
@@ -330,10 +332,9 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
                attr->mode &= ~current_umask();
 
        inode_lock_nested(udir, I_MUTEX_PARENT);
-       newdentry = ovl_create_real(udir,
-                                   lookup_one_len(dentry->d_name.name,
-                                                  upperdir,
-                                                  dentry->d_name.len),
+       newdentry = ovl_create_real(ofs, udir,
+                                   ovl_lookup_upper(ofs, dentry->d_name.name,
+                                                    upperdir, dentry->d_name.len),
                                    attr);
        err = PTR_ERR(newdentry);
        if (IS_ERR(newdentry))
@@ -353,7 +354,7 @@ out_unlock:
        return err;
 
 out_cleanup:
-       ovl_cleanup(udir, newdentry);
+       ovl_cleanup(ofs, udir, newdentry);
        dput(newdentry);
        goto out_unlock;
 }
@@ -361,6 +362,7 @@ out_cleanup:
 static struct dentry *ovl_clear_empty(struct dentry *dentry,
                                      struct list_head *list)
 {
+       struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
        struct dentry *workdir = ovl_workdir(dentry);
        struct inode *wdir = workdir->d_inode;
        struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
@@ -391,12 +393,12 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
        if (upper->d_parent->d_inode != udir)
                goto out_unlock;
 
-       opaquedir = ovl_create_temp(workdir, OVL_CATTR(stat.mode));
+       opaquedir = ovl_create_temp(ofs, workdir, OVL_CATTR(stat.mode));
        err = PTR_ERR(opaquedir);
        if (IS_ERR(opaquedir))
                goto out_unlock;
 
-       err = ovl_copy_xattr(dentry->d_sb, upper, opaquedir);
+       err = ovl_copy_xattr(dentry->d_sb, &upperpath, opaquedir);
        if (err)
                goto out_cleanup;
 
@@ -405,17 +407,17 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
                goto out_cleanup;
 
        inode_lock(opaquedir->d_inode);
-       err = ovl_set_attr(opaquedir, &stat);
+       err = ovl_set_attr(ofs, opaquedir, &stat);
        inode_unlock(opaquedir->d_inode);
        if (err)
                goto out_cleanup;
 
-       err = ovl_do_rename(wdir, opaquedir, udir, upper, RENAME_EXCHANGE);
+       err = ovl_do_rename(ofs, wdir, opaquedir, udir, upper, RENAME_EXCHANGE);
        if (err)
                goto out_cleanup;
 
-       ovl_cleanup_whiteouts(upper, list);
-       ovl_cleanup(wdir, upper);
+       ovl_cleanup_whiteouts(ofs, upper, list);
+       ovl_cleanup(ofs, wdir, upper);
        unlock_rename(workdir, upperdir);
 
        /* dentry's upper doesn't match now, get rid of it */
@@ -424,7 +426,7 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
        return opaquedir;
 
 out_cleanup:
-       ovl_cleanup(wdir, opaquedir);
+       ovl_cleanup(ofs, wdir, opaquedir);
        dput(opaquedir);
 out_unlock:
        unlock_rename(workdir, upperdir);
@@ -432,8 +434,8 @@ out:
        return ERR_PTR(err);
 }
 
-static int ovl_set_upper_acl(struct dentry *upperdentry, const char *name,
-                            const struct posix_acl *acl)
+static int ovl_set_upper_acl(struct ovl_fs *ofs, struct dentry *upperdentry,
+                            const char *name, const struct posix_acl *acl)
 {
        void *buffer;
        size_t size;
@@ -451,7 +453,7 @@ static int ovl_set_upper_acl(struct dentry *upperdentry, const char *name,
        if (err < 0)
                goto out_free;
 
-       err = vfs_setxattr(&init_user_ns, upperdentry, name, buffer, size, XATTR_CREATE);
+       err = ovl_do_setxattr(ofs, upperdentry, name, buffer, size, XATTR_CREATE);
 out_free:
        kfree(buffer);
        return err;
@@ -460,6 +462,7 @@ out_free:
 static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
                                    struct ovl_cattr *cattr)
 {
+       struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
        struct dentry *workdir = ovl_workdir(dentry);
        struct inode *wdir = workdir->d_inode;
        struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
@@ -484,8 +487,8 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
        if (err)
                goto out;
 
-       upper = lookup_one_len(dentry->d_name.name, upperdir,
-                              dentry->d_name.len);
+       upper = ovl_lookup_upper(ofs, dentry->d_name.name, upperdir,
+                                dentry->d_name.len);
        err = PTR_ERR(upper);
        if (IS_ERR(upper))
                goto out_unlock;
@@ -494,7 +497,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
        if (d_is_negative(upper) || !IS_WHITEOUT(d_inode(upper)))
                goto out_dput;
 
-       newdentry = ovl_create_temp(workdir, cattr);
+       newdentry = ovl_create_temp(ofs, workdir, cattr);
        err = PTR_ERR(newdentry);
        if (IS_ERR(newdentry))
                goto out_dput;
@@ -510,19 +513,19 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
                        .ia_mode = cattr->mode,
                };
                inode_lock(newdentry->d_inode);
-               err = notify_change(&init_user_ns, newdentry, &attr, NULL);
+               err = ovl_do_notify_change(ofs, newdentry, &attr);
                inode_unlock(newdentry->d_inode);
                if (err)
                        goto out_cleanup;
        }
        if (!hardlink) {
-               err = ovl_set_upper_acl(newdentry, XATTR_NAME_POSIX_ACL_ACCESS,
-                                       acl);
+               err = ovl_set_upper_acl(ofs, newdentry,
+                                       XATTR_NAME_POSIX_ACL_ACCESS, acl);
                if (err)
                        goto out_cleanup;
 
-               err = ovl_set_upper_acl(newdentry, XATTR_NAME_POSIX_ACL_DEFAULT,
-                                       default_acl);
+               err = ovl_set_upper_acl(ofs, newdentry,
+                                       XATTR_NAME_POSIX_ACL_DEFAULT, default_acl);
                if (err)
                        goto out_cleanup;
        }
@@ -532,20 +535,20 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
                if (err)
                        goto out_cleanup;
 
-               err = ovl_do_rename(wdir, newdentry, udir, upper,
+               err = ovl_do_rename(ofs, wdir, newdentry, udir, upper,
                                    RENAME_EXCHANGE);
                if (err)
                        goto out_cleanup;
 
-               ovl_cleanup(wdir, upper);
+               ovl_cleanup(ofs, wdir, upper);
        } else {
-               err = ovl_do_rename(wdir, newdentry, udir, upper, 0);
+               err = ovl_do_rename(ofs, wdir, newdentry, udir, upper, 0);
                if (err)
                        goto out_cleanup;
        }
        err = ovl_instantiate(dentry, inode, newdentry, hardlink);
        if (err) {
-               ovl_cleanup(udir, newdentry);
+               ovl_cleanup(ofs, udir, newdentry);
                dput(newdentry);
        }
 out_dput:
@@ -560,7 +563,7 @@ out:
        return err;
 
 out_cleanup:
-       ovl_cleanup(wdir, newdentry);
+       ovl_cleanup(ofs, wdir, newdentry);
        dput(newdentry);
        goto out_dput;
 }
@@ -767,8 +770,8 @@ static int ovl_remove_and_whiteout(struct dentry *dentry,
        if (err)
                goto out_dput;
 
-       upper = lookup_one_len(dentry->d_name.name, upperdir,
-                              dentry->d_name.len);
+       upper = ovl_lookup_upper(ofs, dentry->d_name.name, upperdir,
+                                dentry->d_name.len);
        err = PTR_ERR(upper);
        if (IS_ERR(upper))
                goto out_unlock;
@@ -800,6 +803,7 @@ out:
 static int ovl_remove_upper(struct dentry *dentry, bool is_dir,
                            struct list_head *list)
 {
+       struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
        struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
        struct inode *dir = upperdir->d_inode;
        struct dentry *upper;
@@ -814,8 +818,8 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir,
        }
 
        inode_lock_nested(dir, I_MUTEX_PARENT);
-       upper = lookup_one_len(dentry->d_name.name, upperdir,
-                              dentry->d_name.len);
+       upper = ovl_lookup_upper(ofs, dentry->d_name.name, upperdir,
+                                dentry->d_name.len);
        err = PTR_ERR(upper);
        if (IS_ERR(upper))
                goto out_unlock;
@@ -826,9 +830,9 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir,
                goto out_dput_upper;
 
        if (is_dir)
-               err = vfs_rmdir(&init_user_ns, dir, upper);
+               err = ovl_do_rmdir(ofs, dir, upper);
        else
-               err = vfs_unlink(&init_user_ns, dir, upper, NULL);
+               err = ovl_do_unlink(ofs, dir, upper);
        ovl_dir_modified(dentry->d_parent, ovl_type_origin(dentry));
 
        /*
@@ -880,7 +884,6 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
 {
        int err;
        const struct cred *old_cred;
-       struct dentry *upperdentry;
        bool lower_positive = ovl_lower_positive(dentry);
        LIST_HEAD(list);
 
@@ -923,9 +926,8 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
         * Note: we fail to update ctime if there was no copy-up, only a
         * whiteout
         */
-       upperdentry = ovl_dentry_upper(dentry);
-       if (upperdentry)
-               ovl_copyattr(d_inode(upperdentry), d_inode(dentry));
+       if (ovl_dentry_upper(dentry))
+               ovl_copyattr(d_inode(dentry));
 
 out_drop_write:
        ovl_drop_write(dentry);
@@ -1095,6 +1097,7 @@ static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir,
        bool samedir = olddir == newdir;
        struct dentry *opaquedir = NULL;
        const struct cred *old_cred = NULL;
+       struct ovl_fs *ofs = OVL_FS(old->d_sb);
        LIST_HEAD(list);
 
        err = -EINVAL;
@@ -1189,8 +1192,8 @@ static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir,
 
        trap = lock_rename(new_upperdir, old_upperdir);
 
-       olddentry = lookup_one_len(old->d_name.name, old_upperdir,
-                                  old->d_name.len);
+       olddentry = ovl_lookup_upper(ofs, old->d_name.name, old_upperdir,
+                                    old->d_name.len);
        err = PTR_ERR(olddentry);
        if (IS_ERR(olddentry))
                goto out_unlock;
@@ -1199,8 +1202,8 @@ static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir,
        if (!ovl_matches_upper(old, olddentry))
                goto out_dput_old;
 
-       newdentry = lookup_one_len(new->d_name.name, new_upperdir,
-                                  new->d_name.len);
+       newdentry = ovl_lookup_upper(ofs, new->d_name.name, new_upperdir,
+                                    new->d_name.len);
        err = PTR_ERR(newdentry);
        if (IS_ERR(newdentry))
                goto out_dput_old;
@@ -1251,13 +1254,13 @@ static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir,
        if (err)
                goto out_dput;
 
-       err = ovl_do_rename(old_upperdir->d_inode, olddentry,
+       err = ovl_do_rename(ofs, old_upperdir->d_inode, olddentry,
                            new_upperdir->d_inode, newdentry, flags);
        if (err)
                goto out_dput;
 
        if (cleanup_whiteout)
-               ovl_cleanup(old_upperdir->d_inode, newdentry);
+               ovl_cleanup(ofs, old_upperdir->d_inode, newdentry);
 
        if (overwrite && d_inode(new)) {
                if (new_is_dir)
@@ -1272,9 +1275,9 @@ static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir,
                         (d_inode(new) && ovl_type_origin(new)));
 
        /* copy ctime: */
-       ovl_copyattr(d_inode(olddentry), d_inode(old));
+       ovl_copyattr(d_inode(old));
        if (d_inode(new) && ovl_dentry_upper(new))
-               ovl_copyattr(d_inode(newdentry), d_inode(new));
+               ovl_copyattr(d_inode(new));
 
 out_dput:
        dput(newdentry);
index ebde05c..2eada97 100644 (file)
@@ -391,6 +391,11 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
         * pointer because we hold no lock on the real dentry.
         */
        take_dentry_name_snapshot(&name, real);
+       /*
+        * No mnt_userns handling here: it's an internal lookup.  Could skip
+        * permission checking altogether, but for now just use non-mnt_userns
+        * transformed ids.
+        */
        this = lookup_one_len(name.name.name, connected, name.name.len);
        release_dentry_name_snapshot(&name);
        err = PTR_ERR(this);
index 9d69b4d..daff601 100644 (file)
@@ -38,9 +38,11 @@ static char ovl_whatisit(struct inode *inode, struct inode *realinode)
 #define OVL_OPEN_FLAGS (O_NOATIME | FMODE_NONOTIFY)
 
 static struct file *ovl_open_realfile(const struct file *file,
-                                     struct inode *realinode)
+                                     struct path *realpath)
 {
+       struct inode *realinode = d_inode(realpath->dentry);
        struct inode *inode = file_inode(file);
+       struct user_namespace *real_mnt_userns;
        struct file *realfile;
        const struct cred *old_cred;
        int flags = file->f_flags | OVL_OPEN_FLAGS;
@@ -51,11 +53,12 @@ static struct file *ovl_open_realfile(const struct file *file,
                acc_mode |= MAY_APPEND;
 
        old_cred = ovl_override_creds(inode->i_sb);
-       err = inode_permission(&init_user_ns, realinode, MAY_OPEN | acc_mode);
+       real_mnt_userns = mnt_user_ns(realpath->mnt);
+       err = inode_permission(real_mnt_userns, realinode, MAY_OPEN | acc_mode);
        if (err) {
                realfile = ERR_PTR(err);
        } else {
-               if (!inode_owner_or_capable(&init_user_ns, realinode))
+               if (!inode_owner_or_capable(real_mnt_userns, realinode))
                        flags &= ~O_NOATIME;
 
                realfile = open_with_fake_path(&file->f_path, flags, realinode,
@@ -101,21 +104,21 @@ static int ovl_change_flags(struct file *file, unsigned int flags)
 static int ovl_real_fdget_meta(const struct file *file, struct fd *real,
                               bool allow_meta)
 {
-       struct inode *inode = file_inode(file);
-       struct inode *realinode;
+       struct dentry *dentry = file_dentry(file);
+       struct path realpath;
 
        real->flags = 0;
        real->file = file->private_data;
 
        if (allow_meta)
-               realinode = ovl_inode_real(inode);
+               ovl_path_real(dentry, &realpath);
        else
-               realinode = ovl_inode_realdata(inode);
+               ovl_path_realdata(dentry, &realpath);
 
        /* Has it been copied up since we'd opened it? */
-       if (unlikely(file_inode(real->file) != realinode)) {
+       if (unlikely(file_inode(real->file) != d_inode(realpath.dentry))) {
                real->flags = FDPUT_FPUT;
-               real->file = ovl_open_realfile(file, realinode);
+               real->file = ovl_open_realfile(file, &realpath);
 
                return PTR_ERR_OR_ZERO(real->file);
        }
@@ -141,17 +144,20 @@ static int ovl_real_fdget(const struct file *file, struct fd *real)
 
 static int ovl_open(struct inode *inode, struct file *file)
 {
+       struct dentry *dentry = file_dentry(file);
        struct file *realfile;
+       struct path realpath;
        int err;
 
-       err = ovl_maybe_copy_up(file_dentry(file), file->f_flags);
+       err = ovl_maybe_copy_up(dentry, file->f_flags);
        if (err)
                return err;
 
        /* No longer need these flags, so don't pass them on to underlying fs */
        file->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
 
-       realfile = ovl_open_realfile(file, ovl_inode_realdata(inode));
+       ovl_path_realdata(dentry, &realpath);
+       realfile = ovl_open_realfile(file, &realpath);
        if (IS_ERR(realfile))
                return PTR_ERR(realfile);
 
@@ -270,7 +276,7 @@ static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
                __sb_writers_acquired(file_inode(iocb->ki_filp)->i_sb,
                                      SB_FREEZE_WRITE);
                file_end_write(iocb->ki_filp);
-               ovl_copyattr(ovl_inode_real(inode), inode);
+               ovl_copyattr(inode);
        }
 
        orig_iocb->ki_pos = iocb->ki_pos;
@@ -352,7 +358,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
 
        inode_lock(inode);
        /* Update mode */
-       ovl_copyattr(ovl_inode_real(inode), inode);
+       ovl_copyattr(inode);
        ret = file_remove_privs(file);
        if (ret)
                goto out_unlock;
@@ -376,7 +382,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
                                     ovl_iocb_to_rwf(ifl));
                file_end_write(real.file);
                /* Update size */
-               ovl_copyattr(ovl_inode_real(inode), inode);
+               ovl_copyattr(inode);
        } else {
                struct ovl_aio_req *aio_req;
 
@@ -426,12 +432,11 @@ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
        struct fd real;
        const struct cred *old_cred;
        struct inode *inode = file_inode(out);
-       struct inode *realinode = ovl_inode_real(inode);
        ssize_t ret;
 
        inode_lock(inode);
        /* Update mode */
-       ovl_copyattr(realinode, inode);
+       ovl_copyattr(inode);
        ret = file_remove_privs(out);
        if (ret)
                goto out_unlock;
@@ -447,7 +452,7 @@ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
 
        file_end_write(real.file);
        /* Update size */
-       ovl_copyattr(realinode, inode);
+       ovl_copyattr(inode);
        revert_creds(old_cred);
        fdput(real);
 
@@ -521,7 +526,7 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
        revert_creds(old_cred);
 
        /* Update size */
-       ovl_copyattr(ovl_inode_real(inode), inode);
+       ovl_copyattr(inode);
 
        fdput(real);
 
@@ -593,7 +598,7 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in,
        revert_creds(old_cred);
 
        /* Update size */
-       ovl_copyattr(ovl_inode_real(inode_out), inode_out);
+       ovl_copyattr(inode_out);
 
        fdput(real_in);
        fdput(real_out);
index 1f36158..492edde 100644 (file)
@@ -21,6 +21,7 @@ int ovl_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                struct iattr *attr)
 {
        int err;
+       struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
        bool full_copy_up = false;
        struct dentry *upperdentry;
        const struct cred *old_cred;
@@ -77,10 +78,10 @@ int ovl_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 
                inode_lock(upperdentry->d_inode);
                old_cred = ovl_override_creds(dentry->d_sb);
-               err = notify_change(&init_user_ns, upperdentry, attr, NULL);
+               err = ovl_do_notify_change(ofs, upperdentry, attr);
                revert_creds(old_cred);
                if (!err)
-                       ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
+                       ovl_copyattr(dentry->d_inode);
                inode_unlock(upperdentry->d_inode);
 
                if (winode)
@@ -279,12 +280,14 @@ int ovl_permission(struct user_namespace *mnt_userns,
                   struct inode *inode, int mask)
 {
        struct inode *upperinode = ovl_inode_upper(inode);
-       struct inode *realinode = upperinode ?: ovl_inode_lower(inode);
+       struct inode *realinode;
+       struct path realpath;
        const struct cred *old_cred;
        int err;
 
        /* Careful in RCU walk mode */
-       if (!realinode) {
+       ovl_i_path_real(inode, &realpath);
+       if (!realpath.dentry) {
                WARN_ON(!(mask & MAY_NOT_BLOCK));
                return -ECHILD;
        }
@@ -297,6 +300,7 @@ int ovl_permission(struct user_namespace *mnt_userns,
        if (err)
                return err;
 
+       realinode = d_inode(realpath.dentry);
        old_cred = ovl_override_creds(inode->i_sb);
        if (!upperinode &&
            !special_file(realinode->i_mode) && mask & MAY_WRITE) {
@@ -304,7 +308,7 @@ int ovl_permission(struct user_namespace *mnt_userns,
                /* Make sure mounter can read file for copy up later */
                mask |= MAY_READ;
        }
-       err = inode_permission(&init_user_ns, realinode, mask);
+       err = inode_permission(mnt_user_ns(realpath.mnt), realinode, mask);
        revert_creds(old_cred);
 
        return err;
@@ -342,8 +346,10 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
                  const void *value, size_t size, int flags)
 {
        int err;
+       struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
        struct dentry *upperdentry = ovl_i_dentry_upper(inode);
        struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry);
+       struct path realpath;
        const struct cred *old_cred;
 
        err = ovl_want_write(dentry);
@@ -351,8 +357,9 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
                goto out;
 
        if (!value && !upperdentry) {
+               ovl_path_lower(dentry, &realpath);
                old_cred = ovl_override_creds(dentry->d_sb);
-               err = vfs_getxattr(&init_user_ns, realdentry, name, NULL, 0);
+               err = vfs_getxattr(mnt_user_ns(realpath.mnt), realdentry, name, NULL, 0);
                revert_creds(old_cred);
                if (err < 0)
                        goto out_drop_write;
@@ -367,17 +374,17 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
        }
 
        old_cred = ovl_override_creds(dentry->d_sb);
-       if (value)
-               err = vfs_setxattr(&init_user_ns, realdentry, name, value, size,
-                                  flags);
-       else {
+       if (value) {
+               err = ovl_do_setxattr(ofs, realdentry, name, value, size,
+                                     flags);
+       } else {
                WARN_ON(flags != XATTR_REPLACE);
-               err = vfs_removexattr(&init_user_ns, realdentry, name);
+               err = ovl_do_removexattr(ofs, realdentry, name);
        }
        revert_creds(old_cred);
 
        /* copy c/mtime */
-       ovl_copyattr(d_inode(realdentry), inode);
+       ovl_copyattr(inode);
 
 out_drop_write:
        ovl_drop_write(dentry);
@@ -390,11 +397,11 @@ int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
 {
        ssize_t res;
        const struct cred *old_cred;
-       struct dentry *realdentry =
-               ovl_i_dentry_upper(inode) ?: ovl_dentry_lower(dentry);
+       struct path realpath;
 
+       ovl_i_path_real(inode, &realpath);
        old_cred = ovl_override_creds(dentry->d_sb);
-       res = vfs_getxattr(&init_user_ns, realdentry, name, value, size);
+       res = vfs_getxattr(mnt_user_ns(realpath.mnt), realpath.dentry, name, value, size);
        revert_creds(old_cred);
        return res;
 }
@@ -535,7 +542,7 @@ int ovl_real_fileattr_set(struct path *realpath, struct fileattr *fa)
        if (err)
                return err;
 
-       return vfs_fileattr_set(&init_user_ns, realpath->dentry, fa);
+       return vfs_fileattr_set(mnt_user_ns(realpath->mnt), realpath->dentry, fa);
 }
 
 int ovl_fileattr_set(struct user_namespace *mnt_userns,
@@ -579,7 +586,7 @@ int ovl_fileattr_set(struct user_namespace *mnt_userns,
                inode_set_flags(inode, flags, OVL_COPY_I_FLAGS_MASK);
 
                /* Update ctime */
-               ovl_copyattr(ovl_inode_real(inode), inode);
+               ovl_copyattr(inode);
        }
        ovl_drop_write(dentry);
 out:
@@ -777,16 +784,19 @@ void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip,
                    unsigned long ino, int fsid)
 {
        struct inode *realinode;
+       struct ovl_inode *oi = OVL_I(inode);
 
        if (oip->upperdentry)
-               OVL_I(inode)->__upperdentry = oip->upperdentry;
-       if (oip->lowerpath && oip->lowerpath->dentry)
-               OVL_I(inode)->lower = igrab(d_inode(oip->lowerpath->dentry));
+               oi->__upperdentry = oip->upperdentry;
+       if (oip->lowerpath && oip->lowerpath->dentry) {
+               oi->lowerpath.dentry = dget(oip->lowerpath->dentry);
+               oi->lowerpath.layer = oip->lowerpath->layer;
+       }
        if (oip->lowerdata)
-               OVL_I(inode)->lowerdata = igrab(d_inode(oip->lowerdata));
+               oi->lowerdata = igrab(d_inode(oip->lowerdata));
 
        realinode = ovl_inode_real(inode);
-       ovl_copyattr(realinode, inode);
+       ovl_copyattr(inode);
        ovl_copyflags(realinode, inode);
        ovl_map_ino(inode, ino, fsid);
 }
@@ -871,8 +881,8 @@ static int ovl_set_nlink_common(struct dentry *dentry,
        if (WARN_ON(len >= sizeof(buf)))
                return -EIO;
 
-       return ovl_do_setxattr(OVL_FS(inode->i_sb), ovl_dentry_upper(dentry),
-                              OVL_XATTR_NLINK, buf, len);
+       return ovl_setxattr(OVL_FS(inode->i_sb), ovl_dentry_upper(dentry),
+                           OVL_XATTR_NLINK, buf, len);
 }
 
 int ovl_set_nlink_upper(struct dentry *dentry)
@@ -897,8 +907,8 @@ unsigned int ovl_get_nlink(struct ovl_fs *ofs, struct dentry *lowerdentry,
        if (!lowerdentry || !upperdentry || d_inode(lowerdentry)->i_nlink == 1)
                return fallback;
 
-       err = ovl_do_getxattr(ofs, upperdentry, OVL_XATTR_NLINK,
-                             &buf, sizeof(buf) - 1);
+       err = ovl_getxattr_upper(ofs, upperdentry, OVL_XATTR_NLINK,
+                                &buf, sizeof(buf) - 1);
        if (err < 0)
                goto fail;
 
@@ -1102,6 +1112,10 @@ struct inode *ovl_get_inode(struct super_block *sb,
        struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
        struct inode *inode;
        struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL;
+       struct path realpath = {
+               .dentry = upperdentry ?: lowerdentry,
+               .mnt = upperdentry ? ovl_upper_mnt(ofs) : lowerpath->layer->mnt,
+       };
        bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry,
                                        oip->index);
        int fsid = bylower ? lowerpath->layer->fsid : 0;
@@ -1175,7 +1189,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
        /* Check for non-merge dir that may have whiteouts */
        if (is_dir) {
                if (((upperdentry && lowerdentry) || oip->numlower > 1) ||
-                   ovl_check_origin_xattr(ofs, upperdentry ?: lowerdentry)) {
+                   ovl_path_check_origin_xattr(ofs, &realpath)) {
                        ovl_set_flag(OVL_WHITEOUTS, inode);
                }
        }
index 1a9b515..65c4346 100644 (file)
@@ -16,6 +16,7 @@
 
 struct ovl_lookup_data {
        struct super_block *sb;
+       struct vfsmount *mnt;
        struct qstr name;
        bool is_dir;
        bool opaque;
@@ -25,14 +26,14 @@ struct ovl_lookup_data {
        bool metacopy;
 };
 
-static int ovl_check_redirect(struct dentry *dentry, struct ovl_lookup_data *d,
+static int ovl_check_redirect(struct path *path, struct ovl_lookup_data *d,
                              size_t prelen, const char *post)
 {
        int res;
        char *buf;
        struct ovl_fs *ofs = OVL_FS(d->sb);
 
-       buf = ovl_get_redirect_xattr(ofs, dentry, prelen + strlen(post));
+       buf = ovl_get_redirect_xattr(ofs, path, prelen + strlen(post));
        if (IS_ERR_OR_NULL(buf))
                return PTR_ERR(buf);
 
@@ -105,13 +106,13 @@ int ovl_check_fb_len(struct ovl_fb *fb, int fb_len)
        return 0;
 }
 
-static struct ovl_fh *ovl_get_fh(struct ovl_fs *ofs, struct dentry *dentry,
+static struct ovl_fh *ovl_get_fh(struct ovl_fs *ofs, struct dentry *upperdentry,
                                 enum ovl_xattr ox)
 {
        int res, err;
        struct ovl_fh *fh = NULL;
 
-       res = ovl_do_getxattr(ofs, dentry, ox, NULL, 0);
+       res = ovl_getxattr_upper(ofs, upperdentry, ox, NULL, 0);
        if (res < 0) {
                if (res == -ENODATA || res == -EOPNOTSUPP)
                        return NULL;
@@ -125,7 +126,7 @@ static struct ovl_fh *ovl_get_fh(struct ovl_fs *ofs, struct dentry *dentry,
        if (!fh)
                return ERR_PTR(-ENOMEM);
 
-       res = ovl_do_getxattr(ofs, dentry, ox, fh->buf, res);
+       res = ovl_getxattr_upper(ofs, upperdentry, ox, fh->buf, res);
        if (res < 0)
                goto fail;
 
@@ -193,16 +194,17 @@ struct dentry *ovl_decode_real_fh(struct ovl_fs *ofs, struct ovl_fh *fh,
        return real;
 }
 
-static bool ovl_is_opaquedir(struct super_block *sb, struct dentry *dentry)
+static bool ovl_is_opaquedir(struct ovl_fs *ofs, struct path *path)
 {
-       return ovl_check_dir_xattr(sb, dentry, OVL_XATTR_OPAQUE);
+       return ovl_path_check_dir_xattr(ofs, path, OVL_XATTR_OPAQUE);
 }
 
-static struct dentry *ovl_lookup_positive_unlocked(const char *name,
+static struct dentry *ovl_lookup_positive_unlocked(struct ovl_lookup_data *d,
+                                                  const char *name,
                                                   struct dentry *base, int len,
                                                   bool drop_negative)
 {
-       struct dentry *ret = lookup_one_len_unlocked(name, base, len);
+       struct dentry *ret = lookup_one_unlocked(mnt_user_ns(d->mnt), name, base, len);
 
        if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
                if (drop_negative && ret->d_lockref.count == 1) {
@@ -224,10 +226,11 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
                             struct dentry **ret, bool drop_negative)
 {
        struct dentry *this;
+       struct path path;
        int err;
        bool last_element = !post[0];
 
-       this = ovl_lookup_positive_unlocked(name, base, namelen, drop_negative);
+       this = ovl_lookup_positive_unlocked(d, name, base, namelen, drop_negative);
        if (IS_ERR(this)) {
                err = PTR_ERR(this);
                this = NULL;
@@ -253,12 +256,15 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
                d->stop = true;
                goto put_and_out;
        }
+
+       path.dentry = this;
+       path.mnt = d->mnt;
        if (!d_can_lookup(this)) {
                if (d->is_dir || !last_element) {
                        d->stop = true;
                        goto put_and_out;
                }
-               err = ovl_check_metacopy_xattr(OVL_FS(d->sb), this);
+               err = ovl_check_metacopy_xattr(OVL_FS(d->sb), &path);
                if (err < 0)
                        goto out_err;
 
@@ -278,14 +284,14 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
                if (d->last)
                        goto out;
 
-               if (ovl_is_opaquedir(d->sb, this)) {
+               if (ovl_is_opaquedir(OVL_FS(d->sb), &path)) {
                        d->stop = true;
                        if (last_element)
                                d->opaque = true;
                        goto out;
                }
        }
-       err = ovl_check_redirect(this, d, prelen, post);
+       err = ovl_check_redirect(&path, d, prelen, post);
        if (err)
                goto out_err;
 out:
@@ -464,7 +470,7 @@ int ovl_verify_set_fh(struct ovl_fs *ofs, struct dentry *dentry,
 
        err = ovl_verify_fh(ofs, dentry, ox, fh);
        if (set && err == -ENODATA)
-               err = ovl_do_setxattr(ofs, dentry, ox, fh->buf, fh->fb.len);
+               err = ovl_setxattr(ofs, dentry, ox, fh->buf, fh->fb.len);
        if (err)
                goto fail;
 
@@ -704,7 +710,8 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
        if (err)
                return ERR_PTR(err);
 
-       index = lookup_positive_unlocked(name.name, ofs->indexdir, name.len);
+       index = lookup_one_positive_unlocked(ovl_upper_mnt_userns(ofs), name.name,
+                                            ofs->indexdir, name.len);
        if (IS_ERR(index)) {
                err = PTR_ERR(index);
                if (err == -ENOENT) {
@@ -856,6 +863,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
        old_cred = ovl_override_creds(dentry->d_sb);
        upperdir = ovl_dentry_upper(dentry->d_parent);
        if (upperdir) {
+               d.mnt = ovl_upper_mnt(ofs);
                err = ovl_lookup_layer(upperdir, &d, &upperdentry, true);
                if (err)
                        goto out;
@@ -911,6 +919,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                else
                        d.last = lower.layer->idx == roe->numlower;
 
+               d.mnt = lower.layer->mnt;
                err = ovl_lookup_layer(lower.dentry, &d, &this, false);
                if (err)
                        goto out_put;
@@ -1071,14 +1080,18 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
        if (upperdentry)
                ovl_dentry_set_upper_alias(dentry);
        else if (index) {
-               upperdentry = dget(index);
-               upperredirect = ovl_get_redirect_xattr(ofs, upperdentry, 0);
+               struct path upperpath = {
+                       .dentry = upperdentry = dget(index),
+                       .mnt = ovl_upper_mnt(ofs),
+               };
+
+               upperredirect = ovl_get_redirect_xattr(ofs, &upperpath, 0);
                if (IS_ERR(upperredirect)) {
                        err = PTR_ERR(upperredirect);
                        upperredirect = NULL;
                        goto out_free_oe;
                }
-               err = ovl_check_metacopy_xattr(ofs, upperdentry);
+               err = ovl_check_metacopy_xattr(ofs, &upperpath);
                if (err < 0)
                        goto out_free_oe;
                uppermetacopy = err;
@@ -1163,8 +1176,8 @@ bool ovl_lower_positive(struct dentry *dentry)
                struct dentry *this;
                struct dentry *lowerdir = poe->lowerstack[i].dentry;
 
-               this = lookup_positive_unlocked(name->name, lowerdir,
-                                              name->len);
+               this = lookup_one_positive_unlocked(mnt_user_ns(poe->lowerstack[i].layer->mnt),
+                                                  name->name, lowerdir, name->len);
                if (IS_ERR(this)) {
                        switch (PTR_ERR(this)) {
                        case -ENOENT:
index 2cd5741..4f34b7e 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/kernel.h>
 #include <linux/uuid.h>
 #include <linux/fs.h>
+#include <linux/namei.h>
 #include "ovl_entry.h"
 
 #undef pr_fmt
@@ -122,109 +123,180 @@ static inline const char *ovl_xattr(struct ovl_fs *ofs, enum ovl_xattr ox)
        return ovl_xattr_table[ox][ofs->config.userxattr];
 }
 
-static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
+/*
+ * When changing ownership of an upper object map the intended ownership
+ * according to the upper layer's idmapping. When an upper mount idmaps files
+ * that are stored on-disk as owned by id 1001 to id 1000 this means stat on
+ * this object will report it as being owned by id 1000 when calling stat via
+ * the upper mount.
+ * In order to change ownership of an object so stat reports id 1000 when
+ * called on an idmapped upper mount the value written to disk - i.e., the
+ * value stored in ia_*id - must be 1001. The mount mapping helper will thus take
+ * care to map 1000 to 1001.
+ * The mnt idmapping helpers are nops if the upper layer isn't idmapped.
+ */
+static inline int ovl_do_notify_change(struct ovl_fs *ofs,
+                                      struct dentry *upperdentry,
+                                      struct iattr *attr)
+{
+       struct user_namespace *upper_mnt_userns = ovl_upper_mnt_userns(ofs);
+       struct user_namespace *fs_userns = i_user_ns(d_inode(upperdentry));
+
+       if (attr->ia_valid & ATTR_UID)
+               attr->ia_uid = mapped_kuid_user(upper_mnt_userns,
+                                               fs_userns, attr->ia_uid);
+       if (attr->ia_valid & ATTR_GID)
+               attr->ia_gid = mapped_kgid_user(upper_mnt_userns,
+                                               fs_userns, attr->ia_gid);
+
+       return notify_change(upper_mnt_userns, upperdentry, attr, NULL);
+}
+
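As a minimal standalone sketch of the mapping described in the comment above, assuming a single 1000<->1001 idmapping; struct one_idmap and map_to_disk() are hypothetical stand-ins for what mapped_kuid_user()/mapped_kgid_user() do for the upper layer here, and are not part of the patch:

/* Illustrative sketch only: one-entry idmapping, nop when no idmapping is set. */
#include <assert.h>

struct one_idmap { unsigned int mnt_id; unsigned int disk_id; };

/* id as seen through the idmapped mount -> id that must be written to disk */
static unsigned int map_to_disk(const struct one_idmap *m, unsigned int id)
{
	return (m && id == m->mnt_id) ? m->disk_id : id;
}

int main(void)
{
	struct one_idmap upper = { .mnt_id = 1000, .disk_id = 1001 };

	/* chown to 1000 via the idmapped upper mount stores 1001 on disk */
	assert(map_to_disk(&upper, 1000) == 1001);
	/* with no idmapping the helper is a nop */
	assert(map_to_disk(0, 1000) == 1000);
	return 0;
}
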
+static inline int ovl_do_rmdir(struct ovl_fs *ofs,
+                              struct inode *dir, struct dentry *dentry)
 {
-       int err = vfs_rmdir(&init_user_ns, dir, dentry);
+       int err = vfs_rmdir(ovl_upper_mnt_userns(ofs), dir, dentry);
 
        pr_debug("rmdir(%pd2) = %i\n", dentry, err);
        return err;
 }
 
-static inline int ovl_do_unlink(struct inode *dir, struct dentry *dentry)
+static inline int ovl_do_unlink(struct ovl_fs *ofs, struct inode *dir,
+                               struct dentry *dentry)
 {
-       int err = vfs_unlink(&init_user_ns, dir, dentry, NULL);
+       int err = vfs_unlink(ovl_upper_mnt_userns(ofs), dir, dentry, NULL);
 
        pr_debug("unlink(%pd2) = %i\n", dentry, err);
        return err;
 }
 
-static inline int ovl_do_link(struct dentry *old_dentry, struct inode *dir,
-                             struct dentry *new_dentry)
+static inline int ovl_do_link(struct ovl_fs *ofs, struct dentry *old_dentry,
+                             struct inode *dir, struct dentry *new_dentry)
 {
-       int err = vfs_link(old_dentry, &init_user_ns, dir, new_dentry, NULL);
+       int err = vfs_link(old_dentry, ovl_upper_mnt_userns(ofs), dir, new_dentry, NULL);
 
        pr_debug("link(%pd2, %pd2) = %i\n", old_dentry, new_dentry, err);
        return err;
 }
 
-static inline int ovl_do_create(struct inode *dir, struct dentry *dentry,
+static inline int ovl_do_create(struct ovl_fs *ofs,
+                               struct inode *dir, struct dentry *dentry,
                                umode_t mode)
 {
-       int err = vfs_create(&init_user_ns, dir, dentry, mode, true);
+       int err = vfs_create(ovl_upper_mnt_userns(ofs), dir, dentry, mode, true);
 
        pr_debug("create(%pd2, 0%o) = %i\n", dentry, mode, err);
        return err;
 }
 
-static inline int ovl_do_mkdir(struct inode *dir, struct dentry *dentry,
+static inline int ovl_do_mkdir(struct ovl_fs *ofs,
+                              struct inode *dir, struct dentry *dentry,
                               umode_t mode)
 {
-       int err = vfs_mkdir(&init_user_ns, dir, dentry, mode);
+       int err = vfs_mkdir(ovl_upper_mnt_userns(ofs), dir, dentry, mode);
        pr_debug("mkdir(%pd2, 0%o) = %i\n", dentry, mode, err);
        return err;
 }
 
-static inline int ovl_do_mknod(struct inode *dir, struct dentry *dentry,
+static inline int ovl_do_mknod(struct ovl_fs *ofs,
+                              struct inode *dir, struct dentry *dentry,
                               umode_t mode, dev_t dev)
 {
-       int err = vfs_mknod(&init_user_ns, dir, dentry, mode, dev);
+       int err = vfs_mknod(ovl_upper_mnt_userns(ofs), dir, dentry, mode, dev);
 
        pr_debug("mknod(%pd2, 0%o, 0%o) = %i\n", dentry, mode, dev, err);
        return err;
 }
 
-static inline int ovl_do_symlink(struct inode *dir, struct dentry *dentry,
+static inline int ovl_do_symlink(struct ovl_fs *ofs,
+                                struct inode *dir, struct dentry *dentry,
                                 const char *oldname)
 {
-       int err = vfs_symlink(&init_user_ns, dir, dentry, oldname);
+       int err = vfs_symlink(ovl_upper_mnt_userns(ofs), dir, dentry, oldname);
 
        pr_debug("symlink(\"%s\", %pd2) = %i\n", oldname, dentry, err);
        return err;
 }
 
-static inline ssize_t ovl_do_getxattr(struct ovl_fs *ofs, struct dentry *dentry,
-                                     enum ovl_xattr ox, void *value,
-                                     size_t size)
+static inline ssize_t ovl_do_getxattr(struct path *path, const char *name,
+                                     void *value, size_t size)
 {
-       const char *name = ovl_xattr(ofs, ox);
-       int err = vfs_getxattr(&init_user_ns, dentry, name, value, size);
-       int len = (value && err > 0) ? err : 0;
+       int err, len;
+
+       WARN_ON(path->dentry->d_sb != path->mnt->mnt_sb);
+
+       err = vfs_getxattr(mnt_user_ns(path->mnt), path->dentry,
+                              name, value, size);
+       len = (value && err > 0) ? err : 0;
 
        pr_debug("getxattr(%pd2, \"%s\", \"%*pE\", %zu, 0) = %i\n",
-                dentry, name, min(len, 48), value, size, err);
+                path->dentry, name, min(len, 48), value, size, err);
        return err;
 }
 
+static inline ssize_t ovl_getxattr_upper(struct ovl_fs *ofs,
+                                        struct dentry *upperdentry,
+                                        enum ovl_xattr ox, void *value,
+                                        size_t size)
+{
+       struct path upperpath = {
+               .dentry = upperdentry,
+               .mnt = ovl_upper_mnt(ofs),
+       };
+
+       return ovl_do_getxattr(&upperpath, ovl_xattr(ofs, ox), value, size);
+}
+
+static inline ssize_t ovl_path_getxattr(struct ovl_fs *ofs,
+                                        struct path *path,
+                                        enum ovl_xattr ox, void *value,
+                                        size_t size)
+{
+       return ovl_do_getxattr(path, ovl_xattr(ofs, ox), value, size);
+}
+
 static inline int ovl_do_setxattr(struct ovl_fs *ofs, struct dentry *dentry,
-                                 enum ovl_xattr ox, const void *value,
-                                 size_t size)
+                                 const char *name, const void *value,
+                                 size_t size, int flags)
 {
-       const char *name = ovl_xattr(ofs, ox);
-       int err = vfs_setxattr(&init_user_ns, dentry, name, value, size, 0);
-       pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, 0) = %i\n",
-                dentry, name, min((int)size, 48), value, size, err);
+       int err = vfs_setxattr(ovl_upper_mnt_userns(ofs), dentry, name, value, size, flags);
+
+       pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, %d) = %i\n",
+                dentry, name, min((int)size, 48), value, size, flags, err);
        return err;
 }
 
+static inline int ovl_setxattr(struct ovl_fs *ofs, struct dentry *dentry,
+                              enum ovl_xattr ox, const void *value,
+                              size_t size)
+{
+       return ovl_do_setxattr(ofs, dentry, ovl_xattr(ofs, ox), value, size, 0);
+}
+
 static inline int ovl_do_removexattr(struct ovl_fs *ofs, struct dentry *dentry,
-                                    enum ovl_xattr ox)
+                                    const char *name)
 {
-       const char *name = ovl_xattr(ofs, ox);
-       int err = vfs_removexattr(&init_user_ns, dentry, name);
+       int err = vfs_removexattr(ovl_upper_mnt_userns(ofs), dentry, name);
        pr_debug("removexattr(%pd2, \"%s\") = %i\n", dentry, name, err);
        return err;
 }
 
-static inline int ovl_do_rename(struct inode *olddir, struct dentry *olddentry,
-                               struct inode *newdir, struct dentry *newdentry,
-                               unsigned int flags)
+static inline int ovl_removexattr(struct ovl_fs *ofs, struct dentry *dentry,
+                                 enum ovl_xattr ox)
+{
+       return ovl_do_removexattr(ofs, dentry, ovl_xattr(ofs, ox));
+}
+
+static inline int ovl_do_rename(struct ovl_fs *ofs, struct inode *olddir,
+                               struct dentry *olddentry, struct inode *newdir,
+                               struct dentry *newdentry, unsigned int flags)
 {
        int err;
        struct renamedata rd = {
-               .old_mnt_userns = &init_user_ns,
+               .old_mnt_userns = ovl_upper_mnt_userns(ofs),
                .old_dir        = olddir,
                .old_dentry     = olddentry,
-               .new_mnt_userns = &init_user_ns,
+               .new_mnt_userns = ovl_upper_mnt_userns(ofs),
                .new_dir        = newdir,
                .new_dentry     = newdentry,
                .flags          = flags,
@@ -239,22 +311,31 @@ static inline int ovl_do_rename(struct inode *olddir, struct dentry *olddentry,
        return err;
 }
 
-static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry)
+static inline int ovl_do_whiteout(struct ovl_fs *ofs,
+                                 struct inode *dir, struct dentry *dentry)
 {
-       int err = vfs_whiteout(&init_user_ns, dir, dentry);
+       int err = vfs_whiteout(ovl_upper_mnt_userns(ofs), dir, dentry);
        pr_debug("whiteout(%pd2) = %i\n", dentry, err);
        return err;
 }
 
-static inline struct dentry *ovl_do_tmpfile(struct dentry *dentry, umode_t mode)
+static inline struct dentry *ovl_do_tmpfile(struct ovl_fs *ofs,
+                                           struct dentry *dentry, umode_t mode)
 {
-       struct dentry *ret = vfs_tmpfile(&init_user_ns, dentry, mode, 0);
+       struct dentry *ret = vfs_tmpfile(ovl_upper_mnt_userns(ofs), dentry, mode, 0);
        int err = PTR_ERR_OR_ZERO(ret);
 
        pr_debug("tmpfile(%pd2, 0%o) = %i\n", dentry, mode, err);
        return ret;
 }
 
+static inline struct dentry *ovl_lookup_upper(struct ovl_fs *ofs,
+                                             const char *name,
+                                             struct dentry *base, int len)
+{
+       return lookup_one(ovl_upper_mnt_userns(ofs), name, base, len);
+}
+
 static inline bool ovl_open_flags_need_copy_up(int flags)
 {
        if (!flags)
@@ -293,10 +374,13 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry);
 void ovl_path_upper(struct dentry *dentry, struct path *path);
 void ovl_path_lower(struct dentry *dentry, struct path *path);
 void ovl_path_lowerdata(struct dentry *dentry, struct path *path);
+void ovl_i_path_real(struct inode *inode, struct path *path);
 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
+enum ovl_path_type ovl_path_realdata(struct dentry *dentry, struct path *path);
 struct dentry *ovl_dentry_upper(struct dentry *dentry);
 struct dentry *ovl_dentry_lower(struct dentry *dentry);
 struct dentry *ovl_dentry_lowerdata(struct dentry *dentry);
+const struct ovl_layer *ovl_i_layer_lower(struct inode *inode);
 const struct ovl_layer *ovl_layer_lower(struct dentry *dentry);
 struct dentry *ovl_dentry_real(struct dentry *dentry);
 struct dentry *ovl_i_dentry_upper(struct inode *inode);
@@ -330,9 +414,20 @@ struct file *ovl_path_open(struct path *path, int flags);
 int ovl_copy_up_start(struct dentry *dentry, int flags);
 void ovl_copy_up_end(struct dentry *dentry);
 bool ovl_already_copied_up(struct dentry *dentry, int flags);
-bool ovl_check_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry);
-bool ovl_check_dir_xattr(struct super_block *sb, struct dentry *dentry,
-                        enum ovl_xattr ox);
+bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, struct path *path,
+                             enum ovl_xattr ox);
+bool ovl_path_check_origin_xattr(struct ovl_fs *ofs, struct path *path);
+
+static inline bool ovl_check_origin_xattr(struct ovl_fs *ofs,
+                                         struct dentry *upperdentry)
+{
+       struct path upperpath = {
+               .dentry = upperdentry,
+               .mnt = ovl_upper_mnt(ofs),
+       };
+       return ovl_path_check_origin_xattr(ofs, &upperpath);
+}
+
 int ovl_check_setxattr(struct ovl_fs *ofs, struct dentry *upperdentry,
                       enum ovl_xattr ox, const void *value, size_t size,
                       int xerr);
@@ -344,10 +439,9 @@ bool ovl_need_index(struct dentry *dentry);
 int ovl_nlink_start(struct dentry *dentry);
 void ovl_nlink_end(struct dentry *dentry);
 int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
-int ovl_check_metacopy_xattr(struct ovl_fs *ofs, struct dentry *dentry);
+int ovl_check_metacopy_xattr(struct ovl_fs *ofs, struct path *path);
 bool ovl_is_metacopy_dentry(struct dentry *dentry);
-char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
-                            int padding);
+char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct path *path, int padding);
 int ovl_sync_status(struct ovl_fs *ofs);
 
 static inline void ovl_set_flag(unsigned long flag, struct inode *inode)
@@ -366,9 +460,15 @@ static inline bool ovl_test_flag(unsigned long flag, struct inode *inode)
 }
 
 static inline bool ovl_is_impuredir(struct super_block *sb,
-                                   struct dentry *dentry)
+                                   struct dentry *upperdentry)
 {
-       return ovl_check_dir_xattr(sb, dentry, OVL_XATTR_IMPURE);
+       struct ovl_fs *ofs = OVL_FS(sb);
+       struct path upperpath = {
+               .dentry = upperdentry,
+               .mnt = ovl_upper_mnt(ofs),
+       };
+
+       return ovl_path_check_dir_xattr(ofs, &upperpath, OVL_XATTR_IMPURE);
 }
 
 /*
@@ -461,12 +561,13 @@ static inline int ovl_verify_upper(struct ovl_fs *ofs, struct dentry *index,
 extern const struct file_operations ovl_dir_operations;
 struct file *ovl_dir_real_file(const struct file *file, bool want_upper);
 int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list);
-void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list);
+void ovl_cleanup_whiteouts(struct ovl_fs *ofs, struct dentry *upper,
+                          struct list_head *list);
 void ovl_cache_free(struct list_head *list);
 void ovl_dir_cache_free(struct inode *inode);
 int ovl_check_d_type_supported(struct path *realpath);
-int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
-                       struct dentry *dentry, int level);
+int ovl_workdir_cleanup(struct ovl_fs *ofs, struct inode *dir,
+                       struct vfsmount *mnt, struct dentry *dentry, int level);
 int ovl_indexdir_cleanup(struct ovl_fs *ofs);
 
 /*
@@ -520,16 +621,7 @@ bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir);
 struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir);
 struct inode *ovl_get_inode(struct super_block *sb,
                            struct ovl_inode_params *oip);
-static inline void ovl_copyattr(struct inode *from, struct inode *to)
-{
-       to->i_uid = from->i_uid;
-       to->i_gid = from->i_gid;
-       to->i_mode = from->i_mode;
-       to->i_atime = from->i_atime;
-       to->i_mtime = from->i_mtime;
-       to->i_ctime = from->i_ctime;
-       i_size_write(to, i_size_read(from));
-}
+void ovl_copyattr(struct inode *to);
 
 /* vfs inode flags copied from real to ovl inode */
 #define OVL_COPY_I_FLAGS_MASK  (S_SYNC | S_NOATIME | S_APPEND | S_IMMUTABLE)
@@ -570,12 +662,15 @@ struct ovl_cattr {
 
 #define OVL_CATTR(m) (&(struct ovl_cattr) { .mode = (m) })
 
-int ovl_mkdir_real(struct inode *dir, struct dentry **newdentry, umode_t mode);
-struct dentry *ovl_create_real(struct inode *dir, struct dentry *newdentry,
+int ovl_mkdir_real(struct ovl_fs *ofs, struct inode *dir,
+                  struct dentry **newdentry, umode_t mode);
+struct dentry *ovl_create_real(struct ovl_fs *ofs,
+                              struct inode *dir, struct dentry *newdentry,
+                              struct ovl_cattr *attr);
+int ovl_cleanup(struct ovl_fs *ofs, struct inode *dir, struct dentry *dentry);
+struct dentry *ovl_lookup_temp(struct ovl_fs *ofs, struct dentry *workdir);
+struct dentry *ovl_create_temp(struct ovl_fs *ofs, struct dentry *workdir,
                               struct ovl_cattr *attr);
-int ovl_cleanup(struct inode *dir, struct dentry *dentry);
-struct dentry *ovl_lookup_temp(struct dentry *workdir);
-struct dentry *ovl_create_temp(struct dentry *workdir, struct ovl_cattr *attr);
 
 /* file.c */
 extern const struct file_operations ovl_file_operations;
@@ -591,9 +686,8 @@ int ovl_fileattr_set(struct user_namespace *mnt_userns,
 int ovl_copy_up(struct dentry *dentry);
 int ovl_copy_up_with_data(struct dentry *dentry);
 int ovl_maybe_copy_up(struct dentry *dentry, int flags);
-int ovl_copy_xattr(struct super_block *sb, struct dentry *old,
-                  struct dentry *new);
-int ovl_set_attr(struct dentry *upper, struct kstat *stat);
+int ovl_copy_xattr(struct super_block *sb, struct path *path, struct dentry *new);
+int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upper, struct kstat *stat);
 struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
                                  bool is_upper);
 int ovl_set_origin(struct ovl_fs *ofs, struct dentry *lower,
index 63efee5..e1af8f6 100644 (file)
@@ -90,6 +90,11 @@ static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs)
        return ofs->layers[0].mnt;
 }
 
+static inline struct user_namespace *ovl_upper_mnt_userns(struct ovl_fs *ofs)
+{
+       return mnt_user_ns(ovl_upper_mnt(ofs));
+}
+
 static inline struct ovl_fs *OVL_FS(struct super_block *sb)
 {
        return (struct ovl_fs *)sb->s_fs_info;
@@ -129,7 +134,7 @@ struct ovl_inode {
        unsigned long flags;
        struct inode vfs_inode;
        struct dentry *__upperdentry;
-       struct inode *lower;
+       struct ovl_path lowerpath;
 
        /* synchronize copy up and more */
        struct mutex lock;
index 150fdf3..78f62cc 100644 (file)
@@ -264,11 +264,11 @@ static int ovl_fill_merge(struct dir_context *ctx, const char *name,
                return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
 }
 
-static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
+static int ovl_check_whiteouts(struct path *path, struct ovl_readdir_data *rdd)
 {
        int err;
        struct ovl_cache_entry *p;
-       struct dentry *dentry;
+       struct dentry *dentry, *dir = path->dentry;
        const struct cred *old_cred;
 
        old_cred = ovl_override_creds(rdd->dentry->d_sb);
@@ -278,7 +278,7 @@ static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
                while (rdd->first_maybe_whiteout) {
                        p = rdd->first_maybe_whiteout;
                        rdd->first_maybe_whiteout = p->next_maybe_whiteout;
-                       dentry = lookup_one_len(p->name, dir, p->len);
+                       dentry = lookup_one(mnt_user_ns(path->mnt), p->name, dir, p->len);
                        if (!IS_ERR(dentry)) {
                                p->is_whiteout = ovl_is_whiteout(dentry);
                                dput(dentry);
@@ -312,7 +312,7 @@ static inline int ovl_dir_read(struct path *realpath,
        } while (!err && rdd->count);
 
        if (!err && rdd->first_maybe_whiteout && rdd->dentry)
-               err = ovl_check_whiteouts(realpath->dentry, rdd);
+               err = ovl_check_whiteouts(realpath, rdd);
 
        fput(realfile);
 
@@ -479,7 +479,7 @@ static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
                        goto get;
                }
        }
-       this = lookup_one_len(p->name, dir, p->len);
+       this = lookup_one(mnt_user_ns(path->mnt), p->name, dir, p->len);
        if (IS_ERR_OR_NULL(this) || !this->d_inode) {
                /* Mark a stale entry */
                p->is_whiteout = true;
@@ -623,8 +623,8 @@ static struct ovl_dir_cache *ovl_cache_get_impure(struct path *path)
                 * Removing the "impure" xattr is best effort.
                 */
                if (!ovl_want_write(dentry)) {
-                       ovl_do_removexattr(ofs, ovl_dentry_upper(dentry),
-                                          OVL_XATTR_IMPURE);
+                       ovl_removexattr(ofs, ovl_dentry_upper(dentry),
+                                       OVL_XATTR_IMPURE);
                        ovl_drop_write(dentry);
                }
                ovl_clear_flag(OVL_IMPURE, d_inode(dentry));
@@ -1001,7 +1001,8 @@ del_entry:
        return err;
 }
 
-void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
+void ovl_cleanup_whiteouts(struct ovl_fs *ofs, struct dentry *upper,
+                          struct list_head *list)
 {
        struct ovl_cache_entry *p;
 
@@ -1012,7 +1013,7 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
                if (WARN_ON(!p->is_whiteout || !p->is_upper))
                        continue;
 
-               dentry = lookup_one_len(p->name, upper, p->len);
+               dentry = ovl_lookup_upper(ofs, p->name, upper, p->len);
                if (IS_ERR(dentry)) {
                        pr_err("lookup '%s/%.*s' failed (%i)\n",
                               upper->d_name.name, p->len, p->name,
@@ -1020,7 +1021,7 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
                        continue;
                }
                if (dentry->d_inode)
-                       ovl_cleanup(upper->d_inode, dentry);
+                       ovl_cleanup(ofs, upper->d_inode, dentry);
                dput(dentry);
        }
        inode_unlock(upper->d_inode);
@@ -1064,7 +1065,8 @@ int ovl_check_d_type_supported(struct path *realpath)
 
 #define OVL_INCOMPATDIR_NAME "incompat"
 
-static int ovl_workdir_cleanup_recurse(struct path *path, int level)
+static int ovl_workdir_cleanup_recurse(struct ovl_fs *ofs, struct path *path,
+                                      int level)
 {
        int err;
        struct inode *dir = path->dentry->d_inode;
@@ -1111,11 +1113,11 @@ static int ovl_workdir_cleanup_recurse(struct path *path, int level)
                        err = -EINVAL;
                        break;
                }
-               dentry = lookup_one_len(p->name, path->dentry, p->len);
+               dentry = ovl_lookup_upper(ofs, p->name, path->dentry, p->len);
                if (IS_ERR(dentry))
                        continue;
                if (dentry->d_inode)
-                       err = ovl_workdir_cleanup(dir, path->mnt, dentry, level);
+                       err = ovl_workdir_cleanup(ofs, dir, path->mnt, dentry, level);
                dput(dentry);
                if (err)
                        break;
@@ -1126,24 +1128,24 @@ out:
        return err;
 }
 
-int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
-                        struct dentry *dentry, int level)
+int ovl_workdir_cleanup(struct ovl_fs *ofs, struct inode *dir,
+                       struct vfsmount *mnt, struct dentry *dentry, int level)
 {
        int err;
 
        if (!d_is_dir(dentry) || level > 1) {
-               return ovl_cleanup(dir, dentry);
+               return ovl_cleanup(ofs, dir, dentry);
        }
 
-       err = ovl_do_rmdir(dir, dentry);
+       err = ovl_do_rmdir(ofs, dir, dentry);
        if (err) {
                struct path path = { .mnt = mnt, .dentry = dentry };
 
                inode_unlock(dir);
-               err = ovl_workdir_cleanup_recurse(&path, level + 1);
+               err = ovl_workdir_cleanup_recurse(ofs, &path, level + 1);
                inode_lock_nested(dir, I_MUTEX_PARENT);
                if (!err)
-                       err = ovl_cleanup(dir, dentry);
+                       err = ovl_cleanup(ofs, dir, dentry);
        }
 
        return err;
@@ -1179,7 +1181,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs)
                        if (p->len == 2 && p->name[1] == '.')
                                continue;
                }
-               index = lookup_one_len(p->name, indexdir, p->len);
+               index = ovl_lookup_upper(ofs, p->name, indexdir, p->len);
                if (IS_ERR(index)) {
                        err = PTR_ERR(index);
                        index = NULL;
@@ -1187,7 +1189,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs)
                }
                /* Cleanup leftover from index create/cleanup attempt */
                if (index->d_name.name[0] == '#') {
-                       err = ovl_workdir_cleanup(dir, path.mnt, index, 1);
+                       err = ovl_workdir_cleanup(ofs, dir, path.mnt, index, 1);
                        if (err)
                                break;
                        goto next;
@@ -1197,7 +1199,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs)
                        goto next;
                } else if (err == -ESTALE) {
                        /* Cleanup stale index entries */
-                       err = ovl_cleanup(dir, index);
+                       err = ovl_cleanup(ofs, dir, index);
                } else if (err != -ENOENT) {
                        /*
                         * Abort mount to avoid corrupting the index if
@@ -1213,7 +1215,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs)
                        err = ovl_cleanup_and_whiteout(ofs, dir, index);
                } else {
                        /* Cleanup orphan index entries */
-                       err = ovl_cleanup(dir, index);
+                       err = ovl_cleanup(ofs, dir, index);
                }
 
                if (err)
index 001cdbb..e0a2e04 100644 (file)
@@ -184,7 +184,8 @@ static struct inode *ovl_alloc_inode(struct super_block *sb)
        oi->version = 0;
        oi->flags = 0;
        oi->__upperdentry = NULL;
-       oi->lower = NULL;
+       oi->lowerpath.dentry = NULL;
+       oi->lowerpath.layer = NULL;
        oi->lowerdata = NULL;
        mutex_init(&oi->lock);
 
@@ -205,7 +206,7 @@ static void ovl_destroy_inode(struct inode *inode)
        struct ovl_inode *oi = OVL_I(inode);
 
        dput(oi->__upperdentry);
-       iput(oi->lower);
+       dput(oi->lowerpath.dentry);
        if (S_ISDIR(inode->i_mode))
                ovl_dir_cache_free(inode);
        else
@@ -761,7 +762,7 @@ static struct dentry *ovl_workdir_create(struct ovl_fs *ofs,
 
        inode_lock_nested(dir, I_MUTEX_PARENT);
 retry:
-       work = lookup_one_len(name, ofs->workbasedir, strlen(name));
+       work = ovl_lookup_upper(ofs, name, ofs->workbasedir, strlen(name));
 
        if (!IS_ERR(work)) {
                struct iattr attr = {
@@ -778,7 +779,7 @@ retry:
                                goto out_unlock;
 
                        retried = true;
-                       err = ovl_workdir_cleanup(dir, mnt, work, 0);
+                       err = ovl_workdir_cleanup(ofs, dir, mnt, work, 0);
                        dput(work);
                        if (err == -EINVAL) {
                                work = ERR_PTR(err);
@@ -787,7 +788,7 @@ retry:
                        goto retry;
                }
 
-               err = ovl_mkdir_real(dir, &work, attr.ia_mode);
+               err = ovl_mkdir_real(ofs, dir, &work, attr.ia_mode);
                if (err)
                        goto out_dput;
 
@@ -809,19 +810,19 @@ retry:
                 * allowed as upper are limited to "normal" ones, where checking
                 * for the above two errors is sufficient.
                 */
-               err = vfs_removexattr(&init_user_ns, work,
-                                     XATTR_NAME_POSIX_ACL_DEFAULT);
+               err = ovl_do_removexattr(ofs, work,
+                                        XATTR_NAME_POSIX_ACL_DEFAULT);
                if (err && err != -ENODATA && err != -EOPNOTSUPP)
                        goto out_dput;
 
-               err = vfs_removexattr(&init_user_ns, work,
-                                     XATTR_NAME_POSIX_ACL_ACCESS);
+               err = ovl_do_removexattr(ofs, work,
+                                        XATTR_NAME_POSIX_ACL_ACCESS);
                if (err && err != -ENODATA && err != -EOPNOTSUPP)
                        goto out_dput;
 
                /* Clear any inherited mode bits */
                inode_lock(work->d_inode);
-               err = notify_change(&init_user_ns, work, &attr, NULL);
+               err = ovl_do_notify_change(ofs, work, &attr);
                inode_unlock(work->d_inode);
                if (err)
                        goto out_dput;
@@ -873,10 +874,6 @@ static int ovl_mount_dir_noesc(const char *name, struct path *path)
                pr_err("filesystem on '%s' not supported\n", name);
                goto out_put;
        }
-       if (is_idmapped_mnt(path->mnt)) {
-               pr_err("idmapped layers are currently not supported\n");
-               goto out_put;
-       }
        if (!d_is_dir(path->dentry)) {
                pr_err("'%s' not a directory\n", name);
                goto out_put;
@@ -1256,8 +1253,9 @@ out:
  * Returns 1 if RENAME_WHITEOUT is supported, 0 if not supported and
  * negative values if error is encountered.
  */
-static int ovl_check_rename_whiteout(struct dentry *workdir)
+static int ovl_check_rename_whiteout(struct ovl_fs *ofs)
 {
+       struct dentry *workdir = ofs->workdir;
        struct inode *dir = d_inode(workdir);
        struct dentry *temp;
        struct dentry *dest;
@@ -1267,12 +1265,12 @@ static int ovl_check_rename_whiteout(struct dentry *workdir)
 
        inode_lock_nested(dir, I_MUTEX_PARENT);
 
-       temp = ovl_create_temp(workdir, OVL_CATTR(S_IFREG | 0));
+       temp = ovl_create_temp(ofs, workdir, OVL_CATTR(S_IFREG | 0));
        err = PTR_ERR(temp);
        if (IS_ERR(temp))
                goto out_unlock;
 
-       dest = ovl_lookup_temp(workdir);
+       dest = ovl_lookup_temp(ofs, workdir);
        err = PTR_ERR(dest);
        if (IS_ERR(dest)) {
                dput(temp);
@@ -1281,14 +1279,14 @@ static int ovl_check_rename_whiteout(struct dentry *workdir)
 
        /* Name is inline and stable - using snapshot as a copy helper */
        take_dentry_name_snapshot(&name, temp);
-       err = ovl_do_rename(dir, temp, dir, dest, RENAME_WHITEOUT);
+       err = ovl_do_rename(ofs, dir, temp, dir, dest, RENAME_WHITEOUT);
        if (err) {
                if (err == -EINVAL)
                        err = 0;
                goto cleanup_temp;
        }
 
-       whiteout = lookup_one_len(name.name.name, workdir, name.name.len);
+       whiteout = ovl_lookup_upper(ofs, name.name.name, workdir, name.name.len);
        err = PTR_ERR(whiteout);
        if (IS_ERR(whiteout))
                goto cleanup_temp;
@@ -1297,11 +1295,11 @@ static int ovl_check_rename_whiteout(struct dentry *workdir)
 
        /* Best effort cleanup of whiteout and temp file */
        if (err)
-               ovl_cleanup(dir, whiteout);
+               ovl_cleanup(ofs, dir, whiteout);
        dput(whiteout);
 
 cleanup_temp:
-       ovl_cleanup(dir, temp);
+       ovl_cleanup(ofs, dir, temp);
        release_dentry_name_snapshot(&name);
        dput(temp);
        dput(dest);
@@ -1312,16 +1310,17 @@ out_unlock:
        return err;
 }
 
-static struct dentry *ovl_lookup_or_create(struct dentry *parent,
+static struct dentry *ovl_lookup_or_create(struct ovl_fs *ofs,
+                                          struct dentry *parent,
                                           const char *name, umode_t mode)
 {
        size_t len = strlen(name);
        struct dentry *child;
 
        inode_lock_nested(parent->d_inode, I_MUTEX_PARENT);
-       child = lookup_one_len(name, parent, len);
+       child = ovl_lookup_upper(ofs, name, parent, len);
        if (!IS_ERR(child) && !child->d_inode)
-               child = ovl_create_real(parent->d_inode, child,
+               child = ovl_create_real(ofs, parent->d_inode, child,
                                        OVL_CATTR(mode));
        inode_unlock(parent->d_inode);
        dput(parent);
@@ -1343,7 +1342,7 @@ static int ovl_create_volatile_dirty(struct ovl_fs *ofs)
        const char *const *name = volatile_path;
 
        for (ctr = ARRAY_SIZE(volatile_path); ctr; ctr--, name++) {
-               d = ovl_lookup_or_create(d, *name, ctr > 1 ? S_IFDIR : S_IFREG);
+               d = ovl_lookup_or_create(ofs, d, *name, ctr > 1 ? S_IFDIR : S_IFREG);
                if (IS_ERR(d))
                        return PTR_ERR(d);
        }
@@ -1391,7 +1390,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
                pr_warn("upper fs needs to support d_type.\n");
 
        /* Check if upper/work fs supports O_TMPFILE */
-       temp = ovl_do_tmpfile(ofs->workdir, S_IFREG | 0);
+       temp = ovl_do_tmpfile(ofs, ofs->workdir, S_IFREG | 0);
        ofs->tmpfile = !IS_ERR(temp);
        if (ofs->tmpfile)
                dput(temp);
@@ -1400,7 +1399,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
 
 
        /* Check if upper/work fs supports RENAME_WHITEOUT */
-       err = ovl_check_rename_whiteout(ofs->workdir);
+       err = ovl_check_rename_whiteout(ofs);
        if (err < 0)
                goto out;
 
@@ -1411,7 +1410,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
        /*
         * Check if upper/work fs supports (trusted|user).overlay.* xattr
         */
-       err = ovl_do_setxattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE, "0", 1);
+       err = ovl_setxattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE, "0", 1);
        if (err) {
                ofs->noxattr = true;
                if (ofs->config.index || ofs->config.metacopy) {
@@ -1429,7 +1428,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
                }
                err = 0;
        } else {
-               ovl_do_removexattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE);
+               ovl_removexattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE);
        }
 
        /*
index f48284a..87f811c 100644 (file)
@@ -194,6 +194,20 @@ enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
        return type;
 }
 
+enum ovl_path_type ovl_path_realdata(struct dentry *dentry, struct path *path)
+{
+       enum ovl_path_type type = ovl_path_type(dentry);
+
+       WARN_ON_ONCE(d_is_dir(dentry));
+
+       if (!OVL_TYPE_UPPER(type) || OVL_TYPE_MERGE(type))
+               ovl_path_lowerdata(dentry, path);
+       else
+               ovl_path_upper(dentry, path);
+
+       return type;
+}
+
 struct dentry *ovl_dentry_upper(struct dentry *dentry)
 {
        return ovl_upperdentry_dereference(OVL_I(d_inode(dentry)));
@@ -236,6 +250,17 @@ struct dentry *ovl_i_dentry_upper(struct inode *inode)
        return ovl_upperdentry_dereference(OVL_I(inode));
 }
 
+void ovl_i_path_real(struct inode *inode, struct path *path)
+{
+       path->dentry = ovl_i_dentry_upper(inode);
+       if (!path->dentry) {
+               path->dentry = OVL_I(inode)->lowerpath.dentry;
+               path->mnt = OVL_I(inode)->lowerpath.layer->mnt;
+       } else {
+               path->mnt = ovl_upper_mnt(OVL_FS(inode->i_sb));
+       }
+}
+
 struct inode *ovl_inode_upper(struct inode *inode)
 {
        struct dentry *upperdentry = ovl_i_dentry_upper(inode);
@@ -245,7 +270,9 @@ struct inode *ovl_inode_upper(struct inode *inode)
 
 struct inode *ovl_inode_lower(struct inode *inode)
 {
-       return OVL_I(inode)->lower;
+       struct dentry *lowerdentry = OVL_I(inode)->lowerpath.dentry;
+
+       return lowerdentry ? d_inode(lowerdentry) : NULL;
 }
 
 struct inode *ovl_inode_real(struct inode *inode)
@@ -443,7 +470,7 @@ static void ovl_dir_version_inc(struct dentry *dentry, bool impurity)
 void ovl_dir_modified(struct dentry *dentry, bool impurity)
 {
        /* Copy mtime/ctime */
-       ovl_copyattr(d_inode(ovl_dentry_upper(dentry)), d_inode(dentry));
+       ovl_copyattr(d_inode(dentry));
 
        ovl_dir_version_inc(dentry, impurity);
 }
@@ -466,6 +493,7 @@ bool ovl_is_whiteout(struct dentry *dentry)
 struct file *ovl_path_open(struct path *path, int flags)
 {
        struct inode *inode = d_inode(path->dentry);
+       struct user_namespace *real_mnt_userns = mnt_user_ns(path->mnt);
        int err, acc_mode;
 
        if (flags & ~(O_ACCMODE | O_LARGEFILE))
@@ -482,12 +510,12 @@ struct file *ovl_path_open(struct path *path, int flags)
                BUG();
        }
 
-       err = inode_permission(&init_user_ns, inode, acc_mode | MAY_OPEN);
+       err = inode_permission(real_mnt_userns, inode, acc_mode | MAY_OPEN);
        if (err)
                return ERR_PTR(err);
 
        /* O_NOATIME is an optimization, don't fail if not permitted */
-       if (inode_owner_or_capable(&init_user_ns, inode))
+       if (inode_owner_or_capable(real_mnt_userns, inode))
                flags |= O_NOATIME;
 
        return dentry_open(path, flags, current_cred());
@@ -550,11 +578,11 @@ void ovl_copy_up_end(struct dentry *dentry)
        ovl_inode_unlock(d_inode(dentry));
 }
 
-bool ovl_check_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry)
+bool ovl_path_check_origin_xattr(struct ovl_fs *ofs, struct path *path)
 {
        int res;
 
-       res = ovl_do_getxattr(ofs, dentry, OVL_XATTR_ORIGIN, NULL, 0);
+       res = ovl_path_getxattr(ofs, path, OVL_XATTR_ORIGIN, NULL, 0);
 
        /* Zero size value means "copied up but origin unknown" */
        if (res >= 0)
@@ -563,16 +591,16 @@ bool ovl_check_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry)
        return false;
 }
 
-bool ovl_check_dir_xattr(struct super_block *sb, struct dentry *dentry,
-                        enum ovl_xattr ox)
+bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, struct path *path,
+                              enum ovl_xattr ox)
 {
        int res;
        char val;
 
-       if (!d_is_dir(dentry))
+       if (!d_is_dir(path->dentry))
                return false;
 
-       res = ovl_do_getxattr(OVL_FS(sb), dentry, ox, &val, 1);
+       res = ovl_path_getxattr(ofs, path, ox, &val, 1);
        if (res == 1 && val == 'y')
                return true;
 
@@ -612,7 +640,7 @@ int ovl_check_setxattr(struct ovl_fs *ofs, struct dentry *upperdentry,
        if (ofs->noxattr)
                return xerr;
 
-       err = ovl_do_setxattr(ofs, upperdentry, ox, value, size);
+       err = ovl_setxattr(ofs, upperdentry, ox, value, size);
 
        if (err == -EOPNOTSUPP) {
                pr_warn("cannot set %s xattr on upper\n", ovl_xattr(ofs, ox));
@@ -652,8 +680,8 @@ void ovl_check_protattr(struct inode *inode, struct dentry *upper)
        char buf[OVL_PROTATTR_MAX+1];
        int res, n;
 
-       res = ovl_do_getxattr(ofs, upper, OVL_XATTR_PROTATTR, buf,
-                             OVL_PROTATTR_MAX);
+       res = ovl_getxattr_upper(ofs, upper, OVL_XATTR_PROTATTR, buf,
+                                OVL_PROTATTR_MAX);
        if (res < 0)
                return;
 
@@ -708,7 +736,7 @@ int ovl_set_protattr(struct inode *inode, struct dentry *upper,
                err = ovl_check_setxattr(ofs, upper, OVL_XATTR_PROTATTR,
                                         buf, len, -EPERM);
        } else if (inode->i_flags & OVL_PROT_I_FLAGS_MASK) {
-               err = ovl_do_removexattr(ofs, upper, OVL_XATTR_PROTATTR);
+               err = ovl_removexattr(ofs, upper, OVL_XATTR_PROTATTR);
                if (err == -EOPNOTSUPP || err == -ENODATA)
                        err = 0;
        }
@@ -824,7 +852,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
        }
 
        inode_lock_nested(dir, I_MUTEX_PARENT);
-       index = lookup_one_len(name.name, indexdir, name.len);
+       index = ovl_lookup_upper(ofs, name.name, indexdir, name.len);
        err = PTR_ERR(index);
        if (IS_ERR(index)) {
                index = NULL;
@@ -834,7 +862,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
                                               dir, index);
        } else {
                /* Cleanup orphan index entries */
-               err = ovl_cleanup(dir, index);
+               err = ovl_cleanup(ofs, dir, index);
        }
 
        inode_unlock(dir);
@@ -943,15 +971,15 @@ err:
 }
 
 /* err < 0, 0 if no metacopy xattr, 1 if metacopy xattr found */
-int ovl_check_metacopy_xattr(struct ovl_fs *ofs, struct dentry *dentry)
+int ovl_check_metacopy_xattr(struct ovl_fs *ofs, struct path *path)
 {
        int res;
 
        /* Only regular files can have metacopy xattr */
-       if (!S_ISREG(d_inode(dentry)->i_mode))
+       if (!S_ISREG(d_inode(path->dentry)->i_mode))
                return 0;
 
-       res = ovl_do_getxattr(ofs, dentry, OVL_XATTR_METACOPY, NULL, 0);
+       res = ovl_path_getxattr(ofs, path, OVL_XATTR_METACOPY, NULL, 0);
        if (res < 0) {
                if (res == -ENODATA || res == -EOPNOTSUPP)
                        return 0;
@@ -987,13 +1015,12 @@ bool ovl_is_metacopy_dentry(struct dentry *dentry)
        return (oe->numlower > 1);
 }
 
-char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
-                            int padding)
+char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct path *path, int padding)
 {
        int res;
        char *s, *next, *buf = NULL;
 
-       res = ovl_do_getxattr(ofs, dentry, OVL_XATTR_REDIRECT, NULL, 0);
+       res = ovl_path_getxattr(ofs, path, OVL_XATTR_REDIRECT, NULL, 0);
        if (res == -ENODATA || res == -EOPNOTSUPP)
                return NULL;
        if (res < 0)
@@ -1005,7 +1032,7 @@ char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
        if (!buf)
                return ERR_PTR(-ENOMEM);
 
-       res = ovl_do_getxattr(ofs, dentry, OVL_XATTR_REDIRECT, buf, res);
+       res = ovl_path_getxattr(ofs, path, OVL_XATTR_REDIRECT, buf, res);
        if (res < 0)
                goto fail;
        if (res == 0)
@@ -1060,3 +1087,33 @@ int ovl_sync_status(struct ovl_fs *ofs)
 
        return errseq_check(&mnt->mnt_sb->s_wb_err, ofs->errseq);
 }
+
+/*
+ * ovl_copyattr() - copy inode attributes from layer to ovl inode
+ *
+ * When overlay copies inode information from an upper or lower layer to the
+ * relevant overlay inode, it will apply the idmapping of the upper or lower
+ * layer when doing so, ensuring that the ovl inode ownership will correctly
+ * reflect the ownership of the idmapped upper or lower layer. For example, an
+ * idmapped upper or lower layer mapping id 1001 to id 1000 will take care to
+ * map any lower or upper inode owned by id 1001 to id 1000. These mapping
+ * helpers are nops when the relevant layer isn't idmapped.
+ */
+void ovl_copyattr(struct inode *inode)
+{
+       struct path realpath;
+       struct inode *realinode;
+       struct user_namespace *real_mnt_userns;
+
+       ovl_i_path_real(inode, &realpath);
+       realinode = d_inode(realpath.dentry);
+       real_mnt_userns = mnt_user_ns(realpath.mnt);
+
+       inode->i_uid = i_uid_into_mnt(real_mnt_userns, realinode);
+       inode->i_gid = i_gid_into_mnt(real_mnt_userns, realinode);
+       inode->i_mode = realinode->i_mode;
+       inode->i_atime = realinode->i_atime;
+       inode->i_mtime = realinode->i_mtime;
+       inode->i_ctime = realinode->i_ctime;
+       i_size_write(inode, i_size_read(realinode));
+}
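The comment above uses a 1001 -> 1000 idmapping as its example. The toy program below acts that mapping out in plain userspace C; struct idmap and map_id() are illustrative stand-ins for the mount user namespace that ovl_copyattr() consults via mnt_user_ns() and i_uid_into_mnt()/i_gid_into_mnt() in the hunk above, not kernel code.

#include <stdio.h>

/* Stand-in for a mount idmapping: one extent mapping [from, from + count)
 * onto [to, to + count); identity everywhere else, so a non-idmapped layer
 * behaves as a no-op. */
struct idmap {
        unsigned int from;
        unsigned int to;
        unsigned int count;
};

static unsigned int map_id(const struct idmap *m, unsigned int id)
{
        if (id >= m->from && id < m->from + m->count)
                return m->to + (id - m->from);
        return id;
}

int main(void)
{
        struct idmap layer = { .from = 1001, .to = 1000, .count = 1 };

        /* A lower inode owned by id 1001 surfaces as id 1000 on the overlay. */
        printf("uid 1001 -> %u\n", map_id(&layer, 1001));
        /* Ids outside the mapping pass through unchanged. */
        printf("uid  500 -> %u\n", map_id(&layer, 500));
        return 0;
}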
index e643aec..b1b1cdf 100644 (file)
@@ -682,6 +682,14 @@ SYSCALL_DEFINE4(pread64, unsigned int, fd, char __user *, buf,
        return ksys_pread64(fd, buf, count, pos);
 }
 
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_PREAD64)
+COMPAT_SYSCALL_DEFINE5(pread64, unsigned int, fd, char __user *, buf,
+                      size_t, count, compat_arg_u64_dual(pos))
+{
+       return ksys_pread64(fd, buf, count, compat_arg_u64_glue(pos));
+}
+#endif
+
 ssize_t ksys_pwrite64(unsigned int fd, const char __user *buf,
                      size_t count, loff_t pos)
 {
@@ -708,6 +716,14 @@ SYSCALL_DEFINE4(pwrite64, unsigned int, fd, const char __user *, buf,
        return ksys_pwrite64(fd, buf, count, pos);
 }
 
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_PWRITE64)
+COMPAT_SYSCALL_DEFINE5(pwrite64, unsigned int, fd, const char __user *, buf,
+                      size_t, count, compat_arg_u64_dual(pos))
+{
+       return ksys_pwrite64(fd, buf, count, compat_arg_u64_glue(pos));
+}
+#endif
+
 static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
                loff_t *ppos, int type, rwf_t flags)
 {
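The new compat entry points above receive the 64-bit file position as two 32-bit halves (compat_arg_u64_dual) and reassemble them (compat_arg_u64_glue). A rough standalone sketch of the reassembly idea follows; the real macros additionally handle per-ABI argument ordering, register pairing and endianness, which this toy helper ignores.

#include <inttypes.h>
#include <stdio.h>

/* Rebuild a 64-bit value from the two 32-bit halves that a compat (32-bit)
 * caller passes in separate registers. */
static uint64_t glue_u64(uint32_t lo, uint32_t hi)
{
        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        printf("pos = 0x%016" PRIx64 "\n", glue_u64(0x89abcdefu, 0x01234567u));
        return 0;
}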
index 5c2c944..9ced886 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -659,7 +659,7 @@ SYSCALL_DEFINE5(statx,
        return ret;
 }
 
-#ifdef CONFIG_COMPAT
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
 {
        struct compat_stat tmp;
index c769001..dc72591 100644 (file)
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -373,6 +373,15 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
        return ksys_sync_file_range(fd, offset, nbytes, flags);
 }
 
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_SYNC_FILE_RANGE)
+COMPAT_SYSCALL_DEFINE6(sync_file_range, int, fd, compat_arg_u64_dual(offset),
+                      compat_arg_u64_dual(nbytes), unsigned int, flags)
+{
+       return ksys_sync_file_range(fd, compat_arg_u64_glue(offset),
+                                   compat_arg_u64_glue(nbytes), flags);
+}
+#endif
+
 /* It would be nice if people remember that not all the world's an i386
    when they introduce new system calls */
 SYSCALL_DEFINE4(sync_file_range2, int, fd, unsigned int, flags,
index c0b84e9..e8b9b75 100644 (file)
@@ -65,7 +65,7 @@ static void shrink_liability(struct ubifs_info *c, int nr_to_write)
  */
 static int run_gc(struct ubifs_info *c)
 {
-       int err, lnum;
+       int lnum;
 
        /* Make some free space by garbage-collecting dirty space */
        down_read(&c->commit_sem);
@@ -76,10 +76,7 @@ static int run_gc(struct ubifs_info *c)
 
        /* GC freed one LEB, return it to lprops */
        dbg_budg("GC freed LEB %d", lnum);
-       err = ubifs_return_leb(c, lnum);
-       if (err)
-               return err;
-       return 0;
+       return ubifs_return_leb(c, lnum);
 }
 
 /**
index e4f193e..e4c4761 100644 (file)
@@ -677,7 +677,7 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode,
        int err;
 
        err = security_inode_init_security(inode, dentry, qstr,
-                                          &init_xattrs, 0);
+                                          &init_xattrs, NULL);
        if (err) {
                struct ubifs_info *c = dentry->i_sb->s_fs_info;
                ubifs_err(c, "cannot initialize security for inode %lu, error %d",
index 1e4ee04..3e920cf 100644 (file)
@@ -173,7 +173,6 @@ __xfs_free_perag(
        struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
 
        ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
-       ASSERT(atomic_read(&pag->pag_ref) == 0);
        kmem_free(pag);
 }
 
@@ -192,7 +191,7 @@ xfs_free_perag(
                pag = radix_tree_delete(&mp->m_perag_tree, agno);
                spin_unlock(&mp->m_perag_lock);
                ASSERT(pag);
-               ASSERT(atomic_read(&pag->pag_ref) == 0);
+               XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0);
 
                cancel_delayed_work_sync(&pag->pag_blockgc_work);
                xfs_iunlink_destroy(pag);
index 14ae082..836ab1b 100644 (file)
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
 #include "xfs_attr_item.h"
-#include "xfs_log.h"
+#include "xfs_xattr.h"
 
-struct kmem_cache              *xfs_attri_cache;
-struct kmem_cache              *xfs_attrd_cache;
+struct kmem_cache              *xfs_attr_intent_cache;
 
 /*
  * xfs_attr.c
@@ -58,11 +57,11 @@ STATIC int xfs_attr_leaf_try_add(struct xfs_da_args *args, struct xfs_buf *bp);
  */
 STATIC int xfs_attr_node_get(xfs_da_args_t *args);
 STATIC void xfs_attr_restore_rmt_blk(struct xfs_da_args *args);
-static int xfs_attr_node_try_addname(struct xfs_attr_item *attr);
-STATIC int xfs_attr_node_addname_find_attr(struct xfs_attr_item *attr);
-STATIC int xfs_attr_node_remove_attr(struct xfs_attr_item *attr);
-STATIC int xfs_attr_node_hasname(xfs_da_args_t *args,
-                                struct xfs_da_state **state);
+static int xfs_attr_node_try_addname(struct xfs_attr_intent *attr);
+STATIC int xfs_attr_node_addname_find_attr(struct xfs_attr_intent *attr);
+STATIC int xfs_attr_node_remove_attr(struct xfs_attr_intent *attr);
+STATIC int xfs_attr_node_lookup(struct xfs_da_args *args,
+               struct xfs_da_state *state);
 
 int
 xfs_inode_hasattr(
@@ -377,7 +376,7 @@ xfs_attr_try_sf_addname(
 
 static int
 xfs_attr_sf_addname(
-       struct xfs_attr_item            *attr)
+       struct xfs_attr_intent          *attr)
 {
        struct xfs_da_args              *args = attr->xattri_da_args;
        struct xfs_inode                *dp = args->dp;
@@ -423,7 +422,7 @@ out:
  */
 static enum xfs_delattr_state
 xfs_attr_complete_op(
-       struct xfs_attr_item    *attr,
+       struct xfs_attr_intent  *attr,
        enum xfs_delattr_state  replace_state)
 {
        struct xfs_da_args      *args = attr->xattri_da_args;
@@ -439,7 +438,7 @@ xfs_attr_complete_op(
 
 static int
 xfs_attr_leaf_addname(
-       struct xfs_attr_item    *attr)
+       struct xfs_attr_intent  *attr)
 {
        struct xfs_da_args      *args = attr->xattri_da_args;
        int                     error;
@@ -493,7 +492,7 @@ out:
  */
 static int
 xfs_attr_node_addname(
-       struct xfs_attr_item    *attr)
+       struct xfs_attr_intent  *attr)
 {
        struct xfs_da_args      *args = attr->xattri_da_args;
        int                     error;
@@ -530,7 +529,7 @@ out:
 
 static int
 xfs_attr_rmtval_alloc(
-       struct xfs_attr_item            *attr)
+       struct xfs_attr_intent          *attr)
 {
        struct xfs_da_args              *args = attr->xattri_da_args;
        int                             error = 0;
@@ -594,6 +593,19 @@ xfs_attr_leaf_mark_incomplete(
        return xfs_attr3_leaf_setflag(args);
 }
 
+/* Ensure the da state of an xattr deferred work item is ready to go. */
+static inline void
+xfs_attr_item_init_da_state(
+       struct xfs_attr_intent  *attr)
+{
+       struct xfs_da_args      *args = attr->xattri_da_args;
+
+       if (!attr->xattri_da_state)
+               attr->xattri_da_state = xfs_da_state_alloc(args);
+       else
+               xfs_da_state_reset(attr->xattri_da_state, args);
+}
+
 /*
  * Initial setup for xfs_attr_node_removename.  Make sure the attr is there and
  * the blocks are valid.  Attr keys with remote blocks will be marked
@@ -601,29 +613,33 @@ xfs_attr_leaf_mark_incomplete(
  */
 static
 int xfs_attr_node_removename_setup(
-       struct xfs_attr_item            *attr)
+       struct xfs_attr_intent          *attr)
 {
        struct xfs_da_args              *args = attr->xattri_da_args;
-       struct xfs_da_state             **state = &attr->xattri_da_state;
+       struct xfs_da_state             *state;
        int                             error;
 
-       error = xfs_attr_node_hasname(args, state);
+       xfs_attr_item_init_da_state(attr);
+       error = xfs_attr_node_lookup(args, attr->xattri_da_state);
        if (error != -EEXIST)
                goto out;
        error = 0;
 
-       ASSERT((*state)->path.blk[(*state)->path.active - 1].bp != NULL);
-       ASSERT((*state)->path.blk[(*state)->path.active - 1].magic ==
+       state = attr->xattri_da_state;
+       ASSERT(state->path.blk[state->path.active - 1].bp != NULL);
+       ASSERT(state->path.blk[state->path.active - 1].magic ==
                XFS_ATTR_LEAF_MAGIC);
 
-       error = xfs_attr_leaf_mark_incomplete(args, *state);
+       error = xfs_attr_leaf_mark_incomplete(args, state);
        if (error)
                goto out;
        if (args->rmtblkno > 0)
                error = xfs_attr_rmtval_invalidate(args);
 out:
-       if (error)
-               xfs_da_state_free(*state);
+       if (error) {
+               xfs_da_state_free(attr->xattri_da_state);
+               attr->xattri_da_state = NULL;
+       }
 
        return error;
 }
@@ -635,7 +651,7 @@ out:
  */
 static int
 xfs_attr_leaf_remove_attr(
-       struct xfs_attr_item            *attr)
+       struct xfs_attr_intent          *attr)
 {
        struct xfs_da_args              *args = attr->xattri_da_args;
        struct xfs_inode                *dp = args->dp;
@@ -700,7 +716,7 @@ xfs_attr_leaf_shrink(
  */
 int
 xfs_attr_set_iter(
-       struct xfs_attr_item            *attr)
+       struct xfs_attr_intent          *attr)
 {
        struct xfs_da_args              *args = attr->xattri_da_args;
        int                             error = 0;
@@ -852,6 +868,7 @@ xfs_attr_lookup(
 {
        struct xfs_inode        *dp = args->dp;
        struct xfs_buf          *bp = NULL;
+       struct xfs_da_state     *state;
        int                     error;
 
        if (!xfs_inode_hasattr(dp))
@@ -869,19 +886,22 @@ xfs_attr_lookup(
                return error;
        }
 
-       return xfs_attr_node_hasname(args, NULL);
+       state = xfs_da_state_alloc(args);
+       error = xfs_attr_node_lookup(args, state);
+       xfs_da_state_free(state);
+       return error;
 }
 
 static int
-xfs_attr_item_init(
+xfs_attr_intent_init(
        struct xfs_da_args      *args,
        unsigned int            op_flags,       /* op flag (set or remove) */
-       struct xfs_attr_item    **attr)         /* new xfs_attr_item */
+       struct xfs_attr_intent  **attr)         /* new xfs_attr_intent */
 {
 
-       struct xfs_attr_item    *new;
+       struct xfs_attr_intent  *new;
 
-       new = kmem_zalloc(sizeof(struct xfs_attr_item), KM_NOFS);
+       new = kmem_cache_zalloc(xfs_attr_intent_cache, GFP_NOFS | __GFP_NOFAIL);
        new->xattri_op_flags = op_flags;
        new->xattri_da_args = args;
 
@@ -894,10 +914,10 @@ static int
 xfs_attr_defer_add(
        struct xfs_da_args      *args)
 {
-       struct xfs_attr_item    *new;
+       struct xfs_attr_intent  *new;
        int                     error = 0;
 
-       error = xfs_attr_item_init(args, XFS_ATTR_OP_FLAGS_SET, &new);
+       error = xfs_attr_intent_init(args, XFS_ATTRI_OP_FLAGS_SET, &new);
        if (error)
                return error;
 
@@ -913,10 +933,10 @@ static int
 xfs_attr_defer_replace(
        struct xfs_da_args      *args)
 {
-       struct xfs_attr_item    *new;
+       struct xfs_attr_intent  *new;
        int                     error = 0;
 
-       error = xfs_attr_item_init(args, XFS_ATTR_OP_FLAGS_REPLACE, &new);
+       error = xfs_attr_intent_init(args, XFS_ATTRI_OP_FLAGS_REPLACE, &new);
        if (error)
                return error;
 
@@ -933,10 +953,10 @@ xfs_attr_defer_remove(
        struct xfs_da_args      *args)
 {
 
-       struct xfs_attr_item    *new;
+       struct xfs_attr_intent  *new;
        int                     error;
 
-       error  = xfs_attr_item_init(args, XFS_ATTR_OP_FLAGS_REMOVE, &new);
+       error  = xfs_attr_intent_init(args, XFS_ATTRI_OP_FLAGS_REMOVE, &new);
        if (error)
                return error;
 
@@ -962,7 +982,6 @@ xfs_attr_set(
        int                     error, local;
        int                     rmt_blks = 0;
        unsigned int            total;
-       int                     delayed = xfs_has_larp(mp);
 
        if (xfs_is_shutdown(dp->i_mount))
                return -EIO;
@@ -1007,12 +1026,6 @@ xfs_attr_set(
                rmt_blks = xfs_attr3_rmt_blocks(mp, XFS_XATTR_SIZE_MAX);
        }
 
-       if (delayed) {
-               error = xfs_attr_use_log_assist(mp);
-               if (error)
-                       return error;
-       }
-
        /*
         * Root fork attributes can use reserved data blocks for this
         * operation if necessary
@@ -1020,7 +1033,7 @@ xfs_attr_set(
        xfs_init_attr_trans(args, &tres, &total);
        error = xfs_trans_alloc_inode(dp, &tres, total, 0, rsvd, &args->trans);
        if (error)
-               goto drop_incompat;
+               return error;
 
        if (args->value || xfs_inode_hasattr(dp)) {
                error = xfs_iext_count_may_overflow(dp, XFS_ATTR_FORK,
@@ -1080,9 +1093,6 @@ xfs_attr_set(
        error = xfs_trans_commit(args->trans);
 out_unlock:
        xfs_iunlock(dp, XFS_ILOCK_EXCL);
-drop_incompat:
-       if (delayed)
-               xlog_drop_incompat_feat(mp->m_log);
        return error;
 
 out_trans_cancel:
@@ -1091,40 +1101,6 @@ out_trans_cancel:
        goto out_unlock;
 }
 
-int __init
-xfs_attri_init_cache(void)
-{
-       xfs_attri_cache = kmem_cache_create("xfs_attri",
-                                           sizeof(struct xfs_attri_log_item),
-                                           0, 0, NULL);
-
-       return xfs_attri_cache != NULL ? 0 : -ENOMEM;
-}
-
-void
-xfs_attri_destroy_cache(void)
-{
-       kmem_cache_destroy(xfs_attri_cache);
-       xfs_attri_cache = NULL;
-}
-
-int __init
-xfs_attrd_init_cache(void)
-{
-       xfs_attrd_cache = kmem_cache_create("xfs_attrd",
-                                           sizeof(struct xfs_attrd_log_item),
-                                           0, 0, NULL);
-
-       return xfs_attrd_cache != NULL ? 0 : -ENOMEM;
-}
-
-void
-xfs_attrd_destroy_cache(void)
-{
-       kmem_cache_destroy(xfs_attrd_cache);
-       xfs_attrd_cache = NULL;
-}
-
 /*========================================================================
  * External routines when attribute list is inside the inode
  *========================================================================*/
@@ -1384,32 +1360,20 @@ xfs_attr_leaf_get(xfs_da_args_t *args)
        return error;
 }
 
-/*
- * Return EEXIST if attr is found, or ENOATTR if not
- * statep: If not null is set to point at the found state.  Caller will
- *         be responsible for freeing the state in this case.
- */
+/* Return EEXIST if attr is found, or ENOATTR if not. */
 STATIC int
-xfs_attr_node_hasname(
+xfs_attr_node_lookup(
        struct xfs_da_args      *args,
-       struct xfs_da_state     **statep)
+       struct xfs_da_state     *state)
 {
-       struct xfs_da_state     *state;
        int                     retval, error;
 
-       state = xfs_da_state_alloc(args);
-       if (statep != NULL)
-               *statep = state;
-
        /*
         * Search to see if name exists, and get back a pointer to it.
         */
        error = xfs_da3_node_lookup_int(state, &retval);
        if (error)
-               retval = error;
-
-       if (!statep)
-               xfs_da_state_free(state);
+               return error;
 
        return retval;
 }
@@ -1420,7 +1384,7 @@ xfs_attr_node_hasname(
 
 STATIC int
 xfs_attr_node_addname_find_attr(
-        struct xfs_attr_item   *attr)
+        struct xfs_attr_intent *attr)
 {
        struct xfs_da_args      *args = attr->xattri_da_args;
        int                     error;
@@ -1429,7 +1393,8 @@ xfs_attr_node_addname_find_attr(
         * Search to see if name already exists, and get back a pointer
         * to where it should go.
         */
-       error = xfs_attr_node_hasname(args, &attr->xattri_da_state);
+       xfs_attr_item_init_da_state(attr);
+       error = xfs_attr_node_lookup(args, attr->xattri_da_state);
        switch (error) {
        case -ENOATTR:
                if (args->op_flags & XFS_DA_OP_REPLACE)
@@ -1456,8 +1421,10 @@ xfs_attr_node_addname_find_attr(
 
        return 0;
 error:
-       if (attr->xattri_da_state)
+       if (attr->xattri_da_state) {
                xfs_da_state_free(attr->xattri_da_state);
+               attr->xattri_da_state = NULL;
+       }
        return error;
 }
 
@@ -1470,7 +1437,7 @@ error:
  */
 static int
 xfs_attr_node_try_addname(
-       struct xfs_attr_item            *attr)
+       struct xfs_attr_intent          *attr)
 {
        struct xfs_da_args              *args = attr->xattri_da_args;
        struct xfs_da_state             *state = attr->xattri_da_state;
@@ -1511,6 +1478,7 @@ xfs_attr_node_try_addname(
 
 out:
        xfs_da_state_free(state);
+       attr->xattri_da_state = NULL;
        return error;
 }
 
@@ -1535,10 +1503,10 @@ xfs_attr_node_removename(
 
 static int
 xfs_attr_node_remove_attr(
-       struct xfs_attr_item            *attr)
+       struct xfs_attr_intent          *attr)
 {
        struct xfs_da_args              *args = attr->xattri_da_args;
-       struct xfs_da_state             *state = NULL;
+       struct xfs_da_state             *state = xfs_da_state_alloc(args);
        int                             retval = 0;
        int                             error = 0;
 
@@ -1548,8 +1516,6 @@ xfs_attr_node_remove_attr(
         * attribute entry after any split ops.
         */
        args->attr_filter |= XFS_ATTR_INCOMPLETE;
-       state = xfs_da_state_alloc(args);
-       state->inleaf = 0;
        error = xfs_da3_node_lookup_int(state, &retval);
        if (error)
                goto out;
@@ -1567,8 +1533,7 @@ xfs_attr_node_remove_attr(
        retval = error = 0;
 
 out:
-       if (state)
-               xfs_da_state_free(state);
+       xfs_da_state_free(state);
        if (error)
                return error;
        return retval;
@@ -1597,7 +1562,8 @@ xfs_attr_node_get(
        /*
         * Search to see if name exists, and get back a pointer to it.
         */
-       error = xfs_attr_node_hasname(args, &state);
+       state = xfs_da_state_alloc(args);
+       error = xfs_attr_node_lookup(args, state);
        if (error != -EEXIST)
                goto out_release;
 
@@ -1616,8 +1582,7 @@ out_release:
                state->path.blk[i].bp = NULL;
        }
 
-       if (state)
-               xfs_da_state_free(state);
+       xfs_da_state_free(state);
        return error;
 }
 
@@ -1637,3 +1602,20 @@ xfs_attr_namecheck(
        /* There shouldn't be any nulls here */
        return !memchr(name, 0, length);
 }
+
+int __init
+xfs_attr_intent_init_cache(void)
+{
+       xfs_attr_intent_cache = kmem_cache_create("xfs_attr_intent",
+                       sizeof(struct xfs_attr_intent),
+                       0, 0, NULL);
+
+       return xfs_attr_intent_cache != NULL ? 0 : -ENOMEM;
+}
+
+void
+xfs_attr_intent_destroy_cache(void)
+{
+       kmem_cache_destroy(xfs_attr_intent_cache);
+       xfs_attr_intent_cache = NULL;
+}
index 1af7abe..e329da3 100644 (file)
@@ -31,7 +31,8 @@ struct xfs_attr_list_context;
 static inline bool xfs_has_larp(struct xfs_mount *mp)
 {
 #ifdef DEBUG
-       return xfs_globals.larp;
+       /* Logged xattrs require a V5 super for log_incompat */
+       return xfs_has_crc(mp) && xfs_globals.larp;
 #else
        return false;
 #endif
@@ -434,7 +435,7 @@ struct xfs_attr_list_context {
  */
 
 /*
- * Enum values for xfs_attr_item.xattri_da_state
+ * Enum values for xfs_attr_intent.xattri_da_state
  *
  * These values are used by delayed attribute operations to keep track  of where
  * they were before they returned -EAGAIN.  A return code of -EAGAIN signals the
@@ -501,44 +502,46 @@ enum xfs_delattr_state {
        { XFS_DAS_NODE_REMOVE_ATTR,     "XFS_DAS_NODE_REMOVE_ATTR" }, \
        { XFS_DAS_DONE,                 "XFS_DAS_DONE" }
 
-/*
- * Defines for xfs_attr_item.xattri_flags
- */
-#define XFS_DAC_LEAF_ADDNAME_INIT      0x01 /* xfs_attr_leaf_addname init*/
+struct xfs_attri_log_nameval;
 
 /*
  * Context used for keeping track of delayed attribute operations
  */
-struct xfs_attr_item {
+struct xfs_attr_intent {
+       /*
+        * used to log this item to an intent containing a list of attrs to
+        * commit later
+        */
+       struct list_head                xattri_list;
+
+       /* Used in xfs_attr_node_removename to roll through removing blocks */
+       struct xfs_da_state             *xattri_da_state;
+
        struct xfs_da_args              *xattri_da_args;
 
        /*
+        * Shared buffer containing the attr name and value so that the logging
+        * code can share large memory buffers between log items.
+        */
+       struct xfs_attri_log_nameval    *xattri_nameval;
+
+       /*
         * Used by xfs_attr_set to hold a leaf buffer across a transaction roll
         */
        struct xfs_buf                  *xattri_leaf_bp;
 
-       /* Used in xfs_attr_rmtval_set_blk to roll through allocating blocks */
-       struct xfs_bmbt_irec            xattri_map;
-       xfs_dablk_t                     xattri_lblkno;
-       int                             xattri_blkcnt;
-
-       /* Used in xfs_attr_node_removename to roll through removing blocks */
-       struct xfs_da_state             *xattri_da_state;
-
        /* Used to keep track of current state of delayed operation */
-       unsigned int                    xattri_flags;
        enum xfs_delattr_state          xattri_dela_state;
 
        /*
-        * Attr operation being performed - XFS_ATTR_OP_FLAGS_*
+        * Attr operation being performed - XFS_ATTRI_OP_FLAGS_*
         */
        unsigned int                    xattri_op_flags;
 
-       /*
-        * used to log this item to an intent containing a list of attrs to
-        * commit later
-        */
-       struct list_head                xattri_list;
+       /* Used in xfs_attr_rmtval_set_blk to roll through allocating blocks */
+       xfs_dablk_t                     xattri_lblkno;
+       int                             xattri_blkcnt;
+       struct xfs_bmbt_irec            xattri_map;
 };
 
 
@@ -557,21 +560,13 @@ bool xfs_attr_is_leaf(struct xfs_inode *ip);
 int xfs_attr_get_ilocked(struct xfs_da_args *args);
 int xfs_attr_get(struct xfs_da_args *args);
 int xfs_attr_set(struct xfs_da_args *args);
-int xfs_attr_set_iter(struct xfs_attr_item *attr);
-int xfs_attr_remove_iter(struct xfs_attr_item *attr);
+int xfs_attr_set_iter(struct xfs_attr_intent *attr);
+int xfs_attr_remove_iter(struct xfs_attr_intent *attr);
 bool xfs_attr_namecheck(const void *name, size_t length);
 int xfs_attr_calc_size(struct xfs_da_args *args, int *local);
 void xfs_init_attr_trans(struct xfs_da_args *args, struct xfs_trans_res *tres,
                         unsigned int *total);
 
-extern struct kmem_cache       *xfs_attri_cache;
-extern struct kmem_cache       *xfs_attrd_cache;
-
-int __init xfs_attri_init_cache(void);
-void xfs_attri_destroy_cache(void);
-int __init xfs_attrd_init_cache(void);
-void xfs_attrd_destroy_cache(void);
-
 /*
  * Check to see if the attr should be upgraded from non-existent or shortform to
  * single-leaf-block attribute list.
@@ -634,4 +629,8 @@ xfs_attr_init_replace_state(struct xfs_da_args *args)
        return xfs_attr_init_add_state(args);
 }
 
+extern struct kmem_cache *xfs_attr_intent_cache;
+int __init xfs_attr_intent_init_cache(void);
+void xfs_attr_intent_destroy_cache(void);
+
 #endif /* __XFS_ATTR_H__ */
index 4250159..7298c14 100644 (file)
@@ -568,7 +568,7 @@ xfs_attr_rmtval_stale(
  */
 int
 xfs_attr_rmtval_find_space(
-       struct xfs_attr_item            *attr)
+       struct xfs_attr_intent          *attr)
 {
        struct xfs_da_args              *args = attr->xattri_da_args;
        struct xfs_bmbt_irec            *map = &attr->xattri_map;
@@ -598,7 +598,7 @@ xfs_attr_rmtval_find_space(
  */
 int
 xfs_attr_rmtval_set_blk(
-       struct xfs_attr_item            *attr)
+       struct xfs_attr_intent          *attr)
 {
        struct xfs_da_args              *args = attr->xattri_da_args;
        struct xfs_inode                *dp = args->dp;
@@ -674,7 +674,7 @@ xfs_attr_rmtval_invalidate(
  */
 int
 xfs_attr_rmtval_remove(
-       struct xfs_attr_item            *attr)
+       struct xfs_attr_intent          *attr)
 {
        struct xfs_da_args              *args = attr->xattri_da_args;
        int                             error, done;
index 62b398e..d097ec6 100644 (file)
@@ -12,9 +12,9 @@ int xfs_attr_rmtval_get(struct xfs_da_args *args);
 int xfs_attr_rmtval_stale(struct xfs_inode *ip, struct xfs_bmbt_irec *map,
                xfs_buf_flags_t incore_flags);
 int xfs_attr_rmtval_invalidate(struct xfs_da_args *args);
-int xfs_attr_rmtval_remove(struct xfs_attr_item *attr);
+int xfs_attr_rmtval_remove(struct xfs_attr_intent *attr);
 int xfs_attr_rmt_find_hole(struct xfs_da_args *args);
 int xfs_attr_rmtval_set_value(struct xfs_da_args *args);
-int xfs_attr_rmtval_set_blk(struct xfs_attr_item *attr);
-int xfs_attr_rmtval_find_space(struct xfs_attr_item *attr);
+int xfs_attr_rmtval_set_blk(struct xfs_attr_intent *attr);
+int xfs_attr_rmtval_find_space(struct xfs_attr_intent *attr);
 #endif /* __XFS_ATTR_REMOTE_H__ */
index 2aa300f..2eecc49 100644 (file)
@@ -51,16 +51,31 @@ xfs_btree_magic(
        return magic;
 }
 
-static xfs_failaddr_t
+/*
+ * These sibling pointer checks are optimised for null sibling pointers. This
+ * happens a lot, and we don't need to byte swap at runtime if the sibling
+ * pointer is NULL.
+ *
+ * These are explicitly marked at inline because the cost of calling them as
+ * functions instead of inlining them is about 36 bytes extra code per call site
+ * on x86-64. Yes, gcc-11 fails to inline them, and explicit inlining of these
+ * two sibling check functions reduces the compiled code size by over 300
+ * bytes.
+ */
+static inline xfs_failaddr_t
 xfs_btree_check_lblock_siblings(
        struct xfs_mount        *mp,
        struct xfs_btree_cur    *cur,
        int                     level,
        xfs_fsblock_t           fsb,
-       xfs_fsblock_t           sibling)
+       __be64                  dsibling)
 {
-       if (sibling == NULLFSBLOCK)
+       xfs_fsblock_t           sibling;
+
+       if (dsibling == cpu_to_be64(NULLFSBLOCK))
                return NULL;
+
+       sibling = be64_to_cpu(dsibling);
        if (sibling == fsb)
                return __this_address;
        if (level >= 0) {
@@ -74,17 +89,21 @@ xfs_btree_check_lblock_siblings(
        return NULL;
 }
 
-static xfs_failaddr_t
+static inline xfs_failaddr_t
 xfs_btree_check_sblock_siblings(
        struct xfs_mount        *mp,
        struct xfs_btree_cur    *cur,
        int                     level,
        xfs_agnumber_t          agno,
        xfs_agblock_t           agbno,
-       xfs_agblock_t           sibling)
+       __be32                  dsibling)
 {
-       if (sibling == NULLAGBLOCK)
+       xfs_agblock_t           sibling;
+
+       if (dsibling == cpu_to_be32(NULLAGBLOCK))
                return NULL;
+
+       sibling = be32_to_cpu(dsibling);
        if (sibling == agbno)
                return __this_address;
        if (level >= 0) {
@@ -136,10 +155,10 @@ __xfs_btree_check_lblock(
                fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
 
        fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb,
-                       be64_to_cpu(block->bb_u.l.bb_leftsib));
+                       block->bb_u.l.bb_leftsib);
        if (!fa)
                fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb,
-                               be64_to_cpu(block->bb_u.l.bb_rightsib));
+                               block->bb_u.l.bb_rightsib);
        return fa;
 }
 
@@ -204,10 +223,10 @@ __xfs_btree_check_sblock(
        }
 
        fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno, agbno,
-                       be32_to_cpu(block->bb_u.s.bb_leftsib));
+                       block->bb_u.s.bb_leftsib);
        if (!fa)
                fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno,
-                                agbno, be32_to_cpu(block->bb_u.s.bb_rightsib));
+                                agbno, block->bb_u.s.bb_rightsib);
        return fa;
 }
 
@@ -426,8 +445,14 @@ xfs_btree_del_cursor(
                        break;
        }
 
+       /*
+        * If we are doing a BMBT update, the number of unaccounted blocks
+        * allocated during this cursor life time should be zero. If it's not
+        * zero, then we should be shut down or on our way to shutdown due to
+        * cancelling a dirty transaction on error.
+        */
        ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_ino.allocated == 0 ||
-              xfs_is_shutdown(cur->bc_mp));
+              xfs_is_shutdown(cur->bc_mp) || error != 0);
        if (unlikely(cur->bc_flags & XFS_BTREE_STAGING))
                kmem_free(cur->bc_ops);
        if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS) && cur->bc_ag.pag)
@@ -3247,7 +3272,7 @@ xfs_btree_insrec(
        struct xfs_btree_block  *block; /* btree block */
        struct xfs_buf          *bp;    /* buffer for block */
        union xfs_btree_ptr     nptr;   /* new block ptr */
-       struct xfs_btree_cur    *ncur;  /* new btree cursor */
+       struct xfs_btree_cur    *ncur = NULL;   /* new btree cursor */
        union xfs_btree_key     nkey;   /* new block key */
        union xfs_btree_key     *lkey;
        int                     optr;   /* old key/record index */
@@ -3327,7 +3352,7 @@ xfs_btree_insrec(
 #ifdef DEBUG
        error = xfs_btree_check_block(cur, block, level, bp);
        if (error)
-               return error;
+               goto error0;
 #endif
 
        /*
@@ -3347,7 +3372,7 @@ xfs_btree_insrec(
                for (i = numrecs - ptr; i >= 0; i--) {
                        error = xfs_btree_debug_check_ptr(cur, pp, i, level);
                        if (error)
-                               return error;
+                               goto error0;
                }
 
                xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
@@ -3432,6 +3457,8 @@ xfs_btree_insrec(
        return 0;
 
 error0:
+       if (ncur)
+               xfs_btree_del_cursor(ncur, error);
        return error;
 }
 
@@ -4523,10 +4550,10 @@ xfs_btree_lblock_verify(
        /* sibling pointer verification */
        fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
        fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb,
-                       be64_to_cpu(block->bb_u.l.bb_leftsib));
+                       block->bb_u.l.bb_leftsib);
        if (!fa)
                fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb,
-                               be64_to_cpu(block->bb_u.l.bb_rightsib));
+                               block->bb_u.l.bb_rightsib);
        return fa;
 }
 
@@ -4580,10 +4607,10 @@ xfs_btree_sblock_verify(
        agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp));
        agbno = xfs_daddr_to_agbno(mp, xfs_buf_daddr(bp));
        fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno,
-                       be32_to_cpu(block->bb_u.s.bb_leftsib));
+                       block->bb_u.s.bb_leftsib);
        if (!fa)
                fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno,
-                               be32_to_cpu(block->bb_u.s.bb_rightsib));
+                               block->bb_u.s.bb_rightsib);
        return fa;
 }
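The sibling checks above now take the raw on-disk big-endian value and compare it against a pre-swapped NULL sentinel, so the common "no sibling" case costs no byte swap at all. A standalone sketch of that trick; NULL_BLOCK and the single self-reference test are simplified stand-ins for NULLFSBLOCK/NULLAGBLOCK and the full verifier.

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define NULL_BLOCK UINT64_MAX   /* stand-in "no sibling" sentinel */

/* dsibling is the raw on-disk (big-endian) value; self is a CPU-endian
 * block number.  Returns 1 only for the self-referencing (corrupt) case. */
static int sibling_is_corrupt(uint64_t dsibling, uint64_t self)
{
        if (dsibling == htobe64(NULL_BLOCK))
                return 0;                       /* common case: no byte swap */
        return be64toh(dsibling) == self;       /* must not point at itself */
}

int main(void)
{
        printf("%d %d\n", sibling_is_corrupt(htobe64(NULL_BLOCK), 7),
                          sibling_is_corrupt(htobe64(7), 7));
        return 0;
}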
 
index aa74f3f..e7201dc 100644 (file)
@@ -117,6 +117,17 @@ xfs_da_state_free(xfs_da_state_t *state)
        kmem_cache_free(xfs_da_state_cache, state);
 }
 
+void
+xfs_da_state_reset(
+       struct xfs_da_state     *state,
+       struct xfs_da_args      *args)
+{
+       xfs_da_state_kill_altpath(state);
+       memset(state, 0, sizeof(struct xfs_da_state));
+       state->args = args;
+       state->mp = state->args->dp->i_mount;
+}
+
 static inline int xfs_dabuf_nfsb(struct xfs_mount *mp, int whichfork)
 {
        if (whichfork == XFS_DATA_FORK)
index ed2303e..d33b768 100644 (file)
@@ -225,6 +225,7 @@ enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
 
 struct xfs_da_state *xfs_da_state_alloc(struct xfs_da_args *args);
 void xfs_da_state_free(xfs_da_state_t *state);
+void xfs_da_state_reset(struct xfs_da_state *state, struct xfs_da_args *args);
 
 void   xfs_da3_node_hdr_from_disk(struct xfs_mount *mp,
                struct xfs_da3_icnode_hdr *to, struct xfs_da_intnode *from);
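xfs_da_state_reset(), declared above, lets a deferred attr operation reuse one state object across -EAGAIN retries instead of freeing and reallocating it each time (xfs_attr_item_init_da_state() in the xfs_attr.c hunks earlier makes exactly that choice). A generic alloc-or-reset sketch with placeholder types and names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct work_state {             /* placeholder for the real da state */
        int progress;
        const char *args;
};

/* Allocate on first use, otherwise wipe stale progress and rebind. */
static struct work_state *state_prepare(struct work_state *s, const char *args)
{
        if (!s) {
                s = calloc(1, sizeof(*s));
                if (!s)
                        return NULL;
        } else {
                memset(s, 0, sizeof(*s));
        }
        s->args = args;
        return s;
}

int main(void)
{
        struct work_state *s = state_prepare(NULL, "first attempt");

        s = state_prepare(s, "retry after -EAGAIN");
        printf("%s\n", s ? s->args : "allocation failed");
        free(s);
        return 0;
}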
index ceb222b..5a321b7 100644 (file)
@@ -191,35 +191,56 @@ static const struct xfs_defer_op_type *defer_op_types[] = {
        [XFS_DEFER_OPS_TYPE_ATTR]       = &xfs_attr_defer_type,
 };
 
-static bool
+/*
+ * Ensure there's a log intent item associated with this deferred work item if
+ * the operation must be restarted on crash.  Returns 1 if there's a log item;
+ * 0 if there isn't; or a negative errno.
+ */
+static int
 xfs_defer_create_intent(
        struct xfs_trans                *tp,
        struct xfs_defer_pending        *dfp,
        bool                            sort)
 {
        const struct xfs_defer_op_type  *ops = defer_op_types[dfp->dfp_type];
+       struct xfs_log_item             *lip;
+
+       if (dfp->dfp_intent)
+               return 1;
 
-       if (!dfp->dfp_intent)
-               dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work,
-                                                    dfp->dfp_count, sort);
-       return dfp->dfp_intent != NULL;
+       lip = ops->create_intent(tp, &dfp->dfp_work, dfp->dfp_count, sort);
+       if (!lip)
+               return 0;
+       if (IS_ERR(lip))
+               return PTR_ERR(lip);
+
+       dfp->dfp_intent = lip;
+       return 1;
 }
 
 /*
  * For each pending item in the intake list, log its intent item and the
  * associated extents, then add the entire intake list to the end of
  * the pending list.
+ *
+ * Returns 1 if at least one log item was associated with the deferred work;
+ * 0 if there are no log items; or a negative errno.
  */
-static bool
+static int
 xfs_defer_create_intents(
        struct xfs_trans                *tp)
 {
        struct xfs_defer_pending        *dfp;
-       bool                            ret = false;
+       int                             ret = 0;
 
        list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
+               int                     ret2;
+
                trace_xfs_defer_create_intent(tp->t_mountp, dfp);
-               ret |= xfs_defer_create_intent(tp, dfp, true);
+               ret2 = xfs_defer_create_intent(tp, dfp, true);
+               if (ret2 < 0)
+                       return ret2;
+               ret |= ret2;
        }
        return ret;
 }
@@ -457,6 +478,8 @@ xfs_defer_finish_one(
                dfp->dfp_count--;
                error = ops->finish_item(tp, dfp->dfp_done, li, &state);
                if (error == -EAGAIN) {
+                       int             ret;
+
                        /*
                         * Caller wants a fresh transaction; put the work item
                         * back on the list and log a new log intent item to
@@ -467,7 +490,9 @@ xfs_defer_finish_one(
                        dfp->dfp_count++;
                        dfp->dfp_done = NULL;
                        dfp->dfp_intent = NULL;
-                       xfs_defer_create_intent(tp, dfp, false);
+                       ret = xfs_defer_create_intent(tp, dfp, false);
+                       if (ret < 0)
+                               error = ret;
                }
 
                if (error)
@@ -514,10 +539,14 @@ xfs_defer_finish_noroll(
                 * of time that any one intent item can stick around in memory,
                 * pinning the log tail.
                 */
-               bool has_intents = xfs_defer_create_intents(*tp);
+               int has_intents = xfs_defer_create_intents(*tp);
 
                list_splice_init(&(*tp)->t_dfops, &dop_pending);
 
+               if (has_intents < 0) {
+                       error = has_intents;
+                       goto out_shutdown;
+               }
                if (has_intents || dfp) {
                        error = xfs_defer_trans_roll(tp);
                        if (error)
@@ -676,13 +705,15 @@ xfs_defer_ops_capture(
        if (list_empty(&tp->t_dfops))
                return NULL;
 
+       error = xfs_defer_create_intents(tp);
+       if (error < 0)
+               return ERR_PTR(error);
+
        /* Create an object to capture the defer ops. */
        dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS);
        INIT_LIST_HEAD(&dfc->dfc_list);
        INIT_LIST_HEAD(&dfc->dfc_dfops);
 
-       xfs_defer_create_intents(tp);
-
        /* Move the dfops chain and transaction state to the capture struct. */
        list_splice_init(&tp->t_dfops, &dfc->dfc_dfops);
        dfc->dfc_tpflags = tp->t_flags & XFS_TRANS_LOWMODE;
@@ -759,6 +790,10 @@ xfs_defer_ops_capture_and_commit(
 
        /* If we don't capture anything, commit transaction and exit. */
        dfc = xfs_defer_ops_capture(tp);
+       if (IS_ERR(dfc)) {
+               xfs_trans_cancel(tp);
+               return PTR_ERR(dfc);
+       }
        if (!dfc)
                return xfs_trans_commit(tp);
 
@@ -873,10 +908,7 @@ xfs_defer_init_item_caches(void)
        error = xfs_extfree_intent_init_cache();
        if (error)
                goto err;
-       error = xfs_attri_init_cache();
-       if (error)
-               goto err;
-       error = xfs_attrd_init_cache();
+       error = xfs_attr_intent_init_cache();
        if (error)
                goto err;
        return 0;
@@ -889,8 +921,7 @@ err:
 void
 xfs_defer_destroy_item_caches(void)
 {
-       xfs_attri_destroy_cache();
-       xfs_attrd_destroy_cache();
+       xfs_attr_intent_destroy_cache();
        xfs_extfree_intent_destroy_cache();
        xfs_bmap_intent_destroy_cache();
        xfs_refcount_intent_destroy_cache();
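xfs_defer_create_intent() and xfs_defer_create_intents() now return a tri-state rather than a bool: a negative errno, 0 when nothing was logged, or 1 when at least one intent was logged, and the ret |= ret2 accumulation above preserves both pieces of information. A standalone sketch of that caller-side pattern, with placeholder work items:

#include <stdio.h>

/* Placeholder work items: item 0 logs an intent, item 1 needs no intent,
 * anything else fails outright. */
static int create_intent(int item)
{
        switch (item) {
        case 0:
                return 1;       /* intent created and logged */
        case 1:
                return 0;       /* nothing needed restart protection */
        default:
                return -5;      /* hard error, e.g. -EIO */
        }
}

static int create_intents(int nr_items)
{
        int ret = 0;

        for (int i = 0; i < nr_items; i++) {
                int ret2 = create_intent(i);

                if (ret2 < 0)
                        return ret2;    /* propagate the error immediately */
                ret |= ret2;            /* remember that something was logged */
        }
        return ret;
}

int main(void)
{
        printf("%d\n", create_intents(2));      /* 1  -> roll the transaction */
        printf("%d\n", create_intents(3));      /* -5 -> cancel and shut down */
        return 0;
}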
index f7edd1e..b351b9d 100644 (file)
@@ -906,10 +906,18 @@ struct xfs_icreate_log {
  * Flags for deferred attribute operations.
  * Upper bits are flags, lower byte is type code
  */
-#define XFS_ATTR_OP_FLAGS_SET          1       /* Set the attribute */
-#define XFS_ATTR_OP_FLAGS_REMOVE       2       /* Remove the attribute */
-#define XFS_ATTR_OP_FLAGS_REPLACE      3       /* Replace the attribute */
-#define XFS_ATTR_OP_FLAGS_TYPE_MASK    0xFF    /* Flags type mask */
+#define XFS_ATTRI_OP_FLAGS_SET         1       /* Set the attribute */
+#define XFS_ATTRI_OP_FLAGS_REMOVE      2       /* Remove the attribute */
+#define XFS_ATTRI_OP_FLAGS_REPLACE     3       /* Replace the attribute */
+#define XFS_ATTRI_OP_FLAGS_TYPE_MASK   0xFF    /* Flags type mask */
+
+/*
+ * alfi_attr_filter captures the state of xfs_da_args.attr_filter, so it should
+ * never have any other bits set.
+ */
+#define XFS_ATTRI_FILTER_MASK          (XFS_ATTR_ROOT | \
+                                        XFS_ATTR_SECURE | \
+                                        XFS_ATTR_INCOMPLETE)
 
 /*
  * This is the structure used to lay out an attr log item in the
@@ -924,7 +932,7 @@ struct xfs_attri_log_format {
        uint32_t        alfi_op_flags;  /* marks the op as a set or remove */
        uint32_t        alfi_name_len;  /* attr name length */
        uint32_t        alfi_value_len; /* attr value length */
-       uint32_t        alfi_attr_flags;/* attr flags */
+       uint32_t        alfi_attr_filter;/* attr filter flags */
 };
 
 struct xfs_attrd_log_format {
index 32e2162..2420865 100644 (file)
@@ -110,12 +110,6 @@ struct xlog_recover {
 
 #define ITEM_TYPE(i)   (*(unsigned short *)(i)->ri_buf[0].i_addr)
 
-/*
- * This is the number of entries in the l_buf_cancel_table used during
- * recovery.
- */
-#define        XLOG_BC_TABLE_SIZE      64
-
 #define        XLOG_RECOVER_CRCPASS    0
 #define        XLOG_RECOVER_PASS1      1
 #define        XLOG_RECOVER_PASS2      2
@@ -128,5 +122,13 @@ int xlog_recover_iget(struct xfs_mount *mp, xfs_ino_t ino,
                struct xfs_inode **ipp);
 void xlog_recover_release_intent(struct xlog *log, unsigned short intent_type,
                uint64_t intent_id);
+int xlog_alloc_buf_cancel_table(struct xlog *log);
+void xlog_free_buf_cancel_table(struct xlog *log);
+
+#ifdef DEBUG
+void xlog_check_buf_cancel_table(struct xlog *log);
+#else
+#define xlog_check_buf_cancel_table(log) do { } while (0)
+#endif
 
 #endif /* __XFS_LOG_RECOVER_H__ */
index f0b38f4..8b9bd17 100644 (file)
@@ -213,7 +213,7 @@ xfs_symlink_shortform_verify(
 
        /*
         * Zero length symlinks should never occur in memory as they are
-        * never alllowed to exist on disk.
+        * never allowed to exist on disk.
         */
        if (!size)
                return __this_address;
index b11870d..2e8e400 100644 (file)
@@ -340,20 +340,6 @@ static const struct xchk_meta_ops meta_scrub_ops[] = {
        },
 };
 
-/* This isn't a stable feature, warn once per day. */
-static inline void
-xchk_experimental_warning(
-       struct xfs_mount        *mp)
-{
-       static struct ratelimit_state scrub_warning = RATELIMIT_STATE_INIT(
-                       "xchk_warning", 86400 * HZ, 1);
-       ratelimit_set_flags(&scrub_warning, RATELIMIT_MSG_ON_RELEASE);
-
-       if (__ratelimit(&scrub_warning))
-               xfs_alert(mp,
-"EXPERIMENTAL online scrub feature in use. Use at your own risk!");
-}
-
 static int
 xchk_validate_inputs(
        struct xfs_mount                *mp,
@@ -478,7 +464,8 @@ xfs_scrub_metadata(
        if (error)
                goto out;
 
-       xchk_experimental_warning(mp);
+       xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SCRUB,
+ "EXPERIMENTAL online scrub feature in use. Use at your own risk!");
 
        sc = kmem_zalloc(sizeof(struct xfs_scrub), KM_NOFS | KM_MAYFAIL);
        if (!sc) {
index 3df9c17..b744c62 100644 (file)
@@ -17,6 +17,7 @@
 #include "xfs_error.h"
 #include "xfs_acl.h"
 #include "xfs_trans.h"
+#include "xfs_xattr.h"
 
 #include <linux/posix_acl_xattr.h>
 
@@ -202,7 +203,7 @@ __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
                xfs_acl_to_disk(args.value, acl);
        }
 
-       error = xfs_attr_set(&args);
+       error = xfs_attr_change(&args);
        kmem_free(args.value);
 
        /*
index e8ac88d..4a28c2d 100644 (file)
 #include "xfs_attr.h"
 #include "xfs_attr_item.h"
 #include "xfs_trace.h"
-#include "xfs_inode.h"
 #include "xfs_trans_space.h"
 #include "xfs_errortag.h"
 #include "xfs_error.h"
 #include "xfs_log_priv.h"
 #include "xfs_log_recover.h"
 
+struct kmem_cache              *xfs_attri_cache;
+struct kmem_cache              *xfs_attrd_cache;
+
 static const struct xfs_item_ops xfs_attri_item_ops;
 static const struct xfs_item_ops xfs_attrd_item_ops;
 static struct xfs_attrd_log_item *xfs_trans_get_attrd(struct xfs_trans *tp,
@@ -39,12 +41,80 @@ static inline struct xfs_attri_log_item *ATTRI_ITEM(struct xfs_log_item *lip)
        return container_of(lip, struct xfs_attri_log_item, attri_item);
 }
 
+/*
+ * Shared xattr name/value buffers for logged extended attribute operations
+ *
+ * When logging updates to extended attributes, we can create quite a few
+ * attribute log intent items for a single xattr update.  To avoid cycling the
+ * memory allocator and memcpy overhead, the name (and value, for setxattr)
+ * are kept in a refcounted object that is shared across all related log items
+ * and the upper-level deferred work state structure.  The shared buffer has
+ * a control structure, followed by the name, and then the value.
+ */
+
+static inline struct xfs_attri_log_nameval *
+xfs_attri_log_nameval_get(
+       struct xfs_attri_log_nameval    *nv)
+{
+       if (!refcount_inc_not_zero(&nv->refcount))
+               return NULL;
+       return nv;
+}
+
+static inline void
+xfs_attri_log_nameval_put(
+       struct xfs_attri_log_nameval    *nv)
+{
+       if (!nv)
+               return;
+       if (refcount_dec_and_test(&nv->refcount))
+               kvfree(nv);
+}
+
+static inline struct xfs_attri_log_nameval *
+xfs_attri_log_nameval_alloc(
+       const void                      *name,
+       unsigned int                    name_len,
+       const void                      *value,
+       unsigned int                    value_len)
+{
+       struct xfs_attri_log_nameval    *nv;
+
+       /*
+        * This could be over 64kB in length, so we have to use kvmalloc() for
+        * this. But kvmalloc() utterly sucks, so we use our own version.
+        */
+       nv = xlog_kvmalloc(sizeof(struct xfs_attri_log_nameval) +
+                                       name_len + value_len);
+       if (!nv)
+               return nv;
+
+       nv->name.i_addr = nv + 1;
+       nv->name.i_len = name_len;
+       nv->name.i_type = XLOG_REG_TYPE_ATTR_NAME;
+       memcpy(nv->name.i_addr, name, name_len);
+
+       if (value_len) {
+               nv->value.i_addr = nv->name.i_addr + name_len;
+               nv->value.i_len = value_len;
+               memcpy(nv->value.i_addr, value, value_len);
+       } else {
+               nv->value.i_addr = NULL;
+               nv->value.i_len = 0;
+       }
+       nv->value.i_type = XLOG_REG_TYPE_ATTR_VALUE;
+
+       refcount_set(&nv->refcount, 1);
+       return nv;
+}
+
 STATIC void
 xfs_attri_item_free(
        struct xfs_attri_log_item       *attrip)
 {
        kmem_free(attrip->attri_item.li_lv_shadow);
-       kvfree(attrip);
+       xfs_attri_log_nameval_put(attrip->attri_nameval);
+       kmem_cache_free(xfs_attri_cache, attrip);
 }
 
 /*
@@ -73,16 +143,17 @@ xfs_attri_item_size(
        int                             *nbytes)
 {
        struct xfs_attri_log_item       *attrip = ATTRI_ITEM(lip);
+       struct xfs_attri_log_nameval    *nv = attrip->attri_nameval;
 
        *nvecs += 2;
        *nbytes += sizeof(struct xfs_attri_log_format) +
-                       xlog_calc_iovec_len(attrip->attri_name_len);
+                       xlog_calc_iovec_len(nv->name.i_len);
 
-       if (!attrip->attri_value_len)
+       if (!nv->value.i_len)
                return;
 
        *nvecs += 1;
-       *nbytes += xlog_calc_iovec_len(attrip->attri_value_len);
+       *nbytes += xlog_calc_iovec_len(nv->value.i_len);
 }
 
 /*
@@ -97,6 +168,7 @@ xfs_attri_item_format(
 {
        struct xfs_attri_log_item       *attrip = ATTRI_ITEM(lip);
        struct xfs_log_iovec            *vecp = NULL;
+       struct xfs_attri_log_nameval    *nv = attrip->attri_nameval;
 
        attrip->attri_format.alfi_type = XFS_LI_ATTRI;
        attrip->attri_format.alfi_size = 1;
@@ -108,22 +180,18 @@ xfs_attri_item_format(
         * the log recovery.
         */
 
-       ASSERT(attrip->attri_name_len > 0);
+       ASSERT(nv->name.i_len > 0);
        attrip->attri_format.alfi_size++;
 
-       if (attrip->attri_value_len > 0)
+       if (nv->value.i_len > 0)
                attrip->attri_format.alfi_size++;
 
        xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ATTRI_FORMAT,
                        &attrip->attri_format,
                        sizeof(struct xfs_attri_log_format));
-       xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ATTR_NAME,
-                       attrip->attri_name,
-                       attrip->attri_name_len);
-       if (attrip->attri_value_len > 0)
-               xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ATTR_VALUE,
-                               attrip->attri_value,
-                               attrip->attri_value_len);
+       xlog_copy_from_iovec(lv, &vecp, &nv->name);
+       if (nv->value.i_len > 0)
+               xlog_copy_from_iovec(lv, &vecp, &nv->value);
 }
 
 /*
@@ -158,41 +226,18 @@ xfs_attri_item_release(
 STATIC struct xfs_attri_log_item *
 xfs_attri_init(
        struct xfs_mount                *mp,
-       uint32_t                        name_len,
-       uint32_t                        value_len)
-
+       struct xfs_attri_log_nameval    *nv)
 {
        struct xfs_attri_log_item       *attrip;
-       uint32_t                        buffer_size = name_len + value_len;
 
-       if (buffer_size) {
-               /*
-                * This could be over 64kB in length, so we have to use
-                * kvmalloc() for this. But kvmalloc() utterly sucks, so we
-                * use own version.
-                */
-               attrip = xlog_kvmalloc(sizeof(struct xfs_attri_log_item) +
-                                       buffer_size);
-       } else {
-               attrip = kmem_cache_alloc(xfs_attri_cache,
-                                       GFP_NOFS | __GFP_NOFAIL);
-       }
-       memset(attrip, 0, sizeof(struct xfs_attri_log_item));
+       attrip = kmem_cache_zalloc(xfs_attri_cache, GFP_NOFS | __GFP_NOFAIL);
 
-       attrip->attri_name_len = name_len;
-       if (name_len)
-               attrip->attri_name = ((char *)attrip) +
-                               sizeof(struct xfs_attri_log_item);
-       else
-               attrip->attri_name = NULL;
-
-       attrip->attri_value_len = value_len;
-       if (value_len)
-               attrip->attri_value = ((char *)attrip) +
-                               sizeof(struct xfs_attri_log_item) +
-                               name_len;
-       else
-               attrip->attri_value = NULL;
+       /*
+        * Grab an extra reference to the name/value buffer for this log item.
+        * The caller retains its own reference!
+        */
+       attrip->attri_nameval = xfs_attri_log_nameval_get(nv);
+       ASSERT(attrip->attri_nameval);
 
        xfs_log_item_init(mp, &attrip->attri_item, XFS_LI_ATTRI,
                          &xfs_attri_item_ops);
@@ -233,7 +278,7 @@ STATIC void
 xfs_attrd_item_free(struct xfs_attrd_log_item *attrdp)
 {
        kmem_free(attrdp->attrd_item.li_lv_shadow);
-       kmem_free(attrdp);
+       kmem_cache_free(xfs_attrd_cache, attrdp);
 }
 
 STATIC void
@@ -297,7 +342,7 @@ xfs_attrd_item_intent(
  */
 STATIC int
 xfs_xattri_finish_update(
-       struct xfs_attr_item            *attr,
+       struct xfs_attr_intent          *attr,
        struct xfs_attrd_log_item       *attrdp)
 {
        struct xfs_da_args              *args = attr->xattri_da_args;
@@ -335,7 +380,7 @@ STATIC void
 xfs_attr_log_item(
        struct xfs_trans                *tp,
        struct xfs_attri_log_item       *attrip,
-       struct xfs_attr_item            *attr)
+       const struct xfs_attr_intent    *attr)
 {
        struct xfs_attri_log_format     *attrp;
 
@@ -343,23 +388,18 @@ xfs_attr_log_item(
        set_bit(XFS_LI_DIRTY, &attrip->attri_item.li_flags);
 
        /*
-        * At this point the xfs_attr_item has been constructed, and we've
+        * At this point the xfs_attr_intent has been constructed, and we've
         * created the log intent. Fill in the attri log item and log format
-        * structure with fields from this xfs_attr_item
+        * structure with fields from this xfs_attr_intent
         */
        attrp = &attrip->attri_format;
        attrp->alfi_ino = attr->xattri_da_args->dp->i_ino;
+       ASSERT(!(attr->xattri_op_flags & ~XFS_ATTRI_OP_FLAGS_TYPE_MASK));
        attrp->alfi_op_flags = attr->xattri_op_flags;
-       attrp->alfi_value_len = attr->xattri_da_args->valuelen;
-       attrp->alfi_name_len = attr->xattri_da_args->namelen;
-       attrp->alfi_attr_flags = attr->xattri_da_args->attr_filter;
-
-       memcpy(attrip->attri_name, attr->xattri_da_args->name,
-              attr->xattri_da_args->namelen);
-       memcpy(attrip->attri_value, attr->xattri_da_args->value,
-              attr->xattri_da_args->valuelen);
-       attrip->attri_name_len = attr->xattri_da_args->namelen;
-       attrip->attri_value_len = attr->xattri_da_args->valuelen;
+       attrp->alfi_value_len = attr->xattri_nameval->value.i_len;
+       attrp->alfi_name_len = attr->xattri_nameval->name.i_len;
+       ASSERT(!(attr->xattri_da_args->attr_filter & ~XFS_ATTRI_FILTER_MASK));
+       attrp->alfi_attr_filter = attr->xattri_da_args->attr_filter;
 }
 
 /* Get an ATTRI. */
@@ -372,7 +412,7 @@ xfs_attr_create_intent(
 {
        struct xfs_mount                *mp = tp->t_mountp;
        struct xfs_attri_log_item       *attrip;
-       struct xfs_attr_item            *attr;
+       struct xfs_attr_intent          *attr;
 
        ASSERT(count == 1);
 
@@ -383,19 +423,47 @@ xfs_attr_create_intent(
         * Each attr item only performs one attribute operation at a time, so
         * this is a list of one
         */
-       list_for_each_entry(attr, items, xattri_list) {
-               attrip = xfs_attri_init(mp, attr->xattri_da_args->namelen,
-                                       attr->xattri_da_args->valuelen);
-               if (attrip == NULL)
-                       return NULL;
-
-               xfs_trans_add_item(tp, &attrip->attri_item);
-               xfs_attr_log_item(tp, attrip, attr);
+       attr = list_first_entry_or_null(items, struct xfs_attr_intent,
+                       xattri_list);
+
+       /*
+        * Create a buffer to store the attribute name and value.  This buffer
+        * will be shared between the higher level deferred xattr work state
+        * and the lower level xattr log items.
+        */
+       if (!attr->xattri_nameval) {
+               struct xfs_da_args      *args = attr->xattri_da_args;
+
+               /*
+                * Transfer our reference to the name/value buffer to the
+                * deferred work state structure.
+                */
+               attr->xattri_nameval = xfs_attri_log_nameval_alloc(args->name,
+                               args->namelen, args->value, args->valuelen);
        }
+       if (!attr->xattri_nameval)
+               return ERR_PTR(-ENOMEM);
+
+       attrip = xfs_attri_init(mp, attr->xattri_nameval);
+       xfs_trans_add_item(tp, &attrip->attri_item);
+       xfs_attr_log_item(tp, attrip, attr);
 
        return &attrip->attri_item;
 }
 
+static inline void
+xfs_attr_free_item(
+       struct xfs_attr_intent          *attr)
+{
+       if (attr->xattri_da_state)
+               xfs_da_state_free(attr->xattri_da_state);
+       xfs_attri_log_nameval_put(attr->xattri_nameval);
+       if (attr->xattri_da_args->op_flags & XFS_DA_OP_RECOVERY)
+               kmem_free(attr);
+       else
+               kmem_cache_free(xfs_attr_intent_cache, attr);
+}
+
 /* Process an attr. */
 STATIC int
 xfs_attr_finish_item(
@@ -404,11 +472,11 @@ xfs_attr_finish_item(
        struct list_head                *item,
        struct xfs_btree_cur            **state)
 {
-       struct xfs_attr_item            *attr;
+       struct xfs_attr_intent          *attr;
        struct xfs_attrd_log_item       *done_item = NULL;
        int                             error;
 
-       attr = container_of(item, struct xfs_attr_item, xattri_list);
+       attr = container_of(item, struct xfs_attr_intent, xattri_list);
        if (done)
                done_item = ATTRD_ITEM(done);
 
@@ -420,7 +488,7 @@ xfs_attr_finish_item(
 
        error = xfs_xattri_finish_update(attr, done_item);
        if (error != -EAGAIN)
-               kmem_free(attr);
+               xfs_attr_free_item(attr);
 
        return error;
 }
@@ -438,33 +506,10 @@ STATIC void
 xfs_attr_cancel_item(
        struct list_head                *item)
 {
-       struct xfs_attr_item            *attr;
-
-       attr = container_of(item, struct xfs_attr_item, xattri_list);
-       kmem_free(attr);
-}
-
-STATIC xfs_lsn_t
-xfs_attri_item_committed(
-       struct xfs_log_item             *lip,
-       xfs_lsn_t                       lsn)
-{
-       struct xfs_attri_log_item       *attrip = ATTRI_ITEM(lip);
-
-       /*
-        * The attrip refers to xfs_attr_item memory to log the name and value
-        * with the intent item. This already occurred when the intent was
-        * committed so these fields are no longer accessed. Clear them out of
-        * caution since we're about to free the xfs_attr_item.
-        */
-       attrip->attri_name = NULL;
-       attrip->attri_value = NULL;
+       struct xfs_attr_intent          *attr;
 
-       /*
-        * The ATTRI is logged only once and cannot be moved in the log, so
-        * simply return the lsn at which it's been logged.
-        */
-       return lsn;
+       attr = container_of(item, struct xfs_attr_intent, xattri_list);
+       xfs_attr_free_item(attr);
 }
 
 STATIC bool
@@ -482,16 +527,22 @@ xfs_attri_validate(
        struct xfs_attri_log_format     *attrp)
 {
        unsigned int                    op = attrp->alfi_op_flags &
-                                            XFS_ATTR_OP_FLAGS_TYPE_MASK;
+                                            XFS_ATTRI_OP_FLAGS_TYPE_MASK;
 
        if (attrp->__pad != 0)
                return false;
 
+       if (attrp->alfi_op_flags & ~XFS_ATTRI_OP_FLAGS_TYPE_MASK)
+               return false;
+
+       if (attrp->alfi_attr_filter & ~XFS_ATTRI_FILTER_MASK)
+               return false;
+
        /* alfi_op_flags should be either a set or remove */
        switch (op) {
-       case XFS_ATTR_OP_FLAGS_SET:
-       case XFS_ATTR_OP_FLAGS_REPLACE:
-       case XFS_ATTR_OP_FLAGS_REMOVE:
+       case XFS_ATTRI_OP_FLAGS_SET:
+       case XFS_ATTRI_OP_FLAGS_REPLACE:
+       case XFS_ATTRI_OP_FLAGS_REMOVE:
                break;
        default:
                return false;
@@ -517,13 +568,14 @@ xfs_attri_item_recover(
        struct list_head                *capture_list)
 {
        struct xfs_attri_log_item       *attrip = ATTRI_ITEM(lip);
-       struct xfs_attr_item            *attr;
+       struct xfs_attr_intent          *attr;
        struct xfs_mount                *mp = lip->li_log->l_mp;
        struct xfs_inode                *ip;
        struct xfs_da_args              *args;
        struct xfs_trans                *tp;
        struct xfs_trans_res            tres;
        struct xfs_attri_log_format     *attrp;
+       struct xfs_attri_log_nameval    *nv = attrip->attri_nameval;
        int                             error, ret = 0;
        int                             total;
        int                             local;
@@ -535,41 +587,50 @@ xfs_attri_item_recover(
         */
        attrp = &attrip->attri_format;
        if (!xfs_attri_validate(mp, attrp) ||
-           !xfs_attr_namecheck(attrip->attri_name, attrip->attri_name_len))
+           !xfs_attr_namecheck(nv->name.i_addr, nv->name.i_len))
                return -EFSCORRUPTED;
 
        error = xlog_recover_iget(mp,  attrp->alfi_ino, &ip);
        if (error)
                return error;
 
-       attr = kmem_zalloc(sizeof(struct xfs_attr_item) +
+       attr = kmem_zalloc(sizeof(struct xfs_attr_intent) +
                           sizeof(struct xfs_da_args), KM_NOFS);
        args = (struct xfs_da_args *)(attr + 1);
 
        attr->xattri_da_args = args;
-       attr->xattri_op_flags = attrp->alfi_op_flags;
+       attr->xattri_op_flags = attrp->alfi_op_flags &
+                                               XFS_ATTRI_OP_FLAGS_TYPE_MASK;
+
+       /*
+        * We're reconstructing the deferred work state structure from the
+        * recovered log item.  Grab a reference to the name/value buffer and
+        * attach it to the new work state.
+        */
+       attr->xattri_nameval = xfs_attri_log_nameval_get(nv);
+       ASSERT(attr->xattri_nameval);
 
        args->dp = ip;
        args->geo = mp->m_attr_geo;
        args->whichfork = XFS_ATTR_FORK;
-       args->name = attrip->attri_name;
-       args->namelen = attrp->alfi_name_len;
+       args->name = nv->name.i_addr;
+       args->namelen = nv->name.i_len;
        args->hashval = xfs_da_hashname(args->name, args->namelen);
-       args->attr_filter = attrp->alfi_attr_flags;
+       args->attr_filter = attrp->alfi_attr_filter & XFS_ATTRI_FILTER_MASK;
        args->op_flags = XFS_DA_OP_RECOVERY | XFS_DA_OP_OKNOENT;
 
-       switch (attrp->alfi_op_flags & XFS_ATTR_OP_FLAGS_TYPE_MASK) {
-       case XFS_ATTR_OP_FLAGS_SET:
-       case XFS_ATTR_OP_FLAGS_REPLACE:
-               args->value = attrip->attri_value;
-               args->valuelen = attrp->alfi_value_len;
+       switch (attr->xattri_op_flags) {
+       case XFS_ATTRI_OP_FLAGS_SET:
+       case XFS_ATTRI_OP_FLAGS_REPLACE:
+               args->value = nv->value.i_addr;
+               args->valuelen = nv->value.i_len;
                args->total = xfs_attr_calc_size(args, &local);
                if (xfs_inode_hasattr(args->dp))
                        attr->xattri_dela_state = xfs_attr_init_replace_state(args);
                else
                        attr->xattri_dela_state = xfs_attr_init_add_state(args);
                break;
-       case XFS_ATTR_OP_FLAGS_REMOVE:
+       case XFS_ATTRI_OP_FLAGS_REMOVE:
                if (!xfs_inode_hasattr(args->dp))
                        goto out;
                attr->xattri_dela_state = xfs_attr_init_remove_state(args);
@@ -613,7 +674,7 @@ out_unlock:
        xfs_irele(ip);
 out:
        if (ret != -EAGAIN)
-               kmem_free(attr);
+               xfs_attr_free_item(attr);
        return error;
 }
 
@@ -636,22 +697,18 @@ xfs_attri_item_relog(
        attrdp = xfs_trans_get_attrd(tp, old_attrip);
        set_bit(XFS_LI_DIRTY, &attrdp->attrd_item.li_flags);
 
-       new_attrip = xfs_attri_init(tp->t_mountp, old_attrp->alfi_name_len,
-                                   old_attrp->alfi_value_len);
+       /*
+        * Create a new log item that shares the same name/value buffer as the
+        * old log item.
+        */
+       new_attrip = xfs_attri_init(tp->t_mountp, old_attrip->attri_nameval);
        new_attrp = &new_attrip->attri_format;
 
        new_attrp->alfi_ino = old_attrp->alfi_ino;
        new_attrp->alfi_op_flags = old_attrp->alfi_op_flags;
        new_attrp->alfi_value_len = old_attrp->alfi_value_len;
        new_attrp->alfi_name_len = old_attrp->alfi_name_len;
-       new_attrp->alfi_attr_flags = old_attrp->alfi_attr_flags;
-
-       memcpy(new_attrip->attri_name, old_attrip->attri_name,
-               new_attrip->attri_name_len);
-
-       if (new_attrip->attri_value_len > 0)
-               memcpy(new_attrip->attri_value, old_attrip->attri_value,
-                      new_attrip->attri_value_len);
+       new_attrp->alfi_attr_filter = old_attrp->alfi_attr_filter;
 
        xfs_trans_add_item(tp, &new_attrip->attri_item);
        set_bit(XFS_LI_DIRTY, &new_attrip->attri_item.li_flags);
@@ -666,46 +723,46 @@ xlog_recover_attri_commit_pass2(
        struct xlog_recover_item        *item,
        xfs_lsn_t                       lsn)
 {
-       int                             error;
        struct xfs_mount                *mp = log->l_mp;
        struct xfs_attri_log_item       *attrip;
        struct xfs_attri_log_format     *attri_formatp;
-       int                             region = 0;
+       struct xfs_attri_log_nameval    *nv;
+       const void                      *attr_value = NULL;
+       const void                      *attr_name;
+       int                             error;
 
-       attri_formatp = item->ri_buf[region].i_addr;
+       attri_formatp = item->ri_buf[0].i_addr;
+       attr_name = item->ri_buf[1].i_addr;
 
-       /* Validate xfs_attri_log_format */
+       /* Validate xfs_attri_log_format before the large memory allocation */
        if (!xfs_attri_validate(mp, attri_formatp)) {
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
                return -EFSCORRUPTED;
        }
 
-       /* memory alloc failure will cause replay to abort */
-       attrip = xfs_attri_init(mp, attri_formatp->alfi_name_len,
-                               attri_formatp->alfi_value_len);
-       if (attrip == NULL)
-               return -ENOMEM;
+       if (!xfs_attr_namecheck(attr_name, attri_formatp->alfi_name_len)) {
+               XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+               return -EFSCORRUPTED;
+       }
 
-       error = xfs_attri_copy_format(&item->ri_buf[region],
-                                     &attrip->attri_format);
-       if (error)
-               goto out;
+       if (attri_formatp->alfi_value_len)
+               attr_value = item->ri_buf[2].i_addr;
 
-       region++;
-       memcpy(attrip->attri_name, item->ri_buf[region].i_addr,
-              attrip->attri_name_len);
+       /*
+        * Memory alloc failure will cause replay to abort.  We attach the
+        * name/value buffer to the recovered incore log item and drop our
+        * reference.
+        */
+       nv = xfs_attri_log_nameval_alloc(attr_name,
+                       attri_formatp->alfi_name_len, attr_value,
+                       attri_formatp->alfi_value_len);
+       if (!nv)
+               return -ENOMEM;
 
-       if (!xfs_attr_namecheck(attrip->attri_name, attrip->attri_name_len)) {
-               XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
-               error = -EFSCORRUPTED;
+       attrip = xfs_attri_init(mp, nv);
+       error = xfs_attri_copy_format(&item->ri_buf[0], &attrip->attri_format);
+       if (error)
                goto out;
-       }
-
-       if (attrip->attri_value_len > 0) {
-               region++;
-               memcpy(attrip->attri_value, item->ri_buf[region].i_addr,
-                      attrip->attri_value_len);
-       }
 
        /*
         * The ATTRI has two references. One for the ATTRD and one for ATTRI to
@@ -715,9 +772,11 @@ xlog_recover_attri_commit_pass2(
         */
        xfs_trans_ail_insert(log->l_ailp, &attrip->attri_item, lsn);
        xfs_attri_release(attrip);
+       xfs_attri_log_nameval_put(nv);
        return 0;
 out:
        xfs_attri_item_free(attrip);
+       xfs_attri_log_nameval_put(nv);
        return error;
 }
 
@@ -797,7 +856,6 @@ static const struct xfs_item_ops xfs_attri_item_ops = {
        .iop_size       = xfs_attri_item_size,
        .iop_format     = xfs_attri_item_format,
        .iop_unpin      = xfs_attri_item_unpin,
-       .iop_committed  = xfs_attri_item_committed,
        .iop_release    = xfs_attri_item_release,
        .iop_recover    = xfs_attri_item_recover,
        .iop_match      = xfs_attri_item_match,
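The hunks above convert the ATTRI code from per-item name/value copies to a single refcounted buffer (struct xfs_attri_log_nameval) shared by the deferred work state, the ATTRI log item, any relogged ATTRI, and recovery. As a rough illustration of those ownership rules only, not the kernel API, here is a minimal userspace sketch of the same alloc/get/put pattern, with C11 atomics standing in for refcount_t and malloc() standing in for xlog_kvmalloc():

/* Minimal sketch of a shared, refcounted name/value buffer. */
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct nameval {
        atomic_int      refcount;
        size_t          name_len;
        size_t          value_len;
        /* name bytes, then value bytes, follow the end of this struct */
};

static struct nameval *nameval_alloc(const void *name, size_t name_len,
                                     const void *value, size_t value_len)
{
        struct nameval  *nv = malloc(sizeof(*nv) + name_len + value_len);

        if (!nv)
                return NULL;
        atomic_init(&nv->refcount, 1);  /* the caller owns the first reference */
        nv->name_len = name_len;
        nv->value_len = value_len;
        memcpy(nv + 1, name, name_len);
        if (value_len)
                memcpy((char *)(nv + 1) + name_len, value, value_len);
        return nv;
}

static struct nameval *nameval_get(struct nameval *nv)
{
        atomic_fetch_add(&nv->refcount, 1);     /* one more holder */
        return nv;
}

static void nameval_put(struct nameval *nv)
{
        if (!nv)
                return;
        if (atomic_fetch_sub(&nv->refcount, 1) == 1)
                free(nv);       /* last reference dropped; name and value go with it */
}

Each holder takes a reference when it stores the pointer and drops it when it is done, which is why xfs_attri_item_free(), xfs_attr_free_item() and the recovery paths above all end in xfs_attri_log_nameval_put().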
index c3b779f..3280a79 100644 (file)
 struct xfs_mount;
 struct kmem_zone;
 
+struct xfs_attri_log_nameval {
+       struct xfs_log_iovec    name;
+       struct xfs_log_iovec    value;
+       refcount_t              refcount;
+
+       /* name and value follow the end of this struct */
+};
+
 /*
  * This is the "attr intention" log item.  It is used to log the fact that some
  * extended attribute operations need to be processed.  An operation is
  * currently either a set or remove.  Set or remove operations are described by
- * the xfs_attr_item which may be logged to this intent.
+ * the xfs_attr_intent which may be logged to this intent.
  *
  * During a normal attr operation, name and value point to the name and value
  * fields of the caller's xfs_da_args structure.  During a recovery, the name
  * and value buffers are copied from the log, and stored in a trailing buffer
- * attached to the xfs_attr_item until they are committed.  They are freed when
- * the xfs_attr_item itself is freed when the work is done.
+ * attached to the xfs_attr_intent until they are committed.  They are freed
+ * when the xfs_attr_intent itself is freed when the work is done.
  */
 struct xfs_attri_log_item {
        struct xfs_log_item             attri_item;
        atomic_t                        attri_refcount;
-       int                             attri_name_len;
-       int                             attri_value_len;
-       void                            *attri_name;
-       void                            *attri_value;
+       struct xfs_attri_log_nameval    *attri_nameval;
        struct xfs_attri_log_format     attri_format;
 };
 
@@ -43,4 +48,7 @@ struct xfs_attrd_log_item {
        struct xfs_attrd_log_format     attrd_format;
 };
 
+extern struct kmem_cache       *xfs_attri_cache;
+extern struct kmem_cache       *xfs_attrd_cache;
+
 #endif /* __XFS_ATTR_ITEM_H__ */
index e484251..ffa9410 100644 (file)
 #include "xfs_quota.h"
 
 /*
+ * This is the number of entries in the l_buf_cancel_table used during
+ * recovery.
+ */
+#define        XLOG_BC_TABLE_SIZE      64
+
+#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
+       ((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE))
+
+/*
  * This structure is used during recovery to record the buf log items which
  * have been canceled and should not be replayed.
  */
@@ -993,3 +1002,60 @@ const struct xlog_recover_item_ops xlog_buf_item_ops = {
        .commit_pass1           = xlog_recover_buf_commit_pass1,
        .commit_pass2           = xlog_recover_buf_commit_pass2,
 };
+
+#ifdef DEBUG
+void
+xlog_check_buf_cancel_table(
+       struct xlog     *log)
+{
+       int             i;
+
+       for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
+               ASSERT(list_empty(&log->l_buf_cancel_table[i]));
+}
+#endif
+
+int
+xlog_alloc_buf_cancel_table(
+       struct xlog     *log)
+{
+       void            *p;
+       int             i;
+
+       ASSERT(log->l_buf_cancel_table == NULL);
+
+       p = kmalloc_array(XLOG_BC_TABLE_SIZE, sizeof(struct list_head),
+                         GFP_KERNEL);
+       if (!p)
+               return -ENOMEM;
+
+       log->l_buf_cancel_table = p;
+       for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
+               INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
+
+       return 0;
+}
+
+void
+xlog_free_buf_cancel_table(
+       struct xlog     *log)
+{
+       int             i;
+
+       if (!log->l_buf_cancel_table)
+               return;
+
+       for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) {
+               struct xfs_buf_cancel   *bc;
+
+               while ((bc = list_first_entry_or_null(
+                               &log->l_buf_cancel_table[i],
+                               struct xfs_buf_cancel, bc_list))) {
+                       list_del(&bc->bc_list);
+                       kmem_free(bc);
+               }
+       }
+
+       kmem_free(log->l_buf_cancel_table);
+       log->l_buf_cancel_table = NULL;
+}
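The buf-cancel table handling above is now encapsulated in three helpers: allocate a fixed array of 64 list heads, hash each cancelled buffer by block number modulo the table size (XLOG_BUF_CANCEL_BUCKET), and free any leftover records on teardown. A standalone sketch of that data structure, using plain singly linked buckets instead of the kernel's list_head (illustrative only):

/* Standalone sketch of a fixed-size hash of cancelled-buffer records. */
#include <stdint.h>
#include <stdlib.h>

#define BC_TABLE_SIZE   64

struct buf_cancel {
        uint64_t                blkno;
        struct buf_cancel       *next;
};

/* Same bucket selection as XLOG_BUF_CANCEL_BUCKET: blkno modulo table size. */
static struct buf_cancel **bc_bucket(struct buf_cancel **table, uint64_t blkno)
{
        return &table[blkno % BC_TABLE_SIZE];
}

static struct buf_cancel **bc_alloc_table(void)
{
        /* calloc() leaves every bucket empty (NULL). */
        return calloc(BC_TABLE_SIZE, sizeof(struct buf_cancel *));
}

static void bc_add(struct buf_cancel **table, struct buf_cancel *bc)
{
        struct buf_cancel **bucket = bc_bucket(table, bc->blkno);

        bc->next = *bucket;
        *bucket = bc;
}

static void bc_free_table(struct buf_cancel **table)
{
        for (int i = 0; i < BC_TABLE_SIZE; i++) {
                while (table[i]) {
                        struct buf_cancel *bc = table[i];

                        table[i] = bc->next;
                        free(bc);
                }
        }
        free(table);
}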
index a60632e..5a171c0 100644 (file)
@@ -576,9 +576,9 @@ xfs_file_dio_write_unaligned(
         * don't even bother trying the fast path in this case.
         */
        if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
-retry_exclusive:
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
+retry_exclusive:
                iolock = XFS_IOLOCK_EXCL;
                flags = IOMAP_DIO_FORCE_WAIT;
        }
index 888839e..d4a77c5 100644 (file)
@@ -149,12 +149,7 @@ xfs_growfs_data_private(
                error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
                                                  delta, &lastag_extended);
        } else {
-               static struct ratelimit_state shrink_warning = \
-                       RATELIMIT_STATE_INIT("shrink_warning", 86400 * HZ, 1);
-               ratelimit_set_flags(&shrink_warning, RATELIMIT_MSG_ON_RELEASE);
-
-               if (__ratelimit(&shrink_warning))
-                       xfs_alert(mp,
+               xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SHRINK,
        "EXPERIMENTAL online shrink feature in use. Use at your own risk!");
 
                error = xfs_ag_shrink_space(mp, &tp, nagcount - 1, -delta);
index b287987..52d6f2c 100644 (file)
@@ -2622,7 +2622,7 @@ xfs_ifree(
         */
        error = xfs_difree(tp, pag, ip->i_ino, &xic);
        if (error)
-               return error;
+               goto out;
 
        error = xfs_iunlink_remove(tp, pag, ip);
        if (error)
index 0e5cb79..5a364a7 100644 (file)
@@ -37,6 +37,7 @@
 #include "xfs_health.h"
 #include "xfs_reflink.h"
 #include "xfs_ioctl.h"
+#include "xfs_xattr.h"
 
 #include <linux/mount.h>
 #include <linux/namei.h>
@@ -524,7 +525,7 @@ xfs_attrmulti_attr_set(
                args.valuelen = len;
        }
 
-       error = xfs_attr_set(&args);
+       error = xfs_attr_change(&args);
        if (!error && (flags & XFS_IOC_ATTR_ROOT))
                xfs_forget_acl(inode, name);
        kfree(args.value);
index e912b7f..29f5b8b 100644 (file)
@@ -24,6 +24,7 @@
 #include "xfs_iomap.h"
 #include "xfs_error.h"
 #include "xfs_ioctl.h"
+#include "xfs_xattr.h"
 
 #include <linux/posix_acl.h>
 #include <linux/security.h>
@@ -61,7 +62,7 @@ xfs_initxattrs(
                        .value          = xattr->value,
                        .valuelen       = xattr->value_len,
                };
-               error = xfs_attr_set(&args);
+               error = xfs_attr_change(&args);
                if (error < 0)
                        break;
        }
index 9dc748a..1e972f8 100644 (file)
@@ -3877,44 +3877,3 @@ xlog_drop_incompat_feat(
 {
        up_read(&log->l_incompat_users);
 }
-
-/*
- * Get permission to use log-assisted atomic exchange of file extents.
- *
- * Callers must not be running any transactions or hold any inode locks, and
- * they must release the permission by calling xlog_drop_incompat_feat
- * when they're done.
- */
-int
-xfs_attr_use_log_assist(
-       struct xfs_mount        *mp)
-{
-       int                     error = 0;
-
-       /*
-        * Protect ourselves from an idle log clearing the logged xattrs log
-        * incompat feature bit.
-        */
-       xlog_use_incompat_feat(mp->m_log);
-
-       /*
-        * If log-assisted xattrs are already enabled, the caller can use the
-        * log assisted swap functions with the log-incompat reference we got.
-        */
-       if (xfs_sb_version_haslogxattrs(&mp->m_sb))
-               return 0;
-
-       /* Enable log-assisted xattrs. */
-       error = xfs_add_incompat_log_feature(mp,
-                       XFS_SB_FEAT_INCOMPAT_LOG_XATTRS);
-       if (error)
-               goto drop_incompat;
-
-       xfs_warn_once(mp,
-"EXPERIMENTAL logged extended attributes feature added. Use at your own risk!");
-
-       return 0;
-drop_incompat:
-       xlog_drop_incompat_feat(mp->m_log);
-       return error;
-}
index 252b098..f3ce046 100644 (file)
@@ -86,6 +86,13 @@ xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
        return buf;
 }
 
+static inline void *
+xlog_copy_from_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
+               const struct xfs_log_iovec *src)
+{
+       return xlog_copy_iovec(lv, vecp, src->i_type, src->i_addr, src->i_len);
+}
+
 /*
  * By comparing each component, we don't have to worry about extra
  * endian issues in treating two 32 bit numbers as one 64 bit number
index 67fd978..686c01e 100644 (file)
@@ -428,9 +428,6 @@ struct xlog {
        struct rw_semaphore     l_incompat_users;
 };
 
-#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
-       ((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE))
-
 /*
  * Bits for operational state
  */
index 97b941c..5f7e4e6 100644 (file)
@@ -39,13 +39,6 @@ STATIC int
 xlog_clear_stale_blocks(
        struct xlog     *,
        xfs_lsn_t);
-#if defined(DEBUG)
-STATIC void
-xlog_recover_check_summary(
-       struct xlog *);
-#else
-#define        xlog_recover_check_summary(log)
-#endif
 STATIC int
 xlog_do_recovery_pass(
         struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
@@ -3230,7 +3223,7 @@ xlog_do_log_recovery(
        xfs_daddr_t     head_blk,
        xfs_daddr_t     tail_blk)
 {
-       int             error, i;
+       int             error;
 
        ASSERT(head_blk != tail_blk);
 
@@ -3238,37 +3231,25 @@ xlog_do_log_recovery(
         * First do a pass to find all of the cancelled buf log items.
         * Store them in the buf_cancel_table for use in the second pass.
         */
-       log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
-                                                sizeof(struct list_head),
-                                                0);
-       for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
-               INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
+       error = xlog_alloc_buf_cancel_table(log);
+       if (error)
+               return error;
 
        error = xlog_do_recovery_pass(log, head_blk, tail_blk,
                                      XLOG_RECOVER_PASS1, NULL);
-       if (error != 0) {
-               kmem_free(log->l_buf_cancel_table);
-               log->l_buf_cancel_table = NULL;
-               return error;
-       }
+       if (error != 0)
+               goto out_cancel;
+
        /*
         * Then do a second pass to actually recover the items in the log.
         * When it is complete free the table of buf cancel items.
         */
        error = xlog_do_recovery_pass(log, head_blk, tail_blk,
                                      XLOG_RECOVER_PASS2, NULL);
-#ifdef DEBUG
-       if (!error) {
-               int     i;
-
-               for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
-                       ASSERT(list_empty(&log->l_buf_cancel_table[i]));
-       }
-#endif /* DEBUG */
-
-       kmem_free(log->l_buf_cancel_table);
-       log->l_buf_cancel_table = NULL;
-
+       if (!error)
+               xlog_check_buf_cancel_table(log);
+out_cancel:
+       xlog_free_buf_cancel_table(log);
        return error;
 }
 
@@ -3339,8 +3320,6 @@ xlog_do_recover(
        }
        mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
 
-       xlog_recover_check_summary(log);
-
        /* Normal transactions can now occur */
        clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
        return 0;
@@ -3483,7 +3462,6 @@ xlog_recover_finish(
        }
 
        xlog_recover_process_iunlinks(log);
-       xlog_recover_check_summary(log);
 
        /*
         * Recover any CoW staging blocks that are still referenced by the
@@ -3517,52 +3495,3 @@ xlog_recover_cancel(
                xlog_recover_cancel_intents(log);
 }
 
-#if defined(DEBUG)
-/*
- * Read all of the agf and agi counters and check that they
- * are consistent with the superblock counters.
- */
-STATIC void
-xlog_recover_check_summary(
-       struct xlog             *log)
-{
-       struct xfs_mount        *mp = log->l_mp;
-       struct xfs_perag        *pag;
-       struct xfs_buf          *agfbp;
-       struct xfs_buf          *agibp;
-       xfs_agnumber_t          agno;
-       uint64_t                freeblks;
-       uint64_t                itotal;
-       uint64_t                ifree;
-       int                     error;
-
-       freeblks = 0LL;
-       itotal = 0LL;
-       ifree = 0LL;
-       for_each_perag(mp, agno, pag) {
-               error = xfs_read_agf(mp, NULL, pag->pag_agno, 0, &agfbp);
-               if (error) {
-                       xfs_alert(mp, "%s agf read failed agno %d error %d",
-                                               __func__, pag->pag_agno, error);
-               } else {
-                       struct xfs_agf  *agfp = agfbp->b_addr;
-
-                       freeblks += be32_to_cpu(agfp->agf_freeblks) +
-                                   be32_to_cpu(agfp->agf_flcount);
-                       xfs_buf_relse(agfbp);
-               }
-
-               error = xfs_read_agi(mp, NULL, pag->pag_agno, &agibp);
-               if (error) {
-                       xfs_alert(mp, "%s agi read failed agno %d error %d",
-                                               __func__, pag->pag_agno, error);
-               } else {
-                       struct xfs_agi  *agi = agibp->b_addr;
-
-                       itotal += be32_to_cpu(agi->agi_count);
-                       ifree += be32_to_cpu(agi->agi_freecount);
-                       xfs_buf_relse(agibp);
-               }
-       }
-}
-#endif /* DEBUG */
index 55ee464..cc32377 100644 (file)
@@ -75,6 +75,12 @@ do {                                                                 \
 #define xfs_debug_ratelimited(dev, fmt, ...)                           \
        xfs_printk_ratelimited(xfs_debug, dev, fmt, ##__VA_ARGS__)
 
+#define xfs_warn_mount(mp, warntag, fmt, ...)                          \
+do {                                                                   \
+       if (xfs_should_warn((mp), (warntag)))                           \
+               xfs_warn((mp), (fmt), ##__VA_ARGS__);                   \
+} while (0)
+
 #define xfs_warn_once(dev, fmt, ...)                           \
        xfs_printk_once(xfs_warn, dev, fmt, ##__VA_ARGS__)
 #define xfs_notice_once(dev, fmt, ...)                         \
index 0c0bcbd..daa8d29 100644 (file)
@@ -1356,7 +1356,6 @@ xfs_clear_incompat_log_features(
 
        if (xfs_sb_has_incompat_log_feature(&mp->m_sb,
                                XFS_SB_FEAT_INCOMPAT_LOG_ALL)) {
-               xfs_info(mp, "Clearing log incompat feature flags.");
                xfs_sb_remove_incompat_log_features(&mp->m_sb);
                ret = true;
        }
index 8c42786..ba5d42a 100644 (file)
@@ -391,6 +391,13 @@ __XFS_HAS_FEAT(nouuid, NOUUID)
  */
 #define XFS_OPSTATE_BLOCKGC_ENABLED    6
 
+/* Kernel has logged a warning about online fsck being used on this fs. */
+#define XFS_OPSTATE_WARNED_SCRUB       7
+/* Kernel has logged a warning about shrink being used on this fs. */
+#define XFS_OPSTATE_WARNED_SHRINK      8
+/* Kernel has logged a warning about logged xattr updates being used. */
+#define XFS_OPSTATE_WARNED_LARP                9
+
 #define __XFS_IS_OPSTATE(name, NAME) \
 static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
 { \
@@ -413,6 +420,12 @@ __XFS_IS_OPSTATE(readonly, READONLY)
 __XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)
 __XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
 
+static inline bool
+xfs_should_warn(struct xfs_mount *mp, long nr)
+{
+       return !test_and_set_bit(nr, &mp->m_opstate);
+}
+
 #define XFS_OPSTATE_STRINGS \
        { (1UL << XFS_OPSTATE_UNMOUNTING),              "unmounting" }, \
        { (1UL << XFS_OPSTATE_CLEAN),                   "clean" }, \
@@ -420,7 +433,10 @@ __XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
        { (1UL << XFS_OPSTATE_INODE32),                 "inode32" }, \
        { (1UL << XFS_OPSTATE_READONLY),                "read_only" }, \
        { (1UL << XFS_OPSTATE_INODEGC_ENABLED),         "inodegc" }, \
-       { (1UL << XFS_OPSTATE_BLOCKGC_ENABLED),         "blockgc" }
+       { (1UL << XFS_OPSTATE_BLOCKGC_ENABLED),         "blockgc" }, \
+       { (1UL << XFS_OPSTATE_WARNED_SCRUB),            "wscrub" }, \
+       { (1UL << XFS_OPSTATE_WARNED_SHRINK),           "wshrink" }, \
+       { (1UL << XFS_OPSTATE_WARNED_LARP),             "wlarp" }
 
 /*
  * Max and min values for mount-option defined I/O
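xfs_warn_mount() above pairs with xfs_should_warn(): the message is emitted only by the first caller that manages to set the corresponding XFS_OPSTATE_WARNED_* bit, so each experimental feature warns once per mount rather than once per call. A rough userspace equivalent of that warn-once pattern, with a C11 atomic bitmask in place of test_and_set_bit() (illustrative only):

/* Userspace sketch of a warn-once-per-condition flag word. */
#include <stdatomic.h>
#include <stdio.h>

enum { WARNED_SCRUB, WARNED_SHRINK, WARNED_LARP };

static atomic_ulong warned_flags;

/* True only for the first caller to set this bit, like xfs_should_warn(). */
static int should_warn(int bit)
{
        unsigned long mask = 1UL << bit;

        return !(atomic_fetch_or(&warned_flags, mask) & mask);
}

#define warn_once(bit, fmt, ...)                                \
        do {                                                    \
                if (should_warn(bit))                           \
                        fprintf(stderr, fmt "\n", ##__VA_ARGS__); \
        } while (0)

int main(void)
{
        /* Only the first call prints; later calls are silent. */
        warn_once(WARNED_SHRINK, "EXPERIMENTAL online shrink feature in use.");
        warn_once(WARNED_SHRINK, "EXPERIMENTAL online shrink feature in use.");
        return 0;
}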
index 8fc813c..abf08bb 100644 (file)
@@ -1308,8 +1308,15 @@ xfs_qm_quotacheck(
 
        error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
                        NULL);
-       if (error)
+       if (error) {
+               /*
+                * The inode walk may have partially populated the dquot
+                * caches.  We must purge them before disabling quota and
+                * tearing down the quotainfo, or else the dquots will leak.
+                */
+               xfs_qm_dqpurge_all(mp);
                goto error_return;
+       }
 
        /*
         * We've made all the changes that we need to make incore.  Flush them
index 8495ef0..ed18160 100644 (file)
@@ -38,6 +38,8 @@
 #include "xfs_pwork.h"
 #include "xfs_ag.h"
 #include "xfs_defer.h"
+#include "xfs_attr_item.h"
+#include "xfs_xattr.h"
 
 #include <linux/magic.h>
 #include <linux/fs_context.h>
@@ -2079,8 +2081,24 @@ xfs_init_caches(void)
        if (!xfs_bui_cache)
                goto out_destroy_bud_cache;
 
+       xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
+                                           sizeof(struct xfs_attrd_log_item),
+                                           0, 0, NULL);
+       if (!xfs_attrd_cache)
+               goto out_destroy_bui_cache;
+
+       xfs_attri_cache = kmem_cache_create("xfs_attri_item",
+                                           sizeof(struct xfs_attri_log_item),
+                                           0, 0, NULL);
+       if (!xfs_attri_cache)
+               goto out_destroy_attrd_cache;
+
        return 0;
 
+ out_destroy_attrd_cache:
+       kmem_cache_destroy(xfs_attrd_cache);
+ out_destroy_bui_cache:
+       kmem_cache_destroy(xfs_bui_cache);
  out_destroy_bud_cache:
        kmem_cache_destroy(xfs_bud_cache);
  out_destroy_cui_cache:
@@ -2127,6 +2145,8 @@ xfs_destroy_caches(void)
         * destroy caches.
         */
        rcu_barrier();
+       kmem_cache_destroy(xfs_attri_cache);
+       kmem_cache_destroy(xfs_attrd_cache);
        kmem_cache_destroy(xfs_bui_cache);
        kmem_cache_destroy(xfs_bud_cache);
        kmem_cache_destroy(xfs_cui_cache);
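The xfs_init_caches() hunk above extends the usual unwind-with-goto ladder: each newly created cache gets a matching out_destroy_* label, and a failure at any step tears down everything created before it, in reverse order. A condensed sketch of that error-unwinding pattern (hypothetical names, not the XFS code):

#include <stdlib.h>

struct cache { int dummy; };

static struct cache *cache_create(const char *name)
{
        (void)name;
        return malloc(sizeof(struct cache));    /* may return NULL */
}

static void cache_destroy(struct cache *c)
{
        free(c);
}

static struct cache *attrd_cache, *attri_cache;

static int init_caches(void)
{
        attrd_cache = cache_create("attrd_item");
        if (!attrd_cache)
                return -1;

        attri_cache = cache_create("attri_item");
        if (!attri_cache)
                goto out_destroy_attrd;

        return 0;

 out_destroy_attrd:
        /* Unwind in reverse order of creation, one label per step. */
        cache_destroy(attrd_cache);
        return -1;
}

int main(void)
{
        return init_caches() ? 1 : 0;
}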
index 167d23f..3cd5a51 100644 (file)
@@ -91,7 +91,6 @@ extern xfs_agnumber_t xfs_set_inode_alloc(struct xfs_mount *,
                                           xfs_agnumber_t agcount);
 
 extern const struct export_operations xfs_export_operations;
-extern const struct xattr_handler *xfs_xattr_handlers[];
 extern const struct quotactl_ops xfs_quotactl_operations;
 
 extern void xfs_reinit_percpu_counters(struct xfs_mount *mp);
index 7a044af..35e13e1 100644 (file)
 #include "xfs_da_btree.h"
 #include "xfs_attr.h"
 #include "xfs_acl.h"
+#include "xfs_log.h"
+#include "xfs_xattr.h"
 
 #include <linux/posix_acl_xattr.h>
 
+/*
+ * Get permission to use log-assisted extended attribute updates.
+ *
+ * Callers must not be running any transactions or hold any inode locks, and
+ * they must release the permission by calling xlog_drop_incompat_feat
+ * when they're done.
+ */
+static inline int
+xfs_attr_grab_log_assist(
+       struct xfs_mount        *mp)
+{
+       int                     error = 0;
+
+       /*
+        * Protect ourselves from an idle log clearing the logged xattrs log
+        * incompat feature bit.
+        */
+       xlog_use_incompat_feat(mp->m_log);
+
+       /*
+        * If log-assisted xattrs are already enabled, the caller can use the
+        * log assisted swap functions with the log-incompat reference we got.
+        */
+       if (xfs_sb_version_haslogxattrs(&mp->m_sb))
+               return 0;
+
+       /* Enable log-assisted xattrs. */
+       error = xfs_add_incompat_log_feature(mp,
+                       XFS_SB_FEAT_INCOMPAT_LOG_XATTRS);
+       if (error)
+               goto drop_incompat;
+
+       xfs_warn_mount(mp, XFS_OPSTATE_WARNED_LARP,
+ "EXPERIMENTAL logged extended attributes feature in use. Use at your own risk!");
+
+       return 0;
+drop_incompat:
+       xlog_drop_incompat_feat(mp->m_log);
+       return error;
+}
+
+static inline void
+xfs_attr_rele_log_assist(
+       struct xfs_mount        *mp)
+{
+       xlog_drop_incompat_feat(mp->m_log);
+}
+
+/*
+ * Set or remove an xattr, having grabbed the appropriate logging resources
+ * prior to calling libxfs.
+ */
+int
+xfs_attr_change(
+       struct xfs_da_args      *args)
+{
+       struct xfs_mount        *mp = args->dp->i_mount;
+       bool                    use_logging = false;
+       int                     error;
+
+       if (xfs_has_larp(mp)) {
+               error = xfs_attr_grab_log_assist(mp);
+               if (error)
+                       return error;
+
+               use_logging = true;
+       }
+
+       error = xfs_attr_set(args);
+
+       if (use_logging)
+               xfs_attr_rele_log_assist(mp);
+       return error;
+}
+
 
 static int
 xfs_xattr_get(const struct xattr_handler *handler, struct dentry *unused,
@@ -56,7 +133,7 @@ xfs_xattr_set(const struct xattr_handler *handler,
        };
        int                     error;
 
-       error = xfs_attr_set(&args);
+       error = xfs_attr_change(&args);
        if (!error && (handler->flags & XFS_ATTR_ROOT))
                xfs_forget_acl(inode, name);
        return error;
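xfs_attr_change() above is the new single entry point for xattr updates: when the mount is configured for logged attributes it first grabs the log-incompat reference so an idle log cannot clear XFS_SB_FEAT_INCOMPAT_LOG_XATTRS mid-operation, enables the feature on first use, performs the set or remove, then drops the reference. A loose userspace analogy of that grab/do/release shape, using a pthread rwlock for the incompat-users reference and ignoring on-disk persistence of the feature bit (illustrative only, not the kernel locking):

#include <pthread.h>
#include <stdbool.h>

/* Readers are in-flight users of the log-incompat feature; the idle-log
 * worker would take the write side before clearing the feature bit. */
static pthread_rwlock_t incompat_users = PTHREAD_RWLOCK_INITIALIZER;
static bool larp_enabled;       /* stand-in for the superblock feature bit */

static int attr_change(int (*attr_set)(void *arg), void *arg, bool use_logging)
{
        int error;

        if (!use_logging)
                return attr_set(arg);   /* plain, unlogged update */

        /* like xfs_attr_grab_log_assist(): pin the feature, enable on first use
         * (first-enable races are ignored in this sketch) */
        pthread_rwlock_rdlock(&incompat_users);
        if (!larp_enabled)
                larp_enabled = true;

        error = attr_set(arg);          /* the actual set or remove */

        /* like xfs_attr_rele_log_assist() */
        pthread_rwlock_unlock(&incompat_users);
        return error;
}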
diff --git a/fs/xfs/xfs_xattr.h b/fs/xfs/xfs_xattr.h
new file mode 100644 (file)
index 0000000..2b09133
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ */
+#ifndef __XFS_XATTR_H__
+#define __XFS_XATTR_H__
+
+int xfs_attr_change(struct xfs_da_args *args);
+
+extern const struct xattr_handler *xfs_xattr_handlers[];
+
+#endif /* __XFS_XATTR_H__ */
index d46c020..d06308a 100644 (file)
@@ -2,6 +2,25 @@
 #ifndef __ASM_GENERIC_COMPAT_H
 #define __ASM_GENERIC_COMPAT_H
 
+#ifndef COMPAT_USER_HZ
+#define COMPAT_USER_HZ         100
+#endif
+
+#ifndef COMPAT_RLIM_INFINITY
+#define COMPAT_RLIM_INFINITY   0xffffffff
+#endif
+
+#ifndef COMPAT_OFF_T_MAX
+#define COMPAT_OFF_T_MAX       0x7fffffff
+#endif
+
+#if !defined(compat_arg_u64) && !defined(CONFIG_CPU_BIG_ENDIAN)
+#define compat_arg_u64(name)           u32  name##_lo, u32  name##_hi
+#define compat_arg_u64_dual(name)      u32, name##_lo, u32, name##_hi
+#define compat_arg_u64_glue(name)      (((u64)name##_lo & 0xffffffffUL) | \
+                                        ((u64)name##_hi << 32))
+#endif
+
 /* These types are common across all compat ABIs */
 typedef u32 compat_size_t;
 typedef s32 compat_ssize_t;
@@ -24,6 +43,11 @@ typedef u32 compat_caddr_t;
 typedef u32 compat_aio_context_t;
 typedef u32 compat_old_sigset_t;
 
+#ifndef __compat_uid_t
+typedef u32 __compat_uid_t;
+typedef u32 __compat_gid_t;
+#endif
+
 #ifndef __compat_uid32_t
 typedef u32 __compat_uid32_t;
 typedef u32 __compat_gid32_t;
@@ -47,4 +71,93 @@ typedef u32 compat_sigset_word;
 #define _COMPAT_NSIG_BPW 32
 #endif
 
+#ifndef compat_dev_t
+typedef u32 compat_dev_t;
+#endif
+
+#ifndef compat_ipc_pid_t
+typedef s32 compat_ipc_pid_t;
+#endif
+
+#ifndef compat_fsid_t
+typedef __kernel_fsid_t        compat_fsid_t;
+#endif
+
+#ifndef compat_statfs
+struct compat_statfs {
+       compat_int_t    f_type;
+       compat_int_t    f_bsize;
+       compat_int_t    f_blocks;
+       compat_int_t    f_bfree;
+       compat_int_t    f_bavail;
+       compat_int_t    f_files;
+       compat_int_t    f_ffree;
+       compat_fsid_t   f_fsid;
+       compat_int_t    f_namelen;
+       compat_int_t    f_frsize;
+       compat_int_t    f_flags;
+       compat_int_t    f_spare[4];
+};
+#endif
+
+#ifndef compat_ipc64_perm
+struct compat_ipc64_perm {
+       compat_key_t key;
+       __compat_uid32_t uid;
+       __compat_gid32_t gid;
+       __compat_uid32_t cuid;
+       __compat_gid32_t cgid;
+       compat_mode_t   mode;
+       unsigned char   __pad1[4 - sizeof(compat_mode_t)];
+       compat_ushort_t seq;
+       compat_ushort_t __pad2;
+       compat_ulong_t  unused1;
+       compat_ulong_t  unused2;
+};
+
+struct compat_semid64_ds {
+       struct compat_ipc64_perm sem_perm;
+       compat_ulong_t sem_otime;
+       compat_ulong_t sem_otime_high;
+       compat_ulong_t sem_ctime;
+       compat_ulong_t sem_ctime_high;
+       compat_ulong_t sem_nsems;
+       compat_ulong_t __unused3;
+       compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+       struct compat_ipc64_perm msg_perm;
+       compat_ulong_t msg_stime;
+       compat_ulong_t msg_stime_high;
+       compat_ulong_t msg_rtime;
+       compat_ulong_t msg_rtime_high;
+       compat_ulong_t msg_ctime;
+       compat_ulong_t msg_ctime_high;
+       compat_ulong_t msg_cbytes;
+       compat_ulong_t msg_qnum;
+       compat_ulong_t msg_qbytes;
+       compat_pid_t   msg_lspid;
+       compat_pid_t   msg_lrpid;
+       compat_ulong_t __unused4;
+       compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+       struct compat_ipc64_perm shm_perm;
+       compat_size_t  shm_segsz;
+       compat_ulong_t shm_atime;
+       compat_ulong_t shm_atime_high;
+       compat_ulong_t shm_dtime;
+       compat_ulong_t shm_dtime_high;
+       compat_ulong_t shm_ctime;
+       compat_ulong_t shm_ctime_high;
+       compat_pid_t   shm_cpid;
+       compat_pid_t   shm_lpid;
+       compat_ulong_t shm_nattch;
+       compat_ulong_t __unused4;
+       compat_ulong_t __unused5;
+};
+#endif
+
 #endif
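The compat_arg_u64 helpers added above let a native 64-bit syscall argument be declared as two 32-bit halves in a compat handler and glued back together in the body; the ordering shown is the little-endian (!CONFIG_CPU_BIG_ENDIAN) case. A self-contained userspace demo of the same macros (hypothetical compat_sys_demo handler, not a real syscall):

/* Demo of reassembling a 64-bit value passed as two 32-bit halves. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Little-endian argument order, as in the !CONFIG_CPU_BIG_ENDIAN case above. */
#define compat_arg_u64(name)            uint32_t name##_lo, uint32_t name##_hi
#define compat_arg_u64_glue(name)       (((uint64_t)name##_lo & 0xffffffffUL) | \
                                         ((uint64_t)name##_hi << 32))

/* A compat handler receives the split halves... */
static void compat_sys_demo(compat_arg_u64(offset))
{
        /* ...and glues them back into the native 64-bit argument. */
        uint64_t offset = compat_arg_u64_glue(offset);

        printf("offset = 0x%" PRIx64 "\n", offset);
}

int main(void)
{
        uint64_t off = 0x123456789abcdef0ULL;

        compat_sys_demo((uint32_t)off, (uint32_t)(off >> 32));
        return 0;
}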
diff --git a/include/clocksource/timer-xilinx.h b/include/clocksource/timer-xilinx.h
new file mode 100644 (file)
index 0000000..c0f56fe
--- /dev/null
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2021 Sean Anderson <sean.anderson@seco.com>
+ */
+
+#ifndef XILINX_TIMER_H
+#define XILINX_TIMER_H
+
+#include <linux/compiler.h>
+
+#define TCSR0  0x00
+#define TLR0   0x04
+#define TCR0   0x08
+#define TCSR1  0x10
+#define TLR1   0x14
+#define TCR1   0x18
+
+#define TCSR_MDT       BIT(0)
+#define TCSR_UDT       BIT(1)
+#define TCSR_GENT      BIT(2)
+#define TCSR_CAPT      BIT(3)
+#define TCSR_ARHT      BIT(4)
+#define TCSR_LOAD      BIT(5)
+#define TCSR_ENIT      BIT(6)
+#define TCSR_ENT       BIT(7)
+#define TCSR_TINT      BIT(8)
+#define TCSR_PWMA      BIT(9)
+#define TCSR_ENALL     BIT(10)
+#define TCSR_CASC      BIT(11)
+
+struct clk;
+struct device_node;
+struct regmap;
+
+/**
+ * struct xilinx_timer_priv - Private data for Xilinx AXI timer drivers
+ * @map: Regmap of the device, possibly with an offset
+ * @clk: Parent clock
+ * @max: Maximum value of the counters
+ */
+struct xilinx_timer_priv {
+       struct regmap *map;
+       struct clk *clk;
+       u32 max;
+};
+
+/**
+ * xilinx_timer_tlr_cycles() - Calculate the TLR for a period specified
+ *                             in clock cycles
+ * @priv: The timer's private data
+ * @tcsr: The value of the TCSR register for this counter
+ * @cycles: The number of cycles in this period
+ *
+ * Callers of this function MUST ensure that @cycles is representable as
+ * a TLR.
+ *
+ * Return: The calculated value for TLR
+ */
+u32 xilinx_timer_tlr_cycles(struct xilinx_timer_priv *priv, u32 tcsr,
+                           u64 cycles);
+
+/**
+ * xilinx_timer_get_period() - Get the current period of a counter
+ * @priv: The timer's private data
+ * @tlr: The value of TLR for this counter
+ * @tcsr: The value of TCSR for this counter
+ *
+ * Return: The period, in ns
+ */
+unsigned int xilinx_timer_get_period(struct xilinx_timer_priv *priv,
+                                    u32 tlr, u32 tcsr);
+
+#endif /* XILINX_TIMER_H */
index 22deb21..08e0e3f 100644 (file)
@@ -67,6 +67,14 @@ static inline bool drm_arch_can_wc_memory(void)
         * optimization entirely for ARM and arm64.
         */
        return false;
+#elif defined(CONFIG_LOONGARCH)
+       /*
+        * LoongArch maintains cache coherency in hardware, but its WUC attribute
+        * (Weak-ordered UnCached, which is similar to WC) falls outside the
+        * scope of the cache coherency mechanism. This means WUC can only be
+        * used for write-only memory regions.
+        */
+       return false;
 #else
        return true;
 #endif
index 235b525..e84cfec 100644 (file)
 #define SLAVE_MNOC_SF_MEM_NOC_DISPLAY          3
 #define SLAVE_MNOC_HF_MEM_NOC_DISPLAY          4
 
+#define MASTER_QUP_CORE_0                      0
+#define MASTER_QUP_CORE_1                      1
+#define MASTER_QUP_CORE_2                      2
+#define SLAVE_QUP_CORE_0                       3
+#define SLAVE_QUP_CORE_1                       4
+#define SLAVE_QUP_CORE_2                       5
+
 #endif
diff --git a/include/dt-bindings/interconnect/qcom,sc8280xp.h b/include/dt-bindings/interconnect/qcom,sc8280xp.h
new file mode 100644 (file)
index 0000000..a3e5fda
--- /dev/null
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC8280XP_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SC8280XP_H
+
+/* aggre1_noc */
+#define MASTER_QSPI_0                  0
+#define MASTER_QUP_1                   1
+#define MASTER_QUP_2                   2
+#define MASTER_A1NOC_CFG               3
+#define MASTER_IPA                     4
+#define MASTER_EMAC_1                  5
+#define MASTER_SDCC_4                  6
+#define MASTER_UFS_MEM                 7
+#define MASTER_USB3_0                  8
+#define MASTER_USB3_1                  9
+#define MASTER_USB3_MP                 10
+#define MASTER_USB4_0                  11
+#define MASTER_USB4_1                  12
+#define SLAVE_A1NOC_SNOC               13
+#define SLAVE_USB_NOC_SNOC             14
+#define SLAVE_SERVICE_A1NOC            15
+
+/* aggre2_noc */
+#define MASTER_QDSS_BAM                        0
+#define MASTER_QUP_0                   1
+#define MASTER_A2NOC_CFG               2
+#define MASTER_CRYPTO                  3
+#define MASTER_SENSORS_PROC            4
+#define MASTER_SP                      5
+#define MASTER_EMAC                    6
+#define MASTER_PCIE_0                  7
+#define MASTER_PCIE_1                  8
+#define MASTER_PCIE_2A                 9
+#define MASTER_PCIE_2B                 10
+#define MASTER_PCIE_3A                 11
+#define MASTER_PCIE_3B                 12
+#define MASTER_PCIE_4                  13
+#define MASTER_QDSS_ETR                        14
+#define MASTER_SDCC_2                  15
+#define MASTER_UFS_CARD                        16
+#define SLAVE_A2NOC_SNOC               17
+#define SLAVE_ANOC_PCIE_GEM_NOC                18
+#define SLAVE_SERVICE_A2NOC            19
+
+/* clk_virt */
+#define MASTER_IPA_CORE                        0
+#define MASTER_QUP_CORE_0              1
+#define MASTER_QUP_CORE_1              2
+#define MASTER_QUP_CORE_2              3
+#define SLAVE_IPA_CORE                 4
+#define SLAVE_QUP_CORE_0               5
+#define SLAVE_QUP_CORE_1               6
+#define SLAVE_QUP_CORE_2               7
+
+/* config_noc */
+#define MASTER_GEM_NOC_CNOC            0
+#define MASTER_GEM_NOC_PCIE_SNOC       1
+#define SLAVE_AHB2PHY_0                        2
+#define SLAVE_AHB2PHY_1                        3
+#define SLAVE_AHB2PHY_2                        4
+#define SLAVE_AOSS                     5
+#define SLAVE_APPSS                    6
+#define SLAVE_CAMERA_CFG               7
+#define SLAVE_CLK_CTL                  8
+#define SLAVE_CDSP_CFG                 9
+#define SLAVE_CDSP1_CFG                        10
+#define SLAVE_RBCPR_CX_CFG             11
+#define SLAVE_RBCPR_MMCX_CFG           12
+#define SLAVE_RBCPR_MX_CFG             13
+#define SLAVE_CPR_NSPCX                        14
+#define SLAVE_CRYPTO_0_CFG             15
+#define SLAVE_CX_RDPM                  16
+#define SLAVE_DCC_CFG                  17
+#define SLAVE_DISPLAY_CFG              18
+#define SLAVE_DISPLAY1_CFG             19
+#define SLAVE_EMAC_CFG                 20
+#define SLAVE_EMAC1_CFG                        21
+#define SLAVE_GFX3D_CFG                        22
+#define SLAVE_HWKM                     23
+#define SLAVE_IMEM_CFG                 24
+#define SLAVE_IPA_CFG                  25
+#define SLAVE_IPC_ROUTER_CFG           26
+#define SLAVE_LPASS                    27
+#define SLAVE_MX_RDPM                  28
+#define SLAVE_MXC_RDPM                 29
+#define SLAVE_PCIE_0_CFG               30
+#define SLAVE_PCIE_1_CFG               31
+#define SLAVE_PCIE_2A_CFG              32
+#define SLAVE_PCIE_2B_CFG              33
+#define SLAVE_PCIE_3A_CFG              34
+#define SLAVE_PCIE_3B_CFG              35
+#define SLAVE_PCIE_4_CFG               36
+#define SLAVE_PCIE_RSC_CFG             37
+#define SLAVE_PDM                      38
+#define SLAVE_PIMEM_CFG                        39
+#define SLAVE_PKA_WRAPPER_CFG          40
+#define SLAVE_PMU_WRAPPER_CFG          41
+#define SLAVE_QDSS_CFG                 42
+#define SLAVE_QSPI_0                   43
+#define SLAVE_QUP_0                    44
+#define SLAVE_QUP_1                    45
+#define SLAVE_QUP_2                    46
+#define SLAVE_SDCC_2                   47
+#define SLAVE_SDCC_4                   48
+#define SLAVE_SECURITY                 49
+#define SLAVE_SMMUV3_CFG               50
+#define SLAVE_SMSS_CFG                 51
+#define SLAVE_SPSS_CFG                 52
+#define SLAVE_TCSR                     53
+#define SLAVE_TLMM                     54
+#define SLAVE_UFS_CARD_CFG             55
+#define SLAVE_UFS_MEM_CFG              56
+#define SLAVE_USB3_0                   57
+#define SLAVE_USB3_1                   58
+#define SLAVE_USB3_MP                  59
+#define SLAVE_USB4_0                   60
+#define SLAVE_USB4_1                   61
+#define SLAVE_VENUS_CFG                        62
+#define SLAVE_VSENSE_CTRL_CFG          63
+#define SLAVE_VSENSE_CTRL_R_CFG                64
+#define SLAVE_A1NOC_CFG                        65
+#define SLAVE_A2NOC_CFG                        66
+#define SLAVE_ANOC_PCIE_BRIDGE_CFG     67
+#define SLAVE_DDRSS_CFG                        68
+#define SLAVE_CNOC_MNOC_CFG            69
+#define SLAVE_SNOC_CFG                 70
+#define SLAVE_SNOC_SF_BRIDGE_CFG       71
+#define SLAVE_IMEM                     72
+#define SLAVE_PIMEM                    73
+#define SLAVE_SERVICE_CNOC             74
+#define SLAVE_PCIE_0                   75
+#define SLAVE_PCIE_1                   76
+#define SLAVE_PCIE_2A                  77
+#define SLAVE_PCIE_2B                  78
+#define SLAVE_PCIE_3A                  79
+#define SLAVE_PCIE_3B                  80
+#define SLAVE_PCIE_4                   81
+#define SLAVE_QDSS_STM                 82
+#define SLAVE_SMSS                     83
+#define SLAVE_TCU                      84
+
+/* dc_noc */
+#define MASTER_CNOC_DC_NOC             0
+#define SLAVE_LLCC_CFG                 1
+#define SLAVE_GEM_NOC_CFG              2
+
+/* gem_noc */
+#define MASTER_GPU_TCU                 0
+#define MASTER_PCIE_TCU                        1
+#define MASTER_SYS_TCU                 2
+#define MASTER_APPSS_PROC              3
+#define MASTER_COMPUTE_NOC             4
+#define MASTER_COMPUTE_NOC_1           5
+#define MASTER_GEM_NOC_CFG             6
+#define MASTER_GFX3D                   7
+#define MASTER_MNOC_HF_MEM_NOC         8
+#define MASTER_MNOC_SF_MEM_NOC         9
+#define MASTER_ANOC_PCIE_GEM_NOC       10
+#define MASTER_SNOC_GC_MEM_NOC         11
+#define MASTER_SNOC_SF_MEM_NOC         12
+#define SLAVE_GEM_NOC_CNOC             13
+#define SLAVE_LLCC                     14
+#define SLAVE_GEM_NOC_PCIE_CNOC                15
+#define SLAVE_SERVICE_GEM_NOC_1                16
+#define SLAVE_SERVICE_GEM_NOC_2                17
+#define SLAVE_SERVICE_GEM_NOC          18
+
+/* lpass_ag_noc */
+#define MASTER_CNOC_LPASS_AG_NOC       0
+#define MASTER_LPASS_PROC              1
+#define SLAVE_LPASS_CORE_CFG           2
+#define SLAVE_LPASS_LPI_CFG            3
+#define SLAVE_LPASS_MPU_CFG            4
+#define SLAVE_LPASS_TOP_CFG            5
+#define SLAVE_LPASS_SNOC               6
+#define SLAVE_SERVICES_LPASS_AML_NOC   7
+#define SLAVE_SERVICE_LPASS_AG_NOC     8
+
+/* mc_virt */
+#define MASTER_LLCC                    0
+#define SLAVE_EBI1                     1
+
+/* mmss_noc */
+#define MASTER_CAMNOC_HF               0
+#define MASTER_MDP0                    1
+#define MASTER_MDP1                    2
+#define MASTER_MDP_CORE1_0             3
+#define MASTER_MDP_CORE1_1             4
+#define MASTER_CNOC_MNOC_CFG           5
+#define MASTER_ROTATOR                 6
+#define MASTER_ROTATOR_1               7
+#define MASTER_VIDEO_P0                        8
+#define MASTER_VIDEO_P1                        9
+#define MASTER_VIDEO_PROC              10
+#define MASTER_CAMNOC_ICP              11
+#define MASTER_CAMNOC_SF               12
+#define SLAVE_MNOC_HF_MEM_NOC          13
+#define SLAVE_MNOC_SF_MEM_NOC          14
+#define SLAVE_SERVICE_MNOC             15
+
+/* nspa_noc */
+#define MASTER_CDSP_NOC_CFG            0
+#define MASTER_CDSP_PROC               1
+#define SLAVE_CDSP_MEM_NOC             2
+#define SLAVE_NSP_XFR                  3
+#define SLAVE_SERVICE_NSP_NOC          4
+
+/* nspb_noc */
+#define MASTER_CDSPB_NOC_CFG           0
+#define MASTER_CDSP_PROC_B             1
+#define SLAVE_CDSPB_MEM_NOC            2
+#define SLAVE_NSPB_XFR                 3
+#define SLAVE_SERVICE_NSPB_NOC         4
+
+/* system_noc */
+#define MASTER_A1NOC_SNOC              0
+#define MASTER_A2NOC_SNOC              1
+#define MASTER_USB_NOC_SNOC            2
+#define MASTER_LPASS_ANOC              3
+#define MASTER_SNOC_CFG                        4
+#define MASTER_PIMEM                   5
+#define MASTER_GIC                     6
+#define SLAVE_SNOC_GEM_NOC_GC          7
+#define SLAVE_SNOC_GEM_NOC_SF          8
+#define SLAVE_SERVICE_SNOC             9
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdx65.h b/include/dt-bindings/interconnect/qcom,sdx65.h
new file mode 100644 (file)
index 0000000..b25288a
--- /dev/null
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX65_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX65_H
+
+#define MASTER_LLCC    0
+#define SLAVE_EBI1     1
+
+#define MASTER_TCU_0           0
+#define MASTER_SNOC_GC_MEM_NOC 1
+#define MASTER_APPSS_PROC      2
+#define SLAVE_LLCC             3
+#define SLAVE_MEM_NOC_SNOC     4
+#define SLAVE_MEM_NOC_PCIE_SNOC        5
+
+#define MASTER_AUDIO           0
+#define MASTER_BLSP_1          1
+#define MASTER_QDSS_BAM                2
+#define MASTER_QPIC            3
+#define MASTER_SNOC_CFG                4
+#define MASTER_SPMI_FETCHER    5
+#define MASTER_ANOC_SNOC       6
+#define MASTER_IPA             7
+#define MASTER_MEM_NOC_SNOC    8
+#define MASTER_MEM_NOC_PCIE_SNOC       9
+#define MASTER_CRYPTO          10
+#define MASTER_IPA_PCIE                11
+#define MASTER_PCIE_0          12
+#define MASTER_QDSS_ETR                13
+#define MASTER_SDCC_1          14
+#define MASTER_USB3            15
+#define SLAVE_AOSS             16
+#define SLAVE_APPSS            17
+#define SLAVE_AUDIO            18
+#define SLAVE_BLSP_1           19
+#define SLAVE_CLK_CTL          20
+#define SLAVE_CRYPTO_0_CFG     21
+#define SLAVE_CNOC_DDRSS       22
+#define SLAVE_ECC_CFG          23
+#define SLAVE_IMEM_CFG         24
+#define SLAVE_IPA_CFG          25
+#define SLAVE_CNOC_MSS         26
+#define SLAVE_PCIE_PARF                27
+#define SLAVE_PDM              28
+#define SLAVE_PRNG             29
+#define SLAVE_QDSS_CFG         30
+#define SLAVE_QPIC             31
+#define SLAVE_SDCC_1           32
+#define SLAVE_SNOC_CFG         33
+#define SLAVE_SPMI_FETCHER     34
+#define SLAVE_SPMI_VGI_COEX    35
+#define SLAVE_TCSR             36
+#define SLAVE_TLMM             37
+#define SLAVE_USB3             38
+#define SLAVE_USB3_PHY_CFG     39
+#define SLAVE_ANOC_SNOC                40
+#define SLAVE_SNOC_MEM_NOC_GC  41
+#define SLAVE_IMEM             42
+#define SLAVE_SERVICE_SNOC     43
+#define SLAVE_PCIE_0           44
+#define SLAVE_QDSS_STM         45
+#define SLAVE_TCU              46
+
+#endif
diff --git a/include/dt-bindings/memory/mt8186-memory-port.h b/include/dt-bindings/memory/mt8186-memory-port.h
new file mode 100644 (file)
index 0000000..2bc6e44
--- /dev/null
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ *
+ * Author: Anan Sun <anan.sun@mediatek.com>
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8186_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8186_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space, which is split into four
+ * ranges: 0 ~ 4G; 4G ~ 8G; 8G ~ 12G; 12G ~ 16G. The masters may be placed in
+ * any of these regions, BUT:
+ * a) all the ports inside a larb must be in the same range;
+ * b) the iova of any master can NOT cross a 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping for this SoC:
+ *
+ * modules    dma-address-region       larbs-ports
+ * disp         0 ~ 4G                  larb0/1/2
+ * vcodec      4G ~ 8G                  larb4/7
+ * cam/mdp     8G ~ 12G                 the other larbs.
+ * N/A         12G ~ 16G
+ * CCU0   0x24000_0000 ~ 0x243ff_ffff   larb13: port 9/10
+ * CCU1   0x24400_0000 ~ 0x247ff_ffff   larb14: port 4/5
+ */
+
+/* MM IOMMU ports */
+/* LARB 0 -- MMSYS */
+#define IOMMU_PORT_L0_DISP_POSTMASK0   MTK_M4U_ID(0, 0)
+#define IOMMU_PORT_L0_REVERSED         MTK_M4U_ID(0, 1)
+#define IOMMU_PORT_L0_OVL_RDMA0                MTK_M4U_ID(0, 2)
+#define IOMMU_PORT_L0_DISP_FAKE0       MTK_M4U_ID(0, 3)
+
+/* LARB 1 -- MMSYS */
+#define IOMMU_PORT_L1_DISP_RDMA1       MTK_M4U_ID(1, 0)
+#define IOMMU_PORT_L1_OVL_2L_RDMA0     MTK_M4U_ID(1, 1)
+#define IOMMU_PORT_L1_DISP_RDMA0       MTK_M4U_ID(1, 2)
+#define IOMMU_PORT_L1_DISP_WDMA0       MTK_M4U_ID(1, 3)
+#define IOMMU_PORT_L1_DISP_FAKE1       MTK_M4U_ID(1, 4)
+
+/* LARB 2 -- MMSYS */
+#define IOMMU_PORT_L2_MDP_RDMA0                MTK_M4U_ID(2, 0)
+#define IOMMU_PORT_L2_MDP_RDMA1                MTK_M4U_ID(2, 1)
+#define IOMMU_PORT_L2_MDP_WROT0                MTK_M4U_ID(2, 2)
+#define IOMMU_PORT_L2_MDP_WROT1                MTK_M4U_ID(2, 3)
+#define IOMMU_PORT_L2_DISP_FAKE0       MTK_M4U_ID(2, 4)
+
+/* LARB 4 -- VDEC */
+#define IOMMU_PORT_L4_HW_VDEC_MC_EXT           MTK_M4U_ID(4, 0)
+#define IOMMU_PORT_L4_HW_VDEC_UFO_EXT          MTK_M4U_ID(4, 1)
+#define IOMMU_PORT_L4_HW_VDEC_PP_EXT           MTK_M4U_ID(4, 2)
+#define IOMMU_PORT_L4_HW_VDEC_PRED_RD_EXT      MTK_M4U_ID(4, 3)
+#define IOMMU_PORT_L4_HW_VDEC_PRED_WR_EXT      MTK_M4U_ID(4, 4)
+#define IOMMU_PORT_L4_HW_VDEC_PPWRAP_EXT       MTK_M4U_ID(4, 5)
+#define IOMMU_PORT_L4_HW_VDEC_TILE_EXT         MTK_M4U_ID(4, 6)
+#define IOMMU_PORT_L4_HW_VDEC_VLD_EXT          MTK_M4U_ID(4, 7)
+#define IOMMU_PORT_L4_HW_VDEC_VLD2_EXT         MTK_M4U_ID(4, 8)
+#define IOMMU_PORT_L4_HW_VDEC_AVC_MV_EXT       MTK_M4U_ID(4, 9)
+#define IOMMU_PORT_L4_HW_VDEC_UFO_ENC_EXT      MTK_M4U_ID(4, 10)
+#define IOMMU_PORT_L4_HW_VDEC_RG_CTRL_DMA_EXT  MTK_M4U_ID(4, 11)
+#define IOMMU_PORT_L4_HW_MINI_MDP_R0_EXT       MTK_M4U_ID(4, 12)
+#define IOMMU_PORT_L4_HW_MINI_MDP_W0_EXT       MTK_M4U_ID(4, 13)
+
+/* LARB 7 -- VENC */
+#define IOMMU_PORT_L7_VENC_RCPU                MTK_M4U_ID(7, 0)
+#define IOMMU_PORT_L7_VENC_REC         MTK_M4U_ID(7, 1)
+#define IOMMU_PORT_L7_VENC_BSDMA       MTK_M4U_ID(7, 2)
+#define IOMMU_PORT_L7_VENC_SV_COMV     MTK_M4U_ID(7, 3)
+#define IOMMU_PORT_L7_VENC_RD_COMV     MTK_M4U_ID(7, 4)
+#define IOMMU_PORT_L7_VENC_CUR_LUMA    MTK_M4U_ID(7, 5)
+#define IOMMU_PORT_L7_VENC_CUR_CHROMA  MTK_M4U_ID(7, 6)
+#define IOMMU_PORT_L7_VENC_REF_LUMA    MTK_M4U_ID(7, 7)
+#define IOMMU_PORT_L7_VENC_REF_CHROMA  MTK_M4U_ID(7, 8)
+#define IOMMU_PORT_L7_JPGENC_Y_RDMA    MTK_M4U_ID(7, 9)
+#define IOMMU_PORT_L7_JPGENC_C_RDMA    MTK_M4U_ID(7, 10)
+#define IOMMU_PORT_L7_JPGENC_Q_TABLE   MTK_M4U_ID(7, 11)
+#define IOMMU_PORT_L7_JPGENC_BSDMA     MTK_M4U_ID(7, 12)
+
+/* LARB 8 -- WPE */
+#define IOMMU_PORT_L8_WPE_RDMA_0       MTK_M4U_ID(8, 0)
+#define IOMMU_PORT_L8_WPE_RDMA_1       MTK_M4U_ID(8, 1)
+#define IOMMU_PORT_L8_WPE_WDMA_0       MTK_M4U_ID(8, 2)
+
+/* LARB 9 -- IMG-1 */
+#define IOMMU_PORT_L9_IMG_IMGI_D1      MTK_M4U_ID(9, 0)
+#define IOMMU_PORT_L9_IMG_IMGBI_D1     MTK_M4U_ID(9, 1)
+#define IOMMU_PORT_L9_IMG_DMGI_D1      MTK_M4U_ID(9, 2)
+#define IOMMU_PORT_L9_IMG_DEPI_D1      MTK_M4U_ID(9, 3)
+#define IOMMU_PORT_L9_IMG_LCE_D1       MTK_M4U_ID(9, 4)
+#define IOMMU_PORT_L9_IMG_SMTI_D1      MTK_M4U_ID(9, 5)
+#define IOMMU_PORT_L9_IMG_SMTO_D2      MTK_M4U_ID(9, 6)
+#define IOMMU_PORT_L9_IMG_SMTO_D1      MTK_M4U_ID(9, 7)
+#define IOMMU_PORT_L9_IMG_CRZO_D1      MTK_M4U_ID(9, 8)
+#define IOMMU_PORT_L9_IMG_IMG3O_D1     MTK_M4U_ID(9, 9)
+#define IOMMU_PORT_L9_IMG_VIPI_D1      MTK_M4U_ID(9, 10)
+#define IOMMU_PORT_L9_IMG_SMTI_D5      MTK_M4U_ID(9, 11)
+#define IOMMU_PORT_L9_IMG_TIMGO_D1     MTK_M4U_ID(9, 12)
+#define IOMMU_PORT_L9_IMG_UFBC_W0      MTK_M4U_ID(9, 13)
+#define IOMMU_PORT_L9_IMG_UFBC_R0      MTK_M4U_ID(9, 14)
+#define IOMMU_PORT_L9_IMG_WPE_RDMA1    MTK_M4U_ID(9, 15)
+#define IOMMU_PORT_L9_IMG_WPE_RDMA0    MTK_M4U_ID(9, 16)
+#define IOMMU_PORT_L9_IMG_WPE_WDMA     MTK_M4U_ID(9, 17)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA0    MTK_M4U_ID(9, 18)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA1    MTK_M4U_ID(9, 19)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA2    MTK_M4U_ID(9, 20)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA3    MTK_M4U_ID(9, 21)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA4    MTK_M4U_ID(9, 22)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA5    MTK_M4U_ID(9, 23)
+#define IOMMU_PORT_L9_IMG_MFB_WDMA0    MTK_M4U_ID(9, 24)
+#define IOMMU_PORT_L9_IMG_MFB_WDMA1    MTK_M4U_ID(9, 25)
+#define IOMMU_PORT_L9_IMG_RESERVE6     MTK_M4U_ID(9, 26)
+#define IOMMU_PORT_L9_IMG_RESERVE7     MTK_M4U_ID(9, 27)
+#define IOMMU_PORT_L9_IMG_RESERVE8     MTK_M4U_ID(9, 28)
+
+/* LARB 11 -- IMG-2 */
+#define IOMMU_PORT_L11_IMG_IMGI_D1     MTK_M4U_ID(11, 0)
+#define IOMMU_PORT_L11_IMG_IMGBI_D1    MTK_M4U_ID(11, 1)
+#define IOMMU_PORT_L11_IMG_DMGI_D1     MTK_M4U_ID(11, 2)
+#define IOMMU_PORT_L11_IMG_DEPI_D1     MTK_M4U_ID(11, 3)
+#define IOMMU_PORT_L11_IMG_LCE_D1      MTK_M4U_ID(11, 4)
+#define IOMMU_PORT_L11_IMG_SMTI_D1     MTK_M4U_ID(11, 5)
+#define IOMMU_PORT_L11_IMG_SMTO_D2     MTK_M4U_ID(11, 6)
+#define IOMMU_PORT_L11_IMG_SMTO_D1     MTK_M4U_ID(11, 7)
+#define IOMMU_PORT_L11_IMG_CRZO_D1     MTK_M4U_ID(11, 8)
+#define IOMMU_PORT_L11_IMG_IMG3O_D1    MTK_M4U_ID(11, 9)
+#define IOMMU_PORT_L11_IMG_VIPI_D1     MTK_M4U_ID(11, 10)
+#define IOMMU_PORT_L11_IMG_SMTI_D5     MTK_M4U_ID(11, 11)
+#define IOMMU_PORT_L11_IMG_TIMGO_D1    MTK_M4U_ID(11, 12)
+#define IOMMU_PORT_L11_IMG_UFBC_W0     MTK_M4U_ID(11, 13)
+#define IOMMU_PORT_L11_IMG_UFBC_R0     MTK_M4U_ID(11, 14)
+#define IOMMU_PORT_L11_IMG_WPE_RDMA1   MTK_M4U_ID(11, 15)
+#define IOMMU_PORT_L11_IMG_WPE_RDMA0   MTK_M4U_ID(11, 16)
+#define IOMMU_PORT_L11_IMG_WPE_WDMA    MTK_M4U_ID(11, 17)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA0   MTK_M4U_ID(11, 18)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA1   MTK_M4U_ID(11, 19)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA2   MTK_M4U_ID(11, 20)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA3   MTK_M4U_ID(11, 21)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA4   MTK_M4U_ID(11, 22)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA5   MTK_M4U_ID(11, 23)
+#define IOMMU_PORT_L11_IMG_MFB_WDMA0   MTK_M4U_ID(11, 24)
+#define IOMMU_PORT_L11_IMG_MFB_WDMA1   MTK_M4U_ID(11, 25)
+#define IOMMU_PORT_L11_IMG_RESERVE6    MTK_M4U_ID(11, 26)
+#define IOMMU_PORT_L11_IMG_RESERVE7    MTK_M4U_ID(11, 27)
+#define IOMMU_PORT_L11_IMG_RESERVE8    MTK_M4U_ID(11, 28)
+
+/* LARB 13 -- CAM */
+#define IOMMU_PORT_L13_CAM_MRAWI       MTK_M4U_ID(13, 0)
+#define IOMMU_PORT_L13_CAM_MRAWO_0     MTK_M4U_ID(13, 1)
+#define IOMMU_PORT_L13_CAM_MRAWO_1     MTK_M4U_ID(13, 2)
+#define IOMMU_PORT_L13_CAM_CAMSV_4     MTK_M4U_ID(13, 6)
+#define IOMMU_PORT_L13_CAM_CAMSV_5     MTK_M4U_ID(13, 7)
+#define IOMMU_PORT_L13_CAM_CAMSV_6     MTK_M4U_ID(13, 8)
+#define IOMMU_PORT_L13_CAM_CCUI                MTK_M4U_ID(13, 9)
+#define IOMMU_PORT_L13_CAM_CCUO                MTK_M4U_ID(13, 10)
+#define IOMMU_PORT_L13_CAM_FAKE                MTK_M4U_ID(13, 11)
+
+/* LARB 14 -- CAM */
+#define IOMMU_PORT_L14_CAM_CCUI                MTK_M4U_ID(14, 4)
+#define IOMMU_PORT_L14_CAM_CCUO                MTK_M4U_ID(14, 5)
+
+/* LARB 16 -- RAW-A */
+#define IOMMU_PORT_L16_CAM_IMGO_R1_A   MTK_M4U_ID(16, 0)
+#define IOMMU_PORT_L16_CAM_RRZO_R1_A   MTK_M4U_ID(16, 1)
+#define IOMMU_PORT_L16_CAM_CQI_R1_A    MTK_M4U_ID(16, 2)
+#define IOMMU_PORT_L16_CAM_BPCI_R1_A   MTK_M4U_ID(16, 3)
+#define IOMMU_PORT_L16_CAM_YUVO_R1_A   MTK_M4U_ID(16, 4)
+#define IOMMU_PORT_L16_CAM_UFDI_R2_A   MTK_M4U_ID(16, 5)
+#define IOMMU_PORT_L16_CAM_RAWI_R2_A   MTK_M4U_ID(16, 6)
+#define IOMMU_PORT_L16_CAM_RAWI_R3_A   MTK_M4U_ID(16, 7)
+#define IOMMU_PORT_L16_CAM_AAO_R1_A    MTK_M4U_ID(16, 8)
+#define IOMMU_PORT_L16_CAM_AFO_R1_A    MTK_M4U_ID(16, 9)
+#define IOMMU_PORT_L16_CAM_FLKO_R1_A   MTK_M4U_ID(16, 10)
+#define IOMMU_PORT_L16_CAM_LCESO_R1_A  MTK_M4U_ID(16, 11)
+#define IOMMU_PORT_L16_CAM_CRZO_R1_A   MTK_M4U_ID(16, 12)
+#define IOMMU_PORT_L16_CAM_LTMSO_R1_A  MTK_M4U_ID(16, 13)
+#define IOMMU_PORT_L16_CAM_RSSO_R1_A   MTK_M4U_ID(16, 14)
+#define IOMMU_PORT_L16_CAM_AAHO_R1_A   MTK_M4U_ID(16, 15)
+#define IOMMU_PORT_L16_CAM_LSCI_R1_A   MTK_M4U_ID(16, 16)
+
+/* LARB 17 -- RAW-B */
+#define IOMMU_PORT_L17_CAM_IMGO_R1_B   MTK_M4U_ID(17, 0)
+#define IOMMU_PORT_L17_CAM_RRZO_R1_B   MTK_M4U_ID(17, 1)
+#define IOMMU_PORT_L17_CAM_CQI_R1_B    MTK_M4U_ID(17, 2)
+#define IOMMU_PORT_L17_CAM_BPCI_R1_B   MTK_M4U_ID(17, 3)
+#define IOMMU_PORT_L17_CAM_YUVO_R1_B   MTK_M4U_ID(17, 4)
+#define IOMMU_PORT_L17_CAM_UFDI_R2_B   MTK_M4U_ID(17, 5)
+#define IOMMU_PORT_L17_CAM_RAWI_R2_B   MTK_M4U_ID(17, 6)
+#define IOMMU_PORT_L17_CAM_RAWI_R3_B   MTK_M4U_ID(17, 7)
+#define IOMMU_PORT_L17_CAM_AAO_R1_B    MTK_M4U_ID(17, 8)
+#define IOMMU_PORT_L17_CAM_AFO_R1_B    MTK_M4U_ID(17, 9)
+#define IOMMU_PORT_L17_CAM_FLKO_R1_B   MTK_M4U_ID(17, 10)
+#define IOMMU_PORT_L17_CAM_LCESO_R1_B  MTK_M4U_ID(17, 11)
+#define IOMMU_PORT_L17_CAM_CRZO_R1_B   MTK_M4U_ID(17, 12)
+#define IOMMU_PORT_L17_CAM_LTMSO_R1_B  MTK_M4U_ID(17, 13)
+#define IOMMU_PORT_L17_CAM_RSSO_R1_B   MTK_M4U_ID(17, 14)
+#define IOMMU_PORT_L17_CAM_AAHO_R1_B   MTK_M4U_ID(17, 15)
+#define IOMMU_PORT_L17_CAM_LSCI_R1_B   MTK_M4U_ID(17, 16)
+
+/* LARB 19 -- IPE */
+#define IOMMU_PORT_L19_IPE_DVS_RDMA    MTK_M4U_ID(19, 0)
+#define IOMMU_PORT_L19_IPE_DVS_WDMA    MTK_M4U_ID(19, 1)
+#define IOMMU_PORT_L19_IPE_DVP_RDMA    MTK_M4U_ID(19, 2)
+#define IOMMU_PORT_L19_IPE_DVP_WDMA    MTK_M4U_ID(19, 3)
+
+/* LARB 20 -- IPE */
+#define IOMMU_PORT_L20_IPE_FDVT_RDA    MTK_M4U_ID(20, 0)
+#define IOMMU_PORT_L20_IPE_FDVT_RDB    MTK_M4U_ID(20, 1)
+#define IOMMU_PORT_L20_IPE_FDVT_WRA    MTK_M4U_ID(20, 2)
+#define IOMMU_PORT_L20_IPE_FDVT_WRB    MTK_M4U_ID(20, 3)
+#define IOMMU_PORT_L20_IPE_RSC_RDMA0   MTK_M4U_ID(20, 4)
+#define IOMMU_PORT_L20_IPE_RSC_WDMA    MTK_M4U_ID(20, 5)
+
+#endif
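The header comment above pins every master to a single one of the four 4 GiB DMA ranges and forbids any master's IOVA window from crossing a 4G/8G/12G boundary (the mt8195 header below repeats the same rule). A minimal standalone C sketch of that boundary rule, using the CCU0 window quoted in the comment as a test value; the helper below is illustrative only and is not part of the kernel header or driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_4G (1ULL << 32)

/* Rule (b) above: the window [iova, iova + size) is only legal if its first
 * and last byte fall inside the same 4 GiB-aligned region. */
static bool crosses_4g_boundary(uint64_t iova, uint64_t size)
{
	return (iova / SZ_4G) != ((iova + size - 1) / SZ_4G);
}

int main(void)
{
	/* CCU0 window from the comment: 0x2_4000_0000, 64 MiB long -> legal. */
	printf("CCU0 crosses: %d\n", crosses_4g_boundary(0x240000000ULL, 0x4000000ULL));
	/* A window straddling the 4 GiB mark would violate the rule. */
	printf("bad window crosses: %d\n", crosses_4g_boundary(0xfff00000ULL, 0x200000ULL));
	return 0;
}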
diff --git a/include/dt-bindings/memory/mt8195-memory-port.h b/include/dt-bindings/memory/mt8195-memory-port.h
new file mode 100644 (file)
index 0000000..70ba9f4
--- /dev/null
@@ -0,0 +1,408 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8195_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8195_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space, which is split into four
+ * ranges: 0 ~ 4G; 4G ~ 8G; 8G ~ 12G; 12G ~ 16G. The masters may be placed in
+ * any of these regions, BUT:
+ * a) all the ports inside a larb must be in the same range;
+ * b) the iova of any master can NOT cross a 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping for this SoC:
+ *
+ * modules    dma-address-region       larbs-ports
+ * disp         0 ~ 4G                  larb0/1/2/3
+ * vcodec      4G ~ 8G                  larb19/20/21/22/23/24
+ * cam/mdp     8G ~ 12G                 the other larbs.
+ * N/A         12G ~ 16G
+ * CCU0   0x24000_0000 ~ 0x243ff_ffff   larb18: port 0/1
+ * CCU1   0x24400_0000 ~ 0x247ff_ffff   larb18: port 2/3
+ *
+ * This SoC has two IOMMU HWs; the detailed larb connections are:
+ * iommu-vdo: larb0/2/5/7/9/10/11/13/17/19/21/24/25/28
+ * iommu-vpp: larb1/3/4/6/8/12/14/16/18/20/22/23/26/27
+ */
+
+/* MM IOMMU ports */
+/* larb0 */
+#define M4U_PORT_L0_DISP_RDMA0                 MTK_M4U_ID(0, 0)
+#define M4U_PORT_L0_DISP_WDMA0                 MTK_M4U_ID(0, 1)
+#define M4U_PORT_L0_DISP_OVL0_RDMA0            MTK_M4U_ID(0, 2)
+#define M4U_PORT_L0_DISP_OVL0_RDMA1            MTK_M4U_ID(0, 3)
+#define M4U_PORT_L0_DISP_OVL0_HDR              MTK_M4U_ID(0, 4)
+#define M4U_PORT_L0_DISP_FAKE0                 MTK_M4U_ID(0, 5)
+
+/* larb1 */
+#define M4U_PORT_L1_DISP_RDMA0                 MTK_M4U_ID(1, 0)
+#define M4U_PORT_L1_DISP_WDMA0                 MTK_M4U_ID(1, 1)
+#define M4U_PORT_L1_DISP_OVL0_RDMA0            MTK_M4U_ID(1, 2)
+#define M4U_PORT_L1_DISP_OVL0_RDMA1            MTK_M4U_ID(1, 3)
+#define M4U_PORT_L1_DISP_OVL0_HDR              MTK_M4U_ID(1, 4)
+#define M4U_PORT_L1_DISP_FAKE0                 MTK_M4U_ID(1, 5)
+
+/* larb2 */
+#define M4U_PORT_L2_MDP_RDMA0                  MTK_M4U_ID(2, 0)
+#define M4U_PORT_L2_MDP_RDMA2                  MTK_M4U_ID(2, 1)
+#define M4U_PORT_L2_MDP_RDMA4                  MTK_M4U_ID(2, 2)
+#define M4U_PORT_L2_MDP_RDMA6                  MTK_M4U_ID(2, 3)
+#define M4U_PORT_L2_DISP_FAKE1                 MTK_M4U_ID(2, 4)
+
+/* larb3 */
+#define M4U_PORT_L3_MDP_RDMA1                  MTK_M4U_ID(3, 0)
+#define M4U_PORT_L3_MDP_RDMA3                  MTK_M4U_ID(3, 1)
+#define M4U_PORT_L3_MDP_RDMA5                  MTK_M4U_ID(3, 2)
+#define M4U_PORT_L3_MDP_RDMA7                  MTK_M4U_ID(3, 3)
+#define M4U_PORT_L3_HDR_DS                     MTK_M4U_ID(3, 4)
+#define M4U_PORT_L3_HDR_ADL                    MTK_M4U_ID(3, 5)
+#define M4U_PORT_L3_DISP_FAKE1                 MTK_M4U_ID(3, 6)
+
+/* larb4 */
+#define M4U_PORT_L4_MDP_RDMA                   MTK_M4U_ID(4, 0)
+#define M4U_PORT_L4_MDP_FG                     MTK_M4U_ID(4, 1)
+#define M4U_PORT_L4_MDP_OVL                    MTK_M4U_ID(4, 2)
+#define M4U_PORT_L4_MDP_WROT                   MTK_M4U_ID(4, 3)
+#define M4U_PORT_L4_FAKE                       MTK_M4U_ID(4, 4)
+
+/* larb5 */
+#define M4U_PORT_L5_SVPP1_MDP_RDMA             MTK_M4U_ID(5, 0)
+#define M4U_PORT_L5_SVPP1_MDP_FG               MTK_M4U_ID(5, 1)
+#define M4U_PORT_L5_SVPP1_MDP_OVL              MTK_M4U_ID(5, 2)
+#define M4U_PORT_L5_SVPP1_MDP_WROT             MTK_M4U_ID(5, 3)
+#define M4U_PORT_L5_SVPP2_MDP_RDMA             MTK_M4U_ID(5, 4)
+#define M4U_PORT_L5_SVPP2_MDP_FG               MTK_M4U_ID(5, 5)
+#define M4U_PORT_L5_SVPP2_MDP_WROT             MTK_M4U_ID(5, 6)
+#define M4U_PORT_L5_FAKE                       MTK_M4U_ID(5, 7)
+
+/* larb6 */
+#define M4U_PORT_L6_SVPP3_MDP_RDMA             MTK_M4U_ID(6, 0)
+#define M4U_PORT_L6_SVPP3_MDP_FG               MTK_M4U_ID(6, 1)
+#define M4U_PORT_L6_SVPP3_MDP_WROT             MTK_M4U_ID(6, 2)
+#define M4U_PORT_L6_FAKE                       MTK_M4U_ID(6, 3)
+
+/* larb7 */
+#define M4U_PORT_L7_IMG_WPE_RDMA0              MTK_M4U_ID(7, 0)
+#define M4U_PORT_L7_IMG_WPE_RDMA1              MTK_M4U_ID(7, 1)
+#define M4U_PORT_L7_IMG_WPE_WDMA0              MTK_M4U_ID(7, 2)
+
+/* larb8 */
+#define M4U_PORT_L8_IMG_WPE_RDMA0              MTK_M4U_ID(8, 0)
+#define M4U_PORT_L8_IMG_WPE_RDMA1              MTK_M4U_ID(8, 1)
+#define M4U_PORT_L8_IMG_WPE_WDMA0              MTK_M4U_ID(8, 2)
+
+/* larb9 */
+#define M4U_PORT_L9_IMG_IMGI_T1_A              MTK_M4U_ID(9, 0)
+#define M4U_PORT_L9_IMG_IMGBI_T1_A             MTK_M4U_ID(9, 1)
+#define M4U_PORT_L9_IMG_IMGCI_T1_A             MTK_M4U_ID(9, 2)
+#define M4U_PORT_L9_IMG_SMTI_T1_A              MTK_M4U_ID(9, 3)
+#define M4U_PORT_L9_IMG_TNCSTI_T1_A            MTK_M4U_ID(9, 4)
+#define M4U_PORT_L9_IMG_TNCSTI_T4_A            MTK_M4U_ID(9, 5)
+#define M4U_PORT_L9_IMG_YUVO_T1_A              MTK_M4U_ID(9, 6)
+#define M4U_PORT_L9_IMG_TIMGO_T1_A             MTK_M4U_ID(9, 7)
+#define M4U_PORT_L9_IMG_YUVO_T2_A              MTK_M4U_ID(9, 8)
+#define M4U_PORT_L9_IMG_IMGI_T1_B              MTK_M4U_ID(9, 9)
+#define M4U_PORT_L9_IMG_IMGBI_T1_B             MTK_M4U_ID(9, 10)
+#define M4U_PORT_L9_IMG_IMGCI_T1_B             MTK_M4U_ID(9, 11)
+#define M4U_PORT_L9_IMG_YUVO_T5_A              MTK_M4U_ID(9, 12)
+#define M4U_PORT_L9_IMG_SMTI_T1_B              MTK_M4U_ID(9, 13)
+#define M4U_PORT_L9_IMG_TNCSO_T1_A             MTK_M4U_ID(9, 14)
+#define M4U_PORT_L9_IMG_SMTO_T1_A              MTK_M4U_ID(9, 15)
+#define M4U_PORT_L9_IMG_TNCSTO_T1_A            MTK_M4U_ID(9, 16)
+#define M4U_PORT_L9_IMG_YUVO_T2_B              MTK_M4U_ID(9, 17)
+#define M4U_PORT_L9_IMG_YUVO_T5_B              MTK_M4U_ID(9, 18)
+#define M4U_PORT_L9_IMG_SMTO_T1_B              MTK_M4U_ID(9, 19)
+
+/* larb10 */
+#define M4U_PORT_L10_IMG_IMGI_D1_A             MTK_M4U_ID(10, 0)
+#define M4U_PORT_L10_IMG_IMGCI_D1_A            MTK_M4U_ID(10, 1)
+#define M4U_PORT_L10_IMG_DEPI_D1_A             MTK_M4U_ID(10, 2)
+#define M4U_PORT_L10_IMG_DMGI_D1_A             MTK_M4U_ID(10, 3)
+#define M4U_PORT_L10_IMG_VIPI_D1_A             MTK_M4U_ID(10, 4)
+#define M4U_PORT_L10_IMG_TNRWI_D1_A            MTK_M4U_ID(10, 5)
+#define M4U_PORT_L10_IMG_RECI_D1_A             MTK_M4U_ID(10, 6)
+#define M4U_PORT_L10_IMG_SMTI_D1_A             MTK_M4U_ID(10, 7)
+#define M4U_PORT_L10_IMG_SMTI_D6_A             MTK_M4U_ID(10, 8)
+#define M4U_PORT_L10_IMG_PIMGI_P1_A            MTK_M4U_ID(10, 9)
+#define M4U_PORT_L10_IMG_PIMGBI_P1_A           MTK_M4U_ID(10, 10)
+#define M4U_PORT_L10_IMG_PIMGCI_P1_A           MTK_M4U_ID(10, 11)
+#define M4U_PORT_L10_IMG_PIMGI_P1_B            MTK_M4U_ID(10, 12)
+#define M4U_PORT_L10_IMG_PIMGBI_P1_B           MTK_M4U_ID(10, 13)
+#define M4U_PORT_L10_IMG_PIMGCI_P1_B           MTK_M4U_ID(10, 14)
+#define M4U_PORT_L10_IMG_IMG3O_D1_A            MTK_M4U_ID(10, 15)
+#define M4U_PORT_L10_IMG_IMG4O_D1_A            MTK_M4U_ID(10, 16)
+#define M4U_PORT_L10_IMG_IMG3CO_D1_A           MTK_M4U_ID(10, 17)
+#define M4U_PORT_L10_IMG_FEO_D1_A              MTK_M4U_ID(10, 18)
+#define M4U_PORT_L10_IMG_IMG2O_D1_A            MTK_M4U_ID(10, 19)
+#define M4U_PORT_L10_IMG_TNRWO_D1_A            MTK_M4U_ID(10, 20)
+#define M4U_PORT_L10_IMG_SMTO_D1_A             MTK_M4U_ID(10, 21)
+#define M4U_PORT_L10_IMG_WROT_P1_A             MTK_M4U_ID(10, 22)
+#define M4U_PORT_L10_IMG_WROT_P1_B             MTK_M4U_ID(10, 23)
+
+/* larb11 */
+#define M4U_PORT_L11_IMG_WPE_EIS_RDMA0_A       MTK_M4U_ID(11, 0)
+#define M4U_PORT_L11_IMG_WPE_EIS_RDMA1_A       MTK_M4U_ID(11, 1)
+#define M4U_PORT_L11_IMG_WPE_EIS_WDMA0_A       MTK_M4U_ID(11, 2)
+#define M4U_PORT_L11_IMG_WPE_TNR_RDMA0_A       MTK_M4U_ID(11, 3)
+#define M4U_PORT_L11_IMG_WPE_TNR_RDMA1_A       MTK_M4U_ID(11, 4)
+#define M4U_PORT_L11_IMG_WPE_TNR_WDMA0_A       MTK_M4U_ID(11, 5)
+#define M4U_PORT_L11_IMG_WPE_EIS_CQ0_A         MTK_M4U_ID(11, 6)
+#define M4U_PORT_L11_IMG_WPE_EIS_CQ1_A         MTK_M4U_ID(11, 7)
+#define M4U_PORT_L11_IMG_WPE_TNR_CQ0_A         MTK_M4U_ID(11, 8)
+#define M4U_PORT_L11_IMG_WPE_TNR_CQ1_A         MTK_M4U_ID(11, 9)
+
+/* larb12 */
+#define M4U_PORT_L12_IMG_FDVT_RDA              MTK_M4U_ID(12, 0)
+#define M4U_PORT_L12_IMG_FDVT_RDB              MTK_M4U_ID(12, 1)
+#define M4U_PORT_L12_IMG_FDVT_WRA              MTK_M4U_ID(12, 2)
+#define M4U_PORT_L12_IMG_FDVT_WRB              MTK_M4U_ID(12, 3)
+#define M4U_PORT_L12_IMG_ME_RDMA               MTK_M4U_ID(12, 4)
+#define M4U_PORT_L12_IMG_ME_WDMA               MTK_M4U_ID(12, 5)
+#define M4U_PORT_L12_IMG_DVS_RDMA              MTK_M4U_ID(12, 6)
+#define M4U_PORT_L12_IMG_DVS_WDMA              MTK_M4U_ID(12, 7)
+#define M4U_PORT_L12_IMG_DVP_RDMA              MTK_M4U_ID(12, 8)
+#define M4U_PORT_L12_IMG_DVP_WDMA              MTK_M4U_ID(12, 9)
+
+/* larb13 */
+#define M4U_PORT_L13_CAM_CAMSV_CQI_E1          MTK_M4U_ID(13, 0)
+#define M4U_PORT_L13_CAM_CAMSV_CQI_E2          MTK_M4U_ID(13, 1)
+#define M4U_PORT_L13_CAM_GCAMSV_A_IMGO_0       MTK_M4U_ID(13, 2)
+#define M4U_PORT_L13_CAM_SCAMSV_A_IMGO_0       MTK_M4U_ID(13, 3)
+#define M4U_PORT_L13_CAM_GCAMSV_B_IMGO_0       MTK_M4U_ID(13, 4)
+#define M4U_PORT_L13_CAM_GCAMSV_B_IMGO_1       MTK_M4U_ID(13, 5)
+#define M4U_PORT_L13_CAM_GCAMSV_A_UFEO_0       MTK_M4U_ID(13, 6)
+#define M4U_PORT_L13_CAM_GCAMSV_B_UFEO_0       MTK_M4U_ID(13, 7)
+#define M4U_PORT_L13_CAM_PDAI_0                        MTK_M4U_ID(13, 8)
+#define M4U_PORT_L13_CAM_FAKE                  MTK_M4U_ID(13, 9)
+
+/* larb14 */
+#define M4U_PORT_L14_CAM_GCAMSV_A_IMGO_1       MTK_M4U_ID(14, 0)
+#define M4U_PORT_L14_CAM_SCAMSV_A_IMGO_1       MTK_M4U_ID(14, 1)
+#define M4U_PORT_L14_CAM_GCAMSV_B_IMGO_0       MTK_M4U_ID(14, 2)
+#define M4U_PORT_L14_CAM_GCAMSV_B_IMGO_1       MTK_M4U_ID(14, 3)
+#define M4U_PORT_L14_CAM_SCAMSV_B_IMGO_0       MTK_M4U_ID(14, 4)
+#define M4U_PORT_L14_CAM_SCAMSV_B_IMGO_1       MTK_M4U_ID(14, 5)
+#define M4U_PORT_L14_CAM_IPUI                  MTK_M4U_ID(14, 6)
+#define M4U_PORT_L14_CAM_IPU2I                 MTK_M4U_ID(14, 7)
+#define M4U_PORT_L14_CAM_IPUO                  MTK_M4U_ID(14, 8)
+#define M4U_PORT_L14_CAM_IPU2O                 MTK_M4U_ID(14, 9)
+#define M4U_PORT_L14_CAM_IPU3O                 MTK_M4U_ID(14, 10)
+#define M4U_PORT_L14_CAM_GCAMSV_A_UFEO_1       MTK_M4U_ID(14, 11)
+#define M4U_PORT_L14_CAM_GCAMSV_B_UFEO_1       MTK_M4U_ID(14, 12)
+#define M4U_PORT_L14_CAM_PDAI_1                        MTK_M4U_ID(14, 13)
+#define M4U_PORT_L14_CAM_PDAO                  MTK_M4U_ID(14, 14)
+
+/* larb15: null */
+
+/* larb16 */
+#define M4U_PORT_L16_CAM_IMGO_R1               MTK_M4U_ID(16, 0)
+#define M4U_PORT_L16_CAM_CQI_R1                        MTK_M4U_ID(16, 1)
+#define M4U_PORT_L16_CAM_CQI_R2                        MTK_M4U_ID(16, 2)
+#define M4U_PORT_L16_CAM_BPCI_R1               MTK_M4U_ID(16, 3)
+#define M4U_PORT_L16_CAM_LSCI_R1               MTK_M4U_ID(16, 4)
+#define M4U_PORT_L16_CAM_RAWI_R2               MTK_M4U_ID(16, 5)
+#define M4U_PORT_L16_CAM_RAWI_R3               MTK_M4U_ID(16, 6)
+#define M4U_PORT_L16_CAM_UFDI_R2               MTK_M4U_ID(16, 7)
+#define M4U_PORT_L16_CAM_UFDI_R3               MTK_M4U_ID(16, 8)
+#define M4U_PORT_L16_CAM_RAWI_R4               MTK_M4U_ID(16, 9)
+#define M4U_PORT_L16_CAM_RAWI_R5               MTK_M4U_ID(16, 10)
+#define M4U_PORT_L16_CAM_AAI_R1                        MTK_M4U_ID(16, 11)
+#define M4U_PORT_L16_CAM_FHO_R1                        MTK_M4U_ID(16, 12)
+#define M4U_PORT_L16_CAM_AAO_R1                        MTK_M4U_ID(16, 13)
+#define M4U_PORT_L16_CAM_TSFSO_R1              MTK_M4U_ID(16, 14)
+#define M4U_PORT_L16_CAM_FLKO_R1               MTK_M4U_ID(16, 15)
+
+/* larb17 */
+#define M4U_PORT_L17_CAM_YUVO_R1               MTK_M4U_ID(17, 0)
+#define M4U_PORT_L17_CAM_YUVO_R3               MTK_M4U_ID(17, 1)
+#define M4U_PORT_L17_CAM_YUVCO_R1              MTK_M4U_ID(17, 2)
+#define M4U_PORT_L17_CAM_YUVO_R2               MTK_M4U_ID(17, 3)
+#define M4U_PORT_L17_CAM_RZH1N2TO_R1           MTK_M4U_ID(17, 4)
+#define M4U_PORT_L17_CAM_DRZS4NO_R1            MTK_M4U_ID(17, 5)
+#define M4U_PORT_L17_CAM_TNCSO_R1              MTK_M4U_ID(17, 6)
+
+/* larb18 */
+#define M4U_PORT_L18_CAM_CCUI                  MTK_M4U_ID(18, 0)
+#define M4U_PORT_L18_CAM_CCUO                  MTK_M4U_ID(18, 1)
+#define M4U_PORT_L18_CAM_CCUI2                 MTK_M4U_ID(18, 2)
+#define M4U_PORT_L18_CAM_CCUO2                 MTK_M4U_ID(18, 3)
+
+/* larb19 */
+#define M4U_PORT_L19_VENC_RCPU                 MTK_M4U_ID(19, 0)
+#define M4U_PORT_L19_VENC_REC                  MTK_M4U_ID(19, 1)
+#define M4U_PORT_L19_VENC_BSDMA                        MTK_M4U_ID(19, 2)
+#define M4U_PORT_L19_VENC_SV_COMV              MTK_M4U_ID(19, 3)
+#define M4U_PORT_L19_VENC_RD_COMV              MTK_M4U_ID(19, 4)
+#define M4U_PORT_L19_VENC_NBM_RDMA             MTK_M4U_ID(19, 5)
+#define M4U_PORT_L19_VENC_NBM_RDMA_LITE                MTK_M4U_ID(19, 6)
+#define M4U_PORT_L19_JPGENC_Y_RDMA             MTK_M4U_ID(19, 7)
+#define M4U_PORT_L19_JPGENC_C_RDMA             MTK_M4U_ID(19, 8)
+#define M4U_PORT_L19_JPGENC_Q_TABLE            MTK_M4U_ID(19, 9)
+#define M4U_PORT_L19_VENC_SUB_W_LUMA           MTK_M4U_ID(19, 10)
+#define M4U_PORT_L19_VENC_FCS_NBM_RDMA         MTK_M4U_ID(19, 11)
+#define M4U_PORT_L19_JPGENC_BSDMA              MTK_M4U_ID(19, 12)
+#define M4U_PORT_L19_JPGDEC_WDMA0              MTK_M4U_ID(19, 13)
+#define M4U_PORT_L19_JPGDEC_BSDMA0             MTK_M4U_ID(19, 14)
+#define M4U_PORT_L19_VENC_NBM_WDMA             MTK_M4U_ID(19, 15)
+#define M4U_PORT_L19_VENC_NBM_WDMA_LITE                MTK_M4U_ID(19, 16)
+#define M4U_PORT_L19_VENC_FCS_NBM_WDMA         MTK_M4U_ID(19, 17)
+#define M4U_PORT_L19_JPGDEC_WDMA1              MTK_M4U_ID(19, 18)
+#define M4U_PORT_L19_JPGDEC_BSDMA1             MTK_M4U_ID(19, 19)
+#define M4U_PORT_L19_JPGDEC_BUFF_OFFSET1       MTK_M4U_ID(19, 20)
+#define M4U_PORT_L19_JPGDEC_BUFF_OFFSET0       MTK_M4U_ID(19, 21)
+#define M4U_PORT_L19_VENC_CUR_LUMA             MTK_M4U_ID(19, 22)
+#define M4U_PORT_L19_VENC_CUR_CHROMA           MTK_M4U_ID(19, 23)
+#define M4U_PORT_L19_VENC_REF_LUMA             MTK_M4U_ID(19, 24)
+#define M4U_PORT_L19_VENC_REF_CHROMA           MTK_M4U_ID(19, 25)
+#define M4U_PORT_L19_VENC_SUB_R_CHROMA         MTK_M4U_ID(19, 26)
+
+/* larb20 */
+#define M4U_PORT_L20_VENC_RCPU                 MTK_M4U_ID(20, 0)
+#define M4U_PORT_L20_VENC_REC                  MTK_M4U_ID(20, 1)
+#define M4U_PORT_L20_VENC_BSDMA                        MTK_M4U_ID(20, 2)
+#define M4U_PORT_L20_VENC_SV_COMV              MTK_M4U_ID(20, 3)
+#define M4U_PORT_L20_VENC_RD_COMV              MTK_M4U_ID(20, 4)
+#define M4U_PORT_L20_VENC_NBM_RDMA             MTK_M4U_ID(20, 5)
+#define M4U_PORT_L20_VENC_NBM_RDMA_LITE                MTK_M4U_ID(20, 6)
+#define M4U_PORT_L20_JPGENC_Y_RDMA             MTK_M4U_ID(20, 7)
+#define M4U_PORT_L20_JPGENC_C_RDMA             MTK_M4U_ID(20, 8)
+#define M4U_PORT_L20_JPGENC_Q_TABLE            MTK_M4U_ID(20, 9)
+#define M4U_PORT_L20_VENC_SUB_W_LUMA           MTK_M4U_ID(20, 10)
+#define M4U_PORT_L20_VENC_FCS_NBM_RDMA         MTK_M4U_ID(20, 11)
+#define M4U_PORT_L20_JPGENC_BSDMA              MTK_M4U_ID(20, 12)
+#define M4U_PORT_L20_JPGDEC_WDMA0              MTK_M4U_ID(20, 13)
+#define M4U_PORT_L20_JPGDEC_BSDMA0             MTK_M4U_ID(20, 14)
+#define M4U_PORT_L20_VENC_NBM_WDMA             MTK_M4U_ID(20, 15)
+#define M4U_PORT_L20_VENC_NBM_WDMA_LITE                MTK_M4U_ID(20, 16)
+#define M4U_PORT_L20_VENC_FCS_NBM_WDMA         MTK_M4U_ID(20, 17)
+#define M4U_PORT_L20_JPGDEC_WDMA1              MTK_M4U_ID(20, 18)
+#define M4U_PORT_L20_JPGDEC_BSDMA1             MTK_M4U_ID(20, 19)
+#define M4U_PORT_L20_JPGDEC_BUFF_OFFSET1       MTK_M4U_ID(20, 20)
+#define M4U_PORT_L20_JPGDEC_BUFF_OFFSET0       MTK_M4U_ID(20, 21)
+#define M4U_PORT_L20_VENC_CUR_LUMA             MTK_M4U_ID(20, 22)
+#define M4U_PORT_L20_VENC_CUR_CHROMA           MTK_M4U_ID(20, 23)
+#define M4U_PORT_L20_VENC_REF_LUMA             MTK_M4U_ID(20, 24)
+#define M4U_PORT_L20_VENC_REF_CHROMA           MTK_M4U_ID(20, 25)
+#define M4U_PORT_L20_VENC_SUB_R_CHROMA         MTK_M4U_ID(20, 26)
+
+/* larb21 */
+#define M4U_PORT_L21_VDEC_MC_EXT               MTK_M4U_ID(21, 0)
+#define M4U_PORT_L21_VDEC_UFO_EXT              MTK_M4U_ID(21, 1)
+#define M4U_PORT_L21_VDEC_PP_EXT               MTK_M4U_ID(21, 2)
+#define M4U_PORT_L21_VDEC_PRED_RD_EXT          MTK_M4U_ID(21, 3)
+#define M4U_PORT_L21_VDEC_PRED_WR_EXT          MTK_M4U_ID(21, 4)
+#define M4U_PORT_L21_VDEC_PPWRAP_EXT           MTK_M4U_ID(21, 5)
+#define M4U_PORT_L21_VDEC_TILE_EXT             MTK_M4U_ID(21, 6)
+#define M4U_PORT_L21_VDEC_VLD_EXT              MTK_M4U_ID(21, 7)
+#define M4U_PORT_L21_VDEC_VLD2_EXT             MTK_M4U_ID(21, 8)
+#define M4U_PORT_L21_VDEC_AVC_MV_EXT           MTK_M4U_ID(21, 9)
+
+/* larb22 */
+#define M4U_PORT_L22_VDEC_MC_EXT               MTK_M4U_ID(22, 0)
+#define M4U_PORT_L22_VDEC_UFO_EXT              MTK_M4U_ID(22, 1)
+#define M4U_PORT_L22_VDEC_PP_EXT               MTK_M4U_ID(22, 2)
+#define M4U_PORT_L22_VDEC_PRED_RD_EXT          MTK_M4U_ID(22, 3)
+#define M4U_PORT_L22_VDEC_PRED_WR_EXT          MTK_M4U_ID(22, 4)
+#define M4U_PORT_L22_VDEC_PPWRAP_EXT           MTK_M4U_ID(22, 5)
+#define M4U_PORT_L22_VDEC_TILE_EXT             MTK_M4U_ID(22, 6)
+#define M4U_PORT_L22_VDEC_VLD_EXT              MTK_M4U_ID(22, 7)
+#define M4U_PORT_L22_VDEC_VLD2_EXT             MTK_M4U_ID(22, 8)
+#define M4U_PORT_L22_VDEC_AVC_MV_EXT           MTK_M4U_ID(22, 9)
+
+/* larb23 */
+#define M4U_PORT_L23_VDEC_UFO_ENC_EXT          MTK_M4U_ID(23, 0)
+#define M4U_PORT_L23_VDEC_RDMA_EXT             MTK_M4U_ID(23, 1)
+
+/* larb24 */
+#define M4U_PORT_L24_VDEC_LAT0_VLD_EXT         MTK_M4U_ID(24, 0)
+#define M4U_PORT_L24_VDEC_LAT0_VLD2_EXT                MTK_M4U_ID(24, 1)
+#define M4U_PORT_L24_VDEC_LAT0_AVC_MC_EXT      MTK_M4U_ID(24, 2)
+#define M4U_PORT_L24_VDEC_LAT0_PRED_RD_EXT     MTK_M4U_ID(24, 3)
+#define M4U_PORT_L24_VDEC_LAT0_TILE_EXT                MTK_M4U_ID(24, 4)
+#define M4U_PORT_L24_VDEC_LAT0_WDMA_EXT                MTK_M4U_ID(24, 5)
+#define M4U_PORT_L24_VDEC_LAT1_VLD_EXT         MTK_M4U_ID(24, 6)
+#define M4U_PORT_L24_VDEC_LAT1_VLD2_EXT                MTK_M4U_ID(24, 7)
+#define M4U_PORT_L24_VDEC_LAT1_AVC_MC_EXT      MTK_M4U_ID(24, 8)
+#define M4U_PORT_L24_VDEC_LAT1_PRED_RD_EXT     MTK_M4U_ID(24, 9)
+#define M4U_PORT_L24_VDEC_LAT1_TILE_EXT                MTK_M4U_ID(24, 10)
+#define M4U_PORT_L24_VDEC_LAT1_WDMA_EXT                MTK_M4U_ID(24, 11)
+
+/* larb25 */
+#define M4U_PORT_L25_CAM_MRAW0_LSCI_M1         MTK_M4U_ID(25, 0)
+#define M4U_PORT_L25_CAM_MRAW0_CQI_M1          MTK_M4U_ID(25, 1)
+#define M4U_PORT_L25_CAM_MRAW0_CQI_M2          MTK_M4U_ID(25, 2)
+#define M4U_PORT_L25_CAM_MRAW0_IMGO_M1         MTK_M4U_ID(25, 3)
+#define M4U_PORT_L25_CAM_MRAW0_IMGBO_M1                MTK_M4U_ID(25, 4)
+#define M4U_PORT_L25_CAM_MRAW2_LSCI_M1         MTK_M4U_ID(25, 5)
+#define M4U_PORT_L25_CAM_MRAW2_CQI_M1          MTK_M4U_ID(25, 6)
+#define M4U_PORT_L25_CAM_MRAW2_CQI_M2          MTK_M4U_ID(25, 7)
+#define M4U_PORT_L25_CAM_MRAW2_IMGO_M1         MTK_M4U_ID(25, 8)
+#define M4U_PORT_L25_CAM_MRAW2_IMGBO_M1                MTK_M4U_ID(25, 9)
+#define M4U_PORT_L25_CAM_MRAW0_AFO_M1          MTK_M4U_ID(25, 10)
+#define M4U_PORT_L25_CAM_MRAW2_AFO_M1          MTK_M4U_ID(25, 11)
+
+/* larb26 */
+#define M4U_PORT_L26_CAM_MRAW1_LSCI_M1         MTK_M4U_ID(26, 0)
+#define M4U_PORT_L26_CAM_MRAW1_CQI_M1          MTK_M4U_ID(26, 1)
+#define M4U_PORT_L26_CAM_MRAW1_CQI_M2          MTK_M4U_ID(26, 2)
+#define M4U_PORT_L26_CAM_MRAW1_IMGO_M1         MTK_M4U_ID(26, 3)
+#define M4U_PORT_L26_CAM_MRAW1_IMGBO_M1                MTK_M4U_ID(26, 4)
+#define M4U_PORT_L26_CAM_MRAW3_LSCI_M1         MTK_M4U_ID(26, 5)
+#define M4U_PORT_L26_CAM_MRAW3_CQI_M1          MTK_M4U_ID(26, 6)
+#define M4U_PORT_L26_CAM_MRAW3_CQI_M2          MTK_M4U_ID(26, 7)
+#define M4U_PORT_L26_CAM_MRAW3_IMGO_M1         MTK_M4U_ID(26, 8)
+#define M4U_PORT_L26_CAM_MRAW3_IMGBO_M1                MTK_M4U_ID(26, 9)
+#define M4U_PORT_L26_CAM_MRAW1_AFO_M1          MTK_M4U_ID(26, 10)
+#define M4U_PORT_L26_CAM_MRAW3_AFO_M1          MTK_M4U_ID(26, 11)
+
+/* larb27 */
+#define M4U_PORT_L27_CAM_IMGO_R1               MTK_M4U_ID(27, 0)
+#define M4U_PORT_L27_CAM_CQI_R1                        MTK_M4U_ID(27, 1)
+#define M4U_PORT_L27_CAM_CQI_R2                        MTK_M4U_ID(27, 2)
+#define M4U_PORT_L27_CAM_BPCI_R1               MTK_M4U_ID(27, 3)
+#define M4U_PORT_L27_CAM_LSCI_R1               MTK_M4U_ID(27, 4)
+#define M4U_PORT_L27_CAM_RAWI_R2               MTK_M4U_ID(27, 5)
+#define M4U_PORT_L27_CAM_RAWI_R3               MTK_M4U_ID(27, 6)
+#define M4U_PORT_L27_CAM_UFDI_R2               MTK_M4U_ID(27, 7)
+#define M4U_PORT_L27_CAM_UFDI_R3               MTK_M4U_ID(27, 8)
+#define M4U_PORT_L27_CAM_RAWI_R4               MTK_M4U_ID(27, 9)
+#define M4U_PORT_L27_CAM_RAWI_R5               MTK_M4U_ID(27, 10)
+#define M4U_PORT_L27_CAM_AAI_R1                        MTK_M4U_ID(27, 11)
+#define M4U_PORT_L27_CAM_FHO_R1                        MTK_M4U_ID(27, 12)
+#define M4U_PORT_L27_CAM_AAO_R1                        MTK_M4U_ID(27, 13)
+#define M4U_PORT_L27_CAM_TSFSO_R1              MTK_M4U_ID(27, 14)
+#define M4U_PORT_L27_CAM_FLKO_R1               MTK_M4U_ID(27, 15)
+
+/* larb28 */
+#define M4U_PORT_L28_CAM_YUVO_R1               MTK_M4U_ID(28, 0)
+#define M4U_PORT_L28_CAM_YUVO_R3               MTK_M4U_ID(28, 1)
+#define M4U_PORT_L28_CAM_YUVCO_R1              MTK_M4U_ID(28, 2)
+#define M4U_PORT_L28_CAM_YUVO_R2               MTK_M4U_ID(28, 3)
+#define M4U_PORT_L28_CAM_RZH1N2TO_R1           MTK_M4U_ID(28, 4)
+#define M4U_PORT_L28_CAM_DRZS4NO_R1            MTK_M4U_ID(28, 5)
+#define M4U_PORT_L28_CAM_TNCSO_R1              MTK_M4U_ID(28, 6)
+
+/* Infra iommu ports */
+/* PCIe1: read: BIT16; write BIT17. */
+#define IOMMU_PORT_INFRA_PCIE1                 MTK_IFAIOMMU_PERI_ID(16)
+/* PCIe0: read: BIT18; write BIT19. */
+#define IOMMU_PORT_INFRA_PCIE0                 MTK_IFAIOMMU_PERI_ID(18)
+#define IOMMU_PORT_INFRA_SSUSB_P3_R            MTK_IFAIOMMU_PERI_ID(20)
+#define IOMMU_PORT_INFRA_SSUSB_P3_W            MTK_IFAIOMMU_PERI_ID(21)
+#define IOMMU_PORT_INFRA_SSUSB_P2_R            MTK_IFAIOMMU_PERI_ID(22)
+#define IOMMU_PORT_INFRA_SSUSB_P2_W            MTK_IFAIOMMU_PERI_ID(23)
+#define IOMMU_PORT_INFRA_SSUSB_P1_1_R          MTK_IFAIOMMU_PERI_ID(24)
+#define IOMMU_PORT_INFRA_SSUSB_P1_1_W          MTK_IFAIOMMU_PERI_ID(25)
+#define IOMMU_PORT_INFRA_SSUSB_P1_0_R          MTK_IFAIOMMU_PERI_ID(26)
+#define IOMMU_PORT_INFRA_SSUSB_P1_0_W          MTK_IFAIOMMU_PERI_ID(27)
+#define IOMMU_PORT_INFRA_SSUSB2_R              MTK_IFAIOMMU_PERI_ID(28)
+#define IOMMU_PORT_INFRA_SSUSB2_W              MTK_IFAIOMMU_PERI_ID(29)
+#define IOMMU_PORT_INFRA_SSUSB_R               MTK_IFAIOMMU_PERI_ID(30)
+#define IOMMU_PORT_INFRA_SSUSB_W               MTK_IFAIOMMU_PERI_ID(31)
+
+#endif
index 7d64103..2f68a05 100644 (file)
@@ -12,4 +12,6 @@
 #define MTK_M4U_TO_LARB(id)            (((id) >> 5) & 0x1f)
 #define MTK_M4U_TO_PORT(id)            ((id) & 0x1f)
 
+#define MTK_IFAIOMMU_PERI_ID(port)     MTK_M4U_ID(0, port)
+
 #endif
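The MTK_M4U_TO_LARB()/MTK_M4U_TO_PORT() decode macros above show that the port identifiers used throughout the mt8186/mt8195 headers keep the larb index in bits 5-9 and the port index in bits 0-4, with MTK_IFAIOMMU_PERI_ID() reusing larb 0 for the infra IOMMU. A standalone C sketch of that encoding; MTK_M4U_ID() itself is not visible in this hunk, so the encoder below is an assumption derived from the two decode macros:

#include <assert.h>
#include <stdio.h>

/* Assumed encoder, mirrored from the decode macros shown above:
 * MTK_M4U_TO_LARB(id) = (id >> 5) & 0x1f, MTK_M4U_TO_PORT(id) = id & 0x1f. */
#define M4U_ID(larb, port)	(((larb) << 5) | (port))
#define M4U_TO_LARB(id)		(((id) >> 5) & 0x1f)
#define M4U_TO_PORT(id)		((id) & 0x1f)

int main(void)
{
	/* For example, IOMMU_PORT_L13_CAM_CCUI in mt8186-memory-port.h is larb 13, port 9. */
	unsigned int id = M4U_ID(13, 9);

	assert(M4U_TO_LARB(id) == 13);
	assert(M4U_TO_PORT(id) == 9);
	printf("id=%#x larb=%u port=%u\n", id, M4U_TO_LARB(id), M4U_TO_PORT(id));
	return 0;
}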
diff --git a/include/dt-bindings/mfd/cros_ec.h b/include/dt-bindings/mfd/cros_ec.h
new file mode 100644 (file)
index 0000000..3b29cd0
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * DTS binding definitions used for the Chromium OS Embedded Controller.
+ *
+ * Copyright (c) 2022 The Chromium OS Authors. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_MFD_CROS_EC_H
+#define _DT_BINDINGS_MFD_CROS_EC_H
+
+/* Typed channel for keyboard backlight. */
+#define CROS_EC_PWM_DT_KB_LIGHT                0
+/* Typed channel for display backlight. */
+#define CROS_EC_PWM_DT_DISPLAY_LIGHT   1
+/* Number of typed channels. */
+#define CROS_EC_PWM_DT_COUNT           2
+
+#endif
diff --git a/include/dt-bindings/reset/mt7986-resets.h b/include/dt-bindings/reset/mt7986-resets.h
new file mode 100644 (file)
index 0000000..af3d16c
--- /dev/null
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT7986
+#define _DT_BINDINGS_RESET_CONTROLLER_MT7986
+
+/* INFRACFG resets */
+#define MT7986_INFRACFG_PEXTP_MAC_SW_RST       6
+#define MT7986_INFRACFG_SSUSB_SW_RST           7
+#define MT7986_INFRACFG_EIP97_SW_RST           8
+#define MT7986_INFRACFG_AUDIO_SW_RST           13
+#define MT7986_INFRACFG_CQ_DMA_SW_RST          14
+
+#define MT7986_INFRACFG_TRNG_SW_RST            17
+#define MT7986_INFRACFG_AP_DMA_SW_RST          32
+#define MT7986_INFRACFG_I2C_SW_RST             33
+#define MT7986_INFRACFG_NFI_SW_RST             34
+#define MT7986_INFRACFG_SPI0_SW_RST            35
+#define MT7986_INFRACFG_SPI1_SW_RST            36
+#define MT7986_INFRACFG_UART0_SW_RST           37
+#define MT7986_INFRACFG_UART1_SW_RST           38
+#define MT7986_INFRACFG_UART2_SW_RST           39
+#define MT7986_INFRACFG_AUXADC_SW_RST          43
+
+#define MT7986_INFRACFG_APXGPT_SW_RST          66
+#define MT7986_INFRACFG_PWM_SW_RST             68
+
+#define MT7986_INFRACFG_SW_RST_NUM             69
+
+/* TOPRGU resets */
+#define MT7986_TOPRGU_APMIXEDSYS_SW_RST                0
+#define MT7986_TOPRGU_SGMII0_SW_RST            1
+#define MT7986_TOPRGU_SGMII1_SW_RST            2
+#define MT7986_TOPRGU_INFRA_SW_RST             3
+#define MT7986_TOPRGU_U2PHY_SW_RST             5
+#define MT7986_TOPRGU_PCIE_SW_RST              6
+#define MT7986_TOPRGU_SSUSB_SW_RST             7
+#define MT7986_TOPRGU_ETHDMA_SW_RST            20
+#define MT7986_TOPRGU_CONSYS_SW_RST            23
+
+#define MT7986_TOPRGU_SW_RST_NUM               24
+
+/* ETHSYS Subsystem resets */
+#define MT7986_ETHSYS_FE_SW_RST                        6
+#define MT7986_ETHSYS_PMTR_SW_RST              8
+#define MT7986_ETHSYS_GMAC_SW_RST              23
+#define MT7986_ETHSYS_PPE0_SW_RST              30
+#define MT7986_ETHSYS_PPE1_SW_RST              31
+
+#define MT7986_ETHSYS_SW_RST_NUM               32
+
+#endif  /* _DT_BINDINGS_RESET_CONTROLLER_MT7986 */
diff --git a/include/dt-bindings/reset/mt8186-resets.h b/include/dt-bindings/reset/mt8186-resets.h
new file mode 100644 (file)
index 0000000..5f85037
--- /dev/null
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Runyang Chen <runyang.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8186
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8186
+
+#define MT8186_TOPRGU_INFRA_SW_RST                             0
+#define MT8186_TOPRGU_MM_SW_RST                                        1
+#define MT8186_TOPRGU_MFG_SW_RST                               2
+#define MT8186_TOPRGU_VENC_SW_RST                              3
+#define MT8186_TOPRGU_VDEC_SW_RST                              4
+#define MT8186_TOPRGU_IMG_SW_RST                               5
+#define MT8186_TOPRGU_DDR_SW_RST                               6
+#define MT8186_TOPRGU_INFRA_AO_SW_RST                          8
+#define MT8186_TOPRGU_CONNSYS_SW_RST                           9
+#define MT8186_TOPRGU_APMIXED_SW_RST                           10
+#define MT8186_TOPRGU_PWRAP_SW_RST                             11
+#define MT8186_TOPRGU_CONN_MCU_SW_RST                          12
+#define MT8186_TOPRGU_IPNNA_SW_RST                             13
+#define MT8186_TOPRGU_WPE_SW_RST                               14
+#define MT8186_TOPRGU_ADSP_SW_RST                              15
+#define MT8186_TOPRGU_AUDIO_SW_RST                             17
+#define MT8186_TOPRGU_CAM_MAIN_SW_RST                          18
+#define MT8186_TOPRGU_CAM_RAWA_SW_RST                          19
+#define MT8186_TOPRGU_CAM_RAWB_SW_RST                          20
+#define MT8186_TOPRGU_IPE_SW_RST                               21
+#define MT8186_TOPRGU_IMG2_SW_RST                              22
+#define MT8186_TOPRGU_SW_RST_NUM                               23
+
+/* MMSYS resets */
+#define MT8186_MMSYS_SW0_RST_B_DISP_DSI0                       19
+
+#endif  /* _DT_BINDINGS_RESET_CONTROLLER_MT8186 */
index f3fdfd7..4f82a5b 100644 (file)
@@ -520,9 +520,6 @@ int acpi_check_resource_conflict(const struct resource *res);
 int acpi_check_region(resource_size_t start, resource_size_t n,
                      const char *name);
 
-acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
-                               u32 level);
-
 int acpi_resources_are_enforced(void);
 
 #ifdef CONFIG_HIBERNATION
index 6562f54..e94cdf2 100644 (file)
@@ -70,7 +70,11 @@ struct amba_device {
        unsigned int            cid;
        struct amba_cs_uci_id   uci;
        unsigned int            irq[AMBA_NR_IRQS];
-       char                    *driver_override;
+       /*
+        * Driver name to force a match.  Do not set directly, because the
+        * core frees it.  Use driver_set_override() to set or clear it.
+        */
+       const char              *driver_override;
 };
 
 struct amba_driver {
@@ -79,6 +83,14 @@ struct amba_driver {
        void                    (*remove)(struct amba_device *);
        void                    (*shutdown)(struct amba_device *);
        const struct amba_id    *id_table;
+       /*
+        * Most device drivers do not need to care about this flag as long as
+        * all DMA is handled through the kernel DMA API. Some special drivers,
+        * for example VFIO drivers, know how to manage DMA themselves and set
+        * this flag so that the IOMMU layer will allow them to set up and
+        * manage their own I/O address space.
+        */
+       bool driver_managed_dma;
 };
 
 /*
index 7dba084..2e6cd56 100644 (file)
@@ -72,6 +72,8 @@ struct device;
  *  bitmap_allocate_region(bitmap, pos, order)  Allocate specified bit region
  *  bitmap_from_arr32(dst, buf, nbits)          Copy nbits from u32[] buf to dst
  *  bitmap_to_arr32(buf, src, nbits)            Copy nbits from buf to u32[] dst
+ *  bitmap_from_arr64(dst, buf, nbits)          Copy nbits from u64[] buf to dst
+ *  bitmap_to_arr64(buf, src, nbits)            Copy nbits from buf to u64[] dst
  *  bitmap_get_value8(map, start)               Get 8bit value from map at start
  *  bitmap_set_value8(map, value, start)        Set 8bit value to map at start
  *
@@ -132,8 +134,8 @@ unsigned long *devm_bitmap_zalloc(struct device *dev,
  * lib/bitmap.c provides these functions:
  */
 
-int __bitmap_equal(const unsigned long *bitmap1,
-                  const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_equal(const unsigned long *bitmap1,
+                   const unsigned long *bitmap2, unsigned int nbits);
 bool __pure __bitmap_or_equal(const unsigned long *src1,
                              const unsigned long *src2,
                              const unsigned long *src3,
@@ -157,10 +159,10 @@ int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
 void __bitmap_replace(unsigned long *dst,
                      const unsigned long *old, const unsigned long *new,
                      const unsigned long *mask, unsigned int nbits);
-int __bitmap_intersects(const unsigned long *bitmap1,
-                       const unsigned long *bitmap2, unsigned int nbits);
-int __bitmap_subset(const unsigned long *bitmap1,
-                   const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_intersects(const unsigned long *bitmap1,
+                        const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_subset(const unsigned long *bitmap1,
+                    const unsigned long *bitmap2, unsigned int nbits);
 int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
 void __bitmap_set(unsigned long *map, unsigned int start, int len);
 void __bitmap_clear(unsigned long *map, unsigned int start, int len);
@@ -264,8 +266,12 @@ static inline void bitmap_copy_clear_tail(unsigned long *dst,
 }
 
 /*
- * On 32-bit systems bitmaps are represented as u32 arrays internally, and
- * therefore conversion is not needed when copying data from/to arrays of u32.
+ * On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64
+ * machines the order of hi and lo parts of numbers matches the bitmap layout,
+ * so in both cases no conversion is needed when copying data from/to arrays of
+ * u32. In the LE64 case, however, the typecast in bitmap_copy_clear_tail() may
+ * lead to out-of-bounds access, so neither the LE nor the BE variant of 64-bit
+ * architectures uses bitmap_copy_clear_tail().
  */
 #if BITS_PER_LONG == 64
 void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
@@ -281,6 +287,22 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
                        (const unsigned long *) (bitmap), (nbits))
 #endif
 
+/*
+ * On 64-bit systems bitmaps are represented as u64 arrays internally. On LE32
+ * machines the order of hi and lo parts of numbers matches the bitmap structure.
+ * In both cases conversion is not needed when copying data from/to arrays of
+ * u64.
+ */
+#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
+void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits);
+void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
+#else
+#define bitmap_from_arr64(bitmap, buf, nbits)                  \
+       bitmap_copy_clear_tail((unsigned long *)(bitmap), (const unsigned long *)(buf), (nbits))
+#define bitmap_to_arr64(buf, bitmap, nbits)                    \
+       bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
+#endif
+
 static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
                        const unsigned long *src2, unsigned int nbits)
 {
@@ -331,8 +353,8 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr
 #endif
 #define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
 
-static inline int bitmap_equal(const unsigned long *src1,
-                       const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_equal(const unsigned long *src1,
+                               const unsigned long *src2, unsigned int nbits)
 {
        if (small_const_nbits(nbits))
                return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@@ -362,8 +384,9 @@ static inline bool bitmap_or_equal(const unsigned long *src1,
        return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
 }
 
-static inline int bitmap_intersects(const unsigned long *src1,
-                       const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_intersects(const unsigned long *src1,
+                                    const unsigned long *src2,
+                                    unsigned int nbits)
 {
        if (small_const_nbits(nbits))
                return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
@@ -371,8 +394,8 @@ static inline int bitmap_intersects(const unsigned long *src1,
                return __bitmap_intersects(src1, src2, nbits);
 }
 
-static inline int bitmap_subset(const unsigned long *src1,
-                       const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_subset(const unsigned long *src1,
+                                const unsigned long *src2, unsigned int nbits)
 {
        if (small_const_nbits(nbits))
                return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
@@ -514,10 +537,7 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
  */
 static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
 {
-       dst[0] = mask & ULONG_MAX;
-
-       if (sizeof(mask) > sizeof(unsigned long))
-               dst[1] = mask >> 32;
+       bitmap_from_arr64(dst, &mask, 64);
 }
 
 /**
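The new bitmap_from_arr64()/bitmap_to_arr64() helpers above degenerate into a plain copy with the unused tail bits of the last word cleared whenever unsigned long and u64 share the same layout, which is also all the reworked bitmap_from_u64() now relies on. A standalone userspace C sketch of that degenerate case, valid on a 64-bit little-endian build; the function below is a local stand-in, not the kernel API:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG 64
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Copy whole words, then clear the bits past nbits in the last word, the way
 * the bitmap_copy_clear_tail() expansion behaves on LP64. */
static void from_arr64(unsigned long *bitmap, const uint64_t *buf, unsigned int nbits)
{
	unsigned int i, words = BITS_TO_LONGS(nbits);

	for (i = 0; i < words; i++)
		bitmap[i] = buf[i];
	if (nbits % BITS_PER_LONG)
		bitmap[words - 1] &= ~0UL >> (BITS_PER_LONG - nbits % BITS_PER_LONG);
}

int main(void)
{
	uint64_t mask = 0xdeadbeefcafef00dULL;
	unsigned long dst[BITS_TO_LONGS(64)];

	/* Mirrors the reworked bitmap_from_u64(): one u64 becomes one word here. */
	from_arr64(dst, &mask, 64);
	printf("%#lx\n", dst[0]);
	return 0;
}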
index 9f07061..e2d9daf 100644 (file)
@@ -969,8 +969,7 @@ int blk_rq_unmap_user(struct bio *);
 int blk_rq_map_kern(struct request_queue *, struct request *, void *,
                unsigned int, gfp_t);
 int blk_rq_append_bio(struct request *rq, struct bio *bio);
-void blk_execute_rq_nowait(struct request *rq, bool at_head,
-               rq_end_io_fn *end_io);
+void blk_execute_rq_nowait(struct request *rq, bool at_head);
 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
 
 struct req_iterator {
index c007d58..a24d407 100644 (file)
@@ -105,6 +105,10 @@ typedef u16 blk_short_t;
 /* hack for device mapper, don't use elsewhere: */
 #define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)
 
+/*
+ * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
+ * and the bio would block (cf bio_wouldblock_error())
+ */
 #define BLK_STS_AGAIN          ((__force blk_status_t)12)
 
 /*
index 1b24c1f..608d577 100644 (file)
@@ -147,6 +147,7 @@ struct gendisk {
 #define GD_DEAD                                2
 #define GD_NATIVE_CAPACITY             3
 #define GD_ADDED                       4
+#define GD_SUPPRESS_PART_SCAN          5
 
        struct mutex open_mutex;        /* open/close mutex */
        unsigned open_partitions;       /* number of open partitions */
diff --git a/include/linux/clk/pxa.h b/include/linux/clk/pxa.h
new file mode 100644 (file)
index 0000000..736b8bb
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern int pxa25x_clocks_init(void __iomem *regs);
+extern int pxa27x_clocks_init(void __iomem *regs);
+extern int pxa3xx_clocks_init(void __iomem *regs, void __iomem *oscc_reg);
+
+#ifdef CONFIG_PXA3xx
+extern unsigned        pxa3xx_get_clk_frequency_khz(int);
+extern void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask);
+#else
+#define pxa3xx_get_clk_frequency_khz(x)                (0)
+#define pxa3xx_clk_update_accr(disable, enable, xclkcfg, mask) do { } while (0)
+#endif
index 01fddf7..5943578 100644 (file)
@@ -259,6 +259,37 @@ struct compat_rlimit {
        compat_ulong_t  rlim_max;
 };
 
+#ifdef __ARCH_NEED_COMPAT_FLOCK64_PACKED
+#define __ARCH_COMPAT_FLOCK64_PACK     __attribute__((packed))
+#else
+#define __ARCH_COMPAT_FLOCK64_PACK
+#endif
+
+struct compat_flock {
+       short                   l_type;
+       short                   l_whence;
+       compat_off_t            l_start;
+       compat_off_t            l_len;
+#ifdef __ARCH_COMPAT_FLOCK_EXTRA_SYSID
+       __ARCH_COMPAT_FLOCK_EXTRA_SYSID
+#endif
+       compat_pid_t            l_pid;
+#ifdef __ARCH_COMPAT_FLOCK_PAD
+       __ARCH_COMPAT_FLOCK_PAD
+#endif
+};
+
+struct compat_flock64 {
+       short           l_type;
+       short           l_whence;
+       compat_loff_t   l_start;
+       compat_loff_t   l_len;
+       compat_pid_t    l_pid;
+#ifdef __ARCH_COMPAT_FLOCK64_PAD
+       __ARCH_COMPAT_FLOCK64_PAD
+#endif
+} __ARCH_COMPAT_FLOCK64_PACK;
+
 struct compat_rusage {
        struct old_timeval32 ru_utime;
        struct old_timeval32 ru_stime;
@@ -896,6 +927,43 @@ asmlinkage long compat_sys_sigaction(int sig,
 /* obsolete: net/socket.c */
 asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
 
+#ifdef __ARCH_WANT_COMPAT_TRUNCATE64
+asmlinkage long compat_sys_truncate64(const char __user *pathname, compat_arg_u64(len));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_FTRUNCATE64
+asmlinkage long compat_sys_ftruncate64(unsigned int fd, compat_arg_u64(len));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_FALLOCATE
+asmlinkage long compat_sys_fallocate(int fd, int mode, compat_arg_u64(offset),
+                                    compat_arg_u64(len));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_PREAD64
+asmlinkage long compat_sys_pread64(unsigned int fd, char __user *buf, size_t count,
+                                  compat_arg_u64(pos));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_PWRITE64
+asmlinkage long compat_sys_pwrite64(unsigned int fd, const char __user *buf, size_t count,
+                                   compat_arg_u64(pos));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYNC_FILE_RANGE
+asmlinkage long compat_sys_sync_file_range(int fd, compat_arg_u64(pos),
+                                          compat_arg_u64(nbytes), unsigned int flags);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_FADVISE64_64
+asmlinkage long compat_sys_fadvise64_64(int fd, compat_arg_u64(pos),
+                                       compat_arg_u64(len), int advice);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_READAHEAD
+asmlinkage long compat_sys_readahead(int fd, compat_arg_u64(offset), size_t count);
+#endif
+
 #endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
 
 /**
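The compat_flock and compat_flock64 structures added above are generic templates; an architecture tunes them by defining the __ARCH_COMPAT_FLOCK* macros before this header is parsed, injecting an extra sysid field, trailing padding, or a packed attribute. A standalone C sketch of that override pattern; the macro and field choices below are invented for illustration and do not correspond to any real architecture header:

#include <stdio.h>

/* Pretend to be an arch that wants trailing padding in the 32-bit struct and
 * a packed 64-bit variant; a real arch would define __ARCH_COMPAT_FLOCK_PAD
 * and __ARCH_NEED_COMPAT_FLOCK64_PACKED instead of these local names. */
#define EXAMPLE_FLOCK_PAD	int l_pad[4];
#define EXAMPLE_FLOCK64_PACK	__attribute__((packed))

struct example_flock {
	short l_type;
	short l_whence;
	long  l_start;
	long  l_len;
	int   l_pid;
#ifdef EXAMPLE_FLOCK_PAD
	EXAMPLE_FLOCK_PAD
#endif
};

struct example_flock64 {
	short     l_type;
	short     l_whence;
	long long l_start;
	long long l_len;
	int       l_pid;
} EXAMPLE_FLOCK64_PACK;

int main(void)
{
	printf("flock: %zu bytes, flock64: %zu bytes\n",
	       sizeof(struct example_flock), sizeof(struct example_flock64));
	return 0;
}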
index 65a60d3..ae1e63e 100644 (file)
@@ -46,10 +46,10 @@ static __always_inline bool context_tracking_in_user(void)
        return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
 }
 #else
-static inline bool context_tracking_in_user(void) { return false; }
-static inline bool context_tracking_enabled(void) { return false; }
-static inline bool context_tracking_enabled_cpu(int cpu) { return false; }
-static inline bool context_tracking_enabled_this_cpu(void) { return false; }
+static __always_inline bool context_tracking_in_user(void) { return false; }
+static __always_inline bool context_tracking_enabled(void) { return false; }
+static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
+static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; }
 #endif /* CONFIG_CONTEXT_TRACKING */
 
 #endif
index b66c5f3..19f0dbf 100644 (file)
@@ -130,6 +130,7 @@ enum cpuhp_state {
        CPUHP_ZCOMP_PREPARE,
        CPUHP_TIMERS_PREPARE,
        CPUHP_MIPS_SOC_PREPARE,
+       CPUHP_LOONGARCH_SOC_PREPARE,
        CPUHP_BP_PREPARE_DYN,
        CPUHP_BP_PREPARE_DYN_END                = CPUHP_BP_PREPARE_DYN + 20,
        CPUHP_BRINGUP_CPU,
index 6b16a69..58aea2d 100644 (file)
@@ -45,9 +45,13 @@ struct task_delay_info {
        u64 compact_start;
        u64 compact_delay;      /* wait for memory compact */
 
+       u64 wpcopy_start;
+       u64 wpcopy_delay;       /* wait for write-protect copy */
+
        u32 freepages_count;    /* total count of memory reclaim */
        u32 thrashing_count;    /* total count of thrash waits */
        u32 compact_count;      /* total count of memory compact */
+       u32 wpcopy_count;       /* total count of write-protect copy */
 };
 #endif
 
@@ -75,6 +79,8 @@ extern void __delayacct_swapin_start(void);
 extern void __delayacct_swapin_end(void);
 extern void __delayacct_compact_start(void);
 extern void __delayacct_compact_end(void);
+extern void __delayacct_wpcopy_start(void);
+extern void __delayacct_wpcopy_end(void);
 
 static inline void delayacct_tsk_init(struct task_struct *tsk)
 {
@@ -191,6 +197,24 @@ static inline void delayacct_compact_end(void)
                __delayacct_compact_end();
 }
 
+static inline void delayacct_wpcopy_start(void)
+{
+       if (!static_branch_unlikely(&delayacct_key))
+               return;
+
+       if (current->delays)
+               __delayacct_wpcopy_start();
+}
+
+static inline void delayacct_wpcopy_end(void)
+{
+       if (!static_branch_unlikely(&delayacct_key))
+               return;
+
+       if (current->delays)
+               __delayacct_wpcopy_end();
+}
+
 #else
 static inline void delayacct_init(void)
 {}
@@ -225,6 +249,10 @@ static inline void delayacct_compact_start(void)
 {}
 static inline void delayacct_compact_end(void)
 {}
+static inline void delayacct_wpcopy_start(void)
+{}
+static inline void delayacct_wpcopy_end(void)
+{}
 
 #endif /* CONFIG_TASK_DELAY_ACCT */
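
A minimal caller sketch for the new write-protect copy accounting, assuming a hypothetical mm helper; the merge itself does not show which page-fault path invokes the pair, so the function below is illustrative only:

#include <linux/delayacct.h>
#include <linux/highmem.h>

/* Hypothetical caller: bracket the copy of a write-protected page so the
 * wait is charged to the current task via the new wpcopy counters. */
static void example_wp_copy(struct page *dst, struct page *src,
                            unsigned long addr, struct vm_area_struct *vma)
{
        delayacct_wpcopy_start();
        copy_user_highpage(dst, src, addr, vma);
        delayacct_wpcopy_end();
}
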
 
index 073f1b0..dc94199 100644 (file)
@@ -387,6 +387,75 @@ struct dev_msi_info {
 };
 
 /**
+ * enum device_physical_location_panel - Describes which panel surface of the
+ * system's housing the device connection point resides on.
+ * @DEVICE_PANEL_TOP: Device connection point is on the top panel.
+ * @DEVICE_PANEL_BOTTOM: Device connection point is on the bottom panel.
+ * @DEVICE_PANEL_LEFT: Device connection point is on the left panel.
+ * @DEVICE_PANEL_RIGHT: Device connection point is on the right panel.
+ * @DEVICE_PANEL_FRONT: Device connection point is on the front panel.
+ * @DEVICE_PANEL_BACK: Device connection point is on the back panel.
+ * @DEVICE_PANEL_UNKNOWN: The panel with the device connection point is unknown.
+ */
+enum device_physical_location_panel {
+       DEVICE_PANEL_TOP,
+       DEVICE_PANEL_BOTTOM,
+       DEVICE_PANEL_LEFT,
+       DEVICE_PANEL_RIGHT,
+       DEVICE_PANEL_FRONT,
+       DEVICE_PANEL_BACK,
+       DEVICE_PANEL_UNKNOWN,
+};
+
+/**
+ * enum device_physical_location_vertical_position - Describes vertical
+ * position of the device connection point on the panel surface.
+ * @DEVICE_VERT_POS_UPPER: Device connection point is at upper part of panel.
+ * @DEVICE_VERT_POS_CENTER: Device connection point is at center part of panel.
+ * @DEVICE_VERT_POS_LOWER: Device connection point is at lower part of panel.
+ */
+enum device_physical_location_vertical_position {
+       DEVICE_VERT_POS_UPPER,
+       DEVICE_VERT_POS_CENTER,
+       DEVICE_VERT_POS_LOWER,
+};
+
+/**
+ * enum device_physical_location_horizontal_position - Describes horizontal
+ * position of the device connection point on the panel surface.
+ * @DEVICE_HORI_POS_LEFT: Device connection point is at left part of panel.
+ * @DEVICE_HORI_POS_CENTER: Device connection point is at center part of panel.
+ * @DEVICE_HORI_POS_RIGHT: Device connection point is at right part of panel.
+ */
+enum device_physical_location_horizontal_position {
+       DEVICE_HORI_POS_LEFT,
+       DEVICE_HORI_POS_CENTER,
+       DEVICE_HORI_POS_RIGHT,
+};
+
+/**
+ * struct device_physical_location - Device data related to physical location
+ * of the device connection point.
+ * @panel: Panel surface of the system's housing that the device connection
+ *         point resides on.
+ * @vertical_position: Vertical position of the device connection point within
+ *                     the panel.
+ * @horizontal_position: Horizontal position of the device connection point
+ *                       within the panel.
+ * @dock: Set if the device connection point resides in a docking station or
+ *        port replicator.
+ * @lid: Set if this device connection point resides on the lid of a laptop
+ *       system.
+ */
+struct device_physical_location {
+       enum device_physical_location_panel panel;
+       enum device_physical_location_vertical_position vertical_position;
+       enum device_physical_location_horizontal_position horizontal_position;
+       bool dock;
+       bool lid;
+};
+
+/**
  * struct device - The basic device structure
  * @parent:    The device's "parent" device, the device to which it is attached.
  *             In most cases, a parent device is some sort of bus or host
@@ -451,6 +520,8 @@ struct dev_msi_info {
  *             device (i.e. the bus driver that discovered the device).
  * @iommu_group: IOMMU group the device belongs to.
  * @iommu:     Per device generic IOMMU runtime data
+ * @physical_location: Describes physical location of the device connection
+ *             point in the system housing.
  * @removable:  Whether the device can be removed from the system. This
  *              should be set by the subsystem / bus driver that discovered
  *              the device.
@@ -562,6 +633,8 @@ struct device {
        struct iommu_group      *iommu_group;
        struct dev_iommu        *iommu;
 
+       struct device_physical_location *physical_location;
+
        enum device_removable   removable;
 
        bool                    offline_disabled:1;
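
A hedged sketch of how a bus or firmware layer might populate the new field; whether and where the kernel actually fills it in (for example from platform location data) is outside this hunk, and all example_* names are illustrative:

#include <linux/device.h>
#include <linux/slab.h>

static int example_set_physical_location(struct device *dev)
{
        struct device_physical_location *loc;

        loc = kzalloc(sizeof(*loc), GFP_KERNEL);
        if (!loc)
                return -ENOMEM;

        loc->panel = DEVICE_PANEL_FRONT;                /* front of the chassis */
        loc->vertical_position = DEVICE_VERT_POS_UPPER;
        loc->horizontal_position = DEVICE_HORI_POS_LEFT;
        loc->dock = false;
        loc->lid = false;

        dev->physical_location = loc;   /* lifetime handling not shown in this hunk */
        return 0;
}
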
index a039ab8..d8b29cc 100644 (file)
@@ -59,6 +59,8 @@ struct fwnode_handle;
  *             bus supports.
  * @dma_configure:     Called to setup DMA configuration on a device on
  *                     this bus.
+ * @dma_cleanup:       Called to cleanup DMA configuration on a device on
+ *                     this bus.
  * @pm:                Power management operations of this bus, callback the specific
  *             device driver's pm-ops.
  * @iommu_ops:  IOMMU specific operations for this bus, used to attach IOMMU
@@ -103,6 +105,7 @@ struct bus_type {
        int (*num_vf)(struct device *dev);
 
        int (*dma_configure)(struct device *dev);
+       void (*dma_cleanup)(struct device *dev);
 
        const struct dev_pm_ops *pm;
 
index 15e7c5e..7004530 100644 (file)
@@ -151,6 +151,8 @@ extern int __must_check driver_create_file(struct device_driver *driver,
 extern void driver_remove_file(struct device_driver *driver,
                               const struct driver_attribute *attr);
 
+int driver_set_override(struct device *dev, const char **override,
+                       const char *s, size_t len);
 extern int __must_check driver_for_each_device(struct device_driver *drv,
                                               struct device *start,
                                               void *data,
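
The new driver_set_override() helper centralizes the allocation and freeing that bus-specific driver_override sysfs handlers used to open-code. A hedged sketch of such a store callback, with a made-up bus device type:

#include <linux/device.h>

/* Hypothetical bus device; only the driver_override member matters here. */
struct my_bus_device {
        struct device dev;
        const char *driver_override;
};

static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct my_bus_device *mydev = container_of(dev, struct my_bus_device, dev);
        int ret;

        /* Copies the string and releases any previously set override. */
        ret = driver_set_override(dev, &mydev->driver_override, buf, count);
        if (ret)
                return ret;

        return count;
}
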
index 565c5ff..3f31ced 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef _LINUX_EXPORT_H
 #define _LINUX_EXPORT_H
 
+#include <linux/stringify.h>
+
 /*
  * Export symbols from the kernel to modules.  Forked from module.h
  * to reduce the amount of pointless cruft we feed to gcc when only
@@ -140,7 +142,6 @@ struct kernel_symbol {
 #endif /* CONFIG_MODULES */
 
 #ifdef DEFAULT_SYMBOL_NAMESPACE
-#include <linux/stringify.h>
 #define _EXPORT_SYMBOL(sym, sec)       __EXPORT_SYMBOL(sym, sec, __stringify(DEFAULT_SYMBOL_NAMESPACE))
 #else
 #define _EXPORT_SYMBOL(sym, sec)       __EXPORT_SYMBOL(sym, sec, "")
@@ -148,8 +149,8 @@ struct kernel_symbol {
 
 #define EXPORT_SYMBOL(sym)             _EXPORT_SYMBOL(sym, "")
 #define EXPORT_SYMBOL_GPL(sym)         _EXPORT_SYMBOL(sym, "_gpl")
-#define EXPORT_SYMBOL_NS(sym, ns)      __EXPORT_SYMBOL(sym, "", #ns)
-#define EXPORT_SYMBOL_NS_GPL(sym, ns)  __EXPORT_SYMBOL(sym, "_gpl", #ns)
+#define EXPORT_SYMBOL_NS(sym, ns)      __EXPORT_SYMBOL(sym, "", __stringify(ns))
+#define EXPORT_SYMBOL_NS_GPL(sym, ns)  __EXPORT_SYMBOL(sym, "_gpl", __stringify(ns))
 
 #endif /* !__ASSEMBLY__ */
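
One reading of routing EXPORT_SYMBOL_NS()/EXPORT_SYMBOL_NS_GPL() through __stringify() is that the namespace argument may itself be a preprocessor define, matching how MODULE_IMPORT_NS() already expands its argument. A small illustration with made-up names:

#include <linux/export.h>
#include <linux/module.h>

#define FOO_NS FOO_NAMESPACE    /* expands before stringification now */

int foo_get_version(void)
{
        return 1;
}
EXPORT_SYMBOL_NS_GPL(foo_get_version, FOO_NS);

/* A consumer module pairs this with: MODULE_IMPORT_NS(FOO_NS); */
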
 
index 0c19010..685401d 100644 (file)
@@ -296,7 +296,7 @@ static inline void devm_extcon_unregister_notifier_all(struct device *dev,
 
 static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
 {
-       return ERR_PTR(-ENODEV);
+       return NULL;
 }
 
 static inline struct extcon_dev *extcon_find_edev_by_node(struct device_node *node)
index d0e7817..e066816 100644 (file)
@@ -125,7 +125,7 @@ int iterate_fd(struct files_struct *, unsigned,
 
 extern int close_fd(unsigned int fd);
 extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
-extern int close_fd_get_file(unsigned int fd, struct file **res);
+extern struct file *close_fd_get_file(unsigned int fd);
 extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
                      struct files_struct **new_fdp);
 
index 51e830b..39704ea 100644 (file)
@@ -14,7 +14,6 @@
 struct file;
 
 extern void fput(struct file *);
-extern void fput_many(struct file *, unsigned int);
 
 struct file_operations;
 struct task_struct;
@@ -47,7 +46,6 @@ static inline void fdput(struct fd fd)
 }
 
 extern struct file *fget(unsigned int fd);
-extern struct file *fget_many(unsigned int fd, unsigned int refs);
 extern struct file *fget_raw(unsigned int fd);
 extern struct file *fget_task(struct task_struct *task, unsigned int fd);
 extern unsigned long __fdget(unsigned int fd);
index 5bb6db2..424ef67 100644 (file)
@@ -21,8 +21,8 @@ extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long siz
 /**
  * find_next_bit - find the next set bit in a memory region
  * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
  * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
  *
  * Returns the bit number for the next set bit
  * If no bits are set, returns @size.
@@ -50,8 +50,8 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
  * find_next_and_bit - find the next set bit in both memory regions
  * @addr1: The first address to base the search on
  * @addr2: The second address to base the search on
- * @offset: The bitnumber to start searching at
  * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
  *
  * Returns the bit number for the next set bit
  * If no bits are set, returns @size.
@@ -79,8 +79,8 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
 /**
  * find_next_zero_bit - find the next cleared bit in a memory region
  * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
  * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
  *
  * Returns the bit number of the next zero bit
  * If no bits are zero, returns @size.
index ec2ccfe..de7fea3 100644 (file)
@@ -17,6 +17,64 @@ struct firmware {
        void *priv;
 };
 
+/**
+ * enum fw_upload_err - firmware upload error codes
+ * @FW_UPLOAD_ERR_NONE: returned to indicate success
+ * @FW_UPLOAD_ERR_HW_ERROR: error signalled by hardware, see kernel log
+ * @FW_UPLOAD_ERR_TIMEOUT: SW timed out on handshake with HW/firmware
+ * @FW_UPLOAD_ERR_CANCELED: upload was cancelled by the user
+ * @FW_UPLOAD_ERR_BUSY: there is an upload operation already in progress
+ * @FW_UPLOAD_ERR_INVALID_SIZE: invalid firmware image size
+ * @FW_UPLOAD_ERR_RW_ERROR: read or write to HW failed, see kernel log
+ * @FW_UPLOAD_ERR_WEAROUT: FLASH device is approaching wear-out, wait & retry
+ * @FW_UPLOAD_ERR_MAX: Maximum error code marker
+ */
+enum fw_upload_err {
+       FW_UPLOAD_ERR_NONE,
+       FW_UPLOAD_ERR_HW_ERROR,
+       FW_UPLOAD_ERR_TIMEOUT,
+       FW_UPLOAD_ERR_CANCELED,
+       FW_UPLOAD_ERR_BUSY,
+       FW_UPLOAD_ERR_INVALID_SIZE,
+       FW_UPLOAD_ERR_RW_ERROR,
+       FW_UPLOAD_ERR_WEAROUT,
+       FW_UPLOAD_ERR_MAX
+};
+
+struct fw_upload {
+       void *dd_handle; /* reference to parent driver */
+       void *priv;      /* firmware loader private fields */
+};
+
+/**
+ * struct fw_upload_ops - device specific operations to support firmware upload
+ * @prepare:             Required: Prepare secure update
+ * @write:               Required: The write() op receives the remaining
+ *                       size to be written and must return the actual
+ *                       size written or a negative error code. The write()
+ *                       op will be called repeatedly until all data is
+ *                       written.
+ * @poll_complete:       Required: Check for the completion of the
+ *                       HW authentication/programming process.
+ * @cancel:              Required: Request cancellation of update. This op
+ *                       is called from the context of a different kernel
+ *                       thread, so race conditions need to be considered.
+ * @cleanup:             Optional: Complements the prepare()
+ *                       function and is called at the completion
+ *                       of the update, on success or failure, if the
+ *                       prepare function succeeded.
+ */
+struct fw_upload_ops {
+       enum fw_upload_err (*prepare)(struct fw_upload *fw_upload,
+                                     const u8 *data, u32 size);
+       enum fw_upload_err (*write)(struct fw_upload *fw_upload,
+                                   const u8 *data, u32 offset,
+                                   u32 size, u32 *written);
+       enum fw_upload_err (*poll_complete)(struct fw_upload *fw_upload);
+       void (*cancel)(struct fw_upload *fw_upload);
+       void (*cleanup)(struct fw_upload *fw_upload);
+};
+
 struct module;
 struct device;
 
@@ -112,6 +170,30 @@ static inline int request_partial_firmware_into_buf
 
 #endif
 
+#ifdef CONFIG_FW_UPLOAD
+
+struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+                        const char *name, const struct fw_upload_ops *ops,
+                        void *dd_handle);
+void firmware_upload_unregister(struct fw_upload *fw_upload);
+
+#else
+
+static inline struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+                        const char *name, const struct fw_upload_ops *ops,
+                        void *dd_handle)
+{
+               return ERR_PTR(-EINVAL);
+}
+
+static inline void firmware_upload_unregister(struct fw_upload *fw_upload)
+{
+}
+
+#endif
+
 int firmware_request_cache(struct device *device, const char *name);
 
 #endif
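
A hedged sketch of a driver wiring up the new firmware upload interface; every xyz_* name and the "xyz-fw" node name are placeholders, and the callbacks only pretend to touch hardware:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/module.h>

struct xyz_priv {
        struct fw_upload *fwl;
};

static enum fw_upload_err xyz_prepare(struct fw_upload *fwl,
                                      const u8 *data, u32 size)
{
        return FW_UPLOAD_ERR_NONE;              /* e.g. validate image header */
}

static enum fw_upload_err xyz_write(struct fw_upload *fwl, const u8 *data,
                                    u32 offset, u32 size, u32 *written)
{
        *written = size;                        /* pretend the chunk was flashed */
        return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err xyz_poll_complete(struct fw_upload *fwl)
{
        return FW_UPLOAD_ERR_NONE;              /* hardware reports completion */
}

static void xyz_cancel(struct fw_upload *fwl)
{
        /* ask the hardware to abort, if it supports that */
}

static const struct fw_upload_ops xyz_fw_ops = {
        .prepare        = xyz_prepare,
        .write          = xyz_write,
        .poll_complete  = xyz_poll_complete,
        .cancel         = xyz_cancel,
};

static int xyz_register_upload(struct device *parent, struct xyz_priv *priv)
{
        priv->fwl = firmware_upload_register(THIS_MODULE, parent, "xyz-fw",
                                             &xyz_fw_ops, priv);
        if (IS_ERR(priv->fwl))
                return PTR_ERR(priv->fwl);

        return 0;
}

On teardown the driver would call firmware_upload_unregister(priv->fwl).
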
index 3f87c49..82e8254 100644 (file)
@@ -17,7 +17,7 @@ int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
                        event_cb_func_t cb_fun, void *data);
 
 int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
-                         const u32 event, event_cb_func_t cb_fun);
+                         const u32 event, event_cb_func_t cb_fun, void *data);
 #else
 static inline int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
                                      const u32 event, const bool wake,
@@ -27,7 +27,7 @@ static inline int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32
 }
 
 static inline int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
-                                        const u32 event, event_cb_func_t cb_fun)
+                                       const u32 event, event_cb_func_t cb_fun, void *data)
 {
        return -ENODEV;
 }
index 14f00a7..1ec73d5 100644 (file)
 
 /* SMC SIP service Call Function Identifier Prefix */
 #define PM_SIP_SVC                     0xC2000000
+
+/* PM API versions */
+#define PM_API_VERSION_2       2
+
+/* ATF only commands */
 #define PM_GET_TRUSTZONE_VERSION       0xa03
 #define PM_SET_SUSPEND_MODE            0xa02
 #define GET_CALLBACK_DATA              0xa01
@@ -460,6 +465,7 @@ int zynqmp_pm_load_pdi(const u32 src, const u64 address);
 int zynqmp_pm_register_notifier(const u32 node, const u32 event,
                                const u32 wake, const u32 enable);
 int zynqmp_pm_feature(const u32 api_id);
+int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id);
 int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value);
 int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload);
 #else
@@ -678,6 +684,11 @@ static inline int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
        return -ENODEV;
 }
 
+static inline int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
+{
+       return -ENODEV;
+}
+
 static inline int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id)
 {
        return -ENODEV;
index 3b87f23..9d4d329 100644 (file)
@@ -52,9 +52,9 @@ struct fpga_region {
 
 #define to_fpga_region(d) container_of(d, struct fpga_region, dev)
 
-struct fpga_region *fpga_region_class_find(
-       struct device *start, const void *data,
-       int (*match)(struct device *, const void *));
+struct fpga_region *
+fpga_region_class_find(struct device *start, const void *data,
+                      int (*match)(struct device *, const void *));
 
 int fpga_region_program_fpga(struct fpga_region *region);
 
index 01403e6..9ad5e35 100644 (file)
@@ -974,9 +974,7 @@ static inline struct file *get_file(struct file *f)
        atomic_long_inc(&f->f_count);
        return f;
 }
-#define get_file_rcu_many(x, cnt)      \
-       atomic_long_add_unless(&(x)->f_count, (cnt), 0)
-#define get_file_rcu(x) get_file_rcu_many((x), 1)
+#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
 #define file_count(x)  atomic_long_read(&(x)->f_count)
 
 #define        MAX_NON_LFS     ((1UL<<31) - 1)
@@ -2471,22 +2469,11 @@ struct super_block *sget(struct file_system_type *type,
 
 extern int register_filesystem(struct file_system_type *);
 extern int unregister_filesystem(struct file_system_type *);
-extern struct vfsmount *kern_mount(struct file_system_type *);
-extern void kern_unmount(struct vfsmount *mnt);
-extern int may_umount_tree(struct vfsmount *);
-extern int may_umount(struct vfsmount *);
-extern long do_mount(const char *, const char __user *,
-                    const char *, unsigned long, void *);
-extern struct vfsmount *collect_mounts(const struct path *);
-extern void drop_collected_mounts(struct vfsmount *);
-extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
-                         struct vfsmount *);
 extern int vfs_statfs(const struct path *, struct kstatfs *);
 extern int user_statfs(const char __user *, struct kstatfs *);
 extern int fd_statfs(int, struct kstatfs *);
 extern int freeze_super(struct super_block *super);
 extern int thaw_super(struct super_block *super);
-extern bool our_mnt(struct vfsmount *mnt);
 extern __printf(2, 3)
 int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
 extern int super_setup_bdi(struct super_block *sb);
index 7b6c42b..a86115b 100644 (file)
@@ -32,6 +32,13 @@ struct fsl_mc_io;
  * @shutdown: Function called at shutdown time to quiesce the device
  * @suspend: Function called when a device is stopped
  * @resume: Function called when a device is resumed
+ * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
+ *             For most device drivers, no need to care about this flag
+ *             Most device drivers don't need to care about this flag
+ *             as long as all DMA is handled through the kernel DMA API.
+ *             Some special drivers, for example VFIO drivers, know
+ *             how to manage the DMA themselves and set this flag so that
+ *             the IOMMU layer will allow them to set up and manage their
  *
  * Generic DPAA device driver object for device drivers that are registered
  * with a DPRC bus. This structure is to be embedded in each device-specific
@@ -45,6 +52,7 @@ struct fsl_mc_driver {
        void (*shutdown)(struct fsl_mc_device *dev);
        int (*suspend)(struct fsl_mc_device *dev, pm_message_t state);
        int (*resume)(struct fsl_mc_device *dev);
+       bool driver_managed_dma;
 };
 
 #define to_fsl_mc_driver(_drv) \
@@ -170,7 +178,9 @@ struct fsl_mc_obj_desc {
  * @regions: pointer to array of MMIO region entries
  * @irqs: pointer to array of pointers to interrupts allocated to this device
  * @resource: generic resource associated with this MC object device, if any.
- * @driver_override: driver name to force a match
+ * @driver_override: driver name to force a match; do not set directly,
+ *                   because core frees it; use driver_set_override() to
+ *                   set or clear it.
  *
  * Generic device object for MC object devices that are "attached" to a
  * MC bus.
@@ -204,7 +214,7 @@ struct fsl_mc_device {
        struct fsl_mc_device_irq **irqs;
        struct fsl_mc_resource *resource;
        struct device_link *consumer_link;
-       char   *driver_override;
+       const char *driver_override;
 };
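
A minimal sketch of a driver that opts out of kernel-managed DMA with the new flag; the vfio_fsl_example_* names are invented for illustration:

#include <linux/fsl/mc.h>

static int vfio_fsl_example_probe(struct fsl_mc_device *mc_dev)
{
        return 0;       /* placeholder */
}

static struct fsl_mc_driver vfio_fsl_example_driver = {
        .driver = {
                .name = "vfio-fsl-mc-example",
        },
        .probe              = vfio_fsl_example_probe,
        .driver_managed_dma = true,     /* this driver owns its I/O address space */
};
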
 
 #define to_fsl_mc_device(_dev) \
index e71f6e1..fe0f460 100644 (file)
@@ -109,6 +109,8 @@ int gpiod_get_direction(struct gpio_desc *desc);
 int gpiod_direction_input(struct gpio_desc *desc);
 int gpiod_direction_output(struct gpio_desc *desc, int value);
 int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
+int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
+int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
 
 /* Value get/set from non-sleeping context */
 int gpiod_get_value(const struct gpio_desc *desc);
@@ -350,8 +352,18 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
        WARN_ON(desc);
        return -ENOSYS;
 }
-
-
+static inline int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc,
+                                              unsigned long flags)
+{
+       WARN_ON(desc);
+       return -ENOSYS;
+}
+static inline int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc,
+                                               unsigned long flags)
+{
+       WARN_ON(desc);
+       return -ENOSYS;
+}
 static inline int gpiod_get_value(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
index 654184c..b1e0f1f 100644 (file)
@@ -333,6 +333,10 @@ struct gpio_irq_chip {
  * @add_pin_ranges: optional routine to initialize pin ranges, to be used when
  *     special mapping of the pins that provide GPIO functionality is required.
  *     It is called after adding GPIO chip and before adding IRQ chip.
+ * @en_hw_timestamp: Optional chip-specific routine to enable hardware
+ *     timestamping on a line.
+ * @dis_hw_timestamp: Optional chip-specific routine to disable hardware
+ *     timestamping on a line.
  * @base: identifies the first GPIO number handled by this chip;
  *     or, if negative during registration, requests dynamic ID allocation.
  *     DEPRECATION: providing anything non-negative and nailing the base
@@ -429,6 +433,12 @@ struct gpio_chip {
 
        int                     (*add_pin_ranges)(struct gpio_chip *gc);
 
+       int                     (*en_hw_timestamp)(struct gpio_chip *gc,
+                                                  u32 offset,
+                                                  unsigned long flags);
+       int                     (*dis_hw_timestamp)(struct gpio_chip *gc,
+                                                   u32 offset,
+                                                   unsigned long flags);
        int                     base;
        u16                     ngpio;
        u16                     offset;
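
A hedged consumer-side sketch of the new hardware timestamp calls; the "example" con_id is hypothetical and the flags encoding is left to the caller since it is provider specific:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_enable_line_timestamp(struct device *dev,
                                         unsigned long flags)
{
        struct gpio_desc *desc;

        desc = devm_gpiod_get(dev, "example", GPIOD_IN);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        /* gpiod_disable_hw_timestamp_ns(desc, flags) undoes this on teardown */
        return gpiod_enable_hw_timestamp_ns(desc, flags);
}
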
index 2647dd1..4d55da2 100644 (file)
@@ -64,6 +64,18 @@ struct gpiod_hog {
 };
 
 /*
+ * Helper for lookup tables with just one single lookup for a device.
+ */
+#define GPIO_LOOKUP_SINGLE(_name, _dev_id, _key, _chip_hwnum, _con_id, _flags) \
+static struct gpiod_lookup_table _name = {                             \
+       .dev_id = _dev_id,                                              \
+       .table = {                                                      \
+               GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags),        \
+               {},                                                     \
+       },                                                              \
+}
+
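
A usage sketch for the new single-entry helper; the chip label, hardware offset and consumer device name below are made up:

#include <linux/gpio/machine.h>
#include <linux/init.h>

GPIO_LOOKUP_SINGLE(example_enable_lookup, "leds-example",
                   "gpiochip0", 42, "enable", GPIO_ACTIVE_HIGH);

static int __init example_board_setup(void)
{
        gpiod_add_lookup_table(&example_enable_lookup);
        return 0;
}
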
+/*
  * Simple definition of a single GPIO under a con_id
  */
 #define GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags) \
diff --git a/include/linux/host1x_context_bus.h b/include/linux/host1x_context_bus.h
new file mode 100644 (file)
index 0000000..7246273
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+ */
+
+#ifndef __LINUX_HOST1X_CONTEXT_BUS_H
+#define __LINUX_HOST1X_CONTEXT_BUS_H
+
+#include <linux/device.h>
+
+#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
+extern struct bus_type host1x_context_device_bus_type;
+#endif
+
+#endif
diff --git a/include/linux/hte.h b/include/linux/hte.h
new file mode 100644 (file)
index 0000000..8289055
--- /dev/null
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_HTE_H
+#define __LINUX_HTE_H
+
+#include <linux/errno.h>
+
+struct hte_chip;
+struct hte_device;
+struct of_phandle_args;
+
+/**
+ * enum hte_edge - HTE line edge flags.
+ *
+ * @HTE_EDGE_NO_SETUP: No edge setup. In this case the consumer will set up the
+ * edges, for example during the request IRQ call.
+ * @HTE_RISING_EDGE_TS: Rising edge.
+ * @HTE_FALLING_EDGE_TS: Falling edge.
+ *
+ */
+enum hte_edge {
+       HTE_EDGE_NO_SETUP = 1U << 0,
+       HTE_RISING_EDGE_TS = 1U << 1,
+       HTE_FALLING_EDGE_TS = 1U << 2,
+};
+
+/**
+ * enum hte_return - HTE subsystem return values used during callback.
+ *
+ * @HTE_CB_HANDLED: The consumer handled the data.
+ * @HTE_RUN_SECOND_CB: The consumer needs further processing, in that case
+ * HTE subsystem calls secondary callback provided by the consumer where it
+ * is allowed to sleep.
+ */
+enum hte_return {
+       HTE_CB_HANDLED,
+       HTE_RUN_SECOND_CB,
+};
+
+/**
+ * struct hte_ts_data - HTE timestamp data.
+ *
+ * @tsc: Timestamp value.
+ * @seq: Sequence counter of the timestamps.
+ * @raw_level: Level of the line at the timestamp if provider supports it,
+ * -1 otherwise.
+ */
+struct hte_ts_data {
+       u64 tsc;
+       u64 seq;
+       int raw_level;
+};
+
+/**
+ * struct hte_clk_info - Clock source info that HTE provider uses to timestamp.
+ *
+ * @hz: Supported clock rate in Hz, for example a 1 kHz clock = 1000.
+ * @type: Supported clock type.
+ */
+struct hte_clk_info {
+       u64 hz;
+       clockid_t type;
+};
+
+/**
+ * typedef hte_ts_cb_t - HTE timestamp data processing primary callback.
+ *
+ * The callback is used to push timestamp data to the client and it is
+ * not allowed to sleep.
+ *
+ * @ts: HW timestamp data.
+ * @data: Client supplied data.
+ */
+typedef enum hte_return (*hte_ts_cb_t)(struct hte_ts_data *ts, void *data);
+
+/**
+ * typedef hte_ts_sec_cb_t - HTE timestamp data processing secondary callback.
+ *
+ * This is used when the client needs further processing where it is
+ * allowed to sleep.
+ *
+ * @data: Client supplied data.
+ *
+ */
+typedef enum hte_return (*hte_ts_sec_cb_t)(void *data);
+
+/**
+ * struct hte_line_attr - Line attributes.
+ *
+ * @line_id: The logical ID understood by the consumers and providers.
+ * @line_data: Line data related to line_id.
+ * @edge_flags: Edge setup flags.
+ * @name: Descriptive name of the entity that is being monitored for the
+ * hardware timestamping. If null, HTE core will construct the name.
+ *
+ */
+struct hte_line_attr {
+       u32 line_id;
+       void *line_data;
+       unsigned long edge_flags;
+       const char *name;
+};
+
+/**
+ * struct hte_ts_desc - HTE timestamp descriptor.
+ *
+ * This structure is a communication token between consumers and the
+ * subsystem, and between the subsystem and providers.
+ *
+ * @attr: The line attributes.
+ * @hte_data: Subsystem's private data, set by HTE subsystem.
+ */
+struct hte_ts_desc {
+       struct hte_line_attr attr;
+       void *hte_data;
+};
+
+/**
+ * struct hte_ops - HTE operations set by providers.
+ *
+ * @request: Hook for requesting a HTE timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @release: Hook for releasing a HTE timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @enable: Hook to enable the specified timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @disable: Hook to disable specified timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @get_clk_src_info: Hook to get the clock information the provider uses
+ * to timestamp. Returns 0 on success and a negative error code on failure. On
+ * success the HTE subsystem fills in the provided struct hte_clk_info.
+ *
+ * xlated_id parameter is used to communicate between HTE subsystem and the
+ * providers and is translated by the provider.
+ */
+struct hte_ops {
+       int (*request)(struct hte_chip *chip, struct hte_ts_desc *desc,
+                      u32 xlated_id);
+       int (*release)(struct hte_chip *chip, struct hte_ts_desc *desc,
+                      u32 xlated_id);
+       int (*enable)(struct hte_chip *chip, u32 xlated_id);
+       int (*disable)(struct hte_chip *chip, u32 xlated_id);
+       int (*get_clk_src_info)(struct hte_chip *chip,
+                               struct hte_clk_info *ci);
+};
+
+/**
+ * struct hte_chip - Abstract HTE chip.
+ *
+ * @name: functional name of the HTE IP block.
+ * @dev: device providing the HTE.
+ * @ops: callbacks for this HTE.
+ * @nlines: number of lines/signals supported by this chip.
+ * @xlate_of: Callback which translates consumer supplied logical ids to
+ * physical ids; returns 0 on success and a negative value on failure.
+ * On success it stores a value between 0 and @nlines in the xlated_id parameter.
+ * @xlate_plat: Same as above but for the consumers with no DT node.
+ * @match_from_linedata: Match HTE device using the line_data.
+ * @of_hte_n_cells: Number of cells used to form the HTE specifier.
+ * @gdev: HTE subsystem abstract device, internal to the HTE subsystem.
+ * @data: chip specific private data.
+ */
+struct hte_chip {
+       const char *name;
+       struct device *dev;
+       const struct hte_ops *ops;
+       u32 nlines;
+       int (*xlate_of)(struct hte_chip *gc,
+                       const struct of_phandle_args *args,
+                       struct hte_ts_desc *desc, u32 *xlated_id);
+       int (*xlate_plat)(struct hte_chip *gc, struct hte_ts_desc *desc,
+                        u32 *xlated_id);
+       bool (*match_from_linedata)(const struct hte_chip *chip,
+                                   const struct hte_ts_desc *hdesc);
+       u8 of_hte_n_cells;
+
+       struct hte_device *gdev;
+       void *data;
+};
+
+#if IS_ENABLED(CONFIG_HTE)
+/* HTE APIs for the providers */
+int devm_hte_register_chip(struct hte_chip *chip);
+int hte_push_ts_ns(const struct hte_chip *chip, u32 xlated_id,
+                  struct hte_ts_data *data);
+
+/* HTE APIs for the consumers */
+int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+                      unsigned long edge_flags, const char *name,
+                      void *data);
+int hte_ts_get(struct device *dev, struct hte_ts_desc *desc, int index);
+int hte_ts_put(struct hte_ts_desc *desc);
+int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+                     hte_ts_sec_cb_t tcb, void *data);
+int devm_hte_request_ts_ns(struct device *dev, struct hte_ts_desc *desc,
+                          hte_ts_cb_t cb, hte_ts_sec_cb_t tcb, void *data);
+int of_hte_req_count(struct device *dev);
+int hte_enable_ts(struct hte_ts_desc *desc);
+int hte_disable_ts(struct hte_ts_desc *desc);
+int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+                        struct hte_clk_info *ci);
+
+#else /* !CONFIG_HTE */
+static inline int devm_hte_register_chip(struct hte_chip *chip)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int hte_push_ts_ns(const struct hte_chip *chip,
+                                u32 xlated_id,
+                                const struct hte_ts_data *data)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+                                    unsigned long edge_flags,
+                                    const char *name, void *data)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int hte_ts_get(struct device *dev, struct hte_ts_desc *desc,
+                            int index)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int hte_ts_put(struct hte_ts_desc *desc)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+                                   hte_ts_sec_cb_t tcb, void *data)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int devm_hte_request_ts_ns(struct device *dev,
+                                        struct hte_ts_desc *desc,
+                                        hte_ts_cb_t cb,
+                                        hte_ts_sec_cb_t tcb,
+                                        void *data)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int of_hte_req_count(struct device *dev)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int hte_enable_ts(struct hte_ts_desc *desc)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int hte_disable_ts(struct hte_ts_desc *desc)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+                                      struct hte_clk_info *ci)
+{
+       return -EOPNOTSUPP;
+}
+#endif /* !CONFIG_HTE */
+
+#endif
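
A hedged consumer sketch for the new HTE interfaces: request the first line described for this device and log each hardware timestamp from the primary (non-sleeping) callback. The device wiring and all example_* names are assumptions:

#include <linux/device.h>
#include <linux/hte.h>
#include <linux/slab.h>

static enum hte_return example_ts_cb(struct hte_ts_data *ts, void *data)
{
        struct device *dev = data;

        dev_dbg(dev, "seq %llu ts %llu level %d\n",
                (unsigned long long)ts->seq,
                (unsigned long long)ts->tsc, ts->raw_level);
        return HTE_CB_HANDLED;
}

static int example_hte_setup(struct device *dev)
{
        struct hte_ts_desc *desc;
        int ret;

        desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        ret = hte_ts_get(dev, desc, 0);         /* first line for this device */
        if (ret)
                return ret;

        /* no secondary (sleepable) callback is needed for this sketch */
        return devm_hte_request_ts_ns(dev, desc, example_ts_cb, NULL, dev);
}
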
index a246429..3b42264 100644 (file)
@@ -1292,7 +1292,11 @@ struct hv_device {
        u16 device_id;
 
        struct device device;
-       char *driver_override; /* Driver name to force a match */
+       /*
+        * Driver name to force a match.  Do not set directly, because core
+        * frees it.  Use driver_set_override() to set or clear it.
+        */
+       const char *driver_override;
 
        struct vmbus_channel *channel;
        struct kset          *channels_kset;
index c525fd5..7852f6c 100644 (file)
@@ -32,26 +32,34 @@ struct iio_dev;
 /**
  * struct ad_sigma_delta_info - Sigma Delta driver specific callbacks and options
  * @set_channel: Will be called to select the current channel, may be NULL.
+ * @append_status: Will be called to enable status append at the end of the sample, may be NULL.
  * @set_mode: Will be called to select the current mode, may be NULL.
+ * @disable_all: Will be called to disable all channels, may be NULL.
  * @postprocess_sample: Is called for each sampled data word, can be used to
  *             modify or drop the sample data; may be NULL.
  * @has_registers: true if the device has writable and readable registers, false
  *             if there is just one read-only sample data shift register.
  * @addr_shift: Shift of the register address in the communications register.
  * @read_mask: Mask for the communications register having the read bit set.
+ * @status_ch_mask: Mask for the channel number stored in status register.
  * @data_reg: Address of the data register, if 0 the default address of 0x3 will
  *   be used.
  * @irq_flags: flags for the interrupt used by the triggered buffer
+ * @num_slots: Number of sequencer slots
  */
 struct ad_sigma_delta_info {
        int (*set_channel)(struct ad_sigma_delta *, unsigned int channel);
+       int (*append_status)(struct ad_sigma_delta *, bool append);
        int (*set_mode)(struct ad_sigma_delta *, enum ad_sigma_delta_mode mode);
+       int (*disable_all)(struct ad_sigma_delta *);
        int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample);
        bool has_registers;
        unsigned int addr_shift;
        unsigned int read_mask;
+       unsigned int status_ch_mask;
        unsigned int data_reg;
        unsigned long irq_flags;
+       unsigned int num_slots;
 };
 
 /**
@@ -76,6 +84,13 @@ struct ad_sigma_delta {
        uint8_t                 comm;
 
        const struct ad_sigma_delta_info *info;
+       unsigned int            active_slots;
+       unsigned int            current_slot;
+       unsigned int            num_slots;
+       bool                    status_appended;
+       /* map slots to channels in order to know what to expect from devices */
+       unsigned int            *slots;
+       uint8_t                 *samples_buf;
 
        /*
         * DMA (thus cache coherency maintenance) requires the
@@ -97,6 +112,29 @@ static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
        return 0;
 }
 
+static inline int ad_sigma_delta_append_status(struct ad_sigma_delta *sd, bool append)
+{
+       int ret;
+
+       if (sd->info->append_status) {
+               ret = sd->info->append_status(sd, append);
+               if (ret < 0)
+                       return ret;
+
+               sd->status_appended = append;
+       }
+
+       return 0;
+}
+
+static inline int ad_sigma_delta_disable_all(struct ad_sigma_delta *sd)
+{
+       if (sd->info->disable_all)
+               return sd->info->disable_all(sd);
+
+       return 0;
+}
+
 static inline int ad_sigma_delta_set_mode(struct ad_sigma_delta *sd,
        unsigned int mode)
 {
index 22f6784..db4a1b2 100644 (file)
@@ -237,6 +237,7 @@ struct st_sensor_settings {
  * @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
  * @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
  * @buffer_data: Data used by buffer part.
+ * @odr_lock: Local lock for preventing concurrent ODR accesses/changes
  */
 struct st_sensor_data {
        struct iio_trigger *trig;
@@ -261,6 +262,8 @@ struct st_sensor_data {
        s64 hw_timestamp;
 
        char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned;
+
+       struct mutex odr_lock;
 };
 
 #ifdef CONFIG_IIO_BUFFER
index 2be12b7..6b3586b 100644 (file)
@@ -7,6 +7,9 @@
  * struct iio_dev_opaque - industrial I/O device opaque information
  * @indio_dev:                 public industrial I/O device information
  * @id:                        used to identify device internally
+ * @currentmode:               operating mode currently in use; may eventually be
+ *                             checked by device drivers but should be considered
+ *                             read-only as this is a core internal bit
  * @driver_module:             used to make it harder to undercut users
  * @info_exist_lock:           lock to prevent use during removal
  * @trig_readonly:             mark the current trigger immutable
@@ -36,6 +39,7 @@
  */
 struct iio_dev_opaque {
        struct iio_dev                  indio_dev;
+       int                             currentmode;
        int                             id;
        struct module                   *driver_module;
        struct mutex                    info_exist_lock;
index faf00f2..233d2e6 100644 (file)
@@ -315,7 +315,54 @@ static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
 s64 iio_get_time_ns(const struct iio_dev *indio_dev);
 unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
 
-/* Device operating modes */
+/*
+ * Device operating modes
+ * @INDIO_DIRECT_MODE: There is an access to either:
+ * a) The last single value available for devices that do not provide
+ *    on-demand reads.
+ * b) A new value after performing an on-demand read otherwise.
+ * On most devices, this is a single-shot read. On some devices with data
+ * streams without an 'on-demand' function, this might also be the 'last value'
+ * feature. Above all, this mode internally means that we are not in any of the
+ * other modes, and sysfs reads should work.
+ * Device drivers should inform the core if they support this mode.
+ * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers.
+ * It indicates that an explicit trigger is required. This requests the core to
+ * attach a poll function when enabling the buffer, which is indicated by the
+ * _TRIGGERED suffix.
+ * The core will ensure this mode is set when registering a triggered buffer
+ * with iio_triggered_buffer_setup().
+ * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not event triggered.
+ * No poll function can be attached because there is no triggered infrastructure
+ * we can use to cause capture. There is a kfifo that the driver will fill, but
+ * not "only one scan at a time". Typically, hardware will have a buffer that
+ * can hold multiple scans. Software may read one or more scans at a time
+ * and push the available data to a kfifo. This means the core will not attach
+ * any poll function when enabling the buffer.
+ * The core will ensure this mode is set when registering a simple kfifo buffer
+ * with devm_iio_kfifo_buffer_setup().
+ * @INDIO_BUFFER_HARDWARE: For specific hardware, if unsure do not use this mode.
+ * Same as above but this time the buffer is not a kfifo where we have direct
+ * access to the data. Instead, the consumer driver must access the data through
+ * non software visible channels (or DMA when there is no demux possible in
+ * software)
+ * The core will ensure this mode is set when registering a dmaengine buffer
+ * with devm_iio_dmaengine_buffer_setup().
+ * @INDIO_EVENT_TRIGGERED: Very unusual mode.
+ * Triggers usually refer to an external event which will start data capture.
+ * Here it is rather the opposite: a particular state of the data produces an
+ * event rather than starting a capture. We don't necessarily have access to
+ * the data itself, but only to the event produced. For example, this
+ * can be a threshold detector. The internal path of this mode is very close to
+ * the INDIO_BUFFER_TRIGGERED mode.
+ * The core will ensure this mode is set when registering a triggered event.
+ * @INDIO_HARDWARE_TRIGGERED: Very unusual mode.
+ * Here, triggers can result in data capture and can be routed to multiple
+ * hardware components, which makes them close to regular triggers in the way
+ * they must be managed by the core, but without the entire interrupts/poll
+ * functions burden. Interrupts are irrelevant as the data flow is hardware
+ * mediated and distributed.
+ */
 #define INDIO_DIRECT_MODE              0x01
 #define INDIO_BUFFER_TRIGGERED         0x02
 #define INDIO_BUFFER_SOFTWARE          0x04
@@ -488,8 +535,12 @@ struct iio_buffer_setup_ops {
 
 /**
  * struct iio_dev - industrial I/O device
- * @modes:             [DRIVER] operating modes supported by device
- * @currentmode:       [INTERN] current operating mode
+ * @modes:             [DRIVER] bitmask listing all the operating modes
+ *                     supported by the IIO device. This list should be
+ *                     initialized before registering the IIO device. It can
+ *                     also be filled in by the IIO core, as a result of
+ *                     enabling particular features in the driver
+ *                     (see iio_triggered_event_setup()).
  * @dev:               [DRIVER] device structure, should be assigned a parent
  *                     and owner
  * @buffer:            [DRIVER] any buffer present
@@ -516,7 +567,6 @@ struct iio_buffer_setup_ops {
  */
 struct iio_dev {
        int                             modes;
-       int                             currentmode;
        struct device                   dev;
 
        struct iio_buffer               *buffer;
@@ -543,6 +593,8 @@ struct iio_dev {
 };
 
 int iio_device_id(struct iio_dev *indio_dev);
+int iio_device_get_current_mode(struct iio_dev *indio_dev);
+bool iio_buffer_enabled(struct iio_dev *indio_dev);
 
 const struct iio_chan_spec
 *iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
@@ -672,16 +724,6 @@ struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);
 __printf(2, 3)
 struct iio_trigger *devm_iio_trigger_alloc(struct device *parent,
                                           const char *fmt, ...);
-/**
- * iio_buffer_enabled() - helper function to test if the buffer is enabled
- * @indio_dev:         IIO device structure for device
- **/
-static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
-{
-       return indio_dev->currentmode
-               & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
-                  INDIO_BUFFER_SOFTWARE);
-}
 
 /**
  * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
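
With currentmode moved into iio_dev_opaque, drivers are expected to go through the two new accessors rather than reading the field. A hedged sketch of a read_raw() path doing that; the -EBUSY/-EINVAL policy is only an example:

#include <linux/errno.h>
#include <linux/iio/iio.h>

static int example_read_raw(struct iio_dev *indio_dev,
                            struct iio_chan_spec const *chan,
                            int *val, int *val2, long mask)
{
        if (iio_buffer_enabled(indio_dev))
                return -EBUSY;          /* buffered capture owns the device */

        if (iio_device_get_current_mode(indio_dev) == INDIO_HARDWARE_TRIGGERED)
                return -EINVAL;         /* hypothetical restriction */

        *val = 0;                       /* placeholder single-shot result */
        return IIO_VAL_INT;
}
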
index ccd2cea..8a83fb5 100644 (file)
@@ -12,11 +12,10 @@ void iio_kfifo_free(struct iio_buffer *r);
 
 int devm_iio_kfifo_buffer_setup_ext(struct device *dev,
                                    struct iio_dev *indio_dev,
-                                   int mode_flags,
                                    const struct iio_buffer_setup_ops *setup_ops,
                                    const struct attribute **buffer_attrs);
 
-#define devm_iio_kfifo_buffer_setup(dev, indio_dev, mode_flags, setup_ops)     \
-       devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (mode_flags), (setup_ops), NULL)
+#define devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops) \
+       devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (setup_ops), NULL)
 
 #endif
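
The call-site change that follows from dropping the mode_flags argument, as a short sketch; example_alloc_buffer() is a placeholder wrapper:

#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>

static int example_alloc_buffer(struct device *dev, struct iio_dev *indio_dev,
                                const struct iio_buffer_setup_ops *setup_ops)
{
        /* the core now marks the device INDIO_BUFFER_SOFTWARE itself */
        return devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops);
}
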
index 2f9891c..4f29139 100644 (file)
@@ -539,7 +539,8 @@ struct dmar_domain {
 
        u8 has_iotlb_device: 1;
        u8 iommu_coherency: 1;          /* indicate coherency of iommu access */
-       u8 iommu_snooping: 1;           /* indicate snooping control feature */
+       u8 force_snooping : 1;          /* Create IOPTEs with snoop control */
+       u8 set_pte_snp:1;
 
        struct list_head devices;       /* all devices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */
index b3b125b..207ef06 100644 (file)
@@ -9,7 +9,7 @@
 #define __INTEL_SVM_H__
 
 /* Page Request Queue depth */
-#define PRQ_ORDER      2
+#define PRQ_ORDER      4
 #define PRQ_RING_MASK  ((0x1000 << PRQ_ORDER) - 0x20)
 #define PRQ_DEPTH      ((0x1000 << PRQ_ORDER) >> 5)
 
index 9208eca..5e1afe1 100644 (file)
@@ -103,10 +103,11 @@ static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
 }
 
 enum iommu_cap {
-       IOMMU_CAP_CACHE_COHERENCY,      /* IOMMU can enforce cache coherent DMA
-                                          transactions */
+       IOMMU_CAP_CACHE_COHERENCY,      /* IOMMU_CACHE is supported */
        IOMMU_CAP_INTR_REMAP,           /* IOMMU supports interrupt isolation */
        IOMMU_CAP_NOEXEC,               /* IOMMU_NOEXEC flag */
+       IOMMU_CAP_PRE_BOOT_PROTECTION,  /* Firmware says it used the IOMMU for
+                                          DMA protection and we should too */
 };
 
 /* These are the possible reserved region types */
@@ -272,6 +273,9 @@ struct iommu_ops {
  * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
  *            queue
  * @iova_to_phys: translate iova to physical address
+ * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
+ *                           including no-snoop TLPs on PCIe or other platform
+ *                           specific mechanisms.
  * @enable_nesting: Enable nesting
  * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
  * @free: Release the domain after use.
@@ -300,6 +304,7 @@ struct iommu_domain_ops {
        phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
                                    dma_addr_t iova);
 
+       bool (*enforce_cache_coherency)(struct iommu_domain *domain);
        int (*enable_nesting)(struct iommu_domain *domain);
        int (*set_pgtable_quirks)(struct iommu_domain *domain,
                                  unsigned long quirks);
@@ -407,16 +412,10 @@ static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
        return dev->iommu->iommu_dev->ops;
 }
 
-#define IOMMU_GROUP_NOTIFY_ADD_DEVICE          1 /* Device added */
-#define IOMMU_GROUP_NOTIFY_DEL_DEVICE          2 /* Pre Device removed */
-#define IOMMU_GROUP_NOTIFY_BIND_DRIVER         3 /* Pre Driver bind */
-#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER                4 /* Post Driver bind */
-#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER       5 /* Pre Driver unbind */
-#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER      6 /* Post Driver unbind */
-
 extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
 extern int bus_iommu_probe(struct bus_type *bus);
 extern bool iommu_present(struct bus_type *bus);
+extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
 extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
 extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
 extern struct iommu_group *iommu_group_get_by_id(int id);
@@ -478,10 +477,6 @@ extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
 extern struct iommu_group *iommu_group_get(struct device *dev);
 extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
 extern void iommu_group_put(struct iommu_group *group);
-extern int iommu_group_register_notifier(struct iommu_group *group,
-                                        struct notifier_block *nb);
-extern int iommu_group_unregister_notifier(struct iommu_group *group,
-                                          struct notifier_block *nb);
 extern int iommu_register_device_fault_handler(struct device *dev,
                                        iommu_dev_fault_handler_t handler,
                                        void *data);
@@ -675,6 +670,13 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev,
 void iommu_sva_unbind_device(struct iommu_sva *handle);
 u32 iommu_sva_get_pasid(struct iommu_sva *handle);
 
+int iommu_device_use_default_domain(struct device *dev);
+void iommu_device_unuse_default_domain(struct device *dev);
+
+int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
+void iommu_group_release_dma_owner(struct iommu_group *group);
+bool iommu_group_dma_owner_claimed(struct iommu_group *group);
+
 #else /* CONFIG_IOMMU_API */
 
 struct iommu_ops {};
@@ -689,6 +691,11 @@ static inline bool iommu_present(struct bus_type *bus)
        return false;
 }
 
+static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
+{
+       return false;
+}
+
 static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
 {
        return false;
@@ -871,18 +878,6 @@ static inline void iommu_group_put(struct iommu_group *group)
 {
 }
 
-static inline int iommu_group_register_notifier(struct iommu_group *group,
-                                               struct notifier_block *nb)
-{
-       return -ENODEV;
-}
-
-static inline int iommu_group_unregister_notifier(struct iommu_group *group,
-                                                 struct notifier_block *nb)
-{
-       return 0;
-}
-
 static inline
 int iommu_register_device_fault_handler(struct device *dev,
                                        iommu_dev_fault_handler_t handler,
@@ -1031,6 +1026,30 @@ static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
 {
        return NULL;
 }
+
+static inline int iommu_device_use_default_domain(struct device *dev)
+{
+       return 0;
+}
+
+static inline void iommu_device_unuse_default_domain(struct device *dev)
+{
+}
+
+static inline int
+iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
+{
+       return -ENODEV;
+}
+
+static inline void iommu_group_release_dma_owner(struct iommu_group *group)
+{
+}
+
+static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
+{
+       return false;
+}
 #endif /* CONFIG_IOMMU_API */
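
A hedged sketch of the new DMA ownership and per-device capability interfaces as a userspace-DMA style driver might use them; "container" and the example_* wrappers are invented for illustration:

#include <linux/iommu.h>

static int example_claim_group(struct iommu_group *group, void *container)
{
        /* fails if DMA for the group is still owned elsewhere, e.g. by a
         * bound kernel driver using the default domain */
        return iommu_group_claim_dma_owner(group, container);
}

static void example_release_group(struct iommu_group *group)
{
        iommu_group_release_dma_owner(group);
}

static bool example_snoop_enforced(struct device *dev)
{
        /* per-device query complementing the bus-wide iommu_capable() */
        return device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY);
}
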
 
 /**
index b75395e..e3e8c86 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/ns_common.h>
 #include <linux/refcount.h>
 #include <linux/rhashtable-types.h>
+#include <linux/sysctl.h>
 
 struct user_namespace;
 
@@ -63,6 +64,12 @@ struct ipc_namespace {
        unsigned int    mq_msg_default;
        unsigned int    mq_msgsize_default;
 
+       struct ctl_table_set    mq_set;
+       struct ctl_table_header *mq_sysctls;
+
+       struct ctl_table_set    ipc_set;
+       struct ctl_table_header *ipc_sysctls;
+
        /* user_ns which owns the ipc ns */
        struct user_namespace *user_ns;
        struct ucounts *ucounts;
@@ -169,15 +176,37 @@ static inline void put_ipc_ns(struct ipc_namespace *ns)
 
 #ifdef CONFIG_POSIX_MQUEUE_SYSCTL
 
-struct ctl_table_header;
-extern struct ctl_table_header *mq_register_sysctl_table(void);
+void retire_mq_sysctls(struct ipc_namespace *ns);
+bool setup_mq_sysctls(struct ipc_namespace *ns);
 
 #else /* CONFIG_POSIX_MQUEUE_SYSCTL */
 
-static inline struct ctl_table_header *mq_register_sysctl_table(void)
+static inline void retire_mq_sysctls(struct ipc_namespace *ns)
 {
-       return NULL;
+}
+
+static inline bool setup_mq_sysctls(struct ipc_namespace *ns)
+{
+       return true;
 }
 
 #endif /* CONFIG_POSIX_MQUEUE_SYSCTL */
+
+#ifdef CONFIG_SYSVIPC_SYSCTL
+
+bool setup_ipc_sysctls(struct ipc_namespace *ns);
+void retire_ipc_sysctls(struct ipc_namespace *ns);
+
+#else /* CONFIG_SYSVIPC_SYSCTL */
+
+static inline void retire_ipc_sysctls(struct ipc_namespace *ns)
+{
+}
+
+static inline bool setup_ipc_sysctls(struct ipc_namespace *ns)
+{
+       return true;
+}
+
+#endif /* CONFIG_SYSVIPC_SYSCTL */
 #endif
index 38c8203..37dfdcf 100644 (file)
@@ -61,7 +61,7 @@ struct ipv6_devconf {
        __s32           suppress_frag_ndisc;
        __s32           accept_ra_mtu;
        __s32           drop_unsolicited_na;
-       __s32           accept_unsolicited_na;
+       __s32           accept_untracked_na;
        struct ipv6_stable_secret {
                bool initialized;
                struct in6_addr secret;
index 107751c..bf1eef3 100644 (file)
@@ -256,9 +256,9 @@ extern void static_key_disable_cpuslocked(struct static_key *key);
 #include <linux/atomic.h>
 #include <linux/bug.h>
 
-static inline int static_key_count(struct static_key *key)
+static __always_inline int static_key_count(struct static_key *key)
 {
-       return atomic_read(&key->enabled);
+       return arch_atomic_read(&key->enabled);
 }
 
 static __always_inline void jump_label_init(void)
index fcd5035..ce6536f 100644 (file)
@@ -219,7 +219,7 @@ struct crash_mem {
 extern int crash_exclude_mem_range(struct crash_mem *mem,
                                   unsigned long long mstart,
                                   unsigned long long mend);
-extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
+extern int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
                                       void **addr, unsigned long *sz);
 
 #ifndef arch_kexec_apply_relocations_add
index 2614247..293e299 100644 (file)
@@ -16,8 +16,6 @@
 
 #if IS_ENABLED(CONFIG_LIVEPATCH)
 
-#include <asm/livepatch.h>
-
 /* task patch states */
 #define KLP_UNDEFINED  -1
 #define KLP_UNPATCHED   0
index fcc8e74..d336c54 100644 (file)
@@ -27,9 +27,6 @@ struct tc6393xb_platform_data {
        int     (*resume)(struct platform_device *dev);
 
        int     irq_base;       /* base for subdevice irqs */
-       int     gpio_base;
-       int     (*setup)(struct platform_device *dev);
-       void    (*teardown)(struct platform_device *dev);
 
        struct tmio_nand_data   *nand_data;
        struct tmio_fb_data     *fb_data;
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
new file mode 100644 (file)
index 0000000..478aece
--- /dev/null
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ *
+ */
+#ifndef _MHI_EP_H_
+#define _MHI_EP_H_
+
+#include <linux/dma-direction.h>
+#include <linux/mhi.h>
+
+#define MHI_EP_DEFAULT_MTU 0x8000
+
+/**
+ * struct mhi_ep_channel_config - Channel configuration structure for controller
+ * @name: The name of this channel
+ * @num: The number assigned to this channel
+ * @num_elements: The number of elements that can be queued to this channel
+ * @dir: Direction that data may flow on this channel
+ */
+struct mhi_ep_channel_config {
+       char *name;
+       u32 num;
+       u32 num_elements;
+       enum dma_data_direction dir;
+};
+
+/**
+ * struct mhi_ep_cntrl_config - MHI Endpoint controller configuration
+ * @mhi_version: MHI spec version supported by the controller
+ * @max_channels: Maximum number of channels supported
+ * @num_channels: Number of channels defined in @ch_cfg
+ * @ch_cfg: Array of defined channels
+ */
+struct mhi_ep_cntrl_config {
+       u32 mhi_version;
+       u32 max_channels;
+       u32 num_channels;
+       const struct mhi_ep_channel_config *ch_cfg;
+};
+
+/**
+ * struct mhi_ep_db_info - MHI Endpoint doorbell info
+ * @mask: Mask of the doorbell interrupt
+ * @status: Status of the doorbell interrupt
+ */
+struct mhi_ep_db_info {
+       u32 mask;
+       u32 status;
+};
+
+/**
+ * struct mhi_ep_cntrl - MHI Endpoint controller structure
+ * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
+ *             Endpoint controller
+ * @mhi_dev: MHI Endpoint device instance for the controller
+ * @mmio: MMIO region containing the MHI registers
+ * @mhi_chan: Points to the channel configuration table
+ * @mhi_event: Points to the event ring configurations table
+ * @mhi_cmd: Points to the command ring configurations table
+ * @sm: MHI Endpoint state machine
+ * @ch_ctx_cache: Cache of host channel context data structure
+ * @ev_ctx_cache: Cache of host event context data structure
+ * @cmd_ctx_cache: Cache of host command context data structure
+ * @ch_ctx_host_pa: Physical address of host channel context data structure
+ * @ev_ctx_host_pa: Physical address of host event context data structure
+ * @cmd_ctx_host_pa: Physical address of host command context data structure
+ * @ch_ctx_cache_phys: Physical address of the host channel context cache
+ * @ev_ctx_cache_phys: Physical address of the host event context cache
+ * @cmd_ctx_cache_phys: Physical address of the host command context cache
+ * @chdb: Array of channel doorbell interrupt info
+ * @event_lock: Lock for protecting event rings
+ * @list_lock: Lock for protecting state transition and channel doorbell lists
+ * @state_lock: Lock for protecting state transitions
+ * @st_transition_list: List of state transitions
+ * @ch_db_list: List of queued channel doorbells
+ * @wq: Dedicated workqueue for handling rings and state changes
+ * @state_work: State transition worker
+ * @reset_work: Worker for MHI Endpoint reset
+ * @cmd_ring_work: Worker for processing command rings
+ * @ch_ring_work: Worker for processing channel rings
+ * @raise_irq: CB function for raising IRQ to the host
+ * @alloc_map: CB function for allocating memory in the endpoint for storing the host context and mapping it
+ * @unmap_free: CB function to unmap and free the memory allocated in the endpoint for storing the host context
+ * @read_from_host: CB function used by the endpoint to read from host memory
+ * @write_to_host: CB function used by the endpoint to write to host memory
+ * @mhi_state: MHI Endpoint state
+ * @max_chan: Maximum channels supported by the endpoint controller
+ * @mru: MRU (Maximum Receive Unit) value of the endpoint controller
+ * @event_rings: Number of event rings supported by the endpoint controller
+ * @hw_event_rings: Number of hardware event rings supported by the endpoint controller
+ * @chdb_offset: Channel doorbell offset set by the host
+ * @erdb_offset: Event ring doorbell offset set by the host
+ * @index: MHI Endpoint controller index
+ * @irq: IRQ used by the endpoint controller
+ * @enabled: Flag indicating whether the endpoint controller is enabled
+ */
+struct mhi_ep_cntrl {
+       struct device *cntrl_dev;
+       struct mhi_ep_device *mhi_dev;
+       void __iomem *mmio;
+
+       struct mhi_ep_chan *mhi_chan;
+       struct mhi_ep_event *mhi_event;
+       struct mhi_ep_cmd *mhi_cmd;
+       struct mhi_ep_sm *sm;
+
+       struct mhi_chan_ctxt *ch_ctx_cache;
+       struct mhi_event_ctxt *ev_ctx_cache;
+       struct mhi_cmd_ctxt *cmd_ctx_cache;
+       u64 ch_ctx_host_pa;
+       u64 ev_ctx_host_pa;
+       u64 cmd_ctx_host_pa;
+       phys_addr_t ch_ctx_cache_phys;
+       phys_addr_t ev_ctx_cache_phys;
+       phys_addr_t cmd_ctx_cache_phys;
+
+       struct mhi_ep_db_info chdb[4];
+       struct mutex event_lock;
+       spinlock_t list_lock;
+       spinlock_t state_lock;
+
+       struct list_head st_transition_list;
+       struct list_head ch_db_list;
+
+       struct workqueue_struct *wq;
+       struct work_struct state_work;
+       struct work_struct reset_work;
+       struct work_struct cmd_ring_work;
+       struct work_struct ch_ring_work;
+
+       void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector);
+       int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr,
+                        void __iomem **virt, size_t size);
+       void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
+                          void __iomem *virt, size_t size);
+       int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size);
+       int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size);
+
+       enum mhi_state mhi_state;
+
+       u32 max_chan;
+       u32 mru;
+       u32 event_rings;
+       u32 hw_event_rings;
+       u32 chdb_offset;
+       u32 erdb_offset;
+       u32 index;
+       int irq;
+       bool enabled;
+};
+
+/**
+ * struct mhi_ep_device - Structure representing an MHI Endpoint device that binds
+ *                     to channels or is associated with controllers
+ * @dev: Driver model device node for the MHI Endpoint device
+ * @mhi_cntrl: Controller the device belongs to
+ * @id: Pointer to MHI Endpoint device ID struct
+ * @name: Name of the associated MHI Endpoint device
+ * @ul_chan: UL (from host to endpoint) channel for the device
+ * @dl_chan: DL (from endpoint to host) channel for the device
+ * @dev_type: MHI device type
+ */
+struct mhi_ep_device {
+       struct device dev;
+       struct mhi_ep_cntrl *mhi_cntrl;
+       const struct mhi_device_id *id;
+       const char *name;
+       struct mhi_ep_chan *ul_chan;
+       struct mhi_ep_chan *dl_chan;
+       enum mhi_device_type dev_type;
+};
+
+/**
+ * struct mhi_ep_driver - Structure representing a MHI Endpoint client driver
+ * @id_table: Pointer to MHI Endpoint device ID table
+ * @driver: Device driver model driver
+ * @probe: CB function for client driver probe function
+ * @remove: CB function for client driver remove function
+ * @ul_xfer_cb: CB function for UL (from host to endpoint) data transfer
+ * @dl_xfer_cb: CB function for DL (from endpoint to host) data transfer
+ */
+struct mhi_ep_driver {
+       const struct mhi_device_id *id_table;
+       struct device_driver driver;
+       int (*probe)(struct mhi_ep_device *mhi_ep,
+                    const struct mhi_device_id *id);
+       void (*remove)(struct mhi_ep_device *mhi_ep);
+       void (*ul_xfer_cb)(struct mhi_ep_device *mhi_dev,
+                          struct mhi_result *result);
+       void (*dl_xfer_cb)(struct mhi_ep_device *mhi_dev,
+                          struct mhi_result *result);
+};
+
+#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev)
+#define to_mhi_ep_driver(drv) container_of(drv, struct mhi_ep_driver, driver)
+
+/*
+ * module_mhi_ep_driver() - Helper macro for drivers that don't do
+ * anything special other than using default mhi_ep_driver_register() and
+ * mhi_ep_driver_unregister().  This eliminates a lot of boilerplate.
+ * Each module may only use this macro once.
+ */
+#define module_mhi_ep_driver(mhi_drv) \
+       module_driver(mhi_drv, mhi_ep_driver_register, \
+                     mhi_ep_driver_unregister)
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define mhi_ep_driver_register(mhi_drv) \
+       __mhi_ep_driver_register(mhi_drv, THIS_MODULE)
+
+/**
+ * __mhi_ep_driver_register - Register a driver with MHI Endpoint bus
+ * @mhi_drv: Driver to be associated with the device
+ * @owner: The module owner
+ *
+ * Return: 0 if driver registration succeeds, a negative error code otherwise.
+ */
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner);
+
+/**
+ * mhi_ep_driver_unregister - Unregister a driver from MHI Endpoint bus
+ * @mhi_drv: Driver associated with the device
+ */
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv);
+
+/**
+ * mhi_ep_register_controller - Register MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to register
+ * @config: Configuration to use for the controller
+ *
+ * Return: 0 if controller registration succeeds, a negative error code otherwise.
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+                              const struct mhi_ep_cntrl_config *config);
+
+/**
+ * mhi_ep_unregister_controller - Unregister MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to unregister
+ */
+void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_up - Power up the MHI endpoint stack
+ * @mhi_cntrl: MHI Endpoint controller
+ *
+ * Return: 0 if power up succeeds, a negative error code otherwise.
+ */
+int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_down - Power down the MHI endpoint stack
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_queue_is_empty - Determine whether the transfer queue is empty
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ *
+ * Return: true if the queue is empty, false otherwise.
+ */
+bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir);
+
+/**
+ * mhi_ep_queue_skb - Send an SKB to the host over MHI Endpoint
+ * @mhi_dev: Device associated with the DL channel
+ * @skb: SKB to be queued
+ *
+ * Return: 0 if the SKB has been sent successfully, a negative error code otherwise.
+ */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb);
+
+#endif
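For orientation, a client driver built solely on the interfaces declared in this new header might look like the sketch below; it is not part of the merge. Everything prefixed with sample_, the channel name "IP_SW0", and the driver name are illustrative placeholders.

#include <linux/mhi.h>
#include <linux/mhi_ep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct mhi_device_id sample_ep_id_table[] = {
	{ .chan = "IP_SW0" },		/* placeholder channel name */
	{},
};

static int sample_ep_probe(struct mhi_ep_device *mhi_dev,
			   const struct mhi_device_id *id)
{
	/* Allocate per-channel state here; returning 0 claims the device. */
	return 0;
}

static void sample_ep_remove(struct mhi_ep_device *mhi_dev)
{
	/* Release whatever probe() set up. */
}

static void sample_ep_ul_xfer_cb(struct mhi_ep_device *mhi_dev,
				 struct mhi_result *result)
{
	/* Data arrived from the host on the UL channel. */
}

static void sample_ep_dl_xfer_cb(struct mhi_ep_device *mhi_dev,
				 struct mhi_result *result)
{
	/* Completion for data previously queued towards the host. */
}

static struct mhi_ep_driver sample_ep_driver = {
	.id_table   = sample_ep_id_table,
	.probe      = sample_ep_probe,
	.remove     = sample_ep_remove,
	.ul_xfer_cb = sample_ep_ul_xfer_cb,
	.dl_xfer_cb = sample_ep_dl_xfer_cb,
	.driver     = {
		.name = "sample_mhi_ep_client",
	},
};

/* Expands to the usual register/unregister module glue declared above. */
module_mhi_ep_driver(sample_ep_driver);
MODULE_LICENSE("GPL");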
index b064bc2..5040cd7 100644 (file)
@@ -447,6 +447,11 @@ struct mlx5_qp_table {
        struct radix_tree_root  tree;
 };
 
+enum {
+       MLX5_PF_NOTIFY_DISABLE_VF,
+       MLX5_PF_NOTIFY_ENABLE_VF,
+};
+
 struct mlx5_vf_context {
        int     enabled;
        u64     port_guid;
@@ -457,6 +462,7 @@ struct mlx5_vf_context {
        u8      port_guid_valid:1;
        u8      node_guid_valid:1;
        enum port_state_policy  policy;
+       struct blocking_notifier_head notifier;
 };
 
 struct mlx5_core_sriov {
@@ -1162,6 +1168,12 @@ int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type
 struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev);
 void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev);
 
+int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
+                                         int vf_id,
+                                         struct notifier_block *nb);
+void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
+                                            int vf_id,
+                                            struct notifier_block *nb);
 #ifdef CONFIG_MLX5_CORE_IPOIB
 struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
                                          struct ib_device *ibdev,
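The new per-VF notifier can be consumed roughly as sketched below; the callback body, the function names, and the way mdev/vf_id are obtained are assumptions, while the MLX5_PF_NOTIFY_* events and the register/unregister prototypes come from the diff above.

#include <linux/mlx5/driver.h>
#include <linux/notifier.h>

/* Illustrative callback: reacts to the PF enabling/disabling one VF. */
static int sample_vf_event(struct notifier_block *nb, unsigned long event,
			   void *data)
{
	switch (event) {
	case MLX5_PF_NOTIFY_ENABLE_VF:
		/* The VF is coming up; (re)initialize any per-VF state. */
		break;
	case MLX5_PF_NOTIFY_DISABLE_VF:
		/* The VF is going away; quiesce any use of it. */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block sample_vf_nb = {
	.notifier_call = sample_vf_event,
};

/* mdev and vf_id would come from the consuming driver's probe path. */
static int sample_track_vf(struct mlx5_core_dev *mdev, int vf_id)
{
	return mlx5_sriov_blocking_notifier_register(mdev, vf_id, &sample_vf_nb);
}

static void sample_untrack_vf(struct mlx5_core_dev *mdev, int vf_id)
{
	mlx5_sriov_blocking_notifier_unregister(mdev, vf_id, &sample_vf_nb);
}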
index 78b3d34..fd7d083 100644 (file)
@@ -87,6 +87,7 @@ enum {
 enum {
        MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
        MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
+       MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
        MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
        MLX5_OBJ_TYPE_MKEY = 0xff01,
        MLX5_OBJ_TYPE_QP = 0xff02,
@@ -5176,12 +5177,11 @@ struct mlx5_ifc_query_qp_out_bits {
 
        u8         syndrome[0x20];
 
-       u8         reserved_at_40[0x20];
-       u8         ece[0x20];
+       u8         reserved_at_40[0x40];
 
        u8         opt_param_mask[0x20];
 
-       u8         reserved_at_a0[0x20];
+       u8         ece[0x20];
 
        struct mlx5_ifc_qpc_bits qpc;
 
index 1a9c9d9..4414ed5 100644 (file)
@@ -165,4 +165,43 @@ struct mlx5_ifc_modify_virtio_net_q_out_bits {
        struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
 };
 
+struct mlx5_ifc_virtio_q_counters_bits {
+       u8    modify_field_select[0x40];
+       u8    reserved_at_40[0x40];
+       u8    received_desc[0x40];
+       u8    completed_desc[0x40];
+       u8    error_cqes[0x20];
+       u8    bad_desc_errors[0x20];
+       u8    exceed_max_chain[0x20];
+       u8    invalid_buffer[0x20];
+       u8    reserved_at_180[0x280];
+};
+
+struct mlx5_ifc_create_virtio_q_counters_in_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+       struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_create_virtio_q_counters_out_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+       struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_in_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_out_bits {
+       struct mlx5_ifc_general_obj_out_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_in_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_out_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+       struct mlx5_ifc_virtio_q_counters_bits counters;
+};
+
 #endif /* __MLX5_IFC_VDPA_H_ */
index 5da5d99..549590e 100644 (file)
@@ -835,6 +835,8 @@ struct wmi_device_id {
 #define MHI_DEVICE_MODALIAS_FMT "mhi:%s"
 #define MHI_NAME_SIZE 32
 
+#define MHI_EP_DEVICE_MODALIAS_FMT "mhi_ep:%s"
+
 /**
  * struct mhi_device_id - MHI device identification
  * @chan: MHI channel name
index 7f18a75..55a4aba 100644 (file)
 #define _LINUX_MOUNT_H
 
 #include <linux/types.h>
-#include <linux/list.h>
-#include <linux/nodemask.h>
-#include <linux/spinlock.h>
-#include <linux/seqlock.h>
-#include <linux/atomic.h>
+#include <asm/barrier.h>
 
 struct super_block;
-struct vfsmount;
 struct dentry;
-struct mnt_namespace;
+struct user_namespace;
+struct file_system_type;
 struct fs_context;
+struct file;
+struct path;
 
 #define MNT_NOSUID     0x01
 #define MNT_NODEV      0x02
@@ -81,9 +79,6 @@ static inline struct user_namespace *mnt_user_ns(const struct vfsmount *mnt)
        return smp_load_acquire(&mnt->mnt_userns);
 }
 
-struct file; /* forward dec */
-struct path;
-
 extern int mnt_want_write(struct vfsmount *mnt);
 extern int mnt_want_write_file(struct file *file);
 extern void mnt_drop_write(struct vfsmount *mnt);
@@ -94,12 +89,10 @@ extern struct vfsmount *mnt_clone_internal(const struct path *path);
 extern bool __mnt_is_readonly(struct vfsmount *mnt);
 extern bool mnt_may_suid(struct vfsmount *mnt);
 
-struct path;
 extern struct vfsmount *clone_private_mount(const struct path *path);
 extern int __mnt_want_write(struct vfsmount *);
 extern void __mnt_drop_write(struct vfsmount *);
 
-struct file_system_type;
 extern struct vfsmount *fc_mount(struct fs_context *fc);
 extern struct vfsmount *vfs_create_mount(struct fs_context *fc);
 extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
@@ -115,6 +108,18 @@ extern void mark_mounts_for_expiry(struct list_head *mounts);
 extern dev_t name_to_dev_t(const char *name);
 extern bool path_is_mountpoint(const struct path *path);
 
+extern bool our_mnt(struct vfsmount *mnt);
+
+extern struct vfsmount *kern_mount(struct file_system_type *);
+extern void kern_unmount(struct vfsmount *mnt);
+extern int may_umount_tree(struct vfsmount *);
+extern int may_umount(struct vfsmount *);
+extern long do_mount(const char *, const char __user *,
+                    const char *, unsigned long, void *);
+extern struct vfsmount *collect_mounts(const struct path *);
+extern void drop_collected_mounts(struct vfsmount *);
+extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
+                         struct vfsmount *);
 extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
 
 #endif /* _LINUX_MOUNT_H */
index e89329b..caeb08a 100644 (file)
@@ -69,6 +69,12 @@ extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
 extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
 extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int);
 struct dentry *lookup_one(struct user_namespace *, const char *, struct dentry *, int);
+struct dentry *lookup_one_unlocked(struct user_namespace *mnt_userns,
+                                  const char *name, struct dentry *base,
+                                  int len);
+struct dentry *lookup_one_positive_unlocked(struct user_namespace *mnt_userns,
+                                           const char *name,
+                                           struct dentry *base, int len);
 
 extern int follow_down_one(struct path *);
 extern int follow_down(struct path *);
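The unlocked lookup helpers added above take the same arguments as lookup_one() but do not require the parent's inode lock to be held. A small, hypothetical usage sketch (the name "child" and the wrapper function are invented):

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/namei.h>
#include <linux/string.h>

/* Look up "child" under dir without holding dir's inode lock. */
static struct dentry *sample_lookup_child(struct user_namespace *mnt_userns,
					  struct dentry *dir)
{
	struct dentry *child;

	child = lookup_one_positive_unlocked(mnt_userns, "child", dir,
					     strlen("child"));
	if (IS_ERR(child))
		return child;

	/* Caller is responsible for dput() once done with child. */
	return child;
}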
index 5662d8b..8d04b6a 100644 (file)
@@ -451,6 +451,8 @@ enum lock_type4 {
 #define FATTR4_WORD1_TIME_MODIFY        (1UL << 21)
 #define FATTR4_WORD1_TIME_MODIFY_SET    (1UL << 22)
 #define FATTR4_WORD1_MOUNTED_ON_FILEID  (1UL << 23)
+#define FATTR4_WORD1_DACL               (1UL << 26)
+#define FATTR4_WORD1_SACL               (1UL << 27)
 #define FATTR4_WORD1_FS_LAYOUT_TYPES    (1UL << 30)
 #define FATTR4_WORD2_LAYOUT_TYPES       (1UL << 0)
 #define FATTR4_WORD2_LAYOUT_BLKSIZE     (1UL << 1)
index 157d2bd..ea2f7e6 100644 (file)
@@ -287,4 +287,5 @@ struct nfs_server {
 #define NFS_CAP_XATTR          (1U << 28)
 #define NFS_CAP_READ_PLUS      (1U << 29)
 #define NFS_CAP_FS_LOCATIONS   (1U << 30)
+#define NFS_CAP_MOVEABLE       (1U << 31)
 #endif
index 2863e5a..0e3aa0f 100644 (file)
@@ -800,9 +800,17 @@ struct nfs_setattrargs {
        const struct nfs4_label         *label;
 };
 
+enum nfs4_acl_type {
+       NFS4ACL_NONE = 0,
+       NFS4ACL_ACL,
+       NFS4ACL_DACL,
+       NFS4ACL_SACL,
+};
+
 struct nfs_setaclargs {
        struct nfs4_sequence_args       seq_args;
        struct nfs_fh *                 fh;
+       enum nfs4_acl_type              acl_type;
        size_t                          acl_len;
        struct page **                  acl_pages;
 };
@@ -814,6 +822,7 @@ struct nfs_setaclres {
 struct nfs_getaclargs {
        struct nfs4_sequence_args       seq_args;
        struct nfs_fh *                 fh;
+       enum nfs4_acl_type              acl_type;
        size_t                          acl_len;
        struct page **                  acl_pages;
 };
@@ -822,6 +831,7 @@ struct nfs_getaclargs {
 #define NFS4_ACL_TRUNC         0x0001  /* ACL was truncated */
 struct nfs_getaclres {
        struct nfs4_sequence_res        seq_res;
+       enum nfs4_acl_type              acl_type;
        size_t                          acl_len;
        size_t                          acl_data_offset;
        int                             acl_flags;
@@ -1212,7 +1222,7 @@ struct nfs4_fs_location {
 
 #define NFS4_FS_LOCATIONS_MAXENTRIES 10
 struct nfs4_fs_locations {
-       struct nfs_fattr fattr;
+       struct nfs_fattr *fattr;
        const struct nfs_server *server;
        struct nfs4_pathname fs_path;
        int nlocations;
index c6199db..0f233b7 100644 (file)
  * void nodes_shift_right(dst, src, n) Shift right
  * void nodes_shift_left(dst, src, n)  Shift left
  *
- * int first_node(mask)                        Number lowest set bit, or MAX_NUMNODES
- * int next_node(node, mask)           Next node past 'node', or MAX_NUMNODES
- * int next_node_in(node, mask)                Next node past 'node', or wrap to first,
+ * unsigned int first_node(mask)       Number lowest set bit, or MAX_NUMNODES
+ * unsigned int next_node(node, mask)  Next node past 'node', or MAX_NUMNODES
+ * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first,
  *                                     or MAX_NUMNODES
- * int first_unset_node(mask)          First node not set in mask, or 
+ * unsigned int first_unset_node(mask) First node not set in mask, or
  *                                     MAX_NUMNODES
  *
  * nodemask_t nodemask_of_node(node)   Return nodemask with bit 'node' set
@@ -153,7 +153,7 @@ static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
 
 #define node_test_and_set(node, nodemask) \
                        __node_test_and_set((node), &(nodemask))
-static inline int __node_test_and_set(int node, nodemask_t *addr)
+static inline bool __node_test_and_set(int node, nodemask_t *addr)
 {
        return test_and_set_bit(node, addr->bits);
 }
@@ -200,7 +200,7 @@ static inline void __nodes_complement(nodemask_t *dstp,
 
 #define nodes_equal(src1, src2) \
                        __nodes_equal(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_equal(const nodemask_t *src1p,
+static inline bool __nodes_equal(const nodemask_t *src1p,
                                        const nodemask_t *src2p, unsigned int nbits)
 {
        return bitmap_equal(src1p->bits, src2p->bits, nbits);
@@ -208,7 +208,7 @@ static inline int __nodes_equal(const nodemask_t *src1p,
 
 #define nodes_intersects(src1, src2) \
                        __nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_intersects(const nodemask_t *src1p,
+static inline bool __nodes_intersects(const nodemask_t *src1p,
                                        const nodemask_t *src2p, unsigned int nbits)
 {
        return bitmap_intersects(src1p->bits, src2p->bits, nbits);
@@ -216,20 +216,20 @@ static inline int __nodes_intersects(const nodemask_t *src1p,
 
 #define nodes_subset(src1, src2) \
                        __nodes_subset(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_subset(const nodemask_t *src1p,
+static inline bool __nodes_subset(const nodemask_t *src1p,
                                        const nodemask_t *src2p, unsigned int nbits)
 {
        return bitmap_subset(src1p->bits, src2p->bits, nbits);
 }
 
 #define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
-static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
+static inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
 {
        return bitmap_empty(srcp->bits, nbits);
 }
 
 #define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
+static inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
 {
        return bitmap_full(srcp->bits, nbits);
 }
@@ -260,15 +260,15 @@ static inline void __nodes_shift_left(nodemask_t *dstp,
           > MAX_NUMNODES, then the silly min_ts could be dropped. */
 
 #define first_node(src) __first_node(&(src))
-static inline int __first_node(const nodemask_t *srcp)
+static inline unsigned int __first_node(const nodemask_t *srcp)
 {
-       return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
+       return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
 }
 
 #define next_node(n, src) __next_node((n), &(src))
-static inline int __next_node(int n, const nodemask_t *srcp)
+static inline unsigned int __next_node(int n, const nodemask_t *srcp)
 {
-       return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
+       return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
 }
 
 /*
@@ -276,7 +276,7 @@ static inline int __next_node(int n, const nodemask_t *srcp)
  * the first node in src if needed.  Returns MAX_NUMNODES if src is empty.
  */
 #define next_node_in(n, src) __next_node_in((n), &(src))
-int __next_node_in(int node, const nodemask_t *srcp);
+unsigned int __next_node_in(int node, const nodemask_t *srcp);
 
 static inline void init_nodemask_of_node(nodemask_t *mask, int node)
 {
@@ -296,9 +296,9 @@ static inline void init_nodemask_of_node(nodemask_t *mask, int node)
 })
 
 #define first_unset_node(mask) __first_unset_node(&(mask))
-static inline int __first_unset_node(const nodemask_t *maskp)
+static inline unsigned int __first_unset_node(const nodemask_t *maskp)
 {
-       return min_t(int,MAX_NUMNODES,
+       return min_t(unsigned int, MAX_NUMNODES,
                        find_first_zero_bit(maskp->bits, MAX_NUMNODES));
 }
 
@@ -435,11 +435,11 @@ static inline int num_node_state(enum node_states state)
 
 #define first_online_node      first_node(node_states[N_ONLINE])
 #define first_memory_node      first_node(node_states[N_MEMORY])
-static inline int next_online_node(int nid)
+static inline unsigned int next_online_node(int nid)
 {
        return next_node(nid, node_states[N_ONLINE]);
 }
-static inline int next_memory_node(int nid)
+static inline unsigned int next_memory_node(int nid)
 {
        return next_node(nid, node_states[N_MEMORY]);
 }
index 87069b8..aef88c2 100644 (file)
@@ -150,6 +150,11 @@ extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
 extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
                struct notifier_block *nb);
 
+extern int atomic_notifier_chain_register_unique_prio(
+               struct atomic_notifier_head *nh, struct notifier_block *nb);
+extern int blocking_notifier_chain_register_unique_prio(
+               struct blocking_notifier_head *nh, struct notifier_block *nb);
+
 extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
                struct notifier_block *nb);
 extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
@@ -173,6 +178,8 @@ extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh
 extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
                unsigned long val_up, unsigned long val_down, void *v);
 
+extern bool atomic_notifier_call_chain_is_empty(struct atomic_notifier_head *nh);
+
 #define NOTIFY_DONE            0x0000          /* Don't care */
 #define NOTIFY_OK              0x0001          /* Suits me */
 #define NOTIFY_STOP_MASK       0x8000          /* Don't call further */
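A hedged sketch of the new *_register_unique_prio() variants, which are expected to refuse a second registration at the same priority on a chain; the chain, callback, and priority value below are invented for illustration.

#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(sample_chain);

static int sample_cb(struct notifier_block *nb, unsigned long action,
		     void *data)
{
	return NOTIFY_DONE;
}

static struct notifier_block sample_nb = {
	.notifier_call = sample_cb,
	.priority      = 128,	/* must be unique on this chain */
};

static int sample_register(void)
{
	/*
	 * Unlike blocking_notifier_chain_register(), this is expected to fail
	 * if another notifier on the chain already uses priority 128.
	 */
	return blocking_notifier_chain_register_unique_prio(&sample_chain,
							    &sample_nb);
}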
index 5358a5f..fa092b9 100644 (file)
@@ -564,6 +564,15 @@ int nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *remoteport,
                        void *lsreqbuf, u32 lsreqbuf_len);
 
 
+/*
+ * Routine called by the LLDD to get the appid field associated with the request
+ *
+ * If the return value is NULL: the user/libvirt has not set an appid for the VM
+ * If the return value is non-NULL: the appid associated with the VM
+ *
+ * @req: IO request from nvme fc to driver
+ */
+char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req);
 
 /*
  * ***************  LLDD FC-NVME Target/Subsystem API ***************
@@ -1048,5 +1057,10 @@ int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport,
 
 void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *fcpreq);
+/*
+ * Add a define, visible to the compiler, that indicates support
+ * for the feature. This allows for conditional compilation in LLDDs.
+ */
+#define NVME_FC_FEAT_UUID      0x0001
 
 #endif /* _NVME_FC_DRIVER_H */
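An LLDD consuming the new helper could guard the call with the NVME_FC_FEAT_UUID define, roughly as below; the wrapper function is hypothetical.

#include <linux/nvme-fc-driver.h>

/* Hypothetical LLDD hook handed an FC-NVMe I/O request. */
static void sample_lldd_tag_io(struct nvmefc_fcp_req *req)
{
#ifdef NVME_FC_FEAT_UUID
	char *appid = nvme_fc_io_getuuid(req);

	if (appid) {
		/* Associate this I/O with the VM identified by appid. */
	}
#endif
}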
index c0c0cef..980f9c9 100644 (file)
@@ -25,6 +25,7 @@ struct nvmem_cell_info {
        unsigned int            bytes;
        unsigned int            bit_offset;
        unsigned int            nbits;
+       struct device_node      *np;
 };
 
 /**
index 3266ac0..81a57b4 100644 (file)
@@ -512,7 +512,11 @@ struct pci_dev {
        u16             acs_cap;        /* ACS Capability offset */
        phys_addr_t     rom;            /* Physical address if not from BAR */
        size_t          romlen;         /* Length if not from BAR */
-       char            *driver_override; /* Driver name to force a match */
+       /*
+        * Driver name to force a match.  Do not set directly, because core
+        * frees it.  Use driver_set_override() to set or clear it.
+        */
+       const char      *driver_override;
 
        unsigned long   priv_flags;     /* Private flags for the PCI driver */
 
@@ -891,6 +895,13 @@ struct module;
  *              created once it is bound to the driver.
  * @driver:    Driver model structure.
  * @dynids:    List of dynamically added device IDs.
+ * @driver_managed_dma: Device driver does not use the kernel DMA API for DMA.
+ *             Most device drivers need not care about this flag as long as
+ *             all DMA is handled through the kernel DMA API.  Some special
+ *             drivers, for example VFIO drivers, know how to manage DMA
+ *             themselves and set this flag so that the IOMMU layer will
+ *             allow them to set up and manage their own I/O address space.
  */
 struct pci_driver {
        struct list_head        node;
@@ -909,6 +920,7 @@ struct pci_driver {
        const struct attribute_group **dev_groups;
        struct device_driver    driver;
        struct pci_dynids       dynids;
+       bool driver_managed_dma;
 };
 
 static inline struct pci_driver *to_pci_driver(struct device_driver *drv)
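The new driver_managed_dma flag is set directly in the driver structure; a minimal PCI driver sketch in the VFIO style (the driver name, empty ID table, and no-op callbacks are placeholders) might look like this:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id sample_vfio_ids[] = {
	/* IDs of the devices this hypothetical driver would claim. */
	{},
};

static int sample_vfio_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	return 0;
}

static void sample_vfio_remove(struct pci_dev *pdev)
{
}

static struct pci_driver sample_vfio_driver = {
	.name               = "sample-vfio-pci",
	.id_table           = sample_vfio_ids,
	.probe              = sample_vfio_probe,
	.remove             = sample_vfio_remove,
	/* This driver manages DMA itself, bypassing the kernel DMA API. */
	.driver_managed_dma = true,
};
module_pci_driver(sample_vfio_driver);
MODULE_LICENSE("GPL");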
diff --git a/include/linux/phy/phy-lvds.h b/include/linux/phy/phy-lvds.h
new file mode 100644 (file)
index 0000000..09931d0
--- /dev/null
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020,2022 NXP
+ */
+
+#ifndef __PHY_LVDS_H_
+#define __PHY_LVDS_H_
+
+/**
+ * struct phy_configure_opts_lvds - LVDS configuration set
+ * @bits_per_lane_and_dclk_cycle:      Number of bits per lane per differential
+ *                                     clock cycle.
+ * @differential_clk_rate:             Clock rate, in Hertz, of the LVDS
+ *                                     differential clock.
+ * @lanes:                             Number of active, consecutive,
+ *                                     data lanes, starting from lane 0,
+ *                                     used for the transmissions.
+ * @is_slave:                          Boolean, true if the phy is a slave
+ *                                     which works together with a master
+ *                                     phy to support dual link transmission,
+ *                                     otherwise a regular phy or a master phy.
+ *
+ * This structure is used to represent the configuration state of a LVDS phy.
+ */
+struct phy_configure_opts_lvds {
+       unsigned int    bits_per_lane_and_dclk_cycle;
+       unsigned long   differential_clk_rate;
+       unsigned int    lanes;
+       bool            is_slave;
+};
+
+#endif /* __PHY_LVDS_H_ */
index f3286f4..b141375 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/regulator/consumer.h>
 
 #include <linux/phy/phy-dp.h>
+#include <linux/phy/phy-lvds.h>
 #include <linux/phy/phy-mipi-dphy.h>
 
 struct phy;
@@ -57,10 +58,13 @@ enum phy_media {
  *             the MIPI_DPHY phy mode.
  * @dp:                Configuration set applicable for phys supporting
  *             the DisplayPort protocol.
+ * @lvds:      Configuration set applicable for phys supporting
+ *             the LVDS phy mode.
  */
 union phy_configure_opts {
        struct phy_configure_opts_mipi_dphy     mipi_dphy;
        struct phy_configure_opts_dp            dp;
+       struct phy_configure_opts_lvds          lvds;
 };
 
 /**
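With the union extended, a consumer would fill the new lvds member and pass it to the existing phy_configure() helper; the timing numbers in this sketch are arbitrary placeholders.

#include <linux/phy/phy.h>

/* Arbitrary example numbers for a single-link 7:1 LVDS panel. */
static int sample_lvds_configure(struct phy *phy)
{
	union phy_configure_opts opts = {
		.lvds = {
			.bits_per_lane_and_dclk_cycle = 7,
			.differential_clk_rate        = 74250000,
			.lanes                        = 4,
			.is_slave                     = false,
		},
	};

	/* phy_power_on() and mode selection would surround this in practice. */
	return phy_configure(phy, &opts);
}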
diff --git a/include/linux/platform_data/asoc-poodle.h b/include/linux/platform_data/asoc-poodle.h
new file mode 100644 (file)
index 0000000..2052fad
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_PLATFORM_DATA_POODLE_AUDIO
+#define __LINUX_PLATFORM_DATA_POODLE_AUDIO
+
+/* locomo is not a proper gpio driver, and uses its own api */
+struct poodle_audio_platform_data {
+       struct device   *locomo_dev;
+
+       int             gpio_amp_on;
+       int             gpio_mute_l;
+       int             gpio_mute_r;
+       int             gpio_232vcc_on;
+       int             gpio_jk_b;
+};
+
+#endif
similarity index 93%
rename from arch/arm/mach-pxa/include/mach/audio.h
rename to include/linux/platform_data/asoc-pxa.h
index 7beebf7..327454c 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_AUDIO_H__
-#define __ASM_ARCH_AUDIO_H__
+#ifndef __SOC_PXA_AUDIO_H__
+#define __SOC_PXA_AUDIO_H__
 
 #include <sound/core.h>
 #include <sound/pcm.h>
diff --git a/include/linux/platform_data/timer-ixp4xx.h b/include/linux/platform_data/timer-ixp4xx.h
deleted file mode 100644 (file)
index ee92ae7..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __TIMER_IXP4XX_H
-#define __TIMER_IXP4XX_H
-
-#include <linux/ioport.h>
-
-void __init ixp4xx_timer_setup(resource_size_t timerbase,
-                              int timer_irq,
-                              unsigned int timer_freq);
-
-#endif
index b3d5747..6333bac 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/fb.h>
-#include <mach/regs-lcd.h>
 
 /*
  * Supported LCD connections
@@ -153,6 +152,27 @@ struct pxafb_mach_info {
 void pxa_set_fb_info(struct device *, struct pxafb_mach_info *);
 unsigned long pxafb_get_hsync_time(struct device *dev);
 
+/* smartpanel related */
+#define SMART_CMD_A0                    (0x1 << 8)
+#define SMART_CMD_READ_STATUS_REG       (0x0 << 9)
+#define SMART_CMD_READ_FRAME_BUFFER    ((0x0 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WRITE_COMMAND                 (0x1 << 9)
+#define SMART_CMD_WRITE_DATA           ((0x1 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WRITE_FRAME          ((0x2 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WAIT_FOR_VSYNC        (0x3 << 9)
+#define SMART_CMD_NOOP                  (0x4 << 9)
+#define SMART_CMD_INTERRUPT             (0x5 << 9)
+
+#define SMART_CMD(x)   (SMART_CMD_WRITE_COMMAND | ((x) & 0xff))
+#define SMART_DAT(x)   (SMART_CMD_WRITE_DATA | ((x) & 0xff))
+
+/* SMART_DELAY() provides a software-controlled delay primitive that can be
+ * inserted between command sequences.  The unused command 0x6 is used for it,
+ * and the delay ranges from 0 ms to 255 ms.
+ */
+#define SMART_CMD_DELAY                (0x6 << 9)
+#define SMART_DELAY(ms)                (SMART_CMD_DELAY | ((ms) & 0xff))
+
 #ifdef CONFIG_FB_PXA_SMARTPANEL
 extern int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int);
 extern int pxafb_smart_flush(struct fb_info *info);
index 7c96f16..b0d5a25 100644 (file)
@@ -31,7 +31,11 @@ struct platform_device {
        struct resource *resource;
 
        const struct platform_device_id *id_entry;
-       char *driver_override; /* Driver name to force a match */
+       /*
+        * Driver name to force a match.  Do not set directly, because core
+        * frees it.  Use driver_set_override() to set or clear it.
+        */
+       const char *driver_override;
 
        /* MFD cell pointer */
        struct mfd_cell *mfd_cell;
@@ -210,6 +214,14 @@ struct platform_driver {
        struct device_driver driver;
        const struct platform_device_id *id_table;
        bool prevent_deferred_probe;
+       /*
+        * Most device drivers need not care about this flag as long as all
+        * DMA is handled through the kernel DMA API.  Some special drivers,
+        * for example VFIO drivers, know how to manage DMA themselves and set
+        * this flag so that the IOMMU layer will allow them to set up and
+        * manage their own I/O address space.
+        */
+       bool driver_managed_dma;
 };
 
 #define to_platform_driver(drv)        (container_of((drv), struct platform_driver, \
@@ -328,8 +340,6 @@ extern int platform_pm_restore(struct device *dev);
 #define platform_pm_restore            NULL
 #endif
 
-extern int platform_dma_configure(struct device *dev);
-
 #ifdef CONFIG_PM_SLEEP
 #define USE_PLATFORM_PM_SLEEP_OPS \
        .suspend = platform_pm_suspend, \
index 70ec69d..871c9c4 100644 (file)
@@ -21,7 +21,6 @@
  * Callbacks for platform drivers to implement.
  */
 extern void (*pm_power_off)(void);
-extern void (*pm_power_off_prepare)(void);
 
 struct device; /* we have a circular dep with device.h */
 #ifdef CONFIG_VT_CONSOLE_SLEEP
index 0d85a63..6708b4e 100644 (file)
@@ -117,18 +117,25 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev);
 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
                                              unsigned long freq,
                                              bool available);
-struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
-                                              unsigned int level);
-struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
-                                             unsigned int *level);
-
 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
                                              unsigned long *freq);
 struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
                                                     unsigned long u_volt);
 
+struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+                                              unsigned int level);
+struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+                                             unsigned int *level);
+
 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
                                             unsigned long *freq);
+
+struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
+                                          unsigned int *bw, int index);
+
+struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+                                          unsigned int *bw, int index);
+
 void dev_pm_opp_put(struct dev_pm_opp *opp);
 
 int dev_pm_opp_add(struct device *dev, unsigned long freq,
@@ -243,12 +250,6 @@ static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
        return 0;
 }
 
-static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
-                                       unsigned long freq, bool available)
-{
-       return ERR_PTR(-EOPNOTSUPP);
-}
-
 static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
                                        unsigned int level)
 {
@@ -261,6 +262,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
        return ERR_PTR(-EOPNOTSUPP);
 }
 
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+                                       unsigned long freq, bool available)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
                                        unsigned long *freq)
 {
@@ -279,6 +286,18 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
        return ERR_PTR(-EOPNOTSUPP);
 }
 
+static inline struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
+                                       unsigned int *bw, int index)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+                                       unsigned int *bw, int index)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
 static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
 
 static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
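The new bandwidth lookup helpers mirror the frequency ones; a rough usage sketch follows (the requested bandwidth value and interconnect index 0 are made up, and the unit is whatever the device's OPP table uses):

#include <linux/err.h>
#include <linux/pm_opp.h>

static int sample_pick_bw_opp(struct device *dev)
{
	unsigned int bw = 1000000;	/* requested peak bandwidth (made up) */
	struct dev_pm_opp *opp;

	/* Lowest OPP whose bandwidth on interconnect path 0 is >= bw. */
	opp = dev_pm_opp_find_bw_ceil(dev, &bw, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* On return, bw holds the bandwidth of the OPP that was found. */
	dev_pm_opp_put(opp);
	return 0;
}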
index fc24d45..a5b429d 100644 (file)
@@ -451,6 +451,11 @@ static inline void *device_connection_find_match(struct device *dev,
        return fwnode_connection_find_match(dev_fwnode(dev), con_id, data, match);
 }
 
+int fwnode_connection_find_matches(struct fwnode_handle *fwnode,
+                                  const char *con_id, void *data,
+                                  devcon_match_fn_t match,
+                                  void **matches, unsigned int matches_len);
+
 /* -------------------------------------------------------------------------- */
 /* Software fwnode support - when HW description is incomplete or missing */
 
index db45095..c952c5b 100644 (file)
@@ -30,7 +30,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
 
 #define PT_SEIZED      0x00010000      /* SEIZE used, enable new behavior */
 #define PT_PTRACED     0x00000001
-#define PT_DTRACE      0x00000002      /* delayed trace (used on um) */
 
 #define PT_OPT_FLAG_SHIFT      3
 /* PT_TRACE_* event enable flags */
@@ -47,12 +46,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
 #define PT_EXITKILL            (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
 #define PT_SUSPEND_SECCOMP     (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
 
-/* single stepping state bits (used on ARM and PA-RISC) */
-#define PT_SINGLESTEP_BIT      31
-#define PT_SINGLESTEP          (1<<PT_SINGLESTEP_BIT)
-#define PT_BLOCKSTEP_BIT       30
-#define PT_BLOCKSTEP           (1<<PT_BLOCKSTEP_BIT)
-
 extern long arch_ptrace(struct task_struct *child, long request,
                        unsigned long addr, unsigned long data);
 extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
index a242964..e5d9ef8 100644 (file)
@@ -7,6 +7,7 @@
 #include <uapi/linux/reboot.h>
 
 struct device;
+struct sys_off_handler;
 
 #define SYS_DOWN       0x0001  /* Notify of system down */
 #define SYS_RESTART    SYS_DOWN
@@ -62,6 +63,95 @@ extern void machine_shutdown(void);
 struct pt_regs;
 extern void machine_crash_shutdown(struct pt_regs *);
 
+void do_kernel_power_off(void);
+
+/*
+ * sys-off handler API.
+ */
+
+/*
+ * Standard sys-off priority levels. Users are expected to set priorities
+ * relative to the standard levels.
+ *
+ * SYS_OFF_PRIO_PLATFORM:      Use this for platform-level handlers.
+ *
+ * SYS_OFF_PRIO_LOW:           Use this for handlers of last resort.
+ *
+ * SYS_OFF_PRIO_DEFAULT:       Use this for normal handlers.
+ *
+ * SYS_OFF_PRIO_HIGH:          Use this for higher priority handlers.
+ *
+ * SYS_OFF_PRIO_FIRMWARE:      Use this if the handler uses a firmware call.
+ */
+#define SYS_OFF_PRIO_PLATFORM          -256
+#define SYS_OFF_PRIO_LOW               -128
+#define SYS_OFF_PRIO_DEFAULT           0
+#define SYS_OFF_PRIO_HIGH              192
+#define SYS_OFF_PRIO_FIRMWARE          224
+
+enum sys_off_mode {
+       /**
+        * @SYS_OFF_MODE_POWER_OFF_PREPARE:
+        *
+        * Handlers prepare system to be powered off. Handlers are
+        * allowed to sleep.
+        */
+       SYS_OFF_MODE_POWER_OFF_PREPARE,
+
+       /**
+        * @SYS_OFF_MODE_POWER_OFF:
+        *
+        * Handlers power off the system. Handlers are not allowed to sleep.
+        */
+       SYS_OFF_MODE_POWER_OFF,
+
+       /**
+        * @SYS_OFF_MODE_RESTART:
+        *
+        * Handlers restart the system. Handlers are not allowed to sleep.
+        */
+       SYS_OFF_MODE_RESTART,
+};
+
+/**
+ * struct sys_off_data - sys-off callback argument
+ *
+ * @mode: Mode ID. Currently used only by the sys-off restart mode,
+ *        see enum reboot_mode for the available modes.
+ * @cb_data: User's callback data.
+ * @cmd: Command string. Currently used only by the sys-off restart mode,
+ *       NULL otherwise.
+ */
+struct sys_off_data {
+       int mode;
+       void *cb_data;
+       const char *cmd;
+};
+
+struct sys_off_handler *
+register_sys_off_handler(enum sys_off_mode mode,
+                        int priority,
+                        int (*callback)(struct sys_off_data *data),
+                        void *cb_data);
+void unregister_sys_off_handler(struct sys_off_handler *handler);
+
+int devm_register_sys_off_handler(struct device *dev,
+                                 enum sys_off_mode mode,
+                                 int priority,
+                                 int (*callback)(struct sys_off_data *data),
+                                 void *cb_data);
+
+int devm_register_power_off_handler(struct device *dev,
+                                   int (*callback)(struct sys_off_data *data),
+                                   void *cb_data);
+
+int devm_register_restart_handler(struct device *dev,
+                                 int (*callback)(struct sys_off_data *data),
+                                 void *cb_data);
+
+int register_platform_power_off(void (*power_off)(void));
+void unregister_platform_power_off(void (*power_off)(void));
+
 /*
  * Architecture-independent implementations of sys_reboot commands.
  */
@@ -70,6 +160,7 @@ extern void kernel_restart_prepare(char *cmd);
 extern void kernel_restart(char *cmd);
 extern void kernel_halt(void);
 extern void kernel_power_off(void);
+extern bool kernel_can_power_off(void);
 
 void ctrl_alt_del(void);
 
index 02fa911..523c98b 100644 (file)
@@ -41,7 +41,9 @@ struct rpmsg_channel_info {
  * rpmsg_device - device that belong to the rpmsg bus
  * @dev: the device struct
  * @id: device id (used to match between rpmsg drivers and devices)
- * @driver_override: driver name to force a match
+ * @driver_override: driver name to force a match; do not set directly,
+ *                   because core frees it; use driver_set_override() to
+ *                   set or clear it.
  * @src: local address
  * @dst: destination address
  * @ept: the rpmsg endpoint of this channel
@@ -51,7 +53,7 @@ struct rpmsg_channel_info {
 struct rpmsg_device {
        struct device dev;
        struct rpmsg_device_id id;
-       char *driver_override;
+       const char *driver_override;
        u32 src;
        u32 dst;
        struct rpmsg_endpoint *ept;
@@ -163,6 +165,8 @@ static inline __rpmsg64 cpu_to_rpmsg64(struct rpmsg_device *rpdev, u64 val)
 
 #if IS_ENABLED(CONFIG_RPMSG)
 
+int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+                                  const char *driver_override);
 int rpmsg_register_device(struct rpmsg_device *rpdev);
 int rpmsg_unregister_device(struct device *parent,
                            struct rpmsg_channel_info *chinfo);
@@ -190,6 +194,12 @@ ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept);
 
 #else
 
+static inline int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+                                                const char *driver_override)
+{
+       return -ENXIO;
+}
+
 static inline int rpmsg_register_device(struct rpmsg_device *rpdev)
 {
        return -ENXIO;
index 3d780b4..534038d 100644 (file)
 #define PCR_SETTING_REG1               0x724
 #define PCR_SETTING_REG2               0x814
 #define PCR_SETTING_REG3               0x747
+#define PCR_SETTING_REG4               0x818
+#define PCR_SETTING_REG5               0x81C
+
 
 #define rtsx_pci_init_cmd(pcr)         ((pcr)->ci = 0)
 
index b89c857..c46f3a6 100644 (file)
@@ -103,7 +103,7 @@ struct task_group;
 /* Convenience macros for the sake of set_current_state: */
 #define TASK_KILLABLE                  (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
 #define TASK_STOPPED                   (TASK_WAKEKILL | __TASK_STOPPED)
-#define TASK_TRACED                    (TASK_WAKEKILL | __TASK_TRACED)
+#define TASK_TRACED                    __TASK_TRACED
 
 #define TASK_IDLE                      (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
 
@@ -118,11 +118,9 @@ struct task_group;
 
 #define task_is_running(task)          (READ_ONCE((task)->__state) == TASK_RUNNING)
 
-#define task_is_traced(task)           ((READ_ONCE(task->__state) & __TASK_TRACED) != 0)
-
-#define task_is_stopped(task)          ((READ_ONCE(task->__state) & __TASK_STOPPED) != 0)
-
-#define task_is_stopped_or_traced(task)        ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+#define task_is_traced(task)           ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
+#define task_is_stopped(task)          ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
+#define task_is_stopped_or_traced(task)        ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
 
 /*
  * Special states are those that do not use the normal wait-loop pattern. See
index fa067de..68876d0 100644 (file)
@@ -19,6 +19,10 @@ struct task_struct;
 #define JOBCTL_TRAPPING_BIT    21      /* switching to TRACED */
 #define JOBCTL_LISTENING_BIT   22      /* ptracer is listening for events */
 #define JOBCTL_TRAP_FREEZE_BIT 23      /* trap for cgroup freezer */
+#define JOBCTL_PTRACE_FROZEN_BIT       24      /* frozen for ptrace */
+
+#define JOBCTL_STOPPED_BIT     26      /* do_signal_stop() */
+#define JOBCTL_TRACED_BIT      27      /* ptrace_stop() */
 
 #define JOBCTL_STOP_DEQUEUED   (1UL << JOBCTL_STOP_DEQUEUED_BIT)
 #define JOBCTL_STOP_PENDING    (1UL << JOBCTL_STOP_PENDING_BIT)
@@ -28,6 +32,10 @@ struct task_struct;
 #define JOBCTL_TRAPPING                (1UL << JOBCTL_TRAPPING_BIT)
 #define JOBCTL_LISTENING       (1UL << JOBCTL_LISTENING_BIT)
 #define JOBCTL_TRAP_FREEZE     (1UL << JOBCTL_TRAP_FREEZE_BIT)
+#define JOBCTL_PTRACE_FROZEN   (1UL << JOBCTL_PTRACE_FROZEN_BIT)
+
+#define JOBCTL_STOPPED         (1UL << JOBCTL_STOPPED_BIT)
+#define JOBCTL_TRACED          (1UL << JOBCTL_TRACED_BIT)
 
 #define JOBCTL_TRAP_MASK       (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
 #define JOBCTL_PENDING_MASK    (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
index 20ed5ba..cafbe03 100644 (file)
@@ -294,8 +294,10 @@ static inline int kernel_dequeue_signal(void)
 static inline void kernel_signal_stop(void)
 {
        spin_lock_irq(&current->sighand->siglock);
-       if (current->jobctl & JOBCTL_STOP_DEQUEUED)
+       if (current->jobctl & JOBCTL_STOP_DEQUEUED) {
+               current->jobctl |= JOBCTL_STOPPED;
                set_special_state(TASK_STOPPED);
+       }
        spin_unlock_irq(&current->sighand->siglock);
 
        schedule();
@@ -444,13 +446,23 @@ extern void calculate_sigpending(void);
 
 extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
 
-static inline void signal_wake_up(struct task_struct *t, bool resume)
+static inline void signal_wake_up(struct task_struct *t, bool fatal)
 {
-       signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
+       unsigned int state = 0;
+       if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) {
+               t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED);
+               state = TASK_WAKEKILL | __TASK_TRACED;
+       }
+       signal_wake_up_state(t, state);
 }
 static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
 {
-       signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
+       unsigned int state = 0;
+       if (resume) {
+               t->jobctl &= ~JOBCTL_TRACED;
+               state = __TASK_TRACED;
+       }
+       signal_wake_up_state(t, state);
 }
 
 void task_join_group_stop(struct task_struct *task);
index 719c9a6..505aaf9 100644 (file)
@@ -32,6 +32,10 @@ struct kernel_clone_args {
        size_t set_tid_size;
        int cgroup;
        int io_thread;
+       int kthread;
+       int idle;
+       int (*fn)(void *);
+       void *fn_arg;
        struct cgroup *cgrp;
        struct css_set *cset;
 };
@@ -67,8 +71,7 @@ extern void fork_init(void);
 
 extern void release_task(struct task_struct * p);
 
-extern int copy_thread(unsigned long, unsigned long, unsigned long,
-                      struct task_struct *, unsigned long);
+extern int copy_thread(struct task_struct *, const struct kernel_clone_args *);
 
 extern void flush_thread(void);
 
@@ -89,6 +92,7 @@ struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
 struct task_struct *fork_idle(int);
 struct mm_struct *copy_init_mm(void);
 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags);
 extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
 int kernel_wait(pid_t pid, int *stat);
 
index d4828e6..cbd5070 100644 (file)
@@ -232,6 +232,7 @@ struct uart_port {
        int                     hw_stopped;             /* sw-assisted CTS flow state */
        unsigned int            mctrl;                  /* current modem ctrl settings */
        unsigned int            timeout;                /* character-based timeout */
+       unsigned int            frame_time;             /* frame timing in ns */
        unsigned int            type;                   /* port type */
        const struct uart_ops   *ops;
        unsigned int            custom_divisor;
index f6c3323..dec15f5 100644 (file)
 #define APPLE_S5L_UCON_DEFAULT         (S3C2410_UCON_TXIRQMODE | \
                                         S3C2410_UCON_RXIRQMODE | \
                                         S3C2410_UCON_RXFIFO_TOI)
+#define APPLE_S5L_UCON_MASK            (APPLE_S5L_UCON_RXTO_ENA_MSK | \
+                                        APPLE_S5L_UCON_RXTHRESH_ENA_MSK | \
+                                        APPLE_S5L_UCON_TXTHRESH_ENA_MSK)
 
 #define APPLE_S5L_UTRSTAT_RXTHRESH     (1<<4)
 #define APPLE_S5L_UTRSTAT_TXTHRESH     (1<<5)
index a6db6f2..3b98e7a 100644 (file)
@@ -282,7 +282,8 @@ extern int do_send_sig_info(int sig, struct kernel_siginfo *info,
                                struct task_struct *p, enum pid_type type);
 extern int group_send_sig_info(int sig, struct kernel_siginfo *info,
                               struct task_struct *p, enum pid_type type);
-extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
+extern int send_signal_locked(int sig, struct kernel_siginfo *info,
+                             struct task_struct *p, enum pid_type type);
 extern int sigprocmask(int, sigset_t *, sigset_t *);
 extern void set_current_blocked(sigset_t *);
 extern void __set_current_blocked(const sigset_t *);
index 3af1428..9153e77 100644 (file)
@@ -1,6 +1,5 @@
-/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * This file is provided under a dual BSD/GPLv2 license.
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
  *
  * SipHash: a fast short-input PRF
  * https://131002.net/siphash/
index da96f0d..d3d1055 100644 (file)
@@ -2696,7 +2696,14 @@ void *skb_pull(struct sk_buff *skb, unsigned int len);
 static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
 {
        skb->len -= len;
-       BUG_ON(skb->len < skb->data_len);
+       if (unlikely(skb->len < skb->data_len)) {
+#if defined(CONFIG_DEBUG_NET)
+               skb->len += len;
+               pr_err("__skb_pull(len=%u)\n", len);
+               skb_dump(KERN_ERR, skb, false);
+#endif
+               BUG();
+       }
        return skb->data += len;
 }
 
similarity index 75%
rename from arch/arm/mach-pxa/include/mach/hardware.h
rename to include/linux/soc/pxa/cpu.h
index ee7eab1..5782450 100644 (file)
@@ -1,61 +1,16 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- *  arch/arm/mach-pxa/include/mach/hardware.h
- *
  *  Author:    Nicolas Pitre
  *  Created:   Jun 15, 2001
  *  Copyright: MontaVista Software Inc.
  */
 
-#ifndef __ASM_ARCH_HARDWARE_H
-#define __ASM_ARCH_HARDWARE_H
-
-#include <mach/addr-map.h>
-
-/*
- * Workarounds for at least 2 errata so far require this.
- * The mapping is set in mach-pxa/generic.c.
- */
-#define UNCACHED_PHYS_0                0xfe000000
-#define UNCACHED_PHYS_0_SIZE   0x00100000
-
-/*
- * Intel PXA2xx internal register mapping:
- *
- * 0x40000000 - 0x41ffffff <--> 0xf2000000 - 0xf3ffffff
- * 0x44000000 - 0x45ffffff <--> 0xf4000000 - 0xf5ffffff
- * 0x48000000 - 0x49ffffff <--> 0xf6000000 - 0xf7ffffff
- * 0x4c000000 - 0x4dffffff <--> 0xf8000000 - 0xf9ffffff
- * 0x50000000 - 0x51ffffff <--> 0xfa000000 - 0xfbffffff
- * 0x54000000 - 0x55ffffff <--> 0xfc000000 - 0xfdffffff
- * 0x58000000 - 0x59ffffff <--> 0xfe000000 - 0xffffffff
- *
- * Note that not all PXA2xx chips implement all those addresses, and the
- * kernel only maps the minimum needed range of this mapping.
- */
-#define io_v2p(x) (0x3c000000 + ((x) & 0x01ffffff) + (((x) & 0x0e000000) << 1))
-#define io_p2v(x) IOMEM(0xf2000000 + ((x) & 0x01ffffff) + (((x) & 0x1c000000) >> 1))
-
-#ifndef __ASSEMBLY__
-# define __REG(x)      (*((volatile u32 __iomem *)io_p2v(x)))
-
-/* With indexed regs we don't want to feed the index through io_p2v()
-   especially if it is a variable, otherwise horrible code will result. */
-# define __REG2(x,y)   \
-       (*(volatile u32 __iomem*)((u32)&__REG(x) + (y)))
-
-# define __PREG(x)     (io_v2p((u32)&(x)))
-
-#else
-
-# define __REG(x)      io_p2v(x)
-# define __PREG(x)     io_v2p(x)
-
-#endif
-
-#ifndef __ASSEMBLY__
+#ifndef __SOC_PXA_CPU_H
+#define __SOC_PXA_CPU_H
 
+#ifdef CONFIG_ARM
 #include <asm/cputype.h>
+#endif
 
 /*
  *   CPU     Stepping     CPU_ID         JTAG_ID
                __cpu_is_pxa93x(read_cpuid_id());       \
         })
 
-
-/*
- * return current memory and LCD clock frequency in units of 10kHz
- */
-extern unsigned int get_memclk_frequency_10khz(void);
-
 #endif
-
-#endif  /* _ASM_ARCH_HARDWARE_H */
similarity index 98%
rename from arch/arm/plat-pxa/include/plat/mfp.h
rename to include/linux/soc/pxa/mfp.h
index 3accaa9..39779cb 100644 (file)
@@ -1,7 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * arch/arm/plat-pxa/include/plat/mfp.h
- *
  *   Common Multi-Function Pin Definitions
  *
  * Copyright (C) 2007 Marvell International Ltd.
@@ -453,8 +451,8 @@ struct mfp_addr_map {
 
 #define MFP_ADDR_END   { MFP_PIN_INVALID, 0 }
 
-void __init mfp_init_base(void __iomem *mfpr_base);
-void __init mfp_init_addr(struct mfp_addr_map *map);
+void mfp_init_base(void __iomem *mfpr_base);
+void mfp_init_addr(struct mfp_addr_map *map);
 
 /*
  * mfp_{read, write}() - for direct read/write access to the MFPR register
diff --git a/include/linux/soc/pxa/smemc.h b/include/linux/soc/pxa/smemc.h
new file mode 100644 (file)
index 0000000..f1ffea2
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __PXA_REGS_H
+#define __PXA_REGS_H
+
+#include <linux/types.h>
+
+void pxa_smemc_set_pcmcia_timing(int sock, u32 mcmem, u32 mcatt, u32 mcio);
+void pxa_smemc_set_pcmcia_socket(int nr);
+int pxa2xx_smemc_get_sdram_rows(void);
+unsigned int pxa3xx_smemc_get_memclkdiv(void);
+void __iomem *pxa_smemc_get_mdrefr(void);
+
+#endif
index df70eb1..d361ba2 100644 (file)
@@ -138,6 +138,8 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
  *     for driver coldplugging, and in uevents used for hotplugging
  * @driver_override: If the name of a driver is written to this attribute, then
  *     the device will bind to the named driver and only the named driver.
+ *     Do not set directly, because core frees it; use driver_set_override() to
+ *     set or clear it.
  * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
  *     not using a GPIO line)
  * @word_delay: delay to be inserted between consecutive
index 124e13c..9f442d7 100644 (file)
@@ -198,15 +198,15 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
  * @local_property_block_len: Length of the @local_property_block in dwords
  * @remote_properties: Properties exported by the remote domain
  * @remote_property_block_gen: Generation of @remote_properties
- * @get_uuid_work: Work used to retrieve @remote_uuid
- * @uuid_retries: Number of times left @remote_uuid is requested before
- *               giving up
- * @get_properties_work: Work used to get remote domain properties
- * @properties_retries: Number of times left to read properties
+ * @state: Next XDomain discovery state to run
+ * @state_work: Work used to run the next state
+ * @state_retries: Number of retries remaining for the state
  * @properties_changed_work: Work used to notify the remote domain that
  *                          our properties have changed
  * @properties_changed_retries: Number of times left to send properties
  *                             changed notification
+ * @bonding_possible: True if lane bonding is possible on local side
+ * @target_link_width: Target link width from the remote host
  * @link: Root switch link the remote domain is connected (ICM only)
  * @depth: Depth in the chain the remote domain is connected (ICM only)
  *
@@ -244,12 +244,13 @@ struct tb_xdomain {
        u32 local_property_block_len;
        struct tb_property_dir *remote_properties;
        u32 remote_property_block_gen;
-       struct delayed_work get_uuid_work;
-       int uuid_retries;
-       struct delayed_work get_properties_work;
-       int properties_retries;
+       int state;
+       struct delayed_work state_work;
+       int state_retries;
        struct delayed_work properties_changed_work;
        int properties_changed_retries;
+       bool bonding_possible;
+       u8 target_link_width;
        u8 link;
        u8 depth;
 };
@@ -465,6 +466,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
  * @msix_ida: Used to allocate MSI-X vectors for rings
  * @going_away: The host controller device is about to disappear so when
  *             this flag is set, avoid touching the hardware anymore.
+ * @iommu_dma_protection: An IOMMU will isolate external-facing ports.
  * @interrupt_work: Work scheduled to handle ring interrupt when no
  *                 MSI-X is used.
  * @hop_count: Number of rings (end point hops) supported by NHI.
@@ -479,6 +481,7 @@ struct tb_nhi {
        struct tb_ring **rx_rings;
        struct ida msix_ida;
        bool going_away;
+       bool iommu_dma_protection;
        struct work_struct interrupt_work;
        u32 hop_count;
        unsigned long quirks;
index 200b7b7..60bee86 100644 (file)
@@ -1969,21 +1969,10 @@ usb_pipe_endpoint(struct usb_device *dev, unsigned int pipe)
        return eps[usb_pipeendpoint(pipe)];
 }
 
-/*-------------------------------------------------------------------------*/
-
-static inline __u16
-usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
+static inline u16 usb_maxpacket(struct usb_device *udev, int pipe)
 {
-       struct usb_host_endpoint        *ep;
-       unsigned                        epnum = usb_pipeendpoint(pipe);
+       struct usb_host_endpoint *ep = usb_pipe_endpoint(udev, pipe);
 
-       if (is_out) {
-               WARN_ON(usb_pipein(pipe));
-               ep = udev->ep_out[epnum];
-       } else {
-               WARN_ON(usb_pipeout(pipe));
-               ep = udev->ep_in[epnum];
-       }
        if (!ep)
                return 0;
 
@@ -1991,8 +1980,6 @@ usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
        return usb_endpoint_maxp(&ep->desc);
 }
 
-/* ----------------------------------------------------------------------- */
-
 /* translate USB error codes to codes user space understands */
 static inline int usb_translate_errors(int error_code)
 {
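
Since usb_maxpacket() now derives the endpoint and its direction from the pipe alone (via usb_pipe_endpoint()), callers simply drop the old is_out argument. A hedged sketch of an updated call site; the surrounding driver context, ep_addr and the helper name are hypothetical:

/* The old third "is_out" argument is gone; the direction comes from the pipe. */
static u16 my_bulk_out_mps(struct usb_device *udev, u8 ep_addr)
{
        unsigned int pipe = usb_sndbulkpipe(udev, ep_addr);

        return usb_maxpacket(udev, pipe);   /* was usb_maxpacket(udev, pipe, 1) */
}
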
index 10fe57c..3ad58b7 100644 (file)
@@ -386,6 +386,7 @@ struct usb_gadget_ops {
  * @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag
  *     indicates that it supports LPM as per the LPM ECN & errata.
  * @irq: the interrupt number for device controller.
+ * @id_number: a unique ID number for ensuring that gadget names are distinct
  *
  * Gadgets have a mostly-portable "gadget driver" implementing device
  * functions, handling all usb configurations and interfaces.  Gadget
@@ -446,6 +447,7 @@ struct usb_gadget {
        unsigned                        connected:1;
        unsigned                        lpm_capable:1;
        int                             irq;
+       int                             id_number;
 };
 #define work_to_gadget(w)      (container_of((w), struct usb_gadget, work))
 
@@ -664,9 +666,9 @@ static inline int usb_gadget_check_config(struct usb_gadget *gadget)
  * @driver: Driver model state for this driver.
  * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL,
  *     this driver will be bound to any available UDC.
- * @pending: UDC core private data used for deferred probe of this driver.
- * @match_existing_only: If udc is not found, return an error and don't add this
- *      gadget driver to list of pending driver
+ * @match_existing_only: If udc is not found, return an error and fail
+ *     the driver registration
+ * @is_bound: Allow a driver to be bound to only one gadget
  *
  * Devices are disabled till a gadget driver successfully bind()s, which
  * means the driver will handle setup() requests needed to enumerate (and
@@ -729,8 +731,8 @@ struct usb_gadget_driver {
        struct device_driver    driver;
 
        char                    *udc_name;
-       struct list_head        pending;
        unsigned                match_existing_only:1;
+       bool                    is_bound:1;
 };
 
 
@@ -740,22 +742,30 @@ struct usb_gadget_driver {
 /* driver modules register and unregister, as usual.
  * these calls must be made in a context that can sleep.
  *
- * these will usually be implemented directly by the hardware-dependent
- * usb bus interface driver, which will only support a single driver.
+ * A gadget driver can be bound to only one gadget at a time.
  */
 
 /**
- * usb_gadget_probe_driver - probe a gadget driver
+ * usb_gadget_register_driver_owner - register a gadget driver
  * @driver: the driver being registered
+ * @owner: the driver module
+ * @mod_name: the driver module's build name
  * Context: can sleep
  *
  * Call this in your gadget driver's module initialization function,
- * to tell the underlying usb controller driver about your driver.
+ * to tell the underlying UDC controller driver about your driver.
  * The @bind() function will be called to bind it to a gadget before this
  * registration call returns.  It's expected that the @bind() function will
  * be in init sections.
+ *
+ * Use the macro defined below instead of calling this directly.
  */
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver);
+int usb_gadget_register_driver_owner(struct usb_gadget_driver *driver,
+               struct module *owner, const char *mod_name);
+
+/* use a define to avoid include chaining to get THIS_MODULE & friends */
+#define usb_gadget_register_driver(driver) \
+       usb_gadget_register_driver_owner(driver, THIS_MODULE, KBUILD_MODNAME)
 
 /**
  * usb_gadget_unregister_driver - unregister a gadget driver
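
With usb_gadget_probe_driver() replaced by the usb_gadget_register_driver() macro, which forwards THIS_MODULE and KBUILD_MODNAME to usb_gadget_register_driver_owner(), a gadget module's registration boilerplate looks roughly like the sketch below; my_bind()/my_unbind() and the remaining usb_gadget_driver fields are assumed to exist elsewhere:

static struct usb_gadget_driver my_gadget_driver = {
        .function       = "my_gadget",
        .max_speed      = USB_SPEED_HIGH,
        .bind           = my_bind,
        .unbind         = my_unbind,
        .driver         = {
                .name   = "my_gadget",
        },
};

static int __init my_gadget_init(void)
{
        /* Expands to usb_gadget_register_driver_owner(&my_gadget_driver,
         * THIS_MODULE, KBUILD_MODNAME) */
        return usb_gadget_register_driver(&my_gadget_driver);
}
module_init(my_gadget_init);

static void __exit my_gadget_exit(void)
{
        usb_gadget_unregister_driver(&my_gadget_driver);
}
module_exit(my_gadget_exit);
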
index 548a028..2c1fc92 100644 (file)
@@ -124,6 +124,7 @@ struct usb_hcd {
 #define HCD_FLAG_RH_RUNNING            5       /* root hub is running? */
 #define HCD_FLAG_DEAD                  6       /* controller has died? */
 #define HCD_FLAG_INTF_AUTHORIZED       7       /* authorize interfaces? */
+#define HCD_FLAG_DEFER_RH_REGISTER     8       /* Defer roothub registration */
 
        /* The flags can be tested using these macros; they are likely to
         * be slightly faster than test_bit().
@@ -134,6 +135,7 @@ struct usb_hcd {
 #define HCD_WAKEUP_PENDING(hcd)        ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
 #define HCD_RH_RUNNING(hcd)    ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
 #define HCD_DEAD(hcd)          ((hcd)->flags & (1U << HCD_FLAG_DEAD))
+#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER))
 
        /*
         * Specifies if interfaces are authorized by default
index a9d9957..ee57781 100644 (file)
@@ -8,11 +8,13 @@
 
 struct device;
 struct typec_mux;
+struct typec_mux_dev;
 struct typec_switch;
+struct typec_switch_dev;
 struct typec_altmode;
 struct fwnode_handle;
 
-typedef int (*typec_switch_set_fn_t)(struct typec_switch *sw,
+typedef int (*typec_switch_set_fn_t)(struct typec_switch_dev *sw,
                                     enum typec_orientation orientation);
 
 struct typec_switch_desc {
@@ -32,13 +34,13 @@ static inline struct typec_switch *typec_switch_get(struct device *dev)
        return fwnode_typec_switch_get(dev_fwnode(dev));
 }
 
-struct typec_switch *
+struct typec_switch_dev *
 typec_switch_register(struct device *parent,
                      const struct typec_switch_desc *desc);
-void typec_switch_unregister(struct typec_switch *sw);
+void typec_switch_unregister(struct typec_switch_dev *sw);
 
-void typec_switch_set_drvdata(struct typec_switch *sw, void *data);
-void *typec_switch_get_drvdata(struct typec_switch *sw);
+void typec_switch_set_drvdata(struct typec_switch_dev *sw, void *data);
+void *typec_switch_get_drvdata(struct typec_switch_dev *sw);
 
 struct typec_mux_state {
        struct typec_altmode *alt;
@@ -46,7 +48,7 @@ struct typec_mux_state {
        void *data;
 };
 
-typedef int (*typec_mux_set_fn_t)(struct typec_mux *mux,
+typedef int (*typec_mux_set_fn_t)(struct typec_mux_dev *mux,
                                  struct typec_mux_state *state);
 
 struct typec_mux_desc {
@@ -67,11 +69,11 @@ typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc)
        return fwnode_typec_mux_get(dev_fwnode(dev), desc);
 }
 
-struct typec_mux *
+struct typec_mux_dev *
 typec_mux_register(struct device *parent, const struct typec_mux_desc *desc);
-void typec_mux_unregister(struct typec_mux *mux);
+void typec_mux_unregister(struct typec_mux_dev *mux);
 
-void typec_mux_set_drvdata(struct typec_mux *mux, void *data);
-void *typec_mux_get_drvdata(struct typec_mux *mux);
+void typec_mux_set_drvdata(struct typec_mux_dev *mux, void *data);
+void *typec_mux_get_drvdata(struct typec_mux_dev *mux);
 
 #endif /* __USB_TYPEC_MUX */
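
The typec_switch_dev/typec_mux_dev split leaves struct typec_mux and struct typec_switch as consumer handles while providers now deal in the *_dev types. A hedged sketch of a mux provider using the renamed types; struct my_mux, the probe wiring and the register programming are hypothetical, and the typec_mux_desc field names are assumed unchanged:

static int my_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
{
        struct my_mux *priv = typec_mux_get_drvdata(mux);

        /* program priv's hardware for the requested state here */
        return 0;
}

static int my_mux_create(struct device *dev, struct my_mux *priv)
{
        struct typec_mux_desc desc = {
                .fwnode         = dev_fwnode(dev),
                .set            = my_mux_set,
                .drvdata        = priv,
        };
        struct typec_mux_dev *mux;

        mux = typec_mux_register(dev, &desc);
        return PTR_ERR_OR_ZERO(mux);
}
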
index 8943a20..4700a88 100644 (file)
@@ -64,11 +64,15 @@ struct vdpa_mgmt_dev;
  * struct vdpa_device - representation of a vDPA device
  * @dev: underlying device
  * @dma_dev: the actual device that is performing DMA
- * @driver_override: driver name to force a match
+ * @driver_override: driver name to force a match; do not set directly,
+ *                   because core frees it; use driver_set_override() to
+ *                   set or clear it.
  * @config: the configuration ops for this device.
- * @cf_mutex: Protects get and set access to configuration layout.
+ * @cf_lock: Protects get and set access to configuration layout.
  * @index: device index
  * @features_valid: were features initialized? for legacy guests
+ * @ngroups: the number of virtqueue groups
+ * @nas: the number of address spaces
  * @use_va: indicate whether virtual address must be used by this device
  * @nvqs: maximum number of supported virtqueues
  * @mdev: management device pointer; caller must setup when registering device as part
@@ -79,12 +83,14 @@ struct vdpa_device {
        struct device *dma_dev;
        const char *driver_override;
        const struct vdpa_config_ops *config;
-       struct mutex cf_mutex; /* Protects get/set config */
+       struct rw_semaphore cf_lock; /* Protects get/set config */
        unsigned int index;
        bool features_valid;
        bool use_va;
        u32 nvqs;
        struct vdpa_mgmt_dev *mdev;
+       unsigned int ngroups;
+       unsigned int nas;
 };
 
 /**
@@ -172,6 +178,10 @@ struct vdpa_map_file {
  *                             for the device
  *                             @vdev: vdpa device
  *                             Returns virtqueue align requirement
+ * @get_vq_group:              Get the group id for a specific virtqueue
+ *                             @vdev: vdpa device
+ *                             @idx: virtqueue index
+ *                             Returns u32: group id for this virtqueue
  * @get_device_features:       Get virtio features supported by the device
  *                             @vdev: vdpa device
  *                             Returns the virtio features support by the
@@ -232,10 +242,17 @@ struct vdpa_map_file {
  *                             @vdev: vdpa device
  *                             Returns the iova range supported by
  *                             the device.
+ * @set_group_asid:            Set address space identifier for a
+ *                             virtqueue group
+ *                             @vdev: vdpa device
+ *                             @group: virtqueue group
+ *                             @asid: address space id for this group
+ *                             Returns integer: success (0) or error (< 0)
  * @set_map:                   Set device memory mapping (optional)
  *                             Needed for device that using device
  *                             specific DMA translation (on-chip IOMMU)
  *                             @vdev: vdpa device
+ *                             @asid: address space identifier
  *                             @iotlb: vhost memory mapping to be
  *                             used by the vDPA
  *                             Returns integer: success (0) or error (< 0)
@@ -244,6 +261,7 @@ struct vdpa_map_file {
  *                             specific DMA translation (on-chip IOMMU)
  *                             and preferring incremental map.
  *                             @vdev: vdpa device
+ *                             @asid: address space identifier
  *                             @iova: iova to be mapped
  *                             @size: size of the area
  *                             @pa: physical address for the map
@@ -255,6 +273,7 @@ struct vdpa_map_file {
  *                             specific DMA translation (on-chip IOMMU)
  *                             and preferring incremental unmap.
  *                             @vdev: vdpa device
+ *                             @asid: address space identifier
  *                             @iova: iova to be unmapped
  *                             @size: size of the area
  *                             Returns integer: success (0) or error (< 0)
@@ -276,6 +295,9 @@ struct vdpa_config_ops {
                            const struct vdpa_vq_state *state);
        int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
                            struct vdpa_vq_state *state);
+       int (*get_vendor_vq_stats)(struct vdpa_device *vdev, u16 idx,
+                                  struct sk_buff *msg,
+                                  struct netlink_ext_ack *extack);
        struct vdpa_notification_area
        (*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
        /* vq irq is not expected to be changed once DRIVER_OK is set */
@@ -283,6 +305,7 @@ struct vdpa_config_ops {
 
        /* Device ops */
        u32 (*get_vq_align)(struct vdpa_device *vdev);
+       u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
        u64 (*get_device_features)(struct vdpa_device *vdev);
        int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
        u64 (*get_driver_features)(struct vdpa_device *vdev);
@@ -304,10 +327,14 @@ struct vdpa_config_ops {
        struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev);
 
        /* DMA ops */
-       int (*set_map)(struct vdpa_device *vdev, struct vhost_iotlb *iotlb);
-       int (*dma_map)(struct vdpa_device *vdev, u64 iova, u64 size,
-                      u64 pa, u32 perm, void *opaque);
-       int (*dma_unmap)(struct vdpa_device *vdev, u64 iova, u64 size);
+       int (*set_map)(struct vdpa_device *vdev, unsigned int asid,
+                      struct vhost_iotlb *iotlb);
+       int (*dma_map)(struct vdpa_device *vdev, unsigned int asid,
+                      u64 iova, u64 size, u64 pa, u32 perm, void *opaque);
+       int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
+                        u64 iova, u64 size);
+       int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
+                             unsigned int asid);
 
        /* Free device resources */
        void (*free)(struct vdpa_device *vdev);
@@ -315,6 +342,7 @@ struct vdpa_config_ops {
 
 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
                                        const struct vdpa_config_ops *config,
+                                       unsigned int ngroups, unsigned int nas,
                                        size_t size, const char *name,
                                        bool use_va);
 
@@ -325,17 +353,20 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
  * @member: the name of struct vdpa_device within the @dev_struct
  * @parent: the parent device
  * @config: the bus operations that is supported by this device
+ * @ngroups: the number of virtqueue groups supported by this device
+ * @nas: the number of address spaces
  * @name: name of the vdpa device
  * @use_va: indicate whether virtual address must be used by this device
  *
  * Return allocated data structure or ERR_PTR upon error
  */
-#define vdpa_alloc_device(dev_struct, member, parent, config, name, use_va)   \
-                         container_of(__vdpa_alloc_device( \
-                                      parent, config, \
-                                      sizeof(dev_struct) + \
+#define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, nas, \
+                         name, use_va) \
+                         container_of((__vdpa_alloc_device( \
+                                      parent, config, ngroups, nas, \
+                                      (sizeof(dev_struct) + \
                                       BUILD_BUG_ON_ZERO(offsetof( \
-                                      dev_struct, member)), name, use_va), \
+                                      dev_struct, member))), name, use_va)), \
                                       dev_struct, member)
 
 int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
@@ -395,10 +426,10 @@ static inline int vdpa_reset(struct vdpa_device *vdev)
        const struct vdpa_config_ops *ops = vdev->config;
        int ret;
 
-       mutex_lock(&vdev->cf_mutex);
+       down_write(&vdev->cf_lock);
        vdev->features_valid = false;
        ret = ops->reset(vdev);
-       mutex_unlock(&vdev->cf_mutex);
+       up_write(&vdev->cf_lock);
        return ret;
 }
 
@@ -417,9 +448,9 @@ static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
 {
        int ret;
 
-       mutex_lock(&vdev->cf_mutex);
+       down_write(&vdev->cf_lock);
        ret = vdpa_set_features_unlocked(vdev, features);
-       mutex_unlock(&vdev->cf_mutex);
+       up_write(&vdev->cf_lock);
 
        return ret;
 }
@@ -463,7 +494,7 @@ struct vdpa_mgmtdev_ops {
 struct vdpa_mgmt_dev {
        struct device *device;
        const struct vdpa_mgmtdev_ops *ops;
-       const struct virtio_device_id *id_table;
+       struct virtio_device_id *id_table;
        u64 config_attr_mask;
        struct list_head list;
        u64 supported_features;
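
vdpa_alloc_device() now takes the number of virtqueue groups and address spaces up front, matching the new asid-aware set_map()/dma_map()/dma_unmap() and set_group_asid() ops. A parent driver that neither groups its virtqueues nor splits address spaces passes 1 for both; a hedged sketch (struct my_vdpa and my_vdpa_config_ops are hypothetical, error handling elided):

struct my_vdpa {
        struct vdpa_device vdpa;
        /* device-private state ... */
};

static struct my_vdpa *my_vdpa_create(struct device *parent)
{
        /* One virtqueue group, one address space: the pre-asid behaviour */
        return vdpa_alloc_device(struct my_vdpa, vdpa, parent,
                                 &my_vdpa_config_ops, 1, 1, NULL, false);
}
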
index 66dda06..aa888cc 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/poll.h>
 #include <uapi/linux/vfio.h>
 
+struct kvm;
+
 /*
  * VFIO devices can be placed in a set, this allows all devices to share this
  * structure and the VFIO core will provide a lock that is held around
@@ -34,6 +36,8 @@ struct vfio_device {
        struct vfio_device_set *dev_set;
        struct list_head dev_set_list;
        unsigned int migration_flags;
+       /* Driver must reference the kvm during open_device or never touch it */
+       struct kvm *kvm;
 
        /* Members below here are private, not for driver use */
        refcount_t refcount;
@@ -125,8 +129,6 @@ void vfio_uninit_group_dev(struct vfio_device *device);
 int vfio_register_group_dev(struct vfio_device *device);
 int vfio_register_emulated_iommu_dev(struct vfio_device *device);
 void vfio_unregister_group_dev(struct vfio_device *device);
-extern struct vfio_device *vfio_device_get_from_dev(struct device *dev);
-extern void vfio_device_put(struct vfio_device *device);
 
 int vfio_assign_device_set(struct vfio_device *device, void *set_id);
 
@@ -138,56 +140,36 @@ int vfio_mig_get_next_state(struct vfio_device *device,
 /*
  * External user API
  */
-extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
-extern void vfio_group_put_external_user(struct vfio_group *group);
-extern struct vfio_group *vfio_group_get_external_user_from_dev(struct device
-                                                               *dev);
-extern bool vfio_external_group_match_file(struct vfio_group *group,
-                                          struct file *filep);
-extern int vfio_external_user_iommu_id(struct vfio_group *group);
-extern long vfio_external_check_extension(struct vfio_group *group,
-                                         unsigned long arg);
+extern struct iommu_group *vfio_file_iommu_group(struct file *file);
+extern bool vfio_file_enforced_coherent(struct file *file);
+extern void vfio_file_set_kvm(struct file *file, struct kvm *kvm);
+extern bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
 
 #define VFIO_PIN_PAGES_MAX_ENTRIES     (PAGE_SIZE/sizeof(unsigned long))
 
-extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
+extern int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
                          int npage, int prot, unsigned long *phys_pfn);
-extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
+extern int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
                            int npage);
-
-extern int vfio_group_pin_pages(struct vfio_group *group,
-                               unsigned long *user_iova_pfn, int npage,
-                               int prot, unsigned long *phys_pfn);
-extern int vfio_group_unpin_pages(struct vfio_group *group,
-                                 unsigned long *user_iova_pfn, int npage);
-
-extern int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
+extern int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova,
                       void *data, size_t len, bool write);
 
-extern struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group);
-
 /* each type has independent events */
 enum vfio_notify_type {
        VFIO_IOMMU_NOTIFY = 0,
-       VFIO_GROUP_NOTIFY = 1,
 };
 
 /* events for VFIO_IOMMU_NOTIFY */
 #define VFIO_IOMMU_NOTIFY_DMA_UNMAP    BIT(0)
 
-/* events for VFIO_GROUP_NOTIFY */
-#define VFIO_GROUP_NOTIFY_SET_KVM      BIT(0)
-
-extern int vfio_register_notifier(struct device *dev,
+extern int vfio_register_notifier(struct vfio_device *device,
                                  enum vfio_notify_type type,
                                  unsigned long *required_events,
                                  struct notifier_block *nb);
-extern int vfio_unregister_notifier(struct device *dev,
+extern int vfio_unregister_notifier(struct vfio_device *device,
                                    enum vfio_notify_type type,
                                    struct notifier_block *nb);
 
-struct kvm;
-extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
 
 /*
  * Sub-module helpers
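
The reworked external-user API is keyed on struct vfio_device (and, for the KVM/group helpers, on struct file) rather than struct device or struct vfio_group. A hedged sketch of a pin call site after the change; struct my_dev and its embedded vfio_dev member are hypothetical, and the IOMMU_READ/IOMMU_WRITE prot flags are assumed to carry over unchanged from the old API:

static int my_pin_one(struct my_dev *m, unsigned long user_pfn,
                      unsigned long *phys_pfn)
{
        /* was: vfio_pin_pages(m->dev, &user_pfn, 1, prot, phys_pfn) */
        return vfio_pin_pages(&m->vfio_dev, &user_pfn, 1,
                              IOMMU_READ | IOMMU_WRITE, phys_pfn);
}
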
index 48f2dd3..23c176d 100644 (file)
@@ -227,8 +227,9 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
 int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev);
 void vfio_pci_core_uninit_device(struct vfio_pci_core_device *vdev);
 void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev);
-int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn);
 extern const struct pci_error_handlers vfio_pci_core_err_handlers;
+int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
+                                 int nr_virtfn);
 long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
                unsigned long arg);
 int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
index 2d0e2f5..e79a408 100644 (file)
@@ -36,6 +36,8 @@ int vhost_iotlb_add_range(struct vhost_iotlb *iotlb, u64 start, u64 last,
                          u64 addr, unsigned int perm);
 void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last);
 
+void vhost_iotlb_init(struct vhost_iotlb *iotlb, unsigned int limit,
+                     unsigned int flags);
 struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags);
 void vhost_iotlb_free(struct vhost_iotlb *iotlb);
 void vhost_iotlb_reset(struct vhost_iotlb *iotlb);
index 5464f39..d8fdf17 100644 (file)
@@ -131,6 +131,7 @@ void unregister_virtio_device(struct virtio_device *dev);
 bool is_virtio_device(struct device *dev);
 
 void virtio_break_device(struct virtio_device *dev);
+void __virtio_unbreak_device(struct virtio_device *dev);
 
 void virtio_config_changed(struct virtio_device *dev);
 #ifdef CONFIG_PM_SLEEP
index b341dd6..9a36051 100644 (file)
@@ -57,6 +57,11 @@ struct virtio_shm_region {
  *             include a NULL entry for vqs unused by driver
  *     Returns 0 on success or error status
  * @del_vqs: free virtqueues found by find_vqs().
+ * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
+ *      The function guarantees that all memory operations on the
+ *      queue before it are visible to the vring_interrupt() that is
+ *      called after it.
+ *      vdev: the virtio_device
  * @get_features: get the array of feature bits for this device.
  *     vdev: the virtio_device
  *     Returns the first 64 feature bits (all we currently need).
@@ -89,6 +94,7 @@ struct virtio_config_ops {
                        const char * const names[], const bool *ctx,
                        struct irq_affinity *desc);
        void (*del_vqs)(struct virtio_device *);
+       void (*synchronize_cbs)(struct virtio_device *);
        u64 (*get_features)(struct virtio_device *vdev);
        int (*finalize_features)(struct virtio_device *vdev);
        const char *(*bus_name)(struct virtio_device *vdev);
@@ -218,6 +224,25 @@ int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
 }
 
 /**
+ * virtio_synchronize_cbs - synchronize with virtqueue callbacks
+ * @vdev: the device
+ */
+static inline
+void virtio_synchronize_cbs(struct virtio_device *dev)
+{
+       if (dev->config->synchronize_cbs) {
+               dev->config->synchronize_cbs(dev);
+       } else {
+               /*
+                * A best effort fallback to synchronize with
+                * interrupts, preemption and softirq disabled
+                * regions. See comment above synchronize_rcu().
+                */
+               synchronize_rcu();
+       }
+}
+
+/**
  * virtio_device_ready - enable vq use in probe function
  * @vdev: the device
  *
@@ -230,7 +255,27 @@ void virtio_device_ready(struct virtio_device *dev)
 {
        unsigned status = dev->config->get_status(dev);
 
-       BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
+       WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
+
+       /*
+        * The virtio_synchronize_cbs() makes sure vring_interrupt()
+        * will see the driver specific setup if it sees vq->broken
+        * as false (even if the notifications come before DRIVER_OK).
+        */
+       virtio_synchronize_cbs(dev);
+       __virtio_unbreak_device(dev);
+       /*
+        * The transport should ensure the visibility of vq->broken
+        * before setting DRIVER_OK. See the comments for the transport
+        * specific set_status() method.
+        *
+        * A well behaved device will only notify a virtqueue after
+        * DRIVER_OK; this means that once the device sees DRIVER_OK it
+        * should also "see" the coherent memory write that cleared
+        * vq->broken (done by the driver just above), so the driver's
+        * subsequent vring_interrupt() will find vq->broken false and
+        * no notification is lost.
+        */
        dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
 }
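
Because virtio_device_ready() now unbreaks the virtqueues and synchronizes with callbacks itself, a driver's probe() keeps the usual ordering: finish all driver-private setup, call virtio_device_ready(), and only then start kicking the device. A hedged sketch (my_setup_vqs()/my_kick_device() are hypothetical, error handling elided):

static int my_virtio_probe(struct virtio_device *vdev)
{
        int err;

        err = my_setup_vqs(vdev);       /* find_vqs(), fill rings, etc. */
        if (err)
                return err;

        /* After this, vring_interrupt() may run and must find the
         * driver-private state above fully initialised */
        virtio_device_ready(vdev);

        my_kick_device(vdev);           /* e.g. notify the first queue */
        return 0;
}
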
 
index 462854f..332d2b0 100644 (file)
@@ -254,9 +254,6 @@ struct wm97xx_mach_ops {
        int (*acc_startup) (struct wm97xx *);
        void (*acc_shutdown) (struct wm97xx *);
 
-       /* interrupt mask control - required for accelerated operation */
-       void (*irq_enable) (struct wm97xx *, int enable);
-
        /* GPIO pin used for accelerated operation */
        int irq_gpio;
 
@@ -281,7 +278,6 @@ struct wm97xx {
        unsigned long ts_reader_min_interval; /* Minimum interval */
        unsigned int pen_irq;           /* Pen IRQ number in use */
        struct workqueue_struct *ts_workq;
-       struct work_struct pen_event_work;
        u16 acc_slot;                   /* AC97 slot used for acc touch data */
        u16 acc_rate;                   /* acc touch data rate */
        unsigned pen_is_down:1;         /* Pen is down */
index 7a4db8b..0e40c3d 100644 (file)
@@ -15,7 +15,7 @@ enum amt_msg_type {
        AMT_MSG_MEMBERSHIP_QUERY,
        AMT_MSG_MEMBERSHIP_UPDATE,
        AMT_MSG_MULTICAST_DATA,
-       AMT_MSG_TEARDOWM,
+       AMT_MSG_TEARDOWN,
        __AMT_MSG_MAX,
 };
 
index 0f9790c..a427a05 100644 (file)
@@ -228,6 +228,7 @@ typedef struct ax25_dev {
        ax25_dama_info          dama;
 #endif
        refcount_t              refcount;
+       bool device_up;
 } ax25_dev;
 
 typedef struct ax25_cb {
index 5a52a20..c0ea2a4 100644 (file)
@@ -155,21 +155,18 @@ struct bdaddr_list_with_irk {
        u8 local_irk[16];
 };
 
+/* Bitmask of connection flags */
 enum hci_conn_flags {
-       HCI_CONN_FLAG_REMOTE_WAKEUP,
-       HCI_CONN_FLAG_DEVICE_PRIVACY,
-
-       __HCI_CONN_NUM_FLAGS,
+       HCI_CONN_FLAG_REMOTE_WAKEUP = 1,
+       HCI_CONN_FLAG_DEVICE_PRIVACY = 2,
 };
-
-/* Make sure number of flags doesn't exceed sizeof(current_flags) */
-static_assert(__HCI_CONN_NUM_FLAGS < 32);
+typedef u8 hci_conn_flags_t;
 
 struct bdaddr_list_with_flags {
        struct list_head list;
        bdaddr_t bdaddr;
        u8 bdaddr_type;
-       DECLARE_BITMAP(flags, __HCI_CONN_NUM_FLAGS);
+       hci_conn_flags_t flags;
 };
 
 struct bt_uuid {
@@ -576,7 +573,7 @@ struct hci_dev {
        struct rfkill           *rfkill;
 
        DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
-       DECLARE_BITMAP(conn_flags, __HCI_CONN_NUM_FLAGS);
+       hci_conn_flags_t        conn_flags;
 
        __s8                    adv_tx_power;
        __u8                    adv_data[HCI_MAX_EXT_AD_LENGTH];
@@ -775,7 +772,7 @@ struct hci_conn_params {
 
        struct hci_conn *conn;
        bool explicit_connect;
-       DECLARE_BITMAP(flags, __HCI_CONN_NUM_FLAGS);
+       hci_conn_flags_t flags;
        u8  privacy_mode;
 };
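
With the connection flags turned from a DECLARE_BITMAP() into a plain hci_conn_flags_t bitmask, call sites use ordinary bit operations instead of test_bit()/set_bit()/clear_bit(). A hedged sketch; allow_wakeup() is a stand-in for whatever the caller actually does:

static void my_update_conn_flags(struct hci_conn_params *params)
{
        if (params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)        /* was test_bit() */
                allow_wakeup(params);

        params->flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;          /* was set_bit() */
        params->flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;          /* was clear_bit() */
}
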
 
index b14f4c0..cb904d3 100644 (file)
@@ -149,7 +149,9 @@ struct bond_params {
        struct reciprocal_value reciprocal_packets_per_slave;
        u16 ad_actor_sys_prio;
        u16 ad_user_port_key;
+#if IS_ENABLED(CONFIG_IPV6)
        struct in6_addr ns_targets[BOND_MAX_NS_TARGETS];
+#endif
 
        /* 2 bytes of padding : see ether_addr_equal_64bits() */
        u8 ad_actor_system[ETH_ALEN + 2];
@@ -503,12 +505,14 @@ static inline int bond_is_ip_target_ok(__be32 addr)
        return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int bond_is_ip6_target_ok(struct in6_addr *addr)
 {
        return !ipv6_addr_any(addr) &&
               !ipv6_addr_loopback(addr) &&
               !ipv6_addr_is_multicast(addr);
 }
+#endif
 
 /* Get the oldest arp which we've received on this slave for bond's
  * arp_targets.
@@ -746,6 +750,7 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
        return -1;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr *ip)
 {
        int i;
@@ -758,6 +763,7 @@ static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr
 
        return -1;
 }
+#endif
 
 /* exported from bond_main.c */
 extern unsigned int bond_net_id;
index 6406cfe..37866c8 100644 (file)
@@ -58,8 +58,13 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
        int ret = NF_ACCEPT;
 
        if (ct) {
-               if (!nf_ct_is_confirmed(ct))
+               if (!nf_ct_is_confirmed(ct)) {
                        ret = __nf_conntrack_confirm(skb);
+
+                       if (ret == NF_ACCEPT)
+                               ct = (struct nf_conn *)skb_nfct(skb);
+               }
+
                if (ret == NF_ACCEPT && nf_ct_ecache_exist(ct))
                        nf_ct_deliver_cached_events(ct);
        }
index 9bab396..d6cf511 100644 (file)
@@ -187,37 +187,17 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
                if (spin_trylock(&qdisc->seqlock))
                        return true;
 
-               /* Paired with smp_mb__after_atomic() to make sure
-                * STATE_MISSED checking is synchronized with clearing
-                * in pfifo_fast_dequeue().
+               /* No need to insist if the MISSED flag was already set.
+                * Note that test_and_set_bit() also gives us memory ordering
+                * guarantees wrt potential earlier enqueue() and below
+                * spin_trylock(), both of which are necessary to prevent races
                 */
-               smp_mb__before_atomic();
-
-               /* If the MISSED flag is set, it means other thread has
-                * set the MISSED flag before second spin_trylock(), so
-                * we can return false here to avoid multi cpus doing
-                * the set_bit() and second spin_trylock() concurrently.
-                */
-               if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
+               if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
                        return false;
 
-               /* Set the MISSED flag before the second spin_trylock(),
-                * if the second spin_trylock() return false, it means
-                * other cpu holding the lock will do dequeuing for us
-                * or it will see the MISSED flag set after releasing
-                * lock and reschedule the net_tx_action() to do the
-                * dequeuing.
-                */
-               set_bit(__QDISC_STATE_MISSED, &qdisc->state);
-
-               /* spin_trylock() only has load-acquire semantic, so use
-                * smp_mb__after_atomic() to ensure STATE_MISSED is set
-                * before doing the second spin_trylock().
-                */
-               smp_mb__after_atomic();
-
-               /* Retry again in case other CPU may not see the new flag
-                * after it releases the lock at the end of qdisc_run_end().
+               /* Try to take the lock again to make sure that we will either
+                * grab it or the CPU that still has it will see MISSED set
+                * when testing it in qdisc_run_end()
                 */
                return spin_trylock(&qdisc->seqlock);
        }
@@ -229,6 +209,12 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
        if (qdisc->flags & TCQ_F_NOLOCK) {
                spin_unlock(&qdisc->seqlock);
 
+               /* spin_unlock() only has store-release semantic. The unlock
+                * and test_bit() ordering is a store-load ordering, so a full
+                * memory barrier is needed here.
+                */
+               smp_mb();
+
                if (unlikely(test_bit(__QDISC_STATE_MISSED,
                                      &qdisc->state)))
                        __netif_schedule(qdisc);
diff --git a/include/pcmcia/soc_common.h b/include/pcmcia/soc_common.h
new file mode 100644 (file)
index 0000000..d4f18f4
--- /dev/null
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <pcmcia/ss.h>
+
+struct module;
+struct cpufreq_freqs;
+
+struct soc_pcmcia_regulator {
+       struct regulator        *reg;
+       bool                    on;
+};
+
+struct pcmcia_state {
+  unsigned detect: 1,
+            ready: 1,
+             bvd1: 1,
+             bvd2: 1,
+           wrprot: 1,
+            vs_3v: 1,
+            vs_Xv: 1;
+};
+
+/*
+ * This structure encapsulates per-socket state which we might need to
+ * use when responding to a Card Services query of some kind.
+ */
+struct soc_pcmcia_socket {
+       struct pcmcia_socket    socket;
+
+       /*
+        * Info from low level handler
+        */
+       unsigned int            nr;
+       struct clk              *clk;
+
+       /*
+        * Core PCMCIA state
+        */
+       const struct pcmcia_low_level *ops;
+
+       unsigned int            status;
+       socket_state_t          cs_state;
+
+       unsigned short          spd_io[MAX_IO_WIN];
+       unsigned short          spd_mem[MAX_WIN];
+       unsigned short          spd_attr[MAX_WIN];
+
+       struct resource         res_skt;
+       struct resource         res_io;
+       struct resource         res_io_io;
+       struct resource         res_mem;
+       struct resource         res_attr;
+
+       struct {
+               int             gpio;
+               struct gpio_desc *desc;
+               unsigned int    irq;
+               const char      *name;
+       } stat[6];
+#define SOC_STAT_CD            0       /* Card detect */
+#define SOC_STAT_BVD1          1       /* BATDEAD / IOSTSCHG */
+#define SOC_STAT_BVD2          2       /* BATWARN / IOSPKR */
+#define SOC_STAT_RDY           3       /* Ready / Interrupt */
+#define SOC_STAT_VS1           4       /* Voltage sense 1 */
+#define SOC_STAT_VS2           5       /* Voltage sense 2 */
+
+       struct gpio_desc        *gpio_reset;
+       struct gpio_desc        *gpio_bus_enable;
+       struct soc_pcmcia_regulator vcc;
+       struct soc_pcmcia_regulator vpp;
+
+       unsigned int            irq_state;
+
+#ifdef CONFIG_CPU_FREQ
+       struct notifier_block   cpufreq_nb;
+#endif
+       struct timer_list       poll_timer;
+       struct list_head        node;
+       void *driver_data;
+};
+
+
+struct pcmcia_low_level {
+       struct module *owner;
+
+       /* first socket in system */
+       int first;
+       /* nr of sockets */
+       int nr;
+
+       int (*hw_init)(struct soc_pcmcia_socket *);
+       void (*hw_shutdown)(struct soc_pcmcia_socket *);
+
+       void (*socket_state)(struct soc_pcmcia_socket *, struct pcmcia_state *);
+       int (*configure_socket)(struct soc_pcmcia_socket *, const socket_state_t *);
+
+       /*
+        * Enable card status IRQs on (re-)initialisation.  This can
+        * be called at initialisation, power management event, or
+        * pcmcia event.
+        */
+       void (*socket_init)(struct soc_pcmcia_socket *);
+
+       /*
+        * Disable card status IRQs and PCMCIA bus on suspend.
+        */
+       void (*socket_suspend)(struct soc_pcmcia_socket *);
+
+       /*
+        * Hardware specific timing routines.
+        * If provided, the get_timing routine overrides the SOC default.
+        */
+       unsigned int (*get_timing)(struct soc_pcmcia_socket *, unsigned int, unsigned int);
+       int (*set_timing)(struct soc_pcmcia_socket *);
+       int (*show_timing)(struct soc_pcmcia_socket *, char *);
+
+#ifdef CONFIG_CPU_FREQ
+       /*
+        * CPUFREQ support.
+        */
+       int (*frequency_change)(struct soc_pcmcia_socket *, unsigned long, struct cpufreq_freqs *);
+#endif
+};
+
+
+
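
The new include/pcmcia/soc_common.h makes the SoC socket glue visible to board code, which supplies a struct pcmcia_low_level describing its sockets. A hedged sketch with stub bodies, showing only a subset of the hooks; how the ops are handed to the platform's soc_common helpers is deliberately not shown here:

static int my_board_hw_init(struct soc_pcmcia_socket *skt)
{
        /* claim card-detect/ready GPIOs, regulators, etc. for this socket */
        return 0;
}

static void my_board_socket_state(struct soc_pcmcia_socket *skt,
                                  struct pcmcia_state *state)
{
        /* sample the card-detect, ready and voltage-sense pins */
        state->detect = 1;
        state->ready  = 1;
        state->vs_3v  = 1;
}

static int my_board_configure_socket(struct soc_pcmcia_socket *skt,
                                     const socket_state_t *state)
{
        /* apply the requested Vcc/Vpp and control signals to the hardware */
        return 0;
}

static struct pcmcia_low_level my_board_pcmcia_ops = {
        .owner                  = THIS_MODULE,
        .first                  = 0,
        .nr                     = 1,
        .hw_init                = my_board_hw_init,
        .socket_state           = my_board_socket_state,
        .configure_socket       = my_board_configure_socket,
};
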
index 95100cf..0a6f8da 100644 (file)
@@ -52,4 +52,8 @@ extern int pxa2xx_ac97_hw_resume(void);
 extern int pxa2xx_ac97_hw_probe(struct platform_device *dev);
 extern void pxa2xx_ac97_hw_remove(struct platform_device *dev);
 
+/* modem registers, used by touchscreen driver */
+u32 pxa2xx_ac97_read_modr(void);
+u32 pxa2xx_ac97_read_misr(void);
+
 #endif
index bea654a..513e889 100644 (file)
@@ -15,10 +15,6 @@ TRACE_DEFINE_ENUM(NODE);
 TRACE_DEFINE_ENUM(DATA);
 TRACE_DEFINE_ENUM(META);
 TRACE_DEFINE_ENUM(META_FLUSH);
-TRACE_DEFINE_ENUM(INMEM);
-TRACE_DEFINE_ENUM(INMEM_DROP);
-TRACE_DEFINE_ENUM(INMEM_INVALIDATE);
-TRACE_DEFINE_ENUM(INMEM_REVOKE);
 TRACE_DEFINE_ENUM(IPU);
 TRACE_DEFINE_ENUM(OPU);
 TRACE_DEFINE_ENUM(HOT);
@@ -59,10 +55,6 @@ TRACE_DEFINE_ENUM(CP_RESIZE);
                { DATA,         "DATA" },                               \
                { META,         "META" },                               \
                { META_FLUSH,   "META_FLUSH" },                         \
-               { INMEM,        "INMEM" },                              \
-               { INMEM_DROP,   "INMEM_DROP" },                         \
-               { INMEM_INVALIDATE,     "INMEM_INVALIDATE" },           \
-               { INMEM_REVOKE, "INMEM_REVOKE" },                       \
                { IPU,          "IN-PLACE" },                           \
                { OPU,          "OUT-OF-PLACE" })
 
@@ -652,19 +644,22 @@ TRACE_EVENT(f2fs_background_gc,
 
 TRACE_EVENT(f2fs_gc_begin,
 
-       TP_PROTO(struct super_block *sb, bool sync, bool background,
+       TP_PROTO(struct super_block *sb, int gc_type, bool no_bg_gc,
+                       unsigned int nr_free_secs,
                        long long dirty_nodes, long long dirty_dents,
                        long long dirty_imeta, unsigned int free_sec,
                        unsigned int free_seg, int reserved_seg,
                        unsigned int prefree_seg),
 
-       TP_ARGS(sb, sync, background, dirty_nodes, dirty_dents, dirty_imeta,
+       TP_ARGS(sb, gc_type, no_bg_gc, nr_free_secs, dirty_nodes,
+               dirty_dents, dirty_imeta,
                free_sec, free_seg, reserved_seg, prefree_seg),
 
        TP_STRUCT__entry(
                __field(dev_t,          dev)
-               __field(bool,           sync)
-               __field(bool,           background)
+               __field(int,            gc_type)
+               __field(bool,           no_bg_gc)
+               __field(unsigned int,   nr_free_secs)
                __field(long long,      dirty_nodes)
                __field(long long,      dirty_dents)
                __field(long long,      dirty_imeta)
@@ -676,8 +671,9 @@ TRACE_EVENT(f2fs_gc_begin,
 
        TP_fast_assign(
                __entry->dev            = sb->s_dev;
-               __entry->sync           = sync;
-               __entry->background     = background;
+               __entry->gc_type        = gc_type;
+               __entry->no_bg_gc       = no_bg_gc;
+               __entry->nr_free_secs   = nr_free_secs;
                __entry->dirty_nodes    = dirty_nodes;
                __entry->dirty_dents    = dirty_dents;
                __entry->dirty_imeta    = dirty_imeta;
@@ -687,12 +683,13 @@ TRACE_EVENT(f2fs_gc_begin,
                __entry->prefree_seg    = prefree_seg;
        ),
 
-       TP_printk("dev = (%d,%d), sync = %d, background = %d, nodes = %lld, "
-               "dents = %lld, imeta = %lld, free_sec:%u, free_seg:%u, "
+       TP_printk("dev = (%d,%d), gc_type = %s, no_background_GC = %d, nr_free_secs = %u, "
+               "nodes = %lld, dents = %lld, imeta = %lld, free_sec:%u, free_seg:%u, "
                "rsv_seg:%d, prefree_seg:%u",
                show_dev(__entry->dev),
-               __entry->sync,
-               __entry->background,
+               show_gc_type(__entry->gc_type),
+               (__entry->gc_type == BG_GC) ? __entry->no_bg_gc : -1,
+               __entry->nr_free_secs,
                __entry->dirty_nodes,
                __entry->dirty_dents,
                __entry->dirty_imeta,
@@ -1285,20 +1282,6 @@ DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
        TP_ARGS(page, type)
 );
 
-DEFINE_EVENT(f2fs__page, f2fs_register_inmem_page,
-
-       TP_PROTO(struct page *page, int type),
-
-       TP_ARGS(page, type)
-);
-
-DEFINE_EVENT(f2fs__page, f2fs_commit_inmem_page,
-
-       TP_PROTO(struct page *page, int type),
-
-       TP_ARGS(page, type)
-);
-
 TRACE_EVENT(f2fs_filemap_fault,
 
        TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret),
@@ -2063,6 +2046,100 @@ TRACE_EVENT(f2fs_fiemap,
                __entry->ret)
 );
 
+DECLARE_EVENT_CLASS(f2fs__rw_start,
+
+       TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+                       pid_t pid, char *pathname, char *command),
+
+       TP_ARGS(inode, offset, bytes, pid, pathname, command),
+
+       TP_STRUCT__entry(
+               __string(pathbuf, pathname)
+               __field(loff_t, offset)
+               __field(int, bytes)
+               __field(loff_t, i_size)
+               __string(cmdline, command)
+               __field(pid_t, pid)
+               __field(ino_t, ino)
+       ),
+
+       TP_fast_assign(
+               /*
+                * Replace the spaces in filenames and cmdlines
+                * because this screws up the tooling that parses
+                * the traces.
+                */
+               __assign_str(pathbuf, pathname);
+               (void)strreplace(__get_str(pathbuf), ' ', '_');
+               __entry->offset = offset;
+               __entry->bytes = bytes;
+               __entry->i_size = i_size_read(inode);
+               __assign_str(cmdline, command);
+               (void)strreplace(__get_str(cmdline), ' ', '_');
+               __entry->pid = pid;
+               __entry->ino = inode->i_ino;
+       ),
+
+       TP_printk("entry_name %s, offset %llu, bytes %d, cmdline %s,"
+               " pid %d, i_size %llu, ino %lu",
+               __get_str(pathbuf), __entry->offset, __entry->bytes,
+               __get_str(cmdline), __entry->pid, __entry->i_size,
+               (unsigned long) __entry->ino)
+);
+
+DECLARE_EVENT_CLASS(f2fs__rw_end,
+
+       TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+
+       TP_ARGS(inode, offset, bytes),
+
+       TP_STRUCT__entry(
+               __field(ino_t,  ino)
+               __field(loff_t, offset)
+               __field(int,    bytes)
+       ),
+
+       TP_fast_assign(
+               __entry->ino            = inode->i_ino;
+               __entry->offset         = offset;
+               __entry->bytes          = bytes;
+       ),
+
+       TP_printk("ino %lu, offset %llu, bytes %d",
+               (unsigned long) __entry->ino,
+               __entry->offset, __entry->bytes)
+);
+
+DEFINE_EVENT(f2fs__rw_start, f2fs_dataread_start,
+
+       TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+               pid_t pid, char *pathname, char *command),
+
+       TP_ARGS(inode, offset, bytes, pid, pathname, command)
+);
+
+DEFINE_EVENT(f2fs__rw_end, f2fs_dataread_end,
+
+       TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+
+       TP_ARGS(inode, offset, bytes)
+);
+
+DEFINE_EVENT(f2fs__rw_start, f2fs_datawrite_start,
+
+       TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+               pid_t pid, char *pathname, char *command),
+
+       TP_ARGS(inode, offset, bytes, pid, pathname, command)
+);
+
+DEFINE_EVENT(f2fs__rw_end, f2fs_datawrite_end,
+
+       TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+
+       TP_ARGS(inode, offset, bytes)
+);
+
 #endif /* _TRACE_F2FS_H */
 
  /* This part must be outside protection */
diff --git a/include/trace/events/thermal_pressure.h b/include/trace/events/thermal_pressure.h
new file mode 100644 (file)
index 0000000..b686802
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thermal_pressure
+
+#if !defined(_TRACE_THERMAL_PRESSURE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_THERMAL_PRESSURE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(thermal_pressure_update,
+       TP_PROTO(int cpu, unsigned long thermal_pressure),
+       TP_ARGS(cpu, thermal_pressure),
+
+       TP_STRUCT__entry(
+               __field(unsigned long, thermal_pressure)
+               __field(int, cpu)
+       ),
+
+       TP_fast_assign(
+               __entry->thermal_pressure = thermal_pressure;
+               __entry->cpu = cpu;
+       ),
+
+       TP_printk("cpu=%d thermal_pressure=%lu", __entry->cpu, __entry->thermal_pressure)
+);
+#endif /* _TRACE_THERMAL_PRESSURE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index ecd0f5b..f13d37b 100644 (file)
 #define F_GETSIG       11      /* for sockets. */
 #endif
 
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG == 32 || defined(__KERNEL__)
 #ifndef F_GETLK64
 #define F_GETLK64      12      /*  using 'struct flock64' */
 #define F_SETLK64      13
 #define F_SETLKW64     14
 #endif
-#endif
+#endif /* __BITS_PER_LONG == 32 || defined(__KERNEL__) */
 
 #ifndef F_SETOWN_EX
 #define F_SETOWN_EX    15
@@ -192,25 +192,19 @@ struct f_owner_ex {
 
 #define F_LINUX_SPECIFIC_BASE  1024
 
-#ifndef HAVE_ARCH_STRUCT_FLOCK
-#ifndef __ARCH_FLOCK_PAD
-#define __ARCH_FLOCK_PAD
-#endif
-
 struct flock {
        short   l_type;
        short   l_whence;
        __kernel_off_t  l_start;
        __kernel_off_t  l_len;
        __kernel_pid_t  l_pid;
-       __ARCH_FLOCK_PAD
-};
+#ifdef __ARCH_FLOCK_EXTRA_SYSID
+       __ARCH_FLOCK_EXTRA_SYSID
 #endif
-
-#ifndef HAVE_ARCH_STRUCT_FLOCK64
-#ifndef __ARCH_FLOCK64_PAD
-#define __ARCH_FLOCK64_PAD
+#ifdef __ARCH_FLOCK_PAD
+       __ARCH_FLOCK_PAD
 #endif
+};
 
 struct flock64 {
        short  l_type;
@@ -218,8 +212,9 @@ struct flock64 {
        __kernel_loff_t l_start;
        __kernel_loff_t l_len;
        __kernel_pid_t  l_pid;
+#ifdef __ARCH_FLOCK64_PAD
        __ARCH_FLOCK64_PAD
-};
 #endif
+};
 
 #endif /* _ASM_GENERIC_FCNTL_H */
diff --git a/include/uapi/asm-generic/termbits-common.h b/include/uapi/asm-generic/termbits-common.h
new file mode 100644 (file)
index 0000000..4d084fe
--- /dev/null
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_GENERIC_TERMBITS_COMMON_H
+#define __ASM_GENERIC_TERMBITS_COMMON_H
+
+typedef unsigned char  cc_t;
+typedef unsigned int   speed_t;
+
+/* c_iflag bits */
+#define IGNBRK 0x001                   /* Ignore break condition */
+#define BRKINT 0x002                   /* Signal interrupt on break */
+#define IGNPAR 0x004                   /* Ignore characters with parity errors */
+#define PARMRK 0x008                   /* Mark parity and framing errors */
+#define INPCK  0x010                   /* Enable input parity check */
+#define ISTRIP 0x020                   /* Strip 8th bit off characters */
+#define INLCR  0x040                   /* Map NL to CR on input */
+#define IGNCR  0x080                   /* Ignore CR */
+#define ICRNL  0x100                   /* Map CR to NL on input */
+#define IXANY  0x800                   /* Any character will restart after stop */
+
+/* c_oflag bits */
+#define OPOST  0x01                    /* Perform output processing */
+#define OCRNL  0x08
+#define ONOCR  0x10
+#define ONLRET 0x20
+#define OFILL  0x40
+#define OFDEL  0x80
+
+/* c_cflag bit meaning */
+/* Common CBAUD rates */
+#define     B0         0x00000000      /* hang up */
+#define    B50         0x00000001
+#define    B75         0x00000002
+#define   B110         0x00000003
+#define   B134         0x00000004
+#define   B150         0x00000005
+#define   B200         0x00000006
+#define   B300         0x00000007
+#define   B600         0x00000008
+#define  B1200         0x00000009
+#define  B1800         0x0000000a
+#define  B2400         0x0000000b
+#define  B4800         0x0000000c
+#define  B9600         0x0000000d
+#define B19200         0x0000000e
+#define B38400         0x0000000f
+#define EXTA           B19200
+#define EXTB           B38400
+
+#define CMSPAR         0x40000000      /* mark or space (stick) parity */
+#define CRTSCTS                0x80000000      /* flow control */
+
+#define IBSHIFT                16              /* Shift from CBAUD to CIBAUD */
+
+/* tcflow() ACTION argument and TCXONC use these */
+#define TCOOFF         0               /* Suspend output */
+#define TCOON          1               /* Restart suspended output */
+#define TCIOFF         2               /* Send a STOP character */
+#define TCION          3               /* Send a START character */
+
+/* tcflush() QUEUE_SELECTOR argument and TCFLSH use these */
+#define TCIFLUSH       0               /* Discard data received but not yet read */
+#define TCOFLUSH       1               /* Discard data written but not yet sent */
+#define TCIOFLUSH      2               /* Discard all pending data */
+
+#endif /* __ASM_GENERIC_TERMBITS_COMMON_H */
index 2fbaf9a..890ef29 100644 (file)
@@ -2,10 +2,8 @@
 #ifndef __ASM_GENERIC_TERMBITS_H
 #define __ASM_GENERIC_TERMBITS_H
 
-#include <linux/posix_types.h>
+#include <asm-generic/termbits-common.h>
 
-typedef unsigned char  cc_t;
-typedef unsigned int   speed_t;
 typedef unsigned int   tcflag_t;
 
 #define NCCS 19
@@ -41,156 +39,107 @@ struct ktermios {
 };
 
 /* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
+#define VINTR           0
+#define VQUIT           1
+#define VERASE          2
+#define VKILL           3
+#define VEOF            4
+#define VTIME           5
+#define VMIN            6
+#define VSWTC           7
+#define VSTART          8
+#define VSTOP           9
+#define VSUSP          10
+#define VEOL           11
+#define VREPRINT       12
+#define VDISCARD       13
+#define VWERASE                14
+#define VLNEXT         15
+#define VEOL2          16
 
 /* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK  0000020
-#define ISTRIP 0000040
-#define INLCR  0000100
-#define IGNCR  0000200
-#define ICRNL  0000400
-#define IUCLC  0001000
-#define IXON   0002000
-#define IXANY  0004000
-#define IXOFF  0010000
-#define IMAXBEL        0020000
-#define IUTF8  0040000
+#define IUCLC  0x0200
+#define IXON   0x0400
+#define IXOFF  0x1000
+#define IMAXBEL        0x2000
+#define IUTF8  0x4000
 
 /* c_oflag bits */
-#define OPOST  0000001
-#define OLCUC  0000002
-#define ONLCR  0000004
-#define OCRNL  0000010
-#define ONOCR  0000020
-#define ONLRET 0000040
-#define OFILL  0000100
-#define OFDEL  0000200
-#define NLDLY  0000400
-#define   NL0  0000000
-#define   NL1  0000400
-#define CRDLY  0003000
-#define   CR0  0000000
-#define   CR1  0001000
-#define   CR2  0002000
-#define   CR3  0003000
-#define TABDLY 0014000
-#define   TAB0 0000000
-#define   TAB1 0004000
-#define   TAB2 0010000
-#define   TAB3 0014000
-#define   XTABS        0014000
-#define BSDLY  0020000
-#define   BS0  0000000
-#define   BS1  0020000
-#define VTDLY  0040000
-#define   VT0  0000000
-#define   VT1  0040000
-#define FFDLY  0100000
-#define   FF0  0000000
-#define   FF1  0100000
+#define OLCUC  0x00002
+#define ONLCR  0x00004
+#define NLDLY  0x00100
+#define   NL0  0x00000
+#define   NL1  0x00100
+#define CRDLY  0x00600
+#define   CR0  0x00000
+#define   CR1  0x00200
+#define   CR2  0x00400
+#define   CR3  0x00600
+#define TABDLY 0x01800
+#define   TAB0 0x00000
+#define   TAB1 0x00800
+#define   TAB2 0x01000
+#define   TAB3 0x01800
+#define   XTABS        0x01800
+#define BSDLY  0x02000
+#define   BS0  0x00000
+#define   BS1  0x02000
+#define VTDLY  0x04000
+#define   VT0  0x00000
+#define   VT1  0x04000
+#define FFDLY  0x08000
+#define   FF0  0x00000
+#define   FF1  0x08000
 
 /* c_cflag bit meaning */
-#define CBAUD  0010017
-#define  B0    0000000         /* hang up */
-#define  B50   0000001
-#define  B75   0000002
-#define  B110  0000003
-#define  B134  0000004
-#define  B150  0000005
-#define  B200  0000006
-#define  B300  0000007
-#define  B600  0000010
-#define  B1200 0000011
-#define  B1800 0000012
-#define  B2400 0000013
-#define  B4800 0000014
-#define  B9600 0000015
-#define  B19200        0000016
-#define  B38400        0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE  0000060
-#define   CS5  0000000
-#define   CS6  0000020
-#define   CS7  0000040
-#define   CS8  0000060
-#define CSTOPB 0000100
-#define CREAD  0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL  0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define    BOTHER 0010000
-#define    B57600 0010001
-#define   B115200 0010002
-#define   B230400 0010003
-#define   B460800 0010004
-#define   B500000 0010005
-#define   B576000 0010006
-#define   B921600 0010007
-#define  B1000000 0010010
-#define  B1152000 0010011
-#define  B1500000 0010012
-#define  B2000000 0010013
-#define  B2500000 0010014
-#define  B3000000 0010015
-#define  B3500000 0010016
-#define  B4000000 0010017
-#define CIBAUD   002003600000  /* input baud rate */
-#define CMSPAR   010000000000  /* mark or space (stick) parity */
-#define CRTSCTS          020000000000  /* flow control */
-
-#define IBSHIFT          16            /* Shift from CBAUD to CIBAUD */
+#define CBAUD          0x0000100f
+#define CSIZE          0x00000030
+#define   CS5          0x00000000
+#define   CS6          0x00000010
+#define   CS7          0x00000020
+#define   CS8          0x00000030
+#define CSTOPB         0x00000040
+#define CREAD          0x00000080
+#define PARENB         0x00000100
+#define PARODD         0x00000200
+#define HUPCL          0x00000400
+#define CLOCAL         0x00000800
+#define CBAUDEX                0x00001000
+#define BOTHER         0x00001000
+#define     B57600     0x00001001
+#define    B115200     0x00001002
+#define    B230400     0x00001003
+#define    B460800     0x00001004
+#define    B500000     0x00001005
+#define    B576000     0x00001006
+#define    B921600     0x00001007
+#define   B1000000     0x00001008
+#define   B1152000     0x00001009
+#define   B1500000     0x0000100a
+#define   B2000000     0x0000100b
+#define   B2500000     0x0000100c
+#define   B3000000     0x0000100d
+#define   B3500000     0x0000100e
+#define   B4000000     0x0000100f
+#define CIBAUD         0x100f0000      /* input baud rate */
 
 /* c_lflag bits */
-#define ISIG   0000001
-#define ICANON 0000002
-#define XCASE  0000004
-#define ECHO   0000010
-#define ECHOE  0000020
-#define ECHOK  0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL        0001000
-#define ECHOPRT        0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-#define EXTPROC        0200000
-
-/* tcflow() and TCXONC use these */
-#define        TCOOFF          0
-#define        TCOON           1
-#define        TCIOFF          2
-#define        TCION           3
-
-/* tcflush() and TCFLSH use these */
-#define        TCIFLUSH        0
-#define        TCOFLUSH        1
-#define        TCIOFLUSH       2
+#define ISIG   0x00001
+#define ICANON 0x00002
+#define XCASE  0x00004
+#define ECHO   0x00008
+#define ECHOE  0x00010
+#define ECHOK  0x00020
+#define ECHONL 0x00040
+#define NOFLSH 0x00080
+#define TOSTOP 0x00100
+#define ECHOCTL        0x00200
+#define ECHOPRT        0x00400
+#define ECHOKE 0x00800
+#define FLUSHO 0x01000
+#define PENDIN 0x04000
+#define IEXTEN 0x08000
+#define EXTPROC        0x10000
 
 /* tcsetattr uses these */
 #define        TCSANOW         0
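The termbits conversion above is purely notational: the new hexadecimal masks equal the old octal ones. A quick illustrative check (not part of the patch), assuming a C11 toolchain with static_assert from <assert.h>:

#include <assert.h>

/* Illustrative only: verify a few of the octal-to-hex conversions. */
static_assert(0010017 == 0x100f,  "CBAUD");    /* 4111  */
static_assert(0014000 == 0x1800,  "TABDLY");   /* 6144  */
static_assert(0020000 == 0x2000,  "IMAXBEL");  /* 8192  */
static_assert(0200000 == 0x10000, "EXTPROC");  /* 65536 */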
index 1c48b0a..45fa180 100644 (file)
@@ -383,7 +383,7 @@ __SYSCALL(__NR_syslog, sys_syslog)
 
 /* kernel/ptrace.c */
 #define __NR_ptrace 117
-__SYSCALL(__NR_ptrace, sys_ptrace)
+__SC_COMP(__NR_ptrace, sys_ptrace, compat_sys_ptrace)
 
 /* kernel/sched/core.c */
 #define __NR_sched_setparam 118
@@ -779,7 +779,7 @@ __SYSCALL(__NR_rseq, sys_rseq)
 #define __NR_kexec_file_load 294
 __SYSCALL(__NR_kexec_file_load,     sys_kexec_file_load)
 /* 295 through 402 are unassigned to sync up with generic numbers, don't use */
-#if __BITS_PER_LONG == 32
+#if defined(__SYSCALL_COMPAT) || __BITS_PER_LONG == 32
 #define __NR_clock_gettime64 403
 __SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
 #define __NR_clock_settime64 404
index 9a1d210..18d3246 100644 (file)
@@ -140,6 +140,10 @@ extern "C" {
  * not require GTT memory accounting
  */
 #define AMDGPU_GEM_CREATE_PREEMPTIBLE          (1 << 11)
+/* Flag that BO can be discarded under memory pressure without keeping the
+ * content.
+ */
+#define AMDGPU_GEM_CREATE_DISCARDABLE          (1 << 12)
 
 struct drm_amdgpu_gem_create_in  {
        /** the requested memory size */
@@ -529,6 +533,8 @@ struct drm_amdgpu_gem_op {
 #define AMDGPU_VM_MTYPE_UC             (4 << 5)
 /* Use Read Write MTYPE instead of default MTYPE */
 #define AMDGPU_VM_MTYPE_RW             (5 << 5)
+/* don't allocate MALL */
+#define AMDGPU_VM_PAGE_NOALLOC         (1 << 9)
 
 struct drm_amdgpu_gem_va {
        /** GEM object handle */
@@ -988,6 +994,8 @@ struct drm_amdgpu_info_vbios {
 #define AMDGPU_VRAM_TYPE_DDR4  8
 #define AMDGPU_VRAM_TYPE_GDDR6 9
 #define AMDGPU_VRAM_TYPE_DDR5  10
+#define AMDGPU_VRAM_TYPE_LPDDR4 11
+#define AMDGPU_VRAM_TYPE_LPDDR5 12
 
 struct drm_amdgpu_info_device {
        /** PCI Device ID */
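The new AMDGPU_GEM_CREATE_DISCARDABLE flag above is passed through the existing GEM create ioctl's domain_flags. A hedged userspace sketch, assuming the amdgpu_drm.h uAPI header and an already-open render-node fd; the sizes are illustrative, and whether the discard hint is honored depends on the kernel and ASIC:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Create a 1 MiB VRAM BO that the kernel may discard under memory pressure. */
static int create_discardable_bo(int drm_fd, uint32_t *handle)
{
        union drm_amdgpu_gem_create args;

        memset(&args, 0, sizeof(args));
        args.in.bo_size = 1 << 20;
        args.in.alignment = 1 << 12;
        args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
        args.in.domain_flags = AMDGPU_GEM_CREATE_DISCARDABLE;

        if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args) < 0)
                return -1;

        *handle = args.out.handle;      /* GEM handle of the new BO */
        return 0;
}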
index 11157fa..986333c 100644 (file)
@@ -236,6 +236,21 @@ struct binder_frozen_status_info {
        __u32            async_recv;
 };
 
+/* struct binder_extended_error - extended error information
+ * @id:                identifier for the failed operation
+ * @command:   command as defined by binder_driver_return_protocol
+ * @param:     parameter holding a negative errno value
+ *
+ * Used with BINDER_GET_EXTENDED_ERROR. This extends the error information
+ * returned by the driver upon a failed operation. Userspace can pull this
+ * data to properly handle specific error scenarios.
+ */
+struct binder_extended_error {
+       __u32   id;
+       __u32   command;
+       __s32   param;
+};
+
 #define BINDER_WRITE_READ              _IOWR('b', 1, struct binder_write_read)
 #define BINDER_SET_IDLE_TIMEOUT                _IOW('b', 3, __s64)
 #define BINDER_SET_MAX_THREADS         _IOW('b', 5, __u32)
@@ -249,6 +264,7 @@ struct binder_frozen_status_info {
 #define BINDER_FREEZE                  _IOW('b', 14, struct binder_freeze_info)
 #define BINDER_GET_FROZEN_INFO         _IOWR('b', 15, struct binder_frozen_status_info)
 #define BINDER_ENABLE_ONEWAY_SPAM_DETECTION    _IOW('b', 16, __u32)
+#define BINDER_GET_EXTENDED_ERROR      _IOWR('b', 17, struct binder_extended_error)
 
 /*
  * NOTE: Two special error codes you should check for when calling
@@ -289,7 +305,7 @@ struct binder_transaction_data {
        /* General information about the transaction. */
        __u32           flags;
        __kernel_pid_t  sender_pid;
-       __kernel_uid_t  sender_euid;
+       __kernel_uid32_t        sender_euid;
        binder_size_t   data_size;      /* number of bytes of data */
        binder_size_t   offsets_size;   /* number of bytes of offsets */
 
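The BINDER_GET_EXTENDED_ERROR ioctl added above is issued on the binder fd after a failed operation. A minimal hedged sketch; the surrounding fd handling is illustrative:

#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Fetch details about the last failed binder operation on this fd. */
static int get_binder_extended_error(int binder_fd,
                                     struct binder_extended_error *ee)
{
        if (ioctl(binder_fd, BINDER_GET_EXTENDED_ERROR, ee) < 0)
                return -1;

        /* ee->id names the failed operation, ee->command is the
         * binder_driver_return_protocol command, and ee->param carries a
         * negative errno describing the failure.
         */
        return 0;
}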
index 8eda133..7c1dc81 100644 (file)
@@ -439,6 +439,8 @@ enum {
 #define AUDIT_ARCH_UNICORE     (EM_UNICORE|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_X86_64      (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_XTENSA      (EM_XTENSA)
+#define AUDIT_ARCH_LOONGARCH32 (EM_LOONGARCH|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_LOONGARCH64 (EM_LOONGARCH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 
 #define AUDIT_PERM_EXEC                1
 #define AUDIT_PERM_WRITE       2
index f47e853..ef38c2b 100644 (file)
@@ -51,6 +51,7 @@
 #define EM_RISCV       243     /* RISC-V */
 #define EM_BPF         247     /* Linux BPF - in-kernel virtual machine */
 #define EM_CSKY                252     /* C-SKY */
+#define EM_LOONGARCH   258     /* LoongArch */
 #define EM_FRV         0x5441  /* Fujitsu FR-V */
 
 /*
index c4abd09..2b9f5e9 100644 (file)
@@ -438,6 +438,11 @@ typedef struct elf64_shdr {
 #define NT_MIPS_DSP    0x800           /* MIPS DSP ASE registers */
 #define NT_MIPS_FP_MODE        0x801           /* MIPS floating-point mode */
 #define NT_MIPS_MSA    0x802           /* MIPS SIMD registers */
+#define NT_LOONGARCH_CPUCFG    0xa00   /* LoongArch CPU config registers */
+#define NT_LOONGARCH_CSR       0xa01   /* LoongArch control and status registers */
+#define NT_LOONGARCH_LSX       0xa02   /* LoongArch Loongson SIMD Extension registers */
+#define NT_LOONGARCH_LASX      0xa03   /* LoongArch Loongson Advanced SIMD Extension registers */
+#define NT_LOONGARCH_LBT       0xa04   /* LoongArch Loongson Binary Translation registers */
 
 /* Note types with note name "GNU" */
 #define NT_GNU_PROPERTY_TYPE_0 5
index eaaea3d..cb9966d 100644 (file)
@@ -66,6 +66,8 @@ struct gpiochip_info {
  * @GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN: line has pull-down bias enabled
  * @GPIO_V2_LINE_FLAG_BIAS_DISABLED: line has bias disabled
  * @GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME: line events contain REALTIME timestamps
+ * @GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE: line events contain timestamps from
+ * the hardware timestamp engine
  */
 enum gpio_v2_line_flag {
        GPIO_V2_LINE_FLAG_USED                  = _BITULL(0),
@@ -80,6 +82,7 @@ enum gpio_v2_line_flag {
        GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN        = _BITULL(9),
        GPIO_V2_LINE_FLAG_BIAS_DISABLED         = _BITULL(10),
        GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME  = _BITULL(11),
+       GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE       = _BITULL(12),
 };
 
 /**
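The new HTE event clock is selected per line request through the v2 GPIO character-device uAPI. A minimal sketch, assuming a chip at /dev/gpiochip0 whose driver has a hardware timestamp engine provider; the path, line offset and consumer label are illustrative:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>

/* Request line 1 of gpiochip0 with edge events timestamped by the HTE. */
static int request_hte_line(void)
{
        struct gpio_v2_line_request req;
        int chip_fd = open("/dev/gpiochip0", O_RDONLY); /* illustrative chip */

        if (chip_fd < 0)
                return -1;

        memset(&req, 0, sizeof(req));
        req.offsets[0] = 1;                             /* illustrative offset */
        req.num_lines = 1;
        strcpy(req.consumer, "hte-demo");
        req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
                           GPIO_V2_LINE_FLAG_EDGE_RISING |
                           GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;

        if (ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req) < 0) {
                close(chip_fd);
                return -1;
        }

        close(chip_fd);         /* the returned line fd stays valid */
        return req.fd;          /* gpio_v2_line_event reads carry HTE timestamps */
}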
index 53e7dae..776e027 100644 (file)
@@ -47,6 +47,7 @@ struct io_uring_sqe {
                __u32           unlink_flags;
                __u32           hardlink_flags;
                __u32           xattr_flags;
+               __u32           close_flags;
        };
        __u64   user_data;      /* data to be passed back at completion time */
        /* pack this to avoid bogus arm OABI complaints */
@@ -259,6 +260,11 @@ enum io_uring_op {
 #define IORING_ACCEPT_MULTISHOT        (1U << 0)
 
 /*
+ * close flags, stored in sqe->close_flags
+ */
+#define IORING_CLOSE_FD_AND_FILE_SLOT  (1U << 0)
+
+/*
  * IO completion data structure (Completion Queue Entry)
  */
 struct io_uring_cqe {
index 549ddea..03cdbe7 100644 (file)
@@ -194,7 +194,7 @@ enum {
        DEVCONF_IOAM6_ID,
        DEVCONF_IOAM6_ID_WIDE,
        DEVCONF_NDISC_EVICT_NOCARRIER,
-       DEVCONF_ACCEPT_UNSOLICITED_NA,
+       DEVCONF_ACCEPT_UNTRACKED_NA,
        DEVCONF_MAX
 };
 
index fb7e2ef..981016e 100644 (file)
@@ -43,6 +43,7 @@
 #define KEXEC_ARCH_MIPS    ( 8 << 16)
 #define KEXEC_ARCH_AARCH64 (183 << 16)
 #define KEXEC_ARCH_RISCV   (243 << 16)
+#define KEXEC_ARCH_LOONGARCH   (258 << 16)
 
 /* The artificial cap on the number of segments passed to kexec_load. */
 #define KEXEC_SEGMENT_MAX 16
index 51d6bb2..d3fcd3b 100644 (file)
@@ -31,7 +31,7 @@ struct __kernel_sockaddr_storage {
 
 #define SOCK_BUF_LOCK_MASK (SOCK_SNDBUF_LOCK | SOCK_RCVBUF_LOCK)
 
-#define SOCK_TXREHASH_DEFAULT  ((u8)-1)
+#define SOCK_TXREHASH_DEFAULT  255
 #define SOCK_TXREHASH_DISABLED 0
 #define SOCK_TXREHASH_ENABLED  1
 
index 7361541..a7f5b11 100644 (file)
@@ -34,7 +34,7 @@
  */
 
 
-#define TASKSTATS_VERSION      12
+#define TASKSTATS_VERSION      13
 #define TS_COMM_LEN            32      /* should be >= TASK_COMM_LEN
                                         * in linux/sched.h */
 
@@ -194,6 +194,10 @@ struct taskstats {
        __u64   ac_exe_dev;     /* program binary device ID */
        __u64   ac_exe_inode;   /* program binary inode number */
        /* v12 end */
+
+       /* v13: Delay waiting for write-protect copy */
+       __u64    wpcopy_count;
+       __u64    wpcopy_delay_total;
 };
 
 
index 1061d8d..25c55ca 100644 (file)
@@ -18,6 +18,7 @@ enum vdpa_command {
        VDPA_CMD_DEV_DEL,
        VDPA_CMD_DEV_GET,               /* can dump */
        VDPA_CMD_DEV_CONFIG_GET,        /* can dump */
+       VDPA_CMD_DEV_VSTATS_GET,
 };
 
 enum vdpa_attr {
@@ -46,6 +47,11 @@ enum vdpa_attr {
        VDPA_ATTR_DEV_NEGOTIATED_FEATURES,      /* u64 */
        VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,          /* u32 */
        VDPA_ATTR_DEV_SUPPORTED_FEATURES,       /* u64 */
+
+       VDPA_ATTR_DEV_QUEUE_INDEX,              /* u32 */
+       VDPA_ATTR_DEV_VENDOR_ATTR_NAME,         /* string */
+       VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,        /* u64 */
+
        /* new attributes must be added above here */
        VDPA_ATTR_MAX,
 };
index fea8606..733a1cd 100644 (file)
@@ -643,7 +643,7 @@ enum {
 };
 
 /**
- * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IORW(VFIO_TYPE, VFIO_BASE + 12,
+ * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
  *                                           struct vfio_pci_hot_reset_info)
  *
  * Return: 0 on success, -errno on failure:
@@ -770,7 +770,7 @@ struct vfio_device_ioeventfd {
 #define VFIO_DEVICE_IOEVENTFD          _IO(VFIO_TYPE, VFIO_BASE + 16)
 
 /**
- * VFIO_DEVICE_FEATURE - _IORW(VFIO_TYPE, VFIO_BASE + 17,
+ * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
  *                            struct vfio_device_feature)
  *
  * Get, set, or probe feature data of the device.  The feature is selected
index 5d99e7c..cab645d 100644 (file)
 
 /* Set or get vhost backend capability */
 
-/* Use message type V2 */
-#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
-/* IOTLB can accept batching hints */
-#define VHOST_BACKEND_F_IOTLB_BATCH  0x2
-
 #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
 #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
 
 /* Get the valid iova range */
 #define VHOST_VDPA_GET_IOVA_RANGE      _IOR(VHOST_VIRTIO, 0x78, \
                                             struct vhost_vdpa_iova_range)
-
 /* Get the config size */
 #define VHOST_VDPA_GET_CONFIG_SIZE     _IOR(VHOST_VIRTIO, 0x79, __u32)
 
 /* Get the count of all virtqueues */
 #define VHOST_VDPA_GET_VQS_COUNT       _IOR(VHOST_VIRTIO, 0x80, __u32)
 
+/* Get the number of virtqueue groups. */
+#define VHOST_VDPA_GET_GROUP_NUM       _IOR(VHOST_VIRTIO, 0x81, __u32)
+
+/* Get the number of address spaces. */
+#define VHOST_VDPA_GET_AS_NUM          _IOR(VHOST_VIRTIO, 0x7A, unsigned int)
+
+/* Get the group for a virtqueue: read index, write group in num.
+ * The virtqueue index is stored in the index field of
+ * vhost_vring_state. The group for this specific virtqueue is
+ * returned via the num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_GET_VRING_GROUP     _IOWR(VHOST_VIRTIO, 0x7B,       \
+                                             struct vhost_vring_state)
+/* Set the ASID for a virtqueue group. The group index is stored in
+ * the index field of vhost_vring_state, and the ASID associated with
+ * this group is stored in the num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_SET_GROUP_ASID      _IOW(VHOST_VIRTIO, 0x7C, \
+                                            struct vhost_vring_state)
+
 #endif
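Putting the two new ioctls together, a userspace vDPA backend would first look up the group owning a virtqueue and then bind that group to an address space. A hedged sketch; the fd, virtqueue index and ASID values are illustrative:

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Move the group that contains virtqueue 'vq' into address space 'asid'. */
static int assign_vq_group_asid(int vdpa_fd, unsigned int vq, unsigned int asid)
{
        struct vhost_vring_state state;

        /* Look up which group virtqueue 'vq' belongs to. */
        state.index = vq;
        if (ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_GROUP, &state) < 0)
                return -1;

        /* state.num now holds the group; bind that group to the ASID. */
        state.index = state.num;
        state.num = asid;
        return ioctl(vdpa_fd, VHOST_VDPA_SET_GROUP_ASID, &state);
}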
index f7f6a3a..634cee4 100644 (file)
@@ -87,7 +87,7 @@ struct vhost_msg {
 
 struct vhost_msg_v2 {
        __u32 type;
-       __u32 reserved;
+       __u32 asid;
        union {
                struct vhost_iotlb_msg iotlb;
                __u8 padding[64];
@@ -153,4 +153,13 @@ struct vhost_vdpa_iova_range {
 /* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
 #define VHOST_NET_F_VIRTIO_NET_HDR 27
 
+/* Use message type V2 */
+#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+/* IOTLB can accept batching hints */
+#define VHOST_BACKEND_F_IOTLB_BATCH  0x2
+/* IOTLB can accept address space identifier through V2 type of IOTLB
+ * message
+ */
+#define VHOST_BACKEND_F_IOTLB_ASID  0x3
+
 #endif
index 1d6b4f0..52540d5 100644 (file)
@@ -348,33 +348,41 @@ enum hl_server_type {
  *                            The address which accessing it caused the razwi.
  *                            Razwi initiator.
  *                            Razwi cause, was it a page fault or MMU access error.
+ * HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES - Retrieve valid page sizes for device memory allocation
+ * HL_INFO_REGISTER_EVENTFD   - Register eventfd for event notifications.
+ * HL_INFO_UNREGISTER_EVENTFD - Unregister eventfd
+ * HL_INFO_GET_EVENTS         - Retrieve the last occurred events
  */
-#define HL_INFO_HW_IP_INFO             0
-#define HL_INFO_HW_EVENTS              1
-#define HL_INFO_DRAM_USAGE             2
-#define HL_INFO_HW_IDLE                        3
-#define HL_INFO_DEVICE_STATUS          4
-#define HL_INFO_DEVICE_UTILIZATION     6
-#define HL_INFO_HW_EVENTS_AGGREGATE    7
-#define HL_INFO_CLK_RATE               8
-#define HL_INFO_RESET_COUNT            9
-#define HL_INFO_TIME_SYNC              10
-#define HL_INFO_CS_COUNTERS            11
-#define HL_INFO_PCI_COUNTERS           12
-#define HL_INFO_CLK_THROTTLE_REASON    13
-#define HL_INFO_SYNC_MANAGER           14
-#define HL_INFO_TOTAL_ENERGY           15
-#define HL_INFO_PLL_FREQUENCY          16
-#define HL_INFO_POWER                  17
-#define HL_INFO_OPEN_STATS             18
-#define HL_INFO_DRAM_REPLACED_ROWS     21
-#define HL_INFO_DRAM_PENDING_ROWS      22
-#define HL_INFO_LAST_ERR_OPEN_DEV_TIME 23
-#define HL_INFO_CS_TIMEOUT_EVENT       24
-#define HL_INFO_RAZWI_EVENT            25
-
-#define HL_INFO_VERSION_MAX_LEN                128
-#define HL_INFO_CARD_NAME_MAX_LEN      16
+#define HL_INFO_HW_IP_INFO                     0
+#define HL_INFO_HW_EVENTS                      1
+#define HL_INFO_DRAM_USAGE                     2
+#define HL_INFO_HW_IDLE                                3
+#define HL_INFO_DEVICE_STATUS                  4
+#define HL_INFO_DEVICE_UTILIZATION             6
+#define HL_INFO_HW_EVENTS_AGGREGATE            7
+#define HL_INFO_CLK_RATE                       8
+#define HL_INFO_RESET_COUNT                    9
+#define HL_INFO_TIME_SYNC                      10
+#define HL_INFO_CS_COUNTERS                    11
+#define HL_INFO_PCI_COUNTERS                   12
+#define HL_INFO_CLK_THROTTLE_REASON            13
+#define HL_INFO_SYNC_MANAGER                   14
+#define HL_INFO_TOTAL_ENERGY                   15
+#define HL_INFO_PLL_FREQUENCY                  16
+#define HL_INFO_POWER                          17
+#define HL_INFO_OPEN_STATS                     18
+#define HL_INFO_DRAM_REPLACED_ROWS             21
+#define HL_INFO_DRAM_PENDING_ROWS              22
+#define HL_INFO_LAST_ERR_OPEN_DEV_TIME         23
+#define HL_INFO_CS_TIMEOUT_EVENT               24
+#define HL_INFO_RAZWI_EVENT                    25
+#define HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES       26
+#define HL_INFO_REGISTER_EVENTFD               28
+#define HL_INFO_UNREGISTER_EVENTFD             29
+#define HL_INFO_GET_EVENTS                     30
+
+#define HL_INFO_VERSION_MAX_LEN                        128
+#define HL_INFO_CARD_NAME_MAX_LEN              16
 
 /**
  * struct hl_info_hw_ip_info - hardware information on various IPs in the ASIC
@@ -409,6 +417,7 @@ enum hl_server_type {
  * @dram_page_size: The DRAM physical page size.
  * @number_of_user_interrupts: The number of interrupts that are available to the userspace
  *                             application to use. Relevant for Gaudi2 and later.
+ * @device_mem_alloc_default_page_size: default page size used in device memory allocation.
  */
 struct hl_info_hw_ip_info {
        __u64 sram_base_address;
@@ -436,6 +445,8 @@ struct hl_info_hw_ip_info {
        __u32 reserved3;
        __u16 number_of_user_interrupts;
        __u16 pad2;
+       __u64 reserved4;
+       __u64 device_mem_alloc_default_page_size;
 };
 
 struct hl_info_dram_usage {
@@ -538,10 +549,15 @@ struct hl_pll_frequency_info {
  * struct hl_open_stats_info - device open statistics information
  * @open_counter: ever growing counter, increased on each successful dev open
  * @last_open_period_ms: duration (ms) device was open last time
+ * @is_compute_ctx_active: Whether there is an active compute context executing
+ * @compute_ctx_in_release: true if the current compute context is being released
  */
 struct hl_open_stats_info {
        __u64 open_counter;
        __u64 last_open_period_ms;
+       __u8 is_compute_ctx_active;
+       __u8 compute_ctx_in_release;
+       __u8 pad[6];
 };
 
 /**
@@ -640,6 +656,15 @@ struct hl_info_razwi_event {
        __u8 pad[2];
 };
 
+/**
+ * struct hl_info_dev_memalloc_page_sizes - valid page sizes for device memory allocation.
+ * @page_order_bitmask: bitmap in which a set bit represents the order of the supported page size
+ *                      (e.g. 0x2100000 means that 1MB and 32MB pages are supported).
+ */
+struct hl_info_dev_memalloc_page_sizes {
+       __u64 page_order_bitmask;
+};
+
 enum gaudi_dcores {
        HL_GAUDI_WS_DCORE,
        HL_GAUDI_WN_DCORE,
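As a concrete illustration of the page_order_bitmask encoding described above (not driver code): each set bit n stands for a supported page size of 2^n bytes, so the example value 0x2100000 (bits 20 and 25) decodes to 1 MB and 32 MB:

#include <stdint.h>
#include <stdio.h>

/* Print every page size encoded in a page_order_bitmask value. */
static void print_alloc_page_sizes(uint64_t page_order_bitmask)
{
        for (unsigned int order = 0; order < 64; order++)
                if (page_order_bitmask & (1ULL << order))
                        printf("supported page size: %llu bytes\n",
                               (unsigned long long)(1ULL << order));
}

/* print_alloc_page_sizes(0x2100000) prints 1048576 and 33554432. */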
@@ -660,6 +685,7 @@ enum gaudi_dcores {
  * @period_ms: Period value, in milliseconds, for utilization rate in range 100ms - 1000ms in 100 ms
  *             resolution. Currently not in use.
  * @pll_index: Index as defined in hl_<asic type>_pll_index enumeration.
+ * @eventfd: event file descriptor for event notifications.
  * @pad: Padding to 64 bit.
  */
 struct hl_info_args {
@@ -672,6 +698,7 @@ struct hl_info_args {
                __u32 ctx_id;
                __u32 period_ms;
                __u32 pll_index;
+               __u32 eventfd;
        };
 
        __u32 pad;
@@ -1115,6 +1142,7 @@ union hl_wait_cs_args {
 #define HL_MEM_SHARED          0x2
 #define HL_MEM_USERPTR         0x4
 #define HL_MEM_FORCE_HINT      0x8
+#define HL_MEM_PREFETCH                0x40
 
 /**
  * structure hl_mem_in - structure that handles input args for memory IOCTL
@@ -1371,6 +1399,13 @@ struct hl_debug_args {
 };
 
 /*
+ * Notifier event values - for the notification mechanism and the HL_INFO_GET_EVENTS command
+ *
+ * HL_NOTIFIER_EVENT_TPC_ASSERT - Indicates TPC assert event
+ */
+#define HL_NOTIFIER_EVENT_TPC_ASSERT  (1 << 0)
+
+/*
  * Various information operations such as:
  * - H/W IP information
  * - Current dram usage
similarity index 100%
rename from drivers/scsi/ufs/ufs.h
rename to include/ufs/ufs.h
similarity index 99%
rename from drivers/scsi/ufs/ufshcd.h
rename to include/ufs/ufshcd.h
index 2b0f344..a922714 100644 (file)
 #include <linux/devfreq.h>
 #include <linux/pm_runtime.h>
 #include <scsi/scsi_device.h>
-#include "unipro.h"
-#include "ufs.h"
-#include "ufs_quirks.h"
-#include "ufshci.h"
+#include <ufs/unipro.h>
+#include <ufs/ufs.h>
+#include <ufs/ufs_quirks.h>
+#include <ufs/ufshci.h>
 
 #define UFSHCD "ufshcd"
 
index 005eae1..72f94cc 100644 (file)
 #define WAIT_DMA_GUI_IDLE                         (1 << 9)
 #define WAIT_2D_IDLECLEAN                         (1 << 16)
 
-/* SURFACE_CNTL bit consants */
+/* SURFACE_CNTL bit constants */
 #define SURF_TRANSLATION_DIS                      (1 << 8)
 #define NONSURF_AP0_SWP_16BPP                     (1 << 20)
 #define NONSURF_AP0_SWP_32BPP                     (1 << 21)
index 7e199c6..e5c84ff 100644 (file)
@@ -109,9 +109,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
        return __set_phys_to_machine(pfn, mfn);
 }
 
-#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
 bool xen_arch_need_swiotlb(struct device *dev,
                           phys_addr_t phys,
                           dma_addr_t dev_addr);
index 7d0f2f0..527c990 100644 (file)
@@ -101,10 +101,10 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref);
  * Eventually end access through the given grant reference, and once that
  * access has been ended, free the given page too.  Access will be ended
  * immediately iff the grant entry is not in use, otherwise it will happen
- * some time later.  page may be 0, in which case no freeing will occur.
+ * some time later.  page may be NULL, in which case no freeing will occur.
  * Note that the granted page might still be accessed (read or write) by the
  * other side after gnttab_end_foreign_access() returns, so even if page was
- * specified as 0 it is not allowed to just reuse the page for other
+ * specified as NULL it is not allowed to just reuse the page for other
  * purposes immediately. gnttab_end_foreign_access() will take an additional
  * reference to the granted page in this case, which is dropped only after
  * the grant is no longer in use.
@@ -112,7 +112,7 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref);
  * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
  * via free_pages_exact()) in order to avoid high order pages.
  */
-void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page);
+void gnttab_end_foreign_access(grant_ref_t ref, struct page *page);
 
 /*
  * End access through the given grant reference, iff the grant entry is
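With the signature change above, callers now hand the helper a struct page * (or NULL) instead of an unsigned long. A hedged sketch of the two call shapes; the wrapper and its arguments are illustrative:

#include <linux/mm.h>
#include <xen/grant_table.h>

/* Illustrative wrapper showing both ways to call the updated helper. */
static void demo_end_foreign_access(grant_ref_t ref, void *vaddr, bool free_it)
{
        /* Pass the backing page so it is freed once the grant is no longer
         * in use, or NULL (formerly 0) to end access without freeing.
         */
        gnttab_end_foreign_access(ref, free_it ? virt_to_page(vaddr) : NULL);
}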
index 251aac1..c984afc 100644 (file)
@@ -378,6 +378,10 @@ config SYSVIPC_SYSCTL
        depends on SYSCTL
        default y
 
+config SYSVIPC_COMPAT
+       def_bool y
+       depends on COMPAT && SYSVIPC
+
 config POSIX_MQUEUE
        bool "POSIX Message Queues"
        depends on NET
index dc84cf7..18229cf 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/namei.h>
 #include <linux/init_syscalls.h>
+#include <linux/task_work.h>
 #include <linux/umh.h>
 
 static __initdata bool csum_present;
@@ -727,6 +728,7 @@ done:
        initrd_end = 0;
 
        flush_delayed_fput();
+       task_work_run();
 }
 
 static ASYNC_DOMAIN_EXCLUSIVE(initramfs_domain);
index 02eb533..0ee39cd 100644 (file)
@@ -688,7 +688,7 @@ noinline void __ref rest_init(void)
         * the init task will end up wanting to create kthreads, which, if
         * we schedule it before we create kthreadd, will OOPS.
         */
-       pid = kernel_thread(kernel_init, NULL, CLONE_FS);
+       pid = user_mode_thread(kernel_init, NULL, CLONE_FS);
        /*
         * Pin init on the boot CPU. Task migration is not properly working
         * until sched_init_smp() has been run. It will set the allowed
index f101c17..ef313ec 100644 (file)
 #include <linux/capability.h>
 #include <linux/ipc_namespace.h>
 #include <linux/msg.h>
+#include <linux/slab.h>
 #include "util.h"
 
-static void *get_ipc(struct ctl_table *table)
-{
-       char *which = table->data;
-       struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
-       which = (which - (char *)&init_ipc_ns) + (char *)ipc_ns;
-       return which;
-}
-
-static int proc_ipc_dointvec(struct ctl_table *table, int write,
-               void *buffer, size_t *lenp, loff_t *ppos)
-{
-       struct ctl_table ipc_table;
-
-       memcpy(&ipc_table, table, sizeof(ipc_table));
-       ipc_table.data = get_ipc(table);
-
-       return proc_dointvec(&ipc_table, write, buffer, lenp, ppos);
-}
-
-static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
-               void *buffer, size_t *lenp, loff_t *ppos)
-{
-       struct ctl_table ipc_table;
-
-       memcpy(&ipc_table, table, sizeof(ipc_table));
-       ipc_table.data = get_ipc(table);
-
-       return proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
-}
-
 static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
                void *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct ipc_namespace *ns = current->nsproxy->ipc_ns;
-       int err = proc_ipc_dointvec_minmax(table, write, buffer, lenp, ppos);
+       struct ipc_namespace *ns =
+               container_of(table->data, struct ipc_namespace, shm_rmid_forced);
+       int err;
+
+       err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 
        if (err < 0)
                return err;
@@ -58,17 +32,6 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
        return err;
 }
 
-static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
-               void *buffer, size_t *lenp, loff_t *ppos)
-{
-       struct ctl_table ipc_table;
-       memcpy(&ipc_table, table, sizeof(ipc_table));
-       ipc_table.data = get_ipc(table);
-
-       return proc_doulongvec_minmax(&ipc_table, write, buffer,
-                                       lenp, ppos);
-}
-
 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
                void *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -87,14 +50,15 @@ static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
 static int proc_ipc_sem_dointvec(struct ctl_table *table, int write,
        void *buffer, size_t *lenp, loff_t *ppos)
 {
+       struct ipc_namespace *ns =
+               container_of(table->data, struct ipc_namespace, sem_ctls);
        int ret, semmni;
-       struct ipc_namespace *ns = current->nsproxy->ipc_ns;
 
        semmni = ns->sem_ctls[3];
-       ret = proc_ipc_dointvec(table, write, buffer, lenp, ppos);
+       ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
        if (!ret)
-               ret = sem_check_semmni(current->nsproxy->ipc_ns);
+               ret = sem_check_semmni(ns);
 
        /*
         * Reset the semmni value if an error happens.
@@ -104,44 +68,31 @@ static int proc_ipc_sem_dointvec(struct ctl_table *table, int write,
        return ret;
 }
 
-#ifdef CONFIG_CHECKPOINT_RESTORE
-static int proc_ipc_dointvec_minmax_checkpoint_restore(struct ctl_table *table,
-               int write, void *buffer, size_t *lenp, loff_t *ppos)
-{
-       struct user_namespace *user_ns = current->nsproxy->ipc_ns->user_ns;
-
-       if (write && !checkpoint_restore_ns_capable(user_ns))
-               return -EPERM;
-
-       return proc_ipc_dointvec_minmax(table, write, buffer, lenp, ppos);
-}
-#endif
-
 int ipc_mni = IPCMNI;
 int ipc_mni_shift = IPCMNI_SHIFT;
 int ipc_min_cycle = RADIX_TREE_MAP_SIZE;
 
-static struct ctl_table ipc_kern_table[] = {
+static struct ctl_table ipc_sysctls[] = {
        {
                .procname       = "shmmax",
                .data           = &init_ipc_ns.shm_ctlmax,
                .maxlen         = sizeof(init_ipc_ns.shm_ctlmax),
                .mode           = 0644,
-               .proc_handler   = proc_ipc_doulongvec_minmax,
+               .proc_handler   = proc_doulongvec_minmax,
        },
        {
                .procname       = "shmall",
                .data           = &init_ipc_ns.shm_ctlall,
                .maxlen         = sizeof(init_ipc_ns.shm_ctlall),
                .mode           = 0644,
-               .proc_handler   = proc_ipc_doulongvec_minmax,
+               .proc_handler   = proc_doulongvec_minmax,
        },
        {
                .procname       = "shmmni",
                .data           = &init_ipc_ns.shm_ctlmni,
                .maxlen         = sizeof(init_ipc_ns.shm_ctlmni),
                .mode           = 0644,
-               .proc_handler   = proc_ipc_dointvec_minmax,
+               .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = &ipc_mni,
        },
@@ -159,7 +110,7 @@ static struct ctl_table ipc_kern_table[] = {
                .data           = &init_ipc_ns.msg_ctlmax,
                .maxlen         = sizeof(init_ipc_ns.msg_ctlmax),
                .mode           = 0644,
-               .proc_handler   = proc_ipc_dointvec_minmax,
+               .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_INT_MAX,
        },
@@ -168,7 +119,7 @@ static struct ctl_table ipc_kern_table[] = {
                .data           = &init_ipc_ns.msg_ctlmni,
                .maxlen         = sizeof(init_ipc_ns.msg_ctlmni),
                .mode           = 0644,
-               .proc_handler   = proc_ipc_dointvec_minmax,
+               .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = &ipc_mni,
        },
@@ -186,7 +137,7 @@ static struct ctl_table ipc_kern_table[] = {
                .data           = &init_ipc_ns.msg_ctlmnb,
                .maxlen         = sizeof(init_ipc_ns.msg_ctlmnb),
                .mode           = 0644,
-               .proc_handler   = proc_ipc_dointvec_minmax,
+               .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_INT_MAX,
        },
@@ -202,8 +153,8 @@ static struct ctl_table ipc_kern_table[] = {
                .procname       = "sem_next_id",
                .data           = &init_ipc_ns.ids[IPC_SEM_IDS].next_id,
                .maxlen         = sizeof(init_ipc_ns.ids[IPC_SEM_IDS].next_id),
-               .mode           = 0666,
-               .proc_handler   = proc_ipc_dointvec_minmax_checkpoint_restore,
+               .mode           = 0444,
+               .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_INT_MAX,
        },
@@ -211,8 +162,8 @@ static struct ctl_table ipc_kern_table[] = {
                .procname       = "msg_next_id",
                .data           = &init_ipc_ns.ids[IPC_MSG_IDS].next_id,
                .maxlen         = sizeof(init_ipc_ns.ids[IPC_MSG_IDS].next_id),
-               .mode           = 0666,
-               .proc_handler   = proc_ipc_dointvec_minmax_checkpoint_restore,
+               .mode           = 0444,
+               .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_INT_MAX,
        },
@@ -220,8 +171,8 @@ static struct ctl_table ipc_kern_table[] = {
                .procname       = "shm_next_id",
                .data           = &init_ipc_ns.ids[IPC_SHM_IDS].next_id,
                .maxlen         = sizeof(init_ipc_ns.ids[IPC_SHM_IDS].next_id),
-               .mode           = 0666,
-               .proc_handler   = proc_ipc_dointvec_minmax_checkpoint_restore,
+               .mode           = 0444,
+               .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_INT_MAX,
        },
@@ -229,18 +180,112 @@ static struct ctl_table ipc_kern_table[] = {
        {}
 };
 
-static struct ctl_table ipc_root_table[] = {
-       {
-               .procname       = "kernel",
-               .mode           = 0555,
-               .child          = ipc_kern_table,
-       },
-       {}
+static struct ctl_table_set *set_lookup(struct ctl_table_root *root)
+{
+       return &current->nsproxy->ipc_ns->ipc_set;
+}
+
+static int set_is_seen(struct ctl_table_set *set)
+{
+       return &current->nsproxy->ipc_ns->ipc_set == set;
+}
+
+static int ipc_permissions(struct ctl_table_header *head, struct ctl_table *table)
+{
+       int mode = table->mode;
+
+#ifdef CONFIG_CHECKPOINT_RESTORE
+       struct ipc_namespace *ns = current->nsproxy->ipc_ns;
+
+       if (((table->data == &ns->ids[IPC_SEM_IDS].next_id) ||
+            (table->data == &ns->ids[IPC_MSG_IDS].next_id) ||
+            (table->data == &ns->ids[IPC_SHM_IDS].next_id)) &&
+           checkpoint_restore_ns_capable(ns->user_ns))
+               mode = 0666;
+#endif
+       return mode;
+}
+
+static struct ctl_table_root set_root = {
+       .lookup = set_lookup,
+       .permissions = ipc_permissions,
 };
 
+bool setup_ipc_sysctls(struct ipc_namespace *ns)
+{
+       struct ctl_table *tbl;
+
+       setup_sysctl_set(&ns->ipc_set, &set_root, set_is_seen);
+
+       tbl = kmemdup(ipc_sysctls, sizeof(ipc_sysctls), GFP_KERNEL);
+       if (tbl) {
+               int i;
+
+               for (i = 0; i < ARRAY_SIZE(ipc_sysctls); i++) {
+                       if (tbl[i].data == &init_ipc_ns.shm_ctlmax)
+                               tbl[i].data = &ns->shm_ctlmax;
+
+                       else if (tbl[i].data == &init_ipc_ns.shm_ctlall)
+                               tbl[i].data = &ns->shm_ctlall;
+
+                       else if (tbl[i].data == &init_ipc_ns.shm_ctlmni)
+                               tbl[i].data = &ns->shm_ctlmni;
+
+                       else if (tbl[i].data == &init_ipc_ns.shm_rmid_forced)
+                               tbl[i].data = &ns->shm_rmid_forced;
+
+                       else if (tbl[i].data == &init_ipc_ns.msg_ctlmax)
+                               tbl[i].data = &ns->msg_ctlmax;
+
+                       else if (tbl[i].data == &init_ipc_ns.msg_ctlmni)
+                               tbl[i].data = &ns->msg_ctlmni;
+
+                       else if (tbl[i].data == &init_ipc_ns.msg_ctlmnb)
+                               tbl[i].data = &ns->msg_ctlmnb;
+
+                       else if (tbl[i].data == &init_ipc_ns.sem_ctls)
+                               tbl[i].data = &ns->sem_ctls;
+#ifdef CONFIG_CHECKPOINT_RESTORE
+                       else if (tbl[i].data == &init_ipc_ns.ids[IPC_SEM_IDS].next_id)
+                               tbl[i].data = &ns->ids[IPC_SEM_IDS].next_id;
+
+                       else if (tbl[i].data == &init_ipc_ns.ids[IPC_MSG_IDS].next_id)
+                               tbl[i].data = &ns->ids[IPC_MSG_IDS].next_id;
+
+                       else if (tbl[i].data == &init_ipc_ns.ids[IPC_SHM_IDS].next_id)
+                               tbl[i].data = &ns->ids[IPC_SHM_IDS].next_id;
+#endif
+                       else
+                               tbl[i].data = NULL;
+               }
+
+               ns->ipc_sysctls = __register_sysctl_table(&ns->ipc_set, "kernel", tbl);
+       }
+       if (!ns->ipc_sysctls) {
+               kfree(tbl);
+               retire_sysctl_set(&ns->ipc_set);
+               return false;
+       }
+
+       return true;
+}
+
+void retire_ipc_sysctls(struct ipc_namespace *ns)
+{
+       struct ctl_table *tbl;
+
+       tbl = ns->ipc_sysctls->ctl_table_arg;
+       unregister_sysctl_table(ns->ipc_sysctls);
+       retire_sysctl_set(&ns->ipc_set);
+       kfree(tbl);
+}
+
 static int __init ipc_sysctl_init(void)
 {
-       register_sysctl_table(ipc_root_table);
+       if (!setup_ipc_sysctls(&init_ipc_ns)) {
+               pr_warn("ipc sysctl registration failed\n");
+               return -ENOMEM;
+       }
        return 0;
 }
 
index 72a92a0..fbf6a8b 100644 (file)
@@ -9,39 +9,9 @@
 #include <linux/ipc_namespace.h>
 #include <linux/sysctl.h>
 
-#ifdef CONFIG_PROC_SYSCTL
-static void *get_mq(struct ctl_table *table)
-{
-       char *which = table->data;
-       struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
-       which = (which - (char *)&init_ipc_ns) + (char *)ipc_ns;
-       return which;
-}
-
-static int proc_mq_dointvec(struct ctl_table *table, int write,
-                           void *buffer, size_t *lenp, loff_t *ppos)
-{
-       struct ctl_table mq_table;
-       memcpy(&mq_table, table, sizeof(mq_table));
-       mq_table.data = get_mq(table);
-
-       return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
-}
-
-static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
-               void *buffer, size_t *lenp, loff_t *ppos)
-{
-       struct ctl_table mq_table;
-       memcpy(&mq_table, table, sizeof(mq_table));
-       mq_table.data = get_mq(table);
-
-       return proc_dointvec_minmax(&mq_table, write, buffer,
-                                       lenp, ppos);
-}
-#else
-#define proc_mq_dointvec NULL
-#define proc_mq_dointvec_minmax NULL
-#endif
+#include <linux/stat.h>
+#include <linux/capability.h>
+#include <linux/slab.h>
 
 static int msg_max_limit_min = MIN_MSGMAX;
 static int msg_max_limit_max = HARD_MSGMAX;
@@ -55,14 +25,14 @@ static struct ctl_table mq_sysctls[] = {
                .data           = &init_ipc_ns.mq_queues_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_mq_dointvec,
+               .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "msg_max",
                .data           = &init_ipc_ns.mq_msg_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_mq_dointvec_minmax,
+               .proc_handler   = proc_dointvec_minmax,
                .extra1         = &msg_max_limit_min,
                .extra2         = &msg_max_limit_max,
        },
@@ -71,7 +41,7 @@ static struct ctl_table mq_sysctls[] = {
                .data           = &init_ipc_ns.mq_msgsize_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_mq_dointvec_minmax,
+               .proc_handler   = proc_dointvec_minmax,
                .extra1         = &msg_maxsize_limit_min,
                .extra2         = &msg_maxsize_limit_max,
        },
@@ -80,7 +50,7 @@ static struct ctl_table mq_sysctls[] = {
                .data           = &init_ipc_ns.mq_msg_default,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_mq_dointvec_minmax,
+               .proc_handler   = proc_dointvec_minmax,
                .extra1         = &msg_max_limit_min,
                .extra2         = &msg_max_limit_max,
        },
@@ -89,32 +59,73 @@ static struct ctl_table mq_sysctls[] = {
                .data           = &init_ipc_ns.mq_msgsize_default,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_mq_dointvec_minmax,
+               .proc_handler   = proc_dointvec_minmax,
                .extra1         = &msg_maxsize_limit_min,
                .extra2         = &msg_maxsize_limit_max,
        },
        {}
 };
 
-static struct ctl_table mq_sysctl_dir[] = {
-       {
-               .procname       = "mqueue",
-               .mode           = 0555,
-               .child          = mq_sysctls,
-       },
-       {}
-};
+static struct ctl_table_set *set_lookup(struct ctl_table_root *root)
+{
+       return &current->nsproxy->ipc_ns->mq_set;
+}
 
-static struct ctl_table mq_sysctl_root[] = {
-       {
-               .procname       = "fs",
-               .mode           = 0555,
-               .child          = mq_sysctl_dir,
-       },
-       {}
+static int set_is_seen(struct ctl_table_set *set)
+{
+       return &current->nsproxy->ipc_ns->mq_set == set;
+}
+
+static struct ctl_table_root set_root = {
+       .lookup = set_lookup,
 };
 
-struct ctl_table_header *mq_register_sysctl_table(void)
+bool setup_mq_sysctls(struct ipc_namespace *ns)
 {
-       return register_sysctl_table(mq_sysctl_root);
+       struct ctl_table *tbl;
+
+       setup_sysctl_set(&ns->mq_set, &set_root, set_is_seen);
+
+       tbl = kmemdup(mq_sysctls, sizeof(mq_sysctls), GFP_KERNEL);
+       if (tbl) {
+               int i;
+
+               for (i = 0; i < ARRAY_SIZE(mq_sysctls); i++) {
+                       if (tbl[i].data == &init_ipc_ns.mq_queues_max)
+                               tbl[i].data = &ns->mq_queues_max;
+
+                       else if (tbl[i].data == &init_ipc_ns.mq_msg_max)
+                               tbl[i].data = &ns->mq_msg_max;
+
+                       else if (tbl[i].data == &init_ipc_ns.mq_msgsize_max)
+                               tbl[i].data = &ns->mq_msgsize_max;
+
+                       else if (tbl[i].data == &init_ipc_ns.mq_msg_default)
+                               tbl[i].data = &ns->mq_msg_default;
+
+                       else if (tbl[i].data == &init_ipc_ns.mq_msgsize_default)
+                               tbl[i].data = &ns->mq_msgsize_default;
+                       else
+                               tbl[i].data = NULL;
+               }
+
+               ns->mq_sysctls = __register_sysctl_table(&ns->mq_set, "fs/mqueue", tbl);
+       }
+       if (!ns->mq_sysctls) {
+               kfree(tbl);
+               retire_sysctl_set(&ns->mq_set);
+               return false;
+       }
+
+       return true;
+}
+
+void retire_mq_sysctls(struct ipc_namespace *ns)
+{
+       struct ctl_table *tbl;
+
+       tbl = ns->mq_sysctls->ctl_table_arg;
+       unregister_sysctl_table(ns->mq_sysctls);
+       retire_sysctl_set(&ns->mq_set);
+       kfree(tbl);
 }
index 54cb626..12ad786 100644 (file)
@@ -164,8 +164,6 @@ static void remove_notification(struct mqueue_inode_info *info);
 
 static struct kmem_cache *mqueue_inode_cachep;
 
-static struct ctl_table_header *mq_sysctl_table;
-
 static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
 {
        return container_of(inode, struct mqueue_inode_info, vfs_inode);
@@ -1727,8 +1725,10 @@ static int __init init_mqueue_fs(void)
        if (mqueue_inode_cachep == NULL)
                return -ENOMEM;
 
-       /* ignore failures - they are not fatal */
-       mq_sysctl_table = mq_register_sysctl_table();
+       if (!setup_mq_sysctls(&init_ipc_ns)) {
+               pr_warn("sysctl registration failed\n");
+               return -ENOMEM;
+       }
 
        error = register_filesystem(&mqueue_fs_type);
        if (error)
@@ -1745,8 +1745,6 @@ static int __init init_mqueue_fs(void)
 out_filesystem:
        unregister_filesystem(&mqueue_fs_type);
 out_sysctl:
-       if (mq_sysctl_table)
-               unregister_sysctl_table(mq_sysctl_table);
        kmem_cache_destroy(mqueue_inode_cachep);
        return error;
 }
index ae83f0f..754f323 100644 (file)
@@ -59,6 +59,13 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
        if (err)
                goto fail_put;
 
+       err = -ENOMEM;
+       if (!setup_mq_sysctls(ns))
+               goto fail_put;
+
+       if (!setup_ipc_sysctls(ns))
+               goto fail_put;
+
        sem_init_ns(ns);
        msg_init_ns(ns);
        shm_init_ns(ns);
@@ -125,6 +132,9 @@ static void free_ipc_ns(struct ipc_namespace *ns)
        msg_exit_ns(ns);
        shm_exit_ns(ns);
 
+       retire_mq_sysctls(ns);
+       retire_ipc_sysctls(ns);
+
        dec_ipc_namespaces(ns->ucounts);
        put_user_ns(ns->user_ns);
        ns_free_inum(&ns->ns);
index cacd868..5f6f3f8 100644 (file)
@@ -1953,6 +1953,11 @@ out:
                CONT;                                                   \
        LDX_MEM_##SIZEOP:                                               \
                DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
+               CONT;                                                   \
+       LDX_PROBE_MEM_##SIZEOP:                                         \
+               bpf_probe_read_kernel(&DST, sizeof(SIZE),               \
+                                     (const void *)(long) (SRC + insn->off));  \
+               DST = *((SIZE *)&DST);                                  \
                CONT;
 
        LDST(B,   u8)
@@ -1960,15 +1965,6 @@ out:
        LDST(W,  u32)
        LDST(DW, u64)
 #undef LDST
-#define LDX_PROBE(SIZEOP, SIZE)                                                        \
-       LDX_PROBE_MEM_##SIZEOP:                                                 \
-               bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off));      \
-               CONT;
-       LDX_PROBE(B,  1)
-       LDX_PROBE(H,  2)
-       LDX_PROBE(W,  4)
-       LDX_PROBE(DW, 8)
-#undef LDX_PROBE
 
 #define ATOMIC_ALU_OP(BOP, KOP)                                                \
                case BOP:                                               \
index 2c1e18f..164ed9e 100644 (file)
@@ -177,11 +177,14 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
        d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp;
        tmp = d->compact_delay_total + tsk->delays->compact_delay;
        d->compact_delay_total = (tmp < d->compact_delay_total) ? 0 : tmp;
+       tmp = d->wpcopy_delay_total + tsk->delays->wpcopy_delay;
+       d->wpcopy_delay_total = (tmp < d->wpcopy_delay_total) ? 0 : tmp;
        d->blkio_count += tsk->delays->blkio_count;
        d->swapin_count += tsk->delays->swapin_count;
        d->freepages_count += tsk->delays->freepages_count;
        d->thrashing_count += tsk->delays->thrashing_count;
        d->compact_count += tsk->delays->compact_count;
+       d->wpcopy_count += tsk->delays->wpcopy_count;
        raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
 
        return 0;
@@ -249,3 +252,16 @@ void __delayacct_compact_end(void)
                      &current->delays->compact_delay,
                      &current->delays->compact_count);
 }
+
+void __delayacct_wpcopy_start(void)
+{
+       current->delays->wpcopy_start = local_clock();
+}
+
+void __delayacct_wpcopy_end(void)
+{
+       delayacct_end(&current->delays->lock,
+                     &current->delays->wpcopy_start,
+                     &current->delays->wpcopy_delay,
+                     &current->delays->wpcopy_count);
+}
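The two functions above bracket the copy performed when a write-protected page is copied on fault. A rough sketch of the intended call pattern; the wrapper function is illustrative, and in the kernel the calls are normally reached through static-key guarded delayacct_wpcopy_start()/end() inlines rather than directly:

/* Illustrative call pattern only; the real caller is the fault path. */
static void copy_wp_page_accounted(void)
{
        __delayacct_wpcopy_start();     /* stamps current->delays->wpcopy_start */

        /* ... allocate the destination page and copy the contents ... */

        __delayacct_wpcopy_end();       /* accumulates wpcopy_delay, bumps wpcopy_count */
}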
index ac74063..2caafd1 100644 (file)
@@ -564,7 +564,7 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
 
        rc = active_cacheline_insert(entry);
        if (rc == -ENOMEM) {
-               pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
+               pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
                global_disable = true;
        } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                err_printk(entry->dev, entry,
index dfa1de8..cb50f8d 100644 (file)
@@ -192,7 +192,7 @@ void __init swiotlb_update_mem_attributes(void)
 }
 
 static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
-                                   unsigned long nslabs, bool late_alloc)
+               unsigned long nslabs, unsigned int flags, bool late_alloc)
 {
        void *vaddr = phys_to_virt(start);
        unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
@@ -203,8 +203,7 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
        mem->index = 0;
        mem->late_alloc = late_alloc;
 
-       if (swiotlb_force_bounce)
-               mem->force_bounce = true;
+       mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
 
        spin_lock_init(&mem->lock);
        for (i = 0; i < mem->nslabs; i++) {
@@ -275,8 +274,7 @@ retry:
                panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
                      __func__, alloc_size, PAGE_SIZE);
 
-       swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
-       mem->force_bounce = flags & SWIOTLB_FORCE;
+       swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false);
 
        if (flags & SWIOTLB_VERBOSE)
                swiotlb_print_info();
@@ -348,7 +346,7 @@ retry:
 
        set_memory_decrypted((unsigned long)vstart,
                             (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
-       swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);
+       swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true);
 
        swiotlb_print_info();
        return 0;
@@ -835,8 +833,8 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
 
                set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
                                     rmem->size >> PAGE_SHIFT);
-               swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
-               mem->force_bounce = true;
+               swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
+                               false);
                mem->for_alloc = true;
 
                rmem->priv = mem;
index 950b25c..80782cd 100644 (file)
@@ -4257,7 +4257,6 @@ static void perf_event_remove_on_exec(int ctxn)
 {
        struct perf_event_context *ctx, *clone_ctx = NULL;
        struct perf_event *event, *next;
-       LIST_HEAD(free_list);
        unsigned long flags;
        bool modified = false;
 
index 124829e..9d44f2d 100644 (file)
@@ -1982,7 +1982,7 @@ static __latent_entropy struct task_struct *copy_process(
        struct task_struct *p;
        struct multiprocess_signals delayed;
        struct file *pidfile = NULL;
-       u64 clone_flags = args->flags;
+       const u64 clone_flags = args->flags;
        struct nsproxy *nsp = current->nsproxy;
 
        /*
@@ -2071,6 +2071,9 @@ static __latent_entropy struct task_struct *copy_process(
        p = dup_task_struct(current, node);
        if (!p)
                goto fork_out;
+       p->flags &= ~PF_KTHREAD;
+       if (args->kthread)
+               p->flags |= PF_KTHREAD;
        if (args->io_thread) {
                /*
                 * Mark us an IO worker, and block any signal that isn't
@@ -2160,7 +2163,7 @@ static __latent_entropy struct task_struct *copy_process(
        p->io_context = NULL;
        audit_set_context(p, NULL);
        cgroup_fork(p);
-       if (p->flags & PF_KTHREAD) {
+       if (args->kthread) {
                if (!set_kthread_struct(p))
                        goto bad_fork_cleanup_delayacct;
        }
@@ -2243,7 +2246,7 @@ static __latent_entropy struct task_struct *copy_process(
        retval = copy_io(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_namespaces;
-       retval = copy_thread(clone_flags, args->stack, args->stack_size, p, args->tls);
+       retval = copy_thread(p, args);
        if (retval)
                goto bad_fork_cleanup_io;
 
@@ -2547,11 +2550,21 @@ static inline void init_idle_pids(struct task_struct *idle)
        }
 }
 
+static int idle_dummy(void *dummy)
+{
+       /* This function is never called */
+       return 0;
+}
+
 struct task_struct * __init fork_idle(int cpu)
 {
        struct task_struct *task;
        struct kernel_clone_args args = {
-               .flags = CLONE_VM,
+               .flags          = CLONE_VM,
+               .fn             = &idle_dummy,
+               .fn_arg         = NULL,
+               .kthread        = 1,
+               .idle           = 1,
        };
 
        task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
@@ -2582,8 +2595,8 @@ struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
                .flags          = ((lower_32_bits(flags) | CLONE_VM |
                                    CLONE_UNTRACED) & ~CSIGNAL),
                .exit_signal    = (lower_32_bits(flags) & CSIGNAL),
-               .stack          = (unsigned long)fn,
-               .stack_size     = (unsigned long)arg,
+               .fn             = fn,
+               .fn_arg         = arg,
                .io_thread      = 1,
        };
 
@@ -2687,8 +2700,25 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
                .flags          = ((lower_32_bits(flags) | CLONE_VM |
                                    CLONE_UNTRACED) & ~CSIGNAL),
                .exit_signal    = (lower_32_bits(flags) & CSIGNAL),
-               .stack          = (unsigned long)fn,
-               .stack_size     = (unsigned long)arg,
+               .fn             = fn,
+               .fn_arg         = arg,
+               .kthread        = 1,
+       };
+
+       return kernel_clone(&args);
+}
+
+/*
+ * Create a user mode thread.
+ */
+pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags)
+{
+       struct kernel_clone_args args = {
+               .flags          = ((lower_32_bits(flags) | CLONE_VM |
+                                   CLONE_UNTRACED) & ~CSIGNAL),
+               .exit_signal    = (lower_32_bits(flags) & CSIGNAL),
+               .fn             = fn,
+               .fn_arg         = arg,
        };
 
        return kernel_clone(&args);
index c108a2a..145321a 100644 (file)
@@ -1226,7 +1226,7 @@ int crash_exclude_mem_range(struct crash_mem *mem,
        return 0;
 }
 
-int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
+int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
                          void **addr, unsigned long *sz)
 {
        Elf64_Ehdr *ehdr;
@@ -1290,7 +1290,7 @@ int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
        phdr++;
 
        /* Prepare PT_LOAD type program header for kernel text region */
-       if (kernel_map) {
+       if (need_kernel_map) {
                phdr->p_type = PT_LOAD;
                phdr->p_flags = PF_R|PF_W|PF_X;
                phdr->p_vaddr = (unsigned long) _text;
index c172bf9..4c4f5a7 100644 (file)
@@ -118,7 +118,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
        if (func->nop)
                goto unlock;
 
-       klp_arch_set_pc(fregs, (unsigned long)func->new_func);
+       ftrace_instruction_pointer_set(fregs, (unsigned long)func->new_func);
 
 unlock:
        ftrace_test_recursion_unlock(bit);
index 85c8999..a2ff424 100644 (file)
@@ -16,6 +16,9 @@
 #include <uapi/linux/module.h>
 #include "internal.h"
 
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "module."
+
 static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
 module_param(sig_enforce, bool_enable_only, 0644);
 
index ba005eb..0d5bd62 100644 (file)
@@ -20,7 +20,8 @@ BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
  */
 
 static int notifier_chain_register(struct notifier_block **nl,
-                                  struct notifier_block *n)
+                                  struct notifier_block *n,
+                                  bool unique_priority)
 {
        while ((*nl) != NULL) {
                if (unlikely((*nl) == n)) {
@@ -30,6 +31,8 @@ static int notifier_chain_register(struct notifier_block **nl,
                }
                if (n->priority > (*nl)->priority)
                        break;
+               if (n->priority == (*nl)->priority && unique_priority)
+                       return -EBUSY;
                nl = &((*nl)->next);
        }
        n->next = *nl;
@@ -144,13 +147,36 @@ int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
        int ret;
 
        spin_lock_irqsave(&nh->lock, flags);
-       ret = notifier_chain_register(&nh->head, n);
+       ret = notifier_chain_register(&nh->head, n, false);
        spin_unlock_irqrestore(&nh->lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
 
 /**
+ *     atomic_notifier_chain_register_unique_prio - Add notifier to an atomic notifier chain
+ *     @nh: Pointer to head of the atomic notifier chain
+ *     @n: New entry in notifier chain
+ *
+ *     Adds a notifier to an atomic notifier chain if there is no other
+ *     notifier registered using the same priority.
+ *
+ *     Returns 0 on success, %-EEXIST or %-EBUSY on error.
+ */
+int atomic_notifier_chain_register_unique_prio(struct atomic_notifier_head *nh,
+                                              struct notifier_block *n)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&nh->lock, flags);
+       ret = notifier_chain_register(&nh->head, n, true);
+       spin_unlock_irqrestore(&nh->lock, flags);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(atomic_notifier_chain_register_unique_prio);
+
+/**
  *     atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
  *     @nh: Pointer to head of the atomic notifier chain
  *     @n: Entry to remove from notifier chain
@@ -204,23 +230,27 @@ int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
 EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
 NOKPROBE_SYMBOL(atomic_notifier_call_chain);
 
+/**
+ *     atomic_notifier_call_chain_is_empty - Check whether notifier chain is empty
+ *     @nh: Pointer to head of the atomic notifier chain
+ *
+ *     Checks whether notifier chain is empty.
+ *
+ *     Returns true if the notifier chain is empty, false otherwise.
+ */
+bool atomic_notifier_call_chain_is_empty(struct atomic_notifier_head *nh)
+{
+       return !rcu_access_pointer(nh->head);
+}
+
 /*
  *     Blocking notifier chain routines.  All access to the chain is
  *     synchronized by an rwsem.
  */
 
-/**
- *     blocking_notifier_chain_register - Add notifier to a blocking notifier chain
- *     @nh: Pointer to head of the blocking notifier chain
- *     @n: New entry in notifier chain
- *
- *     Adds a notifier to a blocking notifier chain.
- *     Must be called in process context.
- *
- *     Returns 0 on success, %-EEXIST on error.
- */
-int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
-               struct notifier_block *n)
+static int __blocking_notifier_chain_register(struct blocking_notifier_head *nh,
+                                             struct notifier_block *n,
+                                             bool unique_priority)
 {
        int ret;
 
@@ -230,16 +260,49 @@ int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
         * such times we must not call down_write().
         */
        if (unlikely(system_state == SYSTEM_BOOTING))
-               return notifier_chain_register(&nh->head, n);
+               return notifier_chain_register(&nh->head, n, unique_priority);
 
        down_write(&nh->rwsem);
-       ret = notifier_chain_register(&nh->head, n);
+       ret = notifier_chain_register(&nh->head, n, unique_priority);
        up_write(&nh->rwsem);
        return ret;
 }
+
+/**
+ *     blocking_notifier_chain_register - Add notifier to a blocking notifier chain
+ *     @nh: Pointer to head of the blocking notifier chain
+ *     @n: New entry in notifier chain
+ *
+ *     Adds a notifier to a blocking notifier chain.
+ *     Must be called in process context.
+ *
+ *     Returns 0 on success, %-EEXIST on error.
+ */
+int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
+               struct notifier_block *n)
+{
+       return __blocking_notifier_chain_register(nh, n, false);
+}
 EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
 
 /**
+ *     blocking_notifier_chain_register_unique_prio - Add notifier to a blocking notifier chain
+ *     @nh: Pointer to head of the blocking notifier chain
+ *     @n: New entry in notifier chain
+ *
+ *     Adds a notifier to a blocking notifier chain if there is no other
+ *     notifier registered using the same priority.
+ *
+ *     Returns 0 on success, %-EEXIST or %-EBUSY on error.
+ */
+int blocking_notifier_chain_register_unique_prio(struct blocking_notifier_head *nh,
+                                                struct notifier_block *n)
+{
+       return __blocking_notifier_chain_register(nh, n, true);
+}
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_register_unique_prio);
+
+/**
  *     blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
  *     @nh: Pointer to head of the blocking notifier chain
  *     @n: Entry to remove from notifier chain
@@ -341,7 +404,7 @@ EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
 int raw_notifier_chain_register(struct raw_notifier_head *nh,
                struct notifier_block *n)
 {
-       return notifier_chain_register(&nh->head, n);
+       return notifier_chain_register(&nh->head, n, false);
 }
 EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
 
@@ -420,10 +483,10 @@ int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
         * such times we must not call mutex_lock().
         */
        if (unlikely(system_state == SYSTEM_BOOTING))
-               return notifier_chain_register(&nh->head, n);
+               return notifier_chain_register(&nh->head, n, false);
 
        mutex_lock(&nh->mutex);
-       ret = notifier_chain_register(&nh->head, n);
+       ret = notifier_chain_register(&nh->head, n, false);
        mutex_unlock(&nh->mutex);
        return ret;
 }
index a3e1035..ea3dd55 100644 (file)
@@ -3904,7 +3904,7 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
        }
 
        if (pending & PRINTK_PENDING_WAKEUP)
-               wake_up_interruptible_all(&log_wait);
+               wake_up_interruptible(&log_wait);
 }
 
 static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
index 49c29ba..156a992 100644 (file)
@@ -185,7 +185,12 @@ static bool looks_like_a_spurious_pid(struct task_struct *task)
        return true;
 }
 
-/* Ensure that nothing can wake it up, even SIGKILL */
+/*
+ * Ensure that nothing can wake it up, even SIGKILL
+ *
+ * A task is switched to this state while a ptrace operation is in progress
+ * so that the ptrace operation is uninterruptible.
+ */
 static bool ptrace_freeze_traced(struct task_struct *task)
 {
        bool ret = false;
@@ -197,7 +202,7 @@ static bool ptrace_freeze_traced(struct task_struct *task)
        spin_lock_irq(&task->sighand->siglock);
        if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
            !__fatal_signal_pending(task)) {
-               WRITE_ONCE(task->__state, __TASK_TRACED);
+               task->jobctl |= JOBCTL_PTRACE_FROZEN;
                ret = true;
        }
        spin_unlock_irq(&task->sighand->siglock);
@@ -207,23 +212,21 @@ static bool ptrace_freeze_traced(struct task_struct *task)
 
 static void ptrace_unfreeze_traced(struct task_struct *task)
 {
-       if (READ_ONCE(task->__state) != __TASK_TRACED)
-               return;
-
-       WARN_ON(!task->ptrace || task->parent != current);
+       unsigned long flags;
 
        /*
-        * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
-        * Recheck state under the lock to close this race.
+        * The child may be awake and may have cleared
+        * JOBCTL_PTRACE_FROZEN (see ptrace_resume).  The child will
+        * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew.
         */
-       spin_lock_irq(&task->sighand->siglock);
-       if (READ_ONCE(task->__state) == __TASK_TRACED) {
-               if (__fatal_signal_pending(task))
+       if (lock_task_sighand(task, &flags)) {
+               task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
+               if (__fatal_signal_pending(task)) {
+                       task->jobctl &= ~TASK_TRACED;
                        wake_up_state(task, __TASK_TRACED);
-               else
-                       WRITE_ONCE(task->__state, TASK_TRACED);
+               }
+               unlock_task_sighand(task, &flags);
        }
-       spin_unlock_irq(&task->sighand->siglock);
 }
 
 /**
@@ -256,7 +259,6 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
         */
        read_lock(&tasklist_lock);
        if (child->ptrace && child->parent == current) {
-               WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
@@ -266,17 +268,9 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
        }
        read_unlock(&tasklist_lock);
 
-       if (!ret && !ignore_state) {
-               if (!wait_task_inactive(child, __TASK_TRACED)) {
-                       /*
-                        * This can only happen if may_ptrace_stop() fails and
-                        * ptrace_stop() changes ->state back to TASK_RUNNING,
-                        * so we should not worry about leaking __TASK_TRACED.
-                        */
-                       WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
-                       ret = -ESRCH;
-               }
-       }
+       if (!ret && !ignore_state &&
+           WARN_ON_ONCE(!wait_task_inactive(child, __TASK_TRACED)))
+               ret = -ESRCH;
 
        return ret;
 }
@@ -475,8 +469,10 @@ static int ptrace_attach(struct task_struct *task, long request,
         * in and out of STOPPED are protected by siglock.
         */
        if (task_is_stopped(task) &&
-           task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
+           task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) {
+               task->jobctl &= ~JOBCTL_STOPPED;
                signal_wake_up_state(task, __TASK_STOPPED);
+       }
 
        spin_unlock(&task->sighand->siglock);
 
@@ -846,8 +842,6 @@ static long ptrace_get_rseq_configuration(struct task_struct *task,
 static int ptrace_resume(struct task_struct *child, long request,
                         unsigned long data)
 {
-       bool need_siglock;
-
        if (!valid_signal(data))
                return -EIO;
 
@@ -883,18 +877,12 @@ static int ptrace_resume(struct task_struct *child, long request,
         * Note that we need siglock even if ->exit_code == data and/or this
         * status was not reported yet, the new status must not be cleared by
         * wait_task_stopped() after resume.
-        *
-        * If data == 0 we do not care if wait_task_stopped() reports the old
-        * status and clears the code too; this can't race with the tracee, it
-        * takes siglock after resume.
         */
-       need_siglock = data && !thread_group_empty(current);
-       if (need_siglock)
-               spin_lock_irq(&child->sighand->siglock);
+       spin_lock_irq(&child->sighand->siglock);
        child->exit_code = data;
+       child->jobctl &= ~JOBCTL_TRACED;
        wake_up_state(child, __TASK_TRACED);
-       if (need_siglock)
-               spin_unlock_irq(&child->sighand->siglock);
+       spin_unlock_irq(&child->sighand->siglock);
 
        return 0;
 }
@@ -1230,9 +1218,8 @@ int ptrace_request(struct task_struct *child, long request,
                return ptrace_resume(child, request, data);
 
        case PTRACE_KILL:
-               if (child->exit_state)  /* already dead */
-                       return 0;
-               return ptrace_resume(child, request, SIGKILL);
+               send_sig_info(SIGKILL, SEND_SIG_NOINFO, child);
+               return 0;
 
 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
@@ -1279,10 +1266,6 @@ int ptrace_request(struct task_struct *child, long request,
        return ret;
 }
 
-#ifndef arch_ptrace_attach
-#define arch_ptrace_attach(child)      do { } while (0)
-#endif
-
 SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
                unsigned long, data)
 {
@@ -1291,8 +1274,6 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
 
        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
-               if (!ret)
-                       arch_ptrace_attach(current);
                goto out;
        }
 
@@ -1304,12 +1285,6 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
 
        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, addr, data);
-               /*
-                * Some architectures need to do book-keeping after
-                * a ptrace attach.
-                */
-               if (!ret)
-                       arch_ptrace_attach(child);
                goto out_put_task_struct;
        }
 
@@ -1449,12 +1424,6 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
 
        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, addr, data);
-               /*
-                * Some architectures need to do book-keeping after
-                * a ptrace attach.
-                */
-               if (!ret)
-                       arch_ptrace_attach(child);
                goto out_put_task_struct;
        }
 
index 44228a9..a091145 100644 (file)
@@ -48,12 +48,20 @@ int reboot_cpu;
 enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
 
+struct sys_off_handler {
+       struct notifier_block nb;
+       int (*sys_off_cb)(struct sys_off_data *data);
+       void *cb_data;
+       enum sys_off_mode mode;
+       bool blocking;
+       void *list;
+};
+
 /*
- * If set, this is used for preparing the system to power off.
+ * Temporary stub that prevents linkage failure while we're in the process
+ * of removing all uses of legacy pm_power_off() around the kernel.
  */
-
-void (*pm_power_off_prepare)(void);
-EXPORT_SYMBOL_GPL(pm_power_off_prepare);
+void __weak (*pm_power_off)(void);
 
 /**
  *     emergency_restart - reboot the system
@@ -281,6 +289,316 @@ void kernel_halt(void)
 }
 EXPORT_SYMBOL_GPL(kernel_halt);
 
+/*
+ *     Notifier list for kernel code which wants to be called
+ *     to prepare system for power off.
+ */
+static BLOCKING_NOTIFIER_HEAD(power_off_prep_handler_list);
+
+/*
+ *     Notifier list for kernel code which wants to be called
+ *     to power off system.
+ */
+static ATOMIC_NOTIFIER_HEAD(power_off_handler_list);
+
+static int sys_off_notify(struct notifier_block *nb,
+                         unsigned long mode, void *cmd)
+{
+       struct sys_off_handler *handler;
+       struct sys_off_data data = {};
+
+       handler = container_of(nb, struct sys_off_handler, nb);
+       data.cb_data = handler->cb_data;
+       data.mode = mode;
+       data.cmd = cmd;
+
+       return handler->sys_off_cb(&data);
+}
+
+/**
+ *     register_sys_off_handler - Register sys-off handler
+ *     @mode: Sys-off mode
+ *     @priority: Handler priority
+ *     @callback: Callback function
+ *     @cb_data: Callback argument
+ *
+ *     Registers system power-off or restart handler that will be invoked
+ *     at the step corresponding to the given sys-off mode. Handler's callback
+ *     should return NOTIFY_DONE to permit execution of the next handler in
+ *     the call chain or NOTIFY_STOP to break the chain (for example, in an
+ *     error case).
+ *
+ *     Multiple handlers can be registered at the default priority level.
+ *
+ *     Only one handler can be registered at the non-default priority level,
+ *     otherwise ERR_PTR(-EBUSY) is returned.
+ *
+ *     Returns a new instance of struct sys_off_handler on success, or
+ *     an ERR_PTR()-encoded error code otherwise.
+ */
+struct sys_off_handler *
+register_sys_off_handler(enum sys_off_mode mode,
+                        int priority,
+                        int (*callback)(struct sys_off_data *data),
+                        void *cb_data)
+{
+       struct sys_off_handler *handler;
+       int err;
+
+       handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+       if (!handler)
+               return ERR_PTR(-ENOMEM);
+
+       switch (mode) {
+       case SYS_OFF_MODE_POWER_OFF_PREPARE:
+               handler->list = &power_off_prep_handler_list;
+               handler->blocking = true;
+               break;
+
+       case SYS_OFF_MODE_POWER_OFF:
+               handler->list = &power_off_handler_list;
+               break;
+
+       case SYS_OFF_MODE_RESTART:
+               handler->list = &restart_handler_list;
+               break;
+
+       default:
+               kfree(handler);
+               return ERR_PTR(-EINVAL);
+       }
+
+       handler->nb.notifier_call = sys_off_notify;
+       handler->nb.priority = priority;
+       handler->sys_off_cb = callback;
+       handler->cb_data = cb_data;
+       handler->mode = mode;
+
+       if (handler->blocking) {
+               if (priority == SYS_OFF_PRIO_DEFAULT)
+                       err = blocking_notifier_chain_register(handler->list,
+                                                              &handler->nb);
+               else
+                       err = blocking_notifier_chain_register_unique_prio(handler->list,
+                                                                          &handler->nb);
+       } else {
+               if (priority == SYS_OFF_PRIO_DEFAULT)
+                       err = atomic_notifier_chain_register(handler->list,
+                                                            &handler->nb);
+               else
+                       err = atomic_notifier_chain_register_unique_prio(handler->list,
+                                                                        &handler->nb);
+       }
+
+       if (err) {
+               kfree(handler);
+               return ERR_PTR(err);
+       }
+
+       return handler;
+}
+EXPORT_SYMBOL_GPL(register_sys_off_handler);
+
+/**
+ *     unregister_sys_off_handler - Unregister sys-off handler
+ *     @handler: Sys-off handler
+ *
+ *     Unregisters given sys-off handler.
+ */
+void unregister_sys_off_handler(struct sys_off_handler *handler)
+{
+       int err;
+
+       if (!handler)
+               return;
+
+       if (handler->blocking)
+               err = blocking_notifier_chain_unregister(handler->list,
+                                                        &handler->nb);
+       else
+               err = atomic_notifier_chain_unregister(handler->list,
+                                                      &handler->nb);
+
+       /* sanity check, shall never happen */
+       WARN_ON(err);
+
+       kfree(handler);
+}
+EXPORT_SYMBOL_GPL(unregister_sys_off_handler);
+
+static void devm_unregister_sys_off_handler(void *data)
+{
+       struct sys_off_handler *handler = data;
+
+       unregister_sys_off_handler(handler);
+}
+
+/**
+ *     devm_register_sys_off_handler - Register sys-off handler
+ *     @dev: Device that registers handler
+ *     @mode: Sys-off mode
+ *     @priority: Handler priority
+ *     @callback: Callback function
+ *     @cb_data: Callback argument
+ *
+ *     Registers resource-managed sys-off handler.
+ *
+ *     Returns zero on success, or error code on failure.
+ */
+int devm_register_sys_off_handler(struct device *dev,
+                                 enum sys_off_mode mode,
+                                 int priority,
+                                 int (*callback)(struct sys_off_data *data),
+                                 void *cb_data)
+{
+       struct sys_off_handler *handler;
+
+       handler = register_sys_off_handler(mode, priority, callback, cb_data);
+       if (IS_ERR(handler))
+               return PTR_ERR(handler);
+
+       return devm_add_action_or_reset(dev, devm_unregister_sys_off_handler,
+                                       handler);
+}
+EXPORT_SYMBOL_GPL(devm_register_sys_off_handler);
+
+/**
+ *     devm_register_power_off_handler - Register power-off handler
+ *     @dev: Device that registers callback
+ *     @callback: Callback function
+ *     @cb_data: Callback's argument
+ *
+ *     Registers resource-managed sys-off handler with a default priority
+ *     and using power-off mode.
+ *
+ *     Returns zero on success, or error code on failure.
+ */
+int devm_register_power_off_handler(struct device *dev,
+                                   int (*callback)(struct sys_off_data *data),
+                                   void *cb_data)
+{
+       return devm_register_sys_off_handler(dev,
+                                            SYS_OFF_MODE_POWER_OFF,
+                                            SYS_OFF_PRIO_DEFAULT,
+                                            callback, cb_data);
+}
+EXPORT_SYMBOL_GPL(devm_register_power_off_handler);
+
+/**
+ *     devm_register_restart_handler - Register restart handler
+ *     @dev: Device that registers callback
+ *     @callback: Callback function
+ *     @cb_data: Callback's argument
+ *
+ *     Registers resource-managed sys-off handler with a default priority
+ *     and using restart mode.
+ *
+ *     Returns zero on success, or error code on failure.
+ */
+int devm_register_restart_handler(struct device *dev,
+                                 int (*callback)(struct sys_off_data *data),
+                                 void *cb_data)
+{
+       return devm_register_sys_off_handler(dev,
+                                            SYS_OFF_MODE_RESTART,
+                                            SYS_OFF_PRIO_DEFAULT,
+                                            callback, cb_data);
+}
+EXPORT_SYMBOL_GPL(devm_register_restart_handler);
+
+static struct sys_off_handler *platform_power_off_handler;
+
+static int platform_power_off_notify(struct sys_off_data *data)
+{
+       void (*platform_power_power_off_cb)(void) = data->cb_data;
+
+       platform_power_power_off_cb();
+
+       return NOTIFY_DONE;
+}
+
+/**
+ *     register_platform_power_off - Register platform-level power-off callback
+ *     @power_off: Power-off callback
+ *
+ *     Registers a power-off callback that will be called as the last step
+ *     of the power-off sequence. This callback is expected to be invoked
+ *     as a last resort. Only one platform power-off callback is allowed
+ *     to be registered at a time.
+ *
+ *     Returns zero on success, or error code on failure.
+ */
+int register_platform_power_off(void (*power_off)(void))
+{
+       struct sys_off_handler *handler;
+
+       handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+                                          SYS_OFF_PRIO_PLATFORM,
+                                          platform_power_off_notify,
+                                          power_off);
+       if (IS_ERR(handler))
+               return PTR_ERR(handler);
+
+       platform_power_off_handler = handler;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(register_platform_power_off);
+
+/**
+ *     unregister_platform_power_off - Unregister platform-level power-off callback
+ *     @power_off: Power-off callback
+ *
+ *     Unregisters previously registered platform power-off callback.
+ */
+void unregister_platform_power_off(void (*power_off)(void))
+{
+       if (platform_power_off_handler &&
+           platform_power_off_handler->cb_data == power_off) {
+               unregister_sys_off_handler(platform_power_off_handler);
+               platform_power_off_handler = NULL;
+       }
+}
+EXPORT_SYMBOL_GPL(unregister_platform_power_off);
+
+static int legacy_pm_power_off(struct sys_off_data *data)
+{
+       if (pm_power_off)
+               pm_power_off();
+
+       return NOTIFY_DONE;
+}
+
+static void do_kernel_power_off_prepare(void)
+{
+       blocking_notifier_call_chain(&power_off_prep_handler_list, 0, NULL);
+}
+
+/**
+ *     do_kernel_power_off - Execute kernel power-off handler call chain
+ *
+ *     Expected to be called as last step of the power-off sequence.
+ *
+ *     Powers off the system immediately if a power-off handler function has
+ *     been registered. Otherwise does nothing.
+ */
+void do_kernel_power_off(void)
+{
+       atomic_notifier_call_chain(&power_off_handler_list, 0, NULL);
+}
+
+/**
+ *     kernel_can_power_off - check whether system can be powered off
+ *
+ *     Returns true if power-off handler is registered and system can be
+ *     powered off, false otherwise.
+ */
+bool kernel_can_power_off(void)
+{
+       return !atomic_notifier_call_chain_is_empty(&power_off_handler_list);
+}
+EXPORT_SYMBOL_GPL(kernel_can_power_off);
+
 /**
  *     kernel_power_off - power_off the system
  *
@@ -289,8 +607,7 @@ EXPORT_SYMBOL_GPL(kernel_halt);
 void kernel_power_off(void)
 {
        kernel_shutdown_prepare(SYSTEM_POWER_OFF);
-       if (pm_power_off_prepare)
-               pm_power_off_prepare();
+       do_kernel_power_off_prepare();
        migrate_to_reboot_cpu();
        syscore_shutdown();
        pr_emerg("Power down\n");
@@ -313,6 +630,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
                void __user *, arg)
 {
        struct pid_namespace *pid_ns = task_active_pid_ns(current);
+       struct sys_off_handler *sys_off = NULL;
        char buffer[256];
        int ret = 0;
 
@@ -337,10 +655,25 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
        if (ret)
                return ret;
 
+       /*
+        * Register sys-off handlers for the legacy PM callback. This allows
+        * legacy PM callbacks to temporarily co-exist with the new sys-off API.
+        *
+        * TODO: Remove legacy handlers once all legacy PM users have been
+        *       switched to the sys-off based APIs.
+        */
+       if (pm_power_off) {
+               sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+                                                  SYS_OFF_PRIO_DEFAULT,
+                                                  legacy_pm_power_off, NULL);
+               if (IS_ERR(sys_off))
+                       return PTR_ERR(sys_off);
+       }
+
        /* Instead of trying to make the power_off code look like
         * halt when pm_power_off is not set do it the easy way.
         */
-       if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
+       if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !kernel_can_power_off())
                cmd = LINUX_REBOOT_CMD_HALT;
 
        mutex_lock(&system_transition_mutex);
@@ -394,6 +727,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
                break;
        }
        mutex_unlock(&system_transition_mutex);
+       unregister_sys_off_handler(sys_off);
        return ret;
 }
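
A minimal sketch of a driver adopting the new sys-off API instead of hooking the legacy pm_power_off pointer; the device structure, register write and names below are hypothetical:

	static int my_pmic_power_off(struct sys_off_data *data)
	{
		struct my_pmic *pmic = data->cb_data;	/* hypothetical driver data */

		my_pmic_write(pmic, MY_PMIC_REG_POWER, MY_PMIC_POWER_DOWN);

		return NOTIFY_DONE;
	}

	static int my_pmic_probe(struct platform_device *pdev)
	{
		struct my_pmic *pmic;

		pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
		if (!pmic)
			return -ENOMEM;

		/* Resource-managed registration at the default priority; the
		 * handler is unregistered automatically when the driver unbinds. */
		return devm_register_sys_off_handler(&pdev->dev,
						     SYS_OFF_MODE_POWER_OFF,
						     SYS_OFF_PRIO_DEFAULT,
						     my_pmic_power_off, pmic);
	}
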
 
index 16092b4..4ebaf97 100644 (file)
@@ -36,6 +36,7 @@ void __init autogroup_init(struct task_struct *init_task)
        kref_init(&autogroup_default.kref);
        init_rwsem(&autogroup_default.lock);
        init_task->signal->autogroup = &autogroup_default;
+       sched_autogroup_sysctl_init();
 }
 
 void autogroup_free(struct task_group *tg)
@@ -219,7 +220,6 @@ void sched_autogroup_exit(struct signal_struct *sig)
 static int __init setup_autogroup(char *str)
 {
        sysctl_sched_autogroup_enabled = 0;
-       sched_autogroup_sysctl_init();
 
        return 1;
 }
index 696c649..bfa7452 100644 (file)
@@ -6353,10 +6353,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
 
        /*
         * We must load prev->state once (task_struct::state is volatile), such
-        * that:
-        *
-        *  - we form a control dependency vs deactivate_task() below.
-        *  - ptrace_{,un}freeze_traced() can change ->state underneath us.
+        * that we form a control dependency vs deactivate_task() below.
         */
        prev_state = READ_ONCE(prev->__state);
        if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
index 8c5b74f..77b2048 100644 (file)
@@ -2927,7 +2927,7 @@ static void task_tick_numa(struct rq *rq, struct task_struct *curr)
        /*
         * We don't care about NUMA placement if we don't have memory.
         */
-       if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
+       if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
                return;
 
        /*
index e43bc2a..edb1dc9 100644 (file)
@@ -762,7 +762,10 @@ still_pending:
  */
 void signal_wake_up_state(struct task_struct *t, unsigned int state)
 {
+       lockdep_assert_held(&t->sighand->siglock);
+
        set_tsk_thread_flag(t, TIF_SIGPENDING);
+
        /*
         * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
         * case. We don't check t->state here because there is a race with it
@@ -884,7 +887,7 @@ static int check_kill_permission(int sig, struct kernel_siginfo *info,
 static void ptrace_trap_notify(struct task_struct *t)
 {
        WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
-       assert_spin_locked(&t->sighand->siglock);
+       lockdep_assert_held(&t->sighand->siglock);
 
        task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
        ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
@@ -930,9 +933,10 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
                for_each_thread(p, t) {
                        flush_sigqueue_mask(&flush, &t->pending);
                        task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
-                       if (likely(!(t->ptrace & PT_SEIZED)))
+                       if (likely(!(t->ptrace & PT_SEIZED))) {
+                               t->jobctl &= ~JOBCTL_STOPPED;
                                wake_up_state(t, __TASK_STOPPED);
-                       else
+                       } else
                                ptrace_trap_notify(t);
                }
 
@@ -1071,15 +1075,15 @@ static inline bool legacy_queue(struct sigpending *signals, int sig)
        return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
 }
 
-static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
-                       enum pid_type type, bool force)
+static int __send_signal_locked(int sig, struct kernel_siginfo *info,
+                               struct task_struct *t, enum pid_type type, bool force)
 {
        struct sigpending *pending;
        struct sigqueue *q;
        int override_rlimit;
        int ret = 0, result;
 
-       assert_spin_locked(&t->sighand->siglock);
+       lockdep_assert_held(&t->sighand->siglock);
 
        result = TRACE_SIGNAL_IGNORED;
        if (!prepare_signal(sig, t, force))
@@ -1212,8 +1216,8 @@ static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
        return ret;
 }
 
-static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
-                       enum pid_type type)
+int send_signal_locked(int sig, struct kernel_siginfo *info,
+                      struct task_struct *t, enum pid_type type)
 {
        /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
        bool force = false;
@@ -1245,7 +1249,7 @@ static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct
                        force = true;
                }
        }
-       return __send_signal(sig, info, t, type, force);
+       return __send_signal_locked(sig, info, t, type, force);
 }
 
 static void print_fatal_signal(int signr)
@@ -1281,12 +1285,6 @@ static int __init setup_print_fatal_signals(char *str)
 
 __setup("print-fatal-signals=", setup_print_fatal_signals);
 
-int
-__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
-{
-       return send_signal(sig, info, p, PIDTYPE_TGID);
-}
-
 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
                        enum pid_type type)
 {
@@ -1294,7 +1292,7 @@ int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p
        int ret = -ESRCH;
 
        if (lock_task_sighand(p, &flags)) {
-               ret = send_signal(sig, info, p, type);
+               ret = send_signal_locked(sig, info, p, type);
                unlock_task_sighand(p, &flags);
        }
 
@@ -1347,7 +1345,7 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
        if (action->sa.sa_handler == SIG_DFL &&
            (!t->ptrace || (handler == HANDLER_EXIT)))
                t->signal->flags &= ~SIGNAL_UNKILLABLE;
-       ret = send_signal(sig, info, t, PIDTYPE_PID);
+       ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
 
        return ret;
@@ -1567,7 +1565,7 @@ int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
 
        if (sig) {
                if (lock_task_sighand(p, &flags)) {
-                       ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
+                       ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
                        unlock_task_sighand(p, &flags);
                } else
                        ret = -ESRCH;
@@ -2114,7 +2112,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
         * parent's namespaces.
         */
        if (valid_signal(sig) && sig)
-               __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
+               __send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);
 
@@ -2184,7 +2182,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
        spin_lock_irqsave(&sighand->siglock, flags);
        if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
            !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
-               __group_send_sig_info(SIGCHLD, &info, parent);
+               send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
        /*
         * Even if SIGCHLD is not generated, we must wake up wait4 calls.
         */
@@ -2204,13 +2202,12 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
  * with.  If the code did not stop because the tracer is gone,
  * the stop signal remains unchanged unless clear_code.
  */
-static int ptrace_stop(int exit_code, int why, int clear_code,
-                       unsigned long message, kernel_siginfo_t *info)
+static int ptrace_stop(int exit_code, int why, unsigned long message,
+                      kernel_siginfo_t *info)
        __releases(&current->sighand->siglock)
        __acquires(&current->sighand->siglock)
 {
        bool gstop_done = false;
-       bool read_code = true;
 
        if (arch_ptrace_stop_needed()) {
                /*
@@ -2227,10 +2224,16 @@ static int ptrace_stop(int exit_code, int why, int clear_code,
        }
 
        /*
-        * schedule() will not sleep if there is a pending signal that
-        * can awaken the task.
+        * After this point ptrace_signal_wake_up or signal_wake_up
+        * will clear TASK_TRACED if ptrace_unlink happens or a fatal
+        * signal comes in.  Handle previous ptrace_unlinks and fatal
+        * signals here to prevent ptrace_stop sleeping in schedule.
         */
+       if (!current->ptrace || __fatal_signal_pending(current))
+               return exit_code;
+
        set_special_state(TASK_TRACED);
+       current->jobctl |= JOBCTL_TRACED;
 
        /*
         * We're committing to trapping.  TRACED should be visible before
@@ -2276,54 +2279,33 @@ static int ptrace_stop(int exit_code, int why, int clear_code,
 
        spin_unlock_irq(&current->sighand->siglock);
        read_lock(&tasklist_lock);
-       if (likely(current->ptrace)) {
-               /*
-                * Notify parents of the stop.
-                *
-                * While ptraced, there are two parents - the ptracer and
-                * the real_parent of the group_leader.  The ptracer should
-                * know about every stop while the real parent is only
-                * interested in the completion of group stop.  The states
-                * for the two don't interact with each other.  Notify
-                * separately unless they're gonna be duplicates.
-                */
+       /*
+        * Notify parents of the stop.
+        *
+        * While ptraced, there are two parents - the ptracer and
+        * the real_parent of the group_leader.  The ptracer should
+        * know about every stop while the real parent is only
+        * interested in the completion of group stop.  The states
+        * for the two don't interact with each other.  Notify
+        * separately unless they're gonna be duplicates.
+        */
+       if (current->ptrace)
                do_notify_parent_cldstop(current, true, why);
-               if (gstop_done && ptrace_reparented(current))
-                       do_notify_parent_cldstop(current, false, why);
+       if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
+               do_notify_parent_cldstop(current, false, why);
 
-               /*
-                * Don't want to allow preemption here, because
-                * sys_ptrace() needs this task to be inactive.
-                *
-                * XXX: implement read_unlock_no_resched().
-                */
-               preempt_disable();
-               read_unlock(&tasklist_lock);
-               cgroup_enter_frozen();
-               preempt_enable_no_resched();
-               freezable_schedule();
-               cgroup_leave_frozen(true);
-       } else {
-               /*
-                * By the time we got the lock, our tracer went away.
-                * Don't drop the lock yet, another tracer may come.
-                *
-                * If @gstop_done, the ptracer went away between group stop
-                * completion and here.  During detach, it would have set
-                * JOBCTL_STOP_PENDING on us and we'll re-enter
-                * TASK_STOPPED in do_signal_stop() on return, so notifying
-                * the real parent of the group stop completion is enough.
-                */
-               if (gstop_done)
-                       do_notify_parent_cldstop(current, false, why);
-
-               /* tasklist protects us from ptrace_freeze_traced() */
-               __set_current_state(TASK_RUNNING);
-               read_code = false;
-               if (clear_code)
-                       exit_code = 0;
-               read_unlock(&tasklist_lock);
-       }
+       /*
+        * Don't want to allow preemption here, because
+        * sys_ptrace() needs this task to be inactive.
+        *
+        * XXX: implement read_unlock_no_resched().
+        */
+       preempt_disable();
+       read_unlock(&tasklist_lock);
+       cgroup_enter_frozen();
+       preempt_enable_no_resched();
+       freezable_schedule();
+       cgroup_leave_frozen(true);
 
        /*
         * We are back.  Now reacquire the siglock before touching
@@ -2331,14 +2313,13 @@ static int ptrace_stop(int exit_code, int why, int clear_code,
         * any signal-sending on another CPU that wants to examine it.
         */
        spin_lock_irq(&current->sighand->siglock);
-       if (read_code)
-               exit_code = current->exit_code;
+       exit_code = current->exit_code;
        current->last_siginfo = NULL;
        current->ptrace_message = 0;
        current->exit_code = 0;
 
        /* LISTENING can be set only during STOP traps, clear it */
-       current->jobctl &= ~JOBCTL_LISTENING;
+       current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
 
        /*
         * Queued signals ignored us while we were stopped for tracing.
@@ -2360,7 +2341,7 @@ static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long mes
        info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
 
        /* Let the debugger run.  */
-       return ptrace_stop(exit_code, why, 1, message, &info);
+       return ptrace_stop(exit_code, why, message, &info);
 }
 
 int ptrace_notify(int exit_code, unsigned long message)
@@ -2471,6 +2452,7 @@ static bool do_signal_stop(int signr)
                if (task_participate_group_stop(current))
                        notify = CLD_STOPPED;
 
+               current->jobctl |= JOBCTL_STOPPED;
                set_special_state(TASK_STOPPED);
                spin_unlock_irq(&current->sighand->siglock);
 
@@ -2532,7 +2514,7 @@ static void do_jobctl_trap(void)
                                 CLD_STOPPED, 0);
        } else {
                WARN_ON_ONCE(!signr);
-               ptrace_stop(signr, CLD_STOPPED, 0, 0, NULL);
+               ptrace_stop(signr, CLD_STOPPED, 0, NULL);
        }
 }
 
@@ -2585,7 +2567,7 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
         * comment in dequeue_signal().
         */
        current->jobctl |= JOBCTL_STOP_DEQUEUED;
-       signr = ptrace_stop(signr, CLD_TRAPPED, 0, 0, info);
+       signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
 
        /* We're back.  Did the debugger cancel the sig?  */
        if (signr == 0)
@@ -2612,7 +2594,7 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
        /* If the (new) signal is now blocked, requeue it.  */
        if (sigismember(&current->blocked, signr) ||
            fatal_signal_pending(current)) {
-               send_signal(signr, info, current, type);
+               send_signal_locked(signr, info, current, type);
                signr = 0;
        }
 
@@ -4807,7 +4789,7 @@ void kdb_send_sig(struct task_struct *t, int sig)
                           "the deadlock.\n");
                return;
        }
-       ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
+       ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
        spin_unlock(&t->sighand->siglock);
        if (ret)
                kdb_printf("Fail to deliver Signal %d to process %d.\n",
index 0a97193..cb925e8 100644 (file)
@@ -870,7 +870,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
 {
        if (tsk->dl.dl_overrun) {
                tsk->dl.dl_overrun = 0;
-               __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+               send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
        }
 }
 
@@ -884,7 +884,7 @@ static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
                        rt ? "RT" : "CPU", hard ? "hard" : "soft",
                        current->comm, task_pid_nr(current));
        }
-       __group_send_sig_info(signo, SEND_SIG_PRIV, current);
+       send_signal_locked(signo, SEND_SIG_PRIV, current, PIDTYPE_TGID);
        return true;
 }
 
@@ -958,7 +958,7 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
                trace_itimer_expire(signo == SIGPROF ?
                                    ITIMER_PROF : ITIMER_VIRTUAL,
                                    task_tgid(tsk), cur_time);
-               __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
+               send_signal_locked(signo, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
        }
 
        if (it->expires && it->expires < *expires)
index 3fd5284..218cd95 100644 (file)
@@ -30,6 +30,7 @@ int ftrace_graph_active;
 /* Both enabled by default (can be cleared by function_graph tracer flags */
 static bool fgraph_sleep_time = true;
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 /*
  * archs can override this function if they must do something
  * to enable hook for graph tracer.
@@ -47,6 +48,7 @@ int __weak ftrace_disable_ftrace_graph_caller(void)
 {
        return 0;
 }
+#endif
 
 /**
  * ftrace_graph_stop - set to permanently disable function graph tracing
index 36c1233..b989736 100644 (file)
@@ -132,7 +132,7 @@ static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
 
        /* If SIGCLD is ignored do_wait won't populate the status. */
        kernel_sigaction(SIGCHLD, SIG_DFL);
-       pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
+       pid = user_mode_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
        if (pid < 0)
                sub_info->retval = pid;
        else
@@ -171,8 +171,8 @@ static void call_usermodehelper_exec_work(struct work_struct *work)
                 * want to pollute current->children, and we need a parent
                 * that always ignores SIGCHLD to ensure auto-reaping.
                 */
-               pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
-                                   CLONE_PARENT | SIGCHLD);
+               pid = user_mode_thread(call_usermodehelper_exec_async, sub_info,
+                                      CLONE_PARENT | SIGCHLD);
                if (pid < 0) {
                        sub_info->retval = pid;
                        umh_complete(sub_info);
index 9dae1f6..8303f4c 100644 (file)
@@ -28,7 +28,7 @@ static struct vfsmount *blob_to_mnt(const void *data, size_t len, const char *na
 
        file = file_open_root_mnt(mnt, name, O_CREAT | O_WRONLY, 0700);
        if (IS_ERR(file)) {
-               mntput(mnt);
+               kern_unmount(mnt);
                return ERR_CAST(file);
        }
 
@@ -38,7 +38,7 @@ static struct vfsmount *blob_to_mnt(const void *data, size_t len, const char *na
                if (err >= 0)
                        err = -ENOMEM;
                filp_close(file, NULL);
-               mntput(mnt);
+               kern_unmount(mnt);
                return ERR_PTR(err);
        }
 
index 079c72e..ca0b4f3 100644 (file)
@@ -1461,6 +1461,7 @@ int assoc_array_gc(struct assoc_array *array,
        struct assoc_array_ptr *cursor, *ptr;
        struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
        unsigned long nr_leaves_on_tree;
+       bool retained;
        int keylen, slot, nr_free, next_slot, i;
 
        pr_devel("-->%s()\n", __func__);
@@ -1536,6 +1537,7 @@ continue_node:
                goto descend;
        }
 
+retry_compress:
        pr_devel("-- compress node %p --\n", new_n);
 
        /* Count up the number of empty slots in this node and work out the
@@ -1553,6 +1555,7 @@ continue_node:
        pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
 
        /* See what we can fold in */
+       retained = false;
        next_slot = 0;
        for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
                struct assoc_array_shortcut *s;
@@ -1602,9 +1605,14 @@ continue_node:
                        pr_devel("[%d] retain node %lu/%d [nx %d]\n",
                                 slot, child->nr_leaves_on_branch, nr_free + 1,
                                 next_slot);
+                       retained = true;
                }
        }
 
+       if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
+               pr_devel("internal nodes remain despite enough space, retrying\n");
+               goto retry_compress;
+       }
        pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
 
        nr_leaves_on_tree = new_n->nr_leaves_on_branch;
index 0d5c2ec..b18e31e 100644 (file)
  * for the best explanations of this ordering.
  */
 
-int __bitmap_equal(const unsigned long *bitmap1,
-               const unsigned long *bitmap2, unsigned int bits)
+bool __bitmap_equal(const unsigned long *bitmap1,
+                   const unsigned long *bitmap2, unsigned int bits)
 {
        unsigned int k, lim = bits/BITS_PER_LONG;
        for (k = 0; k < lim; ++k)
                if (bitmap1[k] != bitmap2[k])
-                       return 0;
+                       return false;
 
        if (bits % BITS_PER_LONG)
                if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
-                       return 0;
+                       return false;
 
-       return 1;
+       return true;
 }
 EXPORT_SYMBOL(__bitmap_equal);
 
@@ -303,33 +303,33 @@ void __bitmap_replace(unsigned long *dst,
 }
 EXPORT_SYMBOL(__bitmap_replace);
 
-int __bitmap_intersects(const unsigned long *bitmap1,
-                       const unsigned long *bitmap2, unsigned int bits)
+bool __bitmap_intersects(const unsigned long *bitmap1,
+                        const unsigned long *bitmap2, unsigned int bits)
 {
        unsigned int k, lim = bits/BITS_PER_LONG;
        for (k = 0; k < lim; ++k)
                if (bitmap1[k] & bitmap2[k])
-                       return 1;
+                       return true;
 
        if (bits % BITS_PER_LONG)
                if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
-                       return 1;
-       return 0;
+                       return true;
+       return false;
 }
 EXPORT_SYMBOL(__bitmap_intersects);
 
-int __bitmap_subset(const unsigned long *bitmap1,
-                   const unsigned long *bitmap2, unsigned int bits)
+bool __bitmap_subset(const unsigned long *bitmap1,
+                    const unsigned long *bitmap2, unsigned int bits)
 {
        unsigned int k, lim = bits/BITS_PER_LONG;
        for (k = 0; k < lim; ++k)
                if (bitmap1[k] & ~bitmap2[k])
-                       return 0;
+                       return false;
 
        if (bits % BITS_PER_LONG)
                if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
-                       return 0;
-       return 1;
+                       return false;
+       return true;
 }
 EXPORT_SYMBOL(__bitmap_subset);
 
@@ -527,33 +527,39 @@ static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
  * cpumap_print_to_pagebuf() or directly by drivers to export hexadecimal
  * bitmask and decimal list to userspace by sysfs ABI.
  * Drivers might be using a normal attribute for this kind of ABIs. A
- * normal attribute typically has show entry as below:
- * static ssize_t example_attribute_show(struct device *dev,
+ * normal attribute typically has show entry as below::
+ *
+ *   static ssize_t example_attribute_show(struct device *dev,
  *             struct device_attribute *attr, char *buf)
- * {
+ *   {
  *     ...
  *     return bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max);
- * }
+ *   }
+ *
  * show entry of attribute has no offset and count parameters and this
  * means the file is limited to one page only.
  * bitmap_print_to_pagebuf() API works terribly well for this kind of
- * normal attribute with buf parameter and without offset, count:
- * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
+ * normal attribute with buf parameter and without offset, count::
+ *
+ *   bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
  *                        int nmaskbits)
- * {
- * }
+ *   {
+ *   }
+ *
  * The problem is once we have a large bitmap, we have a chance to get a
  * bitmask or list more than one page. Especially for list, it could be
  * as complex as 0,3,5,7,9,... We have no simple way to know it exact size.
  * It turns out bin_attribute is a way to break this limit. bin_attribute
- * has show entry as below:
- * static ssize_t
- * example_bin_attribute_show(struct file *filp, struct kobject *kobj,
+ * has show entry as below::
+ *
+ *   static ssize_t
+ *   example_bin_attribute_show(struct file *filp, struct kobject *kobj,
  *             struct bin_attribute *attr, char *buf,
  *             loff_t offset, size_t count)
- * {
+ *   {
  *     ...
- * }
+ *   }
+ *
  * With the new offset and count parameters, this makes sysfs ABI be able
  * to support file size more than one page. For example, offset could be
  * >= 4096.
@@ -577,6 +583,7 @@ static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
  * This function is not a replacement for sprintf() or bitmap_print_to_pagebuf().
  * It is intended to workaround sysfs limitations discussed above and should be
  * used carefully in general case for the following reasons:
+ *
  *  - Time complexity is O(nbits^2/count), comparing to O(nbits) for snprintf().
  *  - Memory complexity is O(nbits), comparing to O(1) for snprintf().
  *  - @off and @count are NOT offset and number of bits to print.
@@ -1505,5 +1512,59 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
                buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31));
 }
 EXPORT_SYMBOL(bitmap_to_arr32);
+#endif
+
+#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
+/**
+ * bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap
+ *     @bitmap: array of unsigned longs, the destination bitmap
+ *     @buf: array of u64 (in host byte order), the source bitmap
+ *     @nbits: number of bits in @bitmap
+ */
+void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits)
+{
+       int n;
+
+       for (n = nbits; n > 0; n -= 64) {
+               u64 val = *buf++;
 
+               *bitmap++ = val;
+               if (n > 32)
+                       *bitmap++ = val >> 32;
+       }
+
+       /*
+        * Clear tail bits in the last word beyond nbits.
+        *
+        * Negative index is OK because here we point to the word next
+        * to the last word of the bitmap, except for nbits == 0, which
+        * is tested implicitly.
+        */
+       if (nbits % BITS_PER_LONG)
+               bitmap[-1] &= BITMAP_LAST_WORD_MASK(nbits);
+}
+EXPORT_SYMBOL(bitmap_from_arr64);
+
+/**
+ * bitmap_to_arr64 - copy the contents of bitmap to a u64 array of bits
+ *     @buf: array of u64 (in host byte order), the dest bitmap
+ *     @bitmap: array of unsigned longs, the source bitmap
+ *     @nbits: number of bits in @bitmap
+ */
+void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits)
+{
+       const unsigned long *end = bitmap + BITS_TO_LONGS(nbits);
+
+       while (bitmap < end) {
+               *buf = *bitmap++;
+               if (bitmap < end)
+                       *buf |= (u64)(*bitmap++) << 32;
+               buf++;
+       }
+
+       /* Clear tail bits in the last element of array beyond nbits. */
+       if (nbits % 64)
+               buf[-1] &= GENMASK_ULL(nbits % 64, 0);
+}
+EXPORT_SYMBOL(bitmap_to_arr64);
 #endif
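
A small sketch of the intended round trip for the new 64-bit conversion helpers; the bitmap size and bit range are chosen arbitrarily:

	#define MY_NBITS	100

	static void my_bitmap_example(void)
	{
		DECLARE_BITMAP(bmap, MY_NBITS);
		u64 arr[BITS_TO_U64(MY_NBITS)];

		bitmap_zero(bmap, MY_NBITS);
		bitmap_set(bmap, 3, 10);		/* set bits 3..12 */

		/* Pack into a u64 array in host byte order and back; the
		 * out-of-line helpers above are only needed on 32-bit
		 * big-endian kernels, elsewhere this is a plain copy. */
		bitmap_to_arr64(arr, bmap, MY_NBITS);
		bitmap_from_arr64(bmap, arr, MY_NBITS);
	}
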
index 3aa454c..e22647f 100644 (file)
@@ -3,9 +3,9 @@
 #include <linux/module.h>
 #include <linux/random.h>
 
-int __next_node_in(int node, const nodemask_t *srcp)
+unsigned int __next_node_in(int node, const nodemask_t *srcp)
 {
-       int ret = __next_node(node, srcp);
+       unsigned int ret = __next_node(node, srcp);
 
        if (ret == MAX_NUMNODES)
                ret = __first_node(srcp);
index 71d315a..15bc5b6 100644 (file)
@@ -1,6 +1,5 @@
-/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * This file is provided under a dual BSD/GPLv2 license.
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
  *
  * SipHash: a fast short-input PRF
  * https://131002.net/siphash/
index 0c82f07..d5923a6 100644 (file)
@@ -585,6 +585,30 @@ static void __init test_bitmap_arr32(void)
        }
 }
 
+static void __init test_bitmap_arr64(void)
+{
+       unsigned int nbits, next_bit;
+       u64 arr[EXP1_IN_BITS / 64];
+       DECLARE_BITMAP(bmap2, EXP1_IN_BITS);
+
+       memset(arr, 0xa5, sizeof(arr));
+
+       for (nbits = 0; nbits < EXP1_IN_BITS; ++nbits) {
+               memset(bmap2, 0xff, sizeof(arr));
+               bitmap_to_arr64(arr, exp1, nbits);
+               bitmap_from_arr64(bmap2, arr, nbits);
+               expect_eq_bitmap(bmap2, exp1, nbits);
+
+               next_bit = find_next_bit(bmap2, round_up(nbits, BITS_PER_LONG), nbits);
+               if (next_bit < round_up(nbits, BITS_PER_LONG))
+                       pr_err("bitmap_copy_arr64(nbits == %d:"
+                               " tail is not safely cleared: %d\n", nbits, next_bit);
+
+               if (nbits < EXP1_IN_BITS - 64)
+                       expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5);
+       }
+}
+
 static void noinline __init test_mem_optimisations(void)
 {
        DECLARE_BITMAP(bmap1, 1024);
@@ -852,6 +876,7 @@ static void __init selftest(void)
        test_copy();
        test_replace();
        test_bitmap_arr32();
+       test_bitmap_arr64();
        test_bitmap_parse();
        test_bitmap_parselist();
        test_bitmap_printlist();
index 1bccd6c..c82b659 100644 (file)
@@ -31,9 +31,12 @@ MODULE_IMPORT_NS(TEST_FIRMWARE);
 #define TEST_FIRMWARE_NAME     "test-firmware.bin"
 #define TEST_FIRMWARE_NUM_REQS 4
 #define TEST_FIRMWARE_BUF_SIZE SZ_1K
+#define TEST_UPLOAD_MAX_SIZE   SZ_2K
+#define TEST_UPLOAD_BLK_SIZE   37      /* Avoid powers of two in testing */
 
 static DEFINE_MUTEX(test_fw_mutex);
 static const struct firmware *test_firmware;
+static LIST_HEAD(test_upload_list);
 
 struct test_batched_req {
        u8 idx;
@@ -63,6 +66,7 @@ struct test_batched_req {
  * @reqs: stores all requests information
  * @read_fw_idx: index of thread from which we want to read firmware results
  *     from through the read_fw trigger.
+ * @upload_name: firmware name to be used with upload_read sysfs node
  * @test_result: a test may use this to collect the result from the call
  *     of the request_firmware*() calls used in their tests. In order of
  *     priority we always keep first any setup error. If no setup errors were
@@ -101,6 +105,7 @@ struct test_config {
        bool send_uevent;
        u8 num_requests;
        u8 read_fw_idx;
+       char *upload_name;
 
        /*
         * These below don't belong here but we'll move them once we create
@@ -112,8 +117,34 @@ struct test_config {
                            struct device *device);
 };
 
+struct upload_inject_err {
+       const char *prog;
+       enum fw_upload_err err_code;
+};
+
+struct test_firmware_upload {
+       char *name;
+       struct list_head node;
+       char *buf;
+       size_t size;
+       bool cancel_request;
+       struct upload_inject_err inject;
+       struct fw_upload *fwl;
+};
+
 static struct test_config *test_fw_config;
 
+static struct test_firmware_upload *upload_lookup_name(const char *name)
+{
+       struct test_firmware_upload *tst;
+
+       list_for_each_entry(tst, &test_upload_list, node)
+               if (strncmp(name, tst->name, strlen(tst->name)) == 0)
+                       return tst;
+
+       return NULL;
+}
+
 static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
                                 size_t size, loff_t *offset)
 {
@@ -198,6 +229,7 @@ static int __test_firmware_config_init(void)
        test_fw_config->req_firmware = request_firmware;
        test_fw_config->test_result = 0;
        test_fw_config->reqs = NULL;
+       test_fw_config->upload_name = NULL;
 
        return 0;
 
@@ -277,6 +309,13 @@ static ssize_t config_show(struct device *dev,
                        test_fw_config->sync_direct ? "true" : "false");
        len += scnprintf(buf + len, PAGE_SIZE - len,
                        "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
+       if (test_fw_config->upload_name)
+               len += scnprintf(buf + len, PAGE_SIZE - len,
+                               "upload_name:\t%s\n",
+                               test_fw_config->upload_name);
+       else
+               len += scnprintf(buf + len, PAGE_SIZE - len,
+                               "upload_name:\tEMPTY\n");
 
        mutex_unlock(&test_fw_mutex);
 
@@ -392,6 +431,32 @@ static ssize_t config_name_show(struct device *dev,
 }
 static DEVICE_ATTR_RW(config_name);
 
+static ssize_t config_upload_name_store(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t count)
+{
+       struct test_firmware_upload *tst;
+       int ret = count;
+
+       mutex_lock(&test_fw_mutex);
+       tst = upload_lookup_name(buf);
+       if (tst)
+               test_fw_config->upload_name = tst->name;
+       else
+               ret = -EINVAL;
+       mutex_unlock(&test_fw_mutex);
+
+       return ret;
+}
+
+static ssize_t config_upload_name_show(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf)
+{
+       return config_test_show_str(buf, test_fw_config->upload_name);
+}
+static DEVICE_ATTR_RW(config_upload_name);
+
 static ssize_t config_num_requests_store(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
@@ -989,6 +1054,278 @@ out:
 }
 static DEVICE_ATTR_WO(trigger_batched_requests_async);
 
+static void upload_release(struct test_firmware_upload *tst)
+{
+       firmware_upload_unregister(tst->fwl);
+       kfree(tst->buf);
+       kfree(tst->name);
+       kfree(tst);
+}
+
+static void upload_release_all(void)
+{
+       struct test_firmware_upload *tst, *tmp;
+
+       list_for_each_entry_safe(tst, tmp, &test_upload_list, node) {
+               list_del(&tst->node);
+               upload_release(tst);
+       }
+       test_fw_config->upload_name = NULL;
+}
+
+/*
+ * This table is replicated from .../firmware_loader/sysfs_upload.c
+ * and needs to be kept in sync.
+ */
+static const char * const fw_upload_err_str[] = {
+       [FW_UPLOAD_ERR_NONE]         = "none",
+       [FW_UPLOAD_ERR_HW_ERROR]     = "hw-error",
+       [FW_UPLOAD_ERR_TIMEOUT]      = "timeout",
+       [FW_UPLOAD_ERR_CANCELED]     = "user-abort",
+       [FW_UPLOAD_ERR_BUSY]         = "device-busy",
+       [FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size",
+       [FW_UPLOAD_ERR_RW_ERROR]     = "read-write-error",
+       [FW_UPLOAD_ERR_WEAROUT]      = "flash-wearout",
+};
+
+static void upload_err_inject_error(struct test_firmware_upload *tst,
+                                   const u8 *p, const char *prog)
+{
+       enum fw_upload_err err;
+
+       for (err = FW_UPLOAD_ERR_NONE + 1; err < FW_UPLOAD_ERR_MAX; err++) {
+               if (strncmp(p, fw_upload_err_str[err],
+                           strlen(fw_upload_err_str[err])) == 0) {
+                       tst->inject.prog = prog;
+                       tst->inject.err_code = err;
+                       return;
+               }
+       }
+}
+
+static void upload_err_inject_prog(struct test_firmware_upload *tst,
+                                  const u8 *p)
+{
+       static const char * const progs[] = {
+               "preparing:", "transferring:", "programming:"
+       };
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(progs); i++) {
+               if (strncmp(p, progs[i], strlen(progs[i])) == 0) {
+                       upload_err_inject_error(tst, p + strlen(progs[i]),
+                                               progs[i]);
+                       return;
+               }
+       }
+}
+
+#define FIVE_MINUTES_MS        (5 * 60 * 1000)
+static enum fw_upload_err
+fw_upload_wait_on_cancel(struct test_firmware_upload *tst)
+{
+       int ms_delay;
+
+       for (ms_delay = 0; ms_delay < FIVE_MINUTES_MS; ms_delay += 100) {
+               msleep(100);
+               if (tst->cancel_request)
+                       return FW_UPLOAD_ERR_CANCELED;
+       }
+       return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err test_fw_upload_prepare(struct fw_upload *fwl,
+                                                const u8 *data, u32 size)
+{
+       struct test_firmware_upload *tst = fwl->dd_handle;
+       enum fw_upload_err ret = FW_UPLOAD_ERR_NONE;
+       const char *progress = "preparing:";
+
+       tst->cancel_request = false;
+
+       if (!size || size > TEST_UPLOAD_MAX_SIZE) {
+               ret = FW_UPLOAD_ERR_INVALID_SIZE;
+               goto err_out;
+       }
+
+       if (strncmp(data, "inject:", strlen("inject:")) == 0)
+               upload_err_inject_prog(tst, data + strlen("inject:"));
+
+       memset(tst->buf, 0, TEST_UPLOAD_MAX_SIZE);
+       tst->size = size;
+
+       if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
+           strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
+               return FW_UPLOAD_ERR_NONE;
+
+       if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
+               ret = fw_upload_wait_on_cancel(tst);
+       else
+               ret = tst->inject.err_code;
+
+err_out:
+       /*
+        * The cleanup op only executes if the prepare op succeeds.
+        * If the prepare op fails, it must do its own cleanup.
+        */
+       tst->inject.err_code = FW_UPLOAD_ERR_NONE;
+       tst->inject.prog = NULL;
+
+       return ret;
+}
+
+static enum fw_upload_err test_fw_upload_write(struct fw_upload *fwl,
+                                              const u8 *data, u32 offset,
+                                              u32 size, u32 *written)
+{
+       struct test_firmware_upload *tst = fwl->dd_handle;
+       const char *progress = "transferring:";
+       u32 blk_size;
+
+       if (tst->cancel_request)
+               return FW_UPLOAD_ERR_CANCELED;
+
+       blk_size = min_t(u32, TEST_UPLOAD_BLK_SIZE, size);
+       memcpy(tst->buf + offset, data + offset, blk_size);
+
+       *written = blk_size;
+
+       if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
+           strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
+               return FW_UPLOAD_ERR_NONE;
+
+       if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
+               return fw_upload_wait_on_cancel(tst);
+
+       return tst->inject.err_code;
+}
+
+static enum fw_upload_err test_fw_upload_complete(struct fw_upload *fwl)
+{
+       struct test_firmware_upload *tst = fwl->dd_handle;
+       const char *progress = "programming:";
+
+       if (tst->cancel_request)
+               return FW_UPLOAD_ERR_CANCELED;
+
+       if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
+           strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
+               return FW_UPLOAD_ERR_NONE;
+
+       if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
+               return fw_upload_wait_on_cancel(tst);
+
+       return tst->inject.err_code;
+}
+
+static void test_fw_upload_cancel(struct fw_upload *fwl)
+{
+       struct test_firmware_upload *tst = fwl->dd_handle;
+
+       tst->cancel_request = true;
+}
+
+static void test_fw_cleanup(struct fw_upload *fwl)
+{
+       struct test_firmware_upload *tst = fwl->dd_handle;
+
+       tst->inject.err_code = FW_UPLOAD_ERR_NONE;
+       tst->inject.prog = NULL;
+}
+
+static const struct fw_upload_ops upload_test_ops = {
+       .prepare = test_fw_upload_prepare,
+       .write = test_fw_upload_write,
+       .poll_complete = test_fw_upload_complete,
+       .cancel = test_fw_upload_cancel,
+       .cleanup = test_fw_cleanup
+};
+
+static ssize_t upload_register_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       struct test_firmware_upload *tst;
+       struct fw_upload *fwl;
+       char *name;
+       int ret;
+
+       name = kstrndup(buf, count, GFP_KERNEL);
+       if (!name)
+               return -ENOMEM;
+
+       mutex_lock(&test_fw_mutex);
+       tst = upload_lookup_name(name);
+       if (tst) {
+               ret = -EEXIST;
+               goto free_name;
+       }
+
+       tst = kzalloc(sizeof(*tst), GFP_KERNEL);
+       if (!tst) {
+               ret = -ENOMEM;
+               goto free_name;
+       }
+
+       tst->name = name;
+       tst->buf = kzalloc(TEST_UPLOAD_MAX_SIZE, GFP_KERNEL);
+       if (!tst->buf) {
+               ret = -ENOMEM;
+               goto free_tst;
+       }
+
+       fwl = firmware_upload_register(THIS_MODULE, dev, tst->name,
+                                      &upload_test_ops, tst);
+       if (IS_ERR(fwl)) {
+               ret = PTR_ERR(fwl);
+               goto free_buf;
+       }
+
+       tst->fwl = fwl;
+       list_add_tail(&tst->node, &test_upload_list);
+       mutex_unlock(&test_fw_mutex);
+       return count;
+
+free_buf:
+       kfree(tst->buf);
+
+free_tst:
+       kfree(tst);
+
+free_name:
+       mutex_unlock(&test_fw_mutex);
+       kfree(name);
+
+       return ret;
+}
+static DEVICE_ATTR_WO(upload_register);
+
+static ssize_t upload_unregister_store(struct device *dev,
+                                      struct device_attribute *attr,
+                                      const char *buf, size_t count)
+{
+       struct test_firmware_upload *tst;
+       int ret = count;
+
+       mutex_lock(&test_fw_mutex);
+       tst = upload_lookup_name(buf);
+       if (!tst) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (test_fw_config->upload_name == tst->name)
+               test_fw_config->upload_name = NULL;
+
+       list_del(&tst->node);
+       upload_release(tst);
+
+out:
+       mutex_unlock(&test_fw_mutex);
+       return ret;
+}
+static DEVICE_ATTR_WO(upload_unregister);
+
 static ssize_t test_result_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
@@ -1051,6 +1388,45 @@ out:
 }
 static DEVICE_ATTR_RO(read_firmware);
 
+static ssize_t upload_read_show(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       struct test_firmware_upload *tst = NULL;
+       struct test_firmware_upload *tst_iter;
+       int ret = -EINVAL;
+
+       if (!test_fw_config->upload_name) {
+               pr_err("Set config_upload_name before using upload_read\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&test_fw_mutex);
+       list_for_each_entry(tst_iter, &test_upload_list, node)
+               if (tst_iter->name == test_fw_config->upload_name) {
+                       tst = tst_iter;
+                       break;
+               }
+
+       if (!tst) {
+               pr_err("Firmware name not found: %s\n",
+                      test_fw_config->upload_name);
+               goto out;
+       }
+
+       if (tst->size > PAGE_SIZE) {
+               pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
+               goto out;
+       }
+
+       memcpy(buf, tst->buf, tst->size);
+       ret = tst->size;
+out:
+       mutex_unlock(&test_fw_mutex);
+       return ret;
+}
+static DEVICE_ATTR_RO(upload_read);
+
 #define TEST_FW_DEV_ATTR(name)          &dev_attr_##name.attr
 
 static struct attribute *test_dev_attrs[] = {
@@ -1066,6 +1442,7 @@ static struct attribute *test_dev_attrs[] = {
        TEST_FW_DEV_ATTR(config_sync_direct),
        TEST_FW_DEV_ATTR(config_send_uevent),
        TEST_FW_DEV_ATTR(config_read_fw_idx),
+       TEST_FW_DEV_ATTR(config_upload_name),
 
        /* These don't use the config at all - they could be ported! */
        TEST_FW_DEV_ATTR(trigger_request),
@@ -1082,6 +1459,9 @@ static struct attribute *test_dev_attrs[] = {
        TEST_FW_DEV_ATTR(release_all_firmware),
        TEST_FW_DEV_ATTR(test_result),
        TEST_FW_DEV_ATTR(read_firmware),
+       TEST_FW_DEV_ATTR(upload_read),
+       TEST_FW_DEV_ATTR(upload_register),
+       TEST_FW_DEV_ATTR(upload_unregister),
        NULL,
 };
 
@@ -1128,6 +1508,7 @@ static void __exit test_firmware_exit(void)
        mutex_lock(&test_fw_mutex);
        release_firmware(test_firmware);
        misc_deregister(&test_fw_misc_device);
+       upload_release_all();
        __test_firmware_config_free();
        kfree(test_fw_config);
        mutex_unlock(&test_fw_mutex);
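
The prepare op above recognises payloads of the form "inject:<phase>:<error>", where <phase> is one of the progress prefixes and <error> is a name from the fw_upload_err_str table, and arms the matching failure for that phase. A standalone, purely illustrative parser for that convention (userspace code, not part of the driver):

#include <stdio.h>
#include <string.h>

static const char * const phases[] = { "preparing:", "transferring:", "programming:" };
static const char * const errors[] = {
        "hw-error", "timeout", "user-abort", "device-busy",
        "invalid-file-size", "read-write-error", "flash-wearout",
};

/* Return 0 and set *phase/*err if the payload requests error injection. */
static int parse_inject(const char *data, const char **phase, const char **err)
{
        size_t i, j;

        if (strncmp(data, "inject:", strlen("inject:")) != 0)
                return -1;
        data += strlen("inject:");

        for (i = 0; i < sizeof(phases) / sizeof(phases[0]); i++) {
                if (strncmp(data, phases[i], strlen(phases[i])) != 0)
                        continue;
                for (j = 0; j < sizeof(errors) / sizeof(errors[0]); j++) {
                        if (strncmp(data + strlen(phases[i]), errors[j],
                                    strlen(errors[j])) == 0) {
                                *phase = phases[i];
                                *err = errors[j];
                                return 0;
                        }
                }
        }
        return -1;
}

int main(void)
{
        const char *phase, *err;

        if (parse_inject("inject:transferring:timeout", &phase, &err) == 0)
                printf("fail the %s phase with %s\n", phase, err);
        return 0;
}
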
index a6d854d..a96788d 100644 (file)
@@ -1,8 +1,7 @@
-/* Test cases for siphash.c
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
  *
- * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * This file is provided under a dual BSD/GPLv2 license.
+ * Test cases for siphash.c
  *
  * SipHash: a fast short-input PRF
  * https://131002.net/siphash/
index 338f160..c76ee66 100644 (file)
@@ -215,4 +215,15 @@ SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
 }
 
 #endif
+
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FADVISE64_64)
+
+COMPAT_SYSCALL_DEFINE6(fadvise64_64, int, fd, compat_arg_u64_dual(offset),
+                      compat_arg_u64_dual(len), int, advice)
+{
+       return ksys_fadvise64_64(fd, compat_arg_u64_glue(offset),
+                                compat_arg_u64_glue(len), advice);
+}
+
+#endif
 #endif
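
The compat entry point receives the 64-bit offset and length as pairs of 32-bit words and reassembles them before calling ksys_fadvise64_64(). A simplified userspace sketch of that gluing; it deliberately ignores the per-architecture argument padding and endianness handling that the real compat_arg_u64_dual()/compat_arg_u64_glue() macros take care of:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Reassemble a 64-bit value passed as two 32-bit halves. */
static uint64_t glue_u64(uint32_t lo, uint32_t hi)
{
        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        uint64_t offset = 0x123456789abcdef0ULL;
        uint32_t lo = (uint32_t)offset, hi = (uint32_t)(offset >> 32);

        printf("%" PRIx64 "\n", glue_u64(lo, hi));      /* 123456789abcdef0 */
        return 0;
}
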
index 7c468ac..a57e1be 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/cma.h>
 #include <linux/migrate.h>
 #include <linux/nospec.h>
+#include <linux/delayacct.h>
 
 #include <asm/page.h>
 #include <asm/pgalloc.h>
@@ -5230,6 +5231,8 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
        pte = huge_ptep_get(ptep);
        old_page = pte_page(pte);
 
+       delayacct_wpcopy_start();
+
 retry_avoidcopy:
        /*
         * If no-one else is actually using this page, we're the exclusive
@@ -5240,6 +5243,8 @@ retry_avoidcopy:
                        page_move_anon_rmap(old_page, vma);
                if (likely(!unshare))
                        set_huge_ptep_writable(vma, haddr, ptep);
+
+               delayacct_wpcopy_end();
                return 0;
        }
        VM_BUG_ON_PAGE(PageAnon(old_page) && PageAnonExclusive(old_page),
@@ -5309,6 +5314,7 @@ retry_avoidcopy:
                         * race occurs while re-acquiring page table
                         * lock, and our job is done.
                         */
+                       delayacct_wpcopy_end();
                        return 0;
                }
 
@@ -5367,6 +5373,8 @@ out_release_old:
        put_page(old_page);
 
        spin_lock(ptl); /* Caller expects lock to be held */
+
+       delayacct_wpcopy_end();
        return ret;
 }
 
index fcd9f78..1089ea8 100644 (file)
@@ -33,7 +33,7 @@ DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
 EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
 
 static enum vmemmap_optimize_mode vmemmap_optimize_mode =
-       IS_ENABLED(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON);
+       IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
 
 static void vmemmap_optimize_mode_switch(enum vmemmap_optimize_mode to)
 {
index 21dadf0..7a08914 100644 (file)
@@ -3090,6 +3090,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
        int page_copied = 0;
        struct mmu_notifier_range range;
 
+       delayacct_wpcopy_start();
+
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
 
@@ -3114,6 +3116,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                        put_page(new_page);
                        if (old_page)
                                put_page(old_page);
+
+                       delayacct_wpcopy_end();
                        return 0;
                }
        }
@@ -3220,12 +3224,16 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                        free_swap_cache(old_page);
                put_page(old_page);
        }
+
+       delayacct_wpcopy_end();
        return (page_copied && !unshare) ? VM_FAULT_WRITE : 0;
 oom_free_new:
        put_page(new_page);
 oom:
        if (old_page)
                put_page(old_page);
+
+       delayacct_wpcopy_end();
        return VM_FAULT_OOM;
 }
 
index 2b92e97..b870a65 100644 (file)
@@ -214,7 +214,7 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
 
        if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
                error = -EINVAL;
-               goto err_pfn_remap;
+               goto err_kasan;
        }
 
        mem_hotplug_begin();
index 8a70bca..3c6cf9e 100644 (file)
@@ -56,35 +56,6 @@ static int sysctl_panic_on_oom;
 static int sysctl_oom_kill_allocating_task;
 static int sysctl_oom_dump_tasks = 1;
 
-#ifdef CONFIG_SYSCTL
-static struct ctl_table vm_oom_kill_table[] = {
-       {
-               .procname       = "panic_on_oom",
-               .data           = &sysctl_panic_on_oom,
-               .maxlen         = sizeof(sysctl_panic_on_oom),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = SYSCTL_ZERO,
-               .extra2         = SYSCTL_TWO,
-       },
-       {
-               .procname       = "oom_kill_allocating_task",
-               .data           = &sysctl_oom_kill_allocating_task,
-               .maxlen         = sizeof(sysctl_oom_kill_allocating_task),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "oom_dump_tasks",
-               .data           = &sysctl_oom_dump_tasks,
-               .maxlen         = sizeof(sysctl_oom_dump_tasks),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
-       {}
-};
-#endif
-
 /*
  * Serializes oom killer invocations (out_of_memory()) from all contexts to
  * prevent from over eager oom killing (e.g. when the oom killer is invoked
@@ -729,6 +700,35 @@ static void queue_oom_reaper(struct task_struct *tsk)
        add_timer(&tsk->oom_reaper_timer);
 }
 
+#ifdef CONFIG_SYSCTL
+static struct ctl_table vm_oom_kill_table[] = {
+       {
+               .procname       = "panic_on_oom",
+               .data           = &sysctl_panic_on_oom,
+               .maxlen         = sizeof(sysctl_panic_on_oom),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = SYSCTL_ZERO,
+               .extra2         = SYSCTL_TWO,
+       },
+       {
+               .procname       = "oom_kill_allocating_task",
+               .data           = &sysctl_oom_kill_allocating_task,
+               .maxlen         = sizeof(sysctl_oom_kill_allocating_task),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "oom_dump_tasks",
+               .data           = &sysctl_oom_dump_tasks,
+               .maxlen         = sizeof(sysctl_oom_dump_tasks),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {}
+};
+#endif
+
 static int __init oom_init(void)
 {
        oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
index 6021f84..d200d41 100644 (file)
@@ -385,9 +385,9 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
                 * above do the rest. If migration is not possible, just fail.
                 */
                if (PageCompound(page)) {
-                       unsigned long nr_pages = compound_nr(page);
                        struct page *head = compound_head(page);
                        unsigned long head_pfn = page_to_pfn(head);
+                       unsigned long nr_pages = compound_nr(head);
 
                        if (head_pfn + nr_pages <= boundary_pfn) {
                                pfn = head_pfn + nr_pages;
index b78921b..415c39d 100644 (file)
@@ -749,6 +749,13 @@ SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
        return ksys_readahead(fd, offset, count);
 }
 
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_READAHEAD)
+COMPAT_SYSCALL_DEFINE4(readahead, int, fd, compat_arg_u64_dual(offset), size_t, count)
+{
+       return ksys_readahead(fd, compat_arg_u64_glue(offset), count);
+}
+#endif
+
 /**
  * readahead_expand - Expand a readahead request
  * @ractl: The request to be expanded
index da525bf..373d273 100644 (file)
@@ -2049,7 +2049,7 @@ static void __init init_cpu_node_state(void)
        int node;
 
        for_each_online_node(node) {
-               if (cpumask_weight(cpumask_of_node(node)) > 0)
+               if (!cpumask_empty(cpumask_of_node(node)))
                        node_set_state(node, N_CPU);
        }
 }
@@ -2081,7 +2081,7 @@ static int vmstat_cpu_dead(unsigned int cpu)
 
        refresh_zone_stat_thresholds();
        node_cpus = cpumask_of_node(node);
-       if (cpumask_weight(node_cpus) > 0)
+       if (!cpumask_empty(node_cpus))
                return 0;
 
        node_clear_state(node, N_CPU);
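
cpumask_empty() only needs to find a single set bit, whereas cpumask_weight() > 0 counts every bit first, so the conversion above is both clearer and cheaper. A rough userspace illustration of the difference; the mask layout and helpers are invented for the example:

#include <stdbool.h>
#include <stdio.h>

#define MASK_WORDS 16

/* Counts every set bit across the whole mask. */
static unsigned int mask_weight(const unsigned long *m)
{
        unsigned int i, w = 0;

        for (i = 0; i < MASK_WORDS; i++)
                w += __builtin_popcountl(m[i]);
        return w;
}

/* Stops at the first non-zero word. */
static bool mask_empty(const unsigned long *m)
{
        unsigned int i;

        for (i = 0; i < MASK_WORDS; i++)
                if (m[i])
                        return false;
        return true;
}

int main(void)
{
        unsigned long m[MASK_WORDS] = { 1UL };

        printf("weight=%u empty=%d\n", mask_weight(m), mask_empty(m));
        return 0;
}
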
index 77883b6..833cd37 100644 (file)
@@ -279,13 +279,13 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
                                grant_ref_t ref;
 
                                ref = priv->rings[i].intf->ref[j];
-                               gnttab_end_foreign_access(ref, 0);
+                               gnttab_end_foreign_access(ref, NULL);
                        }
                        free_pages_exact(priv->rings[i].data.in,
                                   1UL << (priv->rings[i].intf->ring_order +
                                           XEN_PAGE_SHIFT));
                }
-               gnttab_end_foreign_access(priv->rings[i].ref, 0);
+               gnttab_end_foreign_access(priv->rings[i].ref, NULL);
                free_page((unsigned long)priv->rings[i].intf);
        }
        kfree(priv->rings);
@@ -353,10 +353,10 @@ static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
 out:
        if (bytes) {
                for (i--; i >= 0; i--)
-                       gnttab_end_foreign_access(ring->intf->ref[i], 0);
+                       gnttab_end_foreign_access(ring->intf->ref[i], NULL);
                free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
        }
-       gnttab_end_foreign_access(ring->ref, 0);
+       gnttab_end_foreign_access(ring->ref, NULL);
        free_page((unsigned long)ring->intf);
        return ret;
 }
index a5781cf..e6ae11c 100644 (file)
@@ -20,7 +20,7 @@ config NET_NS_REFCNT_TRACKER
 
 config DEBUG_NET
        bool "Add generic networking debug"
-       depends on DEBUG_KERNEL
+       depends on DEBUG_KERNEL && NET
        help
          Enable extra sanity checks in networking.
          This is mostly used by fuzzers, but is safe to select.
index 116481e..95393bb 100644 (file)
@@ -62,12 +62,12 @@ static void ax25_free_sock(struct sock *sk)
  */
 static void ax25_cb_del(ax25_cb *ax25)
 {
+       spin_lock_bh(&ax25_list_lock);
        if (!hlist_unhashed(&ax25->ax25_node)) {
-               spin_lock_bh(&ax25_list_lock);
                hlist_del_init(&ax25->ax25_node);
-               spin_unlock_bh(&ax25_list_lock);
                ax25_cb_put(ax25);
        }
+       spin_unlock_bh(&ax25_list_lock);
 }
 
 /*
@@ -81,6 +81,7 @@ static void ax25_kill_by_device(struct net_device *dev)
 
        if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
                return;
+       ax25_dev->device_up = false;
 
        spin_lock_bh(&ax25_list_lock);
 again:
@@ -91,6 +92,7 @@ again:
                                spin_unlock_bh(&ax25_list_lock);
                                ax25_disconnect(s, ENETUNREACH);
                                s->ax25_dev = NULL;
+                               ax25_cb_del(s);
                                spin_lock_bh(&ax25_list_lock);
                                goto again;
                        }
@@ -103,6 +105,7 @@ again:
                                dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
                                ax25_dev_put(ax25_dev);
                        }
+                       ax25_cb_del(s);
                        release_sock(sk);
                        spin_lock_bh(&ax25_list_lock);
                        sock_put(sk);
@@ -995,9 +998,11 @@ static int ax25_release(struct socket *sock)
        if (sk->sk_type == SOCK_SEQPACKET) {
                switch (ax25->state) {
                case AX25_STATE_0:
-                       release_sock(sk);
-                       ax25_disconnect(ax25, 0);
-                       lock_sock(sk);
+                       if (!sock_flag(ax25->sk, SOCK_DEAD)) {
+                               release_sock(sk);
+                               ax25_disconnect(ax25, 0);
+                               lock_sock(sk);
+                       }
                        ax25_destroy_socket(ax25);
                        break;
 
@@ -1053,11 +1058,13 @@ static int ax25_release(struct socket *sock)
                ax25_destroy_socket(ax25);
        }
        if (ax25_dev) {
-               del_timer_sync(&ax25->timer);
-               del_timer_sync(&ax25->t1timer);
-               del_timer_sync(&ax25->t2timer);
-               del_timer_sync(&ax25->t3timer);
-               del_timer_sync(&ax25->idletimer);
+               if (!ax25_dev->device_up) {
+                       del_timer_sync(&ax25->timer);
+                       del_timer_sync(&ax25->t1timer);
+                       del_timer_sync(&ax25->t2timer);
+                       del_timer_sync(&ax25->t3timer);
+                       del_timer_sync(&ax25->idletimer);
+               }
                dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
                ax25_dev_put(ax25_dev);
        }
index b80fccb..95a76d5 100644 (file)
@@ -62,6 +62,7 @@ void ax25_dev_device_up(struct net_device *dev)
        ax25_dev->dev     = dev;
        dev_hold_track(dev, &ax25_dev->dev_tracker, GFP_ATOMIC);
        ax25_dev->forward = NULL;
+       ax25_dev->device_up = true;
 
        ax25_dev->values[AX25_VALUES_IPDEFMODE] = AX25_DEF_IPDEFMODE;
        ax25_dev->values[AX25_VALUES_AXDEFMODE] = AX25_DEF_AXDEFMODE;
index 3a476e4..9ff98f4 100644 (file)
@@ -268,7 +268,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
                del_timer_sync(&ax25->t3timer);
                del_timer_sync(&ax25->idletimer);
        } else {
-               if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
+               if (ax25->sk && !sock_flag(ax25->sk, SOCK_DESTROY))
                        ax25_stop_heartbeat(ax25);
                ax25_stop_t1timer(ax25);
                ax25_stop_t2timer(ax25);
index 5abb2ca..59a5c13 100644 (file)
@@ -2153,7 +2153,7 @@ int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
 
        bacpy(&entry->bdaddr, bdaddr);
        entry->bdaddr_type = type;
-       bitmap_from_u64(entry->flags, flags);
+       entry->flags = flags;
 
        list_add(&entry->list, list);
 
@@ -2634,7 +2634,7 @@ int hci_register_dev(struct hci_dev *hdev)
         * callback.
         */
        if (hdev->wakeup)
-               set_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, hdev->conn_flags);
+               hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
 
        hci_sock_dev_event(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);
index 635cc5f..38ecaf9 100644 (file)
@@ -482,7 +482,7 @@ static int add_to_accept_list(struct hci_request *req,
 
        /* During suspend, only wakeable devices can be in accept list */
        if (hdev->suspended &&
-           !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags))
+           !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
                return 0;
 
        *num_entries += 1;
index 4d2203c..286d676 100644 (file)
@@ -1637,7 +1637,7 @@ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
         * indicates that LL Privacy has been enabled and
         * HCI_OP_LE_SET_PRIVACY_MODE is supported.
         */
-       if (!test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, params->flags))
+       if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY))
                return 0;
 
        irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
@@ -1666,7 +1666,7 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
 
        /* During suspend, only wakeable devices can be in acceptlist */
        if (hdev->suspended &&
-           !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags))
+           !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
                return 0;
 
        /* Select filter policy to accept all advertising */
@@ -4888,7 +4888,7 @@ static int hci_update_event_filter_sync(struct hci_dev *hdev)
        hci_clear_event_filter_sync(hdev);
 
        list_for_each_entry(b, &hdev->accept_list, list) {
-               if (!test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, b->flags))
+               if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
                        continue;
 
                bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
index 74937a8..ae758ab 100644 (file)
@@ -4013,10 +4013,11 @@ static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
        memcpy(ev.uuid, rpa_resolution_uuid, 16);
        ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
 
+       // Do we need to be atomic with the conn_flags?
        if (enabled && privacy_mode_capable(hdev))
-               set_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
+               hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
        else
-               clear_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
+               hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
 
        return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
                                  &ev, sizeof(ev),
@@ -4435,8 +4436,7 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
 
        hci_dev_lock(hdev);
 
-       bitmap_to_arr32(&supported_flags, hdev->conn_flags,
-                       __HCI_CONN_NUM_FLAGS);
+       supported_flags = hdev->conn_flags;
 
        memset(&rp, 0, sizeof(rp));
 
@@ -4447,8 +4447,7 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
                if (!br_params)
                        goto done;
 
-               bitmap_to_arr32(&current_flags, br_params->flags,
-                               __HCI_CONN_NUM_FLAGS);
+               current_flags = br_params->flags;
        } else {
                params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
                                                le_addr_type(cp->addr.type));
@@ -4456,8 +4455,7 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
                if (!params)
                        goto done;
 
-               bitmap_to_arr32(&current_flags, params->flags,
-                               __HCI_CONN_NUM_FLAGS);
+               current_flags = params->flags;
        }
 
        bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
@@ -4502,8 +4500,8 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
                   &cp->addr.bdaddr, cp->addr.type,
                   __le32_to_cpu(current_flags));
 
-       bitmap_to_arr32(&supported_flags, hdev->conn_flags,
-                       __HCI_CONN_NUM_FLAGS);
+       // We should take hci_dev_lock() early, I think.. conn_flags can change
+       supported_flags = hdev->conn_flags;
 
        if ((supported_flags | current_flags) != supported_flags) {
                bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
@@ -4519,7 +4517,7 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
                                                              cp->addr.type);
 
                if (br_params) {
-                       bitmap_from_u64(br_params->flags, current_flags);
+                       br_params->flags = current_flags;
                        status = MGMT_STATUS_SUCCESS;
                } else {
                        bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
@@ -4529,15 +4527,11 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
                params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
                                                le_addr_type(cp->addr.type));
                if (params) {
-                       DECLARE_BITMAP(flags, __HCI_CONN_NUM_FLAGS);
-
-                       bitmap_from_u64(flags, current_flags);
-
                        /* Devices using RPAs can only be programmed in the
                         * acceptlist if LL Privacy has been enabled, otherwise they
                         * cannot mark HCI_CONN_FLAG_REMOTE_WAKEUP.
                         */
-                       if (test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, flags) &&
+                       if ((current_flags & HCI_CONN_FLAG_REMOTE_WAKEUP) &&
                            !use_ll_privacy(hdev) &&
                            hci_find_irk_by_addr(hdev, &params->addr,
                                                 params->addr_type)) {
@@ -4546,14 +4540,13 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
                                goto unlock;
                        }
 
-                       bitmap_from_u64(params->flags, current_flags);
+                       params->flags = current_flags;
                        status = MGMT_STATUS_SUCCESS;
 
                        /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
                         * has been set.
                         */
-                       if (test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY,
-                                    params->flags))
+                       if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
                                hci_update_passive_scan(hdev);
                } else {
                        bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
@@ -7154,8 +7147,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
                params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
                                                addr_type);
                if (params)
-                       bitmap_to_arr32(&current_flags, params->flags,
-                                       __HCI_CONN_NUM_FLAGS);
+                       current_flags = params->flags;
        }
 
        err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
@@ -7164,8 +7156,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
 
 added:
        device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
-       bitmap_to_arr32(&supported_flags, hdev->conn_flags,
-                       __HCI_CONN_NUM_FLAGS);
+       supported_flags = hdev->conn_flags;
        device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
                             supported_flags, current_flags);
 
index 7057f8d..1daf95e 100644 (file)
@@ -906,7 +906,6 @@ int crush_do_rule(const struct crush_map *map,
        int recurse_to_leaf;
        int wsize = 0;
        int osize;
-       int *tmp;
        const struct crush_rule *rule;
        __u32 step;
        int i, j;
@@ -1073,9 +1072,7 @@ int crush_do_rule(const struct crush_map *map,
                                memcpy(o, c, osize*sizeof(*o));
 
                        /* swap o and w arrays */
-                       tmp = o;
-                       o = w;
-                       w = tmp;
+                       swap(o, w);
                        wsize = osize;
                        break;
 
index 47b6c1f..5462528 100644 (file)
@@ -1579,7 +1579,7 @@ static void neigh_managed_work(struct work_struct *work)
        list_for_each_entry(neigh, &tbl->managed_list, managed_list)
                neigh_event_send_probe(neigh, NULL, false);
        queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
-                          NEIGH_VAR(&tbl->parms, DELAY_PROBE_TIME));
+                          max(NEIGH_VAR(&tbl->parms, DELAY_PROBE_TIME), HZ));
        write_unlock_bh(&tbl->lock);
 }
 
index 3231af7..2e2a9ec 100644 (file)
@@ -2706,12 +2706,15 @@ static void tcp_mtup_probe_success(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
+       u64 val;
 
-       /* FIXME: breaks with very large cwnd */
        tp->prior_ssthresh = tcp_current_ssthresh(sk);
-       tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) *
-                            tcp_mss_to_mtu(sk, tp->mss_cache) /
-                            icsk->icsk_mtup.probe_size);
+
+       val = (u64)tcp_snd_cwnd(tp) * tcp_mss_to_mtu(sk, tp->mss_cache);
+       do_div(val, icsk->icsk_mtup.probe_size);
+       DEBUG_NET_WARN_ON_ONCE((u32)val != val);
+       tcp_snd_cwnd_set(tp, max_t(u32, 1U, val));
+
        tp->snd_cwnd_cnt = 0;
        tp->snd_cwnd_stamp = tcp_jiffies32;
        tp->snd_ssthresh = tcp_current_ssthresh(sk);
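
The rewritten tcp_mtup_probe_success() scales the congestion window in a 64-bit intermediate before dividing by the probe size and clamps the result to at least one segment, so a very large cwnd can no longer overflow the old 32-bit product. A userspace sketch of that arithmetic with arbitrary values, not the kernel helper:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Rescale cwnd by old_mtu/probe_size without a 32-bit overflow. */
static uint32_t rescale_cwnd(uint32_t cwnd, uint32_t old_mtu, uint32_t probe_size)
{
        uint64_t val = (uint64_t)cwnd * old_mtu;

        val /= probe_size;
        return (uint32_t)(val > 1 ? val : 1);   /* never below one segment */
}

int main(void)
{
        /* 4000000 * 1460 does not fit in 32 bits; the u64 product does. */
        printf("%" PRIu32 "\n", rescale_cwnd(4000000, 1460, 1400));
        return 0;
}
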
index dac2650..fe8f23b 100644 (file)
@@ -1207,8 +1207,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
        key->l3index = l3index;
        key->flags = flags;
        memcpy(&key->addr, addr,
-              (family == AF_INET6) ? sizeof(struct in6_addr) :
-                                     sizeof(struct in_addr));
+              (IS_ENABLED(CONFIG_IPV6) && family == AF_INET6) ? sizeof(struct in6_addr) :
+                                                                sizeof(struct in_addr));
        hlist_add_head_rcu(&key->node, &md5sig->head);
        return 0;
 }
index b4b2284..1c05443 100644 (file)
@@ -4115,8 +4115,8 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
        res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL,
                                  NULL);
        if (!res) {
-               __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
-               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+               TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
                if (unlikely(tcp_passive_fastopen(sk)))
                        tcp_sk(sk)->total_retrans++;
                trace_tcp_retransmit_synack(sk, req);
index ca0aa74..1b19325 100644 (file)
@@ -5586,7 +5586,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_IOAM6_ID] = cnf->ioam6_id;
        array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
        array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
-       array[DEVCONF_ACCEPT_UNSOLICITED_NA] = cnf->accept_unsolicited_na;
+       array[DEVCONF_ACCEPT_UNTRACKED_NA] = cnf->accept_untracked_na;
 }
 
 static inline size_t inet6_ifla6_size(void)
@@ -7038,8 +7038,8 @@ static const struct ctl_table addrconf_sysctl[] = {
                .extra2         = (void *)SYSCTL_ONE,
        },
        {
-               .procname       = "accept_unsolicited_na",
-               .data           = &ipv6_devconf.accept_unsolicited_na,
+               .procname       = "accept_untracked_na",
+               .data           = &ipv6_devconf.accept_untracked_na,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
index 254adda..b0dfe97 100644 (file)
@@ -979,7 +979,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
        struct inet6_dev *idev = __in6_dev_get(dev);
        struct inet6_ifaddr *ifp;
        struct neighbour *neigh;
-       bool create_neigh;
+       u8 new_state;
 
        if (skb->len < sizeof(struct nd_msg)) {
                ND_PRINTK(2, warn, "NA: packet too short\n");
@@ -1000,7 +1000,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
        /* For some 802.11 wireless deployments (and possibly other networks),
         * there will be a NA proxy and unsolicited packets are attacks
         * and thus should not be accepted.
-        * drop_unsolicited_na takes precedence over accept_unsolicited_na
+        * drop_unsolicited_na takes precedence over accept_untracked_na
         */
        if (!msg->icmph.icmp6_solicited && idev &&
            idev->cnf.drop_unsolicited_na)
@@ -1041,25 +1041,33 @@ static void ndisc_recv_na(struct sk_buff *skb)
                in6_ifa_put(ifp);
                return;
        }
+
+       neigh = neigh_lookup(&nd_tbl, &msg->target, dev);
+
        /* RFC 9131 updates original Neighbour Discovery RFC 4861.
-        * An unsolicited NA can now create a neighbour cache entry
-        * on routers if it has Target LL Address option.
+        * NAs with Target LL Address option without a corresponding
+        * entry in the neighbour cache can now create a STALE neighbour
+        * cache entry on routers.
+        *
+        *   entry accept  fwding  solicited        behaviour
+        * ------- ------  ------  ---------    ----------------------
+        * present      X       X         0     Set state to STALE
+        * present      X       X         1     Set state to REACHABLE
+        *  absent      0       X         X     Do nothing
+        *  absent      1       0         X     Do nothing
+        *  absent      1       1         X     Add a new STALE entry
         *
-        * drop   accept  fwding                   behaviour
-        * ----   ------  ------  ----------------------------------------------
-        *    1        X       X  Drop NA packet and don't pass up the stack
-        *    0        0       X  Pass NA packet up the stack, don't update NC
-        *    0        1       0  Pass NA packet up the stack, don't update NC
-        *    0        1       1  Pass NA packet up the stack, and add a STALE
-        *                          NC entry
         * Note that we don't do a (daddr == all-routers-mcast) check.
         */
-       create_neigh = !msg->icmph.icmp6_solicited && lladdr &&
-                      idev && idev->cnf.forwarding &&
-                      idev->cnf.accept_unsolicited_na;
-       neigh = __neigh_lookup(&nd_tbl, &msg->target, dev, create_neigh);
+       new_state = msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE;
+       if (!neigh && lladdr &&
+           idev && idev->cnf.forwarding &&
+           idev->cnf.accept_untracked_na) {
+               neigh = neigh_create(&nd_tbl, &msg->target, dev);
+               new_state = NUD_STALE;
+       }
 
-       if (neigh) {
+       if (neigh && !IS_ERR(neigh)) {
                u8 old_flags = neigh->flags;
                struct net *net = dev_net(dev);
 
@@ -1079,7 +1087,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
                }
 
                ndisc_update(dev, neigh, lladdr,
-                            msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE,
+                            new_state,
                             NEIGH_UPDATE_F_WEAK_OVERRIDE|
                             (msg->icmph.icmp6_override ? NEIGH_UPDATE_F_OVERRIDE : 0)|
                             NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
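
The decision table in the new comment can be read as one small function: an existing entry moves to REACHABLE or STALE depending on the solicited flag, and a missing entry is only created (as STALE) when the NA carries a target link-layer address and both accept_untracked_na and forwarding are enabled. An illustrative standalone restatement; the NUD_* values and names below are local stand-ins, not the kernel enums:

#include <stdbool.h>
#include <stdio.h>

enum { NUD_NONE, NUD_STALE, NUD_REACHABLE };

static int na_new_state(bool entry_present, bool solicited, bool has_lladdr,
                        bool accept_untracked, bool forwarding)
{
        if (entry_present)
                return solicited ? NUD_REACHABLE : NUD_STALE;
        if (has_lladdr && accept_untracked && forwarding)
                return NUD_STALE;       /* create a new STALE entry */
        return NUD_NONE;                /* do nothing */
}

int main(void)
{
        printf("%d %d %d\n",
               na_new_state(true, true, true, false, false),    /* REACHABLE */
               na_new_state(true, false, true, false, false),   /* STALE */
               na_new_state(false, true, true, true, true));    /* new STALE */
        return 0;
}
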
index ff033d1..ecf3a55 100644 (file)
@@ -101,6 +101,9 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        ipc6.sockc.tsflags = sk->sk_tsflags;
        ipc6.sockc.mark = sk->sk_mark;
 
+       memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_oif = oif;
+
        if (msg->msg_controllen) {
                struct ipv6_txoptions opt = {};
 
@@ -112,17 +115,14 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                        return err;
 
                /* Changes to txoptions and flow info are not implemented, yet.
-                * Drop the options, fl6 is wiped below.
+                * Drop the options.
                 */
                ipc6.opt = NULL;
        }
 
-       memset(&fl6, 0, sizeof(fl6));
-
        fl6.flowi6_proto = IPPROTO_ICMPV6;
        fl6.saddr = np->saddr;
        fl6.daddr = *daddr;
-       fl6.flowi6_oif = oif;
        fl6.flowi6_mark = ipc6.sockc.mark;
        fl6.flowi6_uid = sk->sk_uid;
        fl6.fl6_icmp_type = user_icmph.icmp6_type;
index 11e1a3a..fb16d7c 100644 (file)
@@ -2826,10 +2826,12 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
        void *ext_hdrs[SADB_EXT_MAX];
        int err;
 
-       err = pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
-                             BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
-       if (err)
-               return err;
+       /* Non-zero return value of pfkey_broadcast() does not always signal
+        * an error and even on an actual error we may still want to process
+        * the message so rather ignore the return value.
+        */
+       pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
+                       BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
 
        memset(ext_hdrs, 0, sizeof(ext_hdrs));
        err = parse_exthdrs(skb, hdr, ext_hdrs);
index e345244..d8246e0 100644 (file)
@@ -1749,12 +1749,9 @@ int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata)
 
        if (new_ctx->replace_state == IEEE80211_CHANCTX_REPLACE_NONE) {
                if (old_ctx)
-                       err = ieee80211_vif_use_reserved_reassign(sdata);
-               else
-                       err = ieee80211_vif_use_reserved_assign(sdata);
+                       return ieee80211_vif_use_reserved_reassign(sdata);
 
-               if (err)
-                       return err;
+               return ieee80211_vif_use_reserved_assign(sdata);
        }
 
        /*
index 12fc9cd..746be13 100644 (file)
@@ -222,12 +222,18 @@ err_register:
 }
 
 static void nft_netdev_unregister_hooks(struct net *net,
-                                       struct list_head *hook_list)
+                                       struct list_head *hook_list,
+                                       bool release_netdev)
 {
-       struct nft_hook *hook;
+       struct nft_hook *hook, *next;
 
-       list_for_each_entry(hook, hook_list, list)
+       list_for_each_entry_safe(hook, next, hook_list, list) {
                nf_unregister_net_hook(net, &hook->ops);
+               if (release_netdev) {
+                       list_del(&hook->list);
+                       kfree_rcu(hook, rcu);
+               }
+       }
 }
 
 static int nf_tables_register_hook(struct net *net,
@@ -253,9 +259,10 @@ static int nf_tables_register_hook(struct net *net,
        return nf_register_net_hook(net, &basechain->ops);
 }
 
-static void nf_tables_unregister_hook(struct net *net,
-                                     const struct nft_table *table,
-                                     struct nft_chain *chain)
+static void __nf_tables_unregister_hook(struct net *net,
+                                       const struct nft_table *table,
+                                       struct nft_chain *chain,
+                                       bool release_netdev)
 {
        struct nft_base_chain *basechain;
        const struct nf_hook_ops *ops;
@@ -270,11 +277,19 @@ static void nf_tables_unregister_hook(struct net *net,
                return basechain->type->ops_unregister(net, ops);
 
        if (nft_base_chain_netdev(table->family, basechain->ops.hooknum))
-               nft_netdev_unregister_hooks(net, &basechain->hook_list);
+               nft_netdev_unregister_hooks(net, &basechain->hook_list,
+                                           release_netdev);
        else
                nf_unregister_net_hook(net, &basechain->ops);
 }
 
+static void nf_tables_unregister_hook(struct net *net,
+                                     const struct nft_table *table,
+                                     struct nft_chain *chain)
+{
+       return __nf_tables_unregister_hook(net, table, chain, false);
+}
+
 static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *trans)
 {
        struct nftables_pernet *nft_net = nft_pernet(net);
@@ -2873,27 +2888,31 @@ static struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
 
        err = nf_tables_expr_parse(ctx, nla, &expr_info);
        if (err < 0)
-               goto err1;
+               goto err_expr_parse;
+
+       err = -EOPNOTSUPP;
+       if (!(expr_info.ops->type->flags & NFT_EXPR_STATEFUL))
+               goto err_expr_stateful;
 
        err = -ENOMEM;
        expr = kzalloc(expr_info.ops->size, GFP_KERNEL_ACCOUNT);
        if (expr == NULL)
-               goto err2;
+               goto err_expr_stateful;
 
        err = nf_tables_newexpr(ctx, &expr_info, expr);
        if (err < 0)
-               goto err3;
+               goto err_expr_new;
 
        return expr;
-err3:
+err_expr_new:
        kfree(expr);
-err2:
+err_expr_stateful:
        owner = expr_info.ops->type->owner;
        if (expr_info.ops->type->release_ops)
                expr_info.ops->type->release_ops(expr_info.ops);
 
        module_put(owner);
-err1:
+err_expr_parse:
        return ERR_PTR(err);
 }
 
@@ -4242,6 +4261,9 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
        u32 len;
        int err;
 
+       if (desc->field_count >= ARRAY_SIZE(desc->field_len))
+               return -E2BIG;
+
        err = nla_parse_nested_deprecated(tb, NFTA_SET_FIELD_MAX, attr,
                                          nft_concat_policy, NULL);
        if (err < 0)
@@ -4251,9 +4273,8 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
                return -EINVAL;
 
        len = ntohl(nla_get_be32(tb[NFTA_SET_FIELD_LEN]));
-
-       if (len * BITS_PER_BYTE / 32 > NFT_REG32_COUNT)
-               return -E2BIG;
+       if (!len || len > U8_MAX)
+               return -EINVAL;
 
        desc->field_len[desc->field_count++] = len;
 
@@ -4264,7 +4285,8 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
                               const struct nlattr *nla)
 {
        struct nlattr *attr;
-       int rem, err;
+       u32 num_regs = 0;
+       int rem, err, i;
 
        nla_for_each_nested(attr, nla, rem) {
                if (nla_type(attr) != NFTA_LIST_ELEM)
@@ -4275,6 +4297,12 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
                        return err;
        }
 
+       for (i = 0; i < desc->field_count; i++)
+               num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
+
+       if (num_regs > NFT_REG32_COUNT)
+               return -E2BIG;
+
        return 0;
 }
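
The added check budgets the concatenation against the register file: each field consumes DIV_ROUND_UP(field_len, sizeof(u32)) 32-bit registers and the sum must not exceed NFT_REG32_COUNT. A userspace restatement of that arithmetic; the field widths are example values and the register count should be taken from the kernel headers rather than this sketch:

#include <stdbool.h>
#include <stdio.h>

/* Sum the 32-bit registers each field needs and compare against the limit. */
static bool fields_fit(const unsigned int *field_len, unsigned int count,
                       unsigned int reg32_count)
{
        unsigned int i, num_regs = 0;

        for (i = 0; i < count; i++)
                num_regs += (field_len[i] + 3) / 4;     /* DIV_ROUND_UP(len, 4) */

        return num_regs <= reg32_count;
}

int main(void)
{
        unsigned int lens[] = { 16, 2 };        /* e.g. an IPv6 address and a port */

        /* pass the kernel's real NFT_REG32_COUNT as the limit */
        printf("fits=%d\n", fields_fit(lens, 2, 16));
        return 0;
}
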
 
@@ -5344,8 +5372,10 @@ static int nf_tables_getsetelem(struct sk_buff *skb,
 
        nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
                err = nft_get_set_elem(&ctx, set, attr);
-               if (err < 0)
+               if (err < 0) {
+                       NL_SET_BAD_ATTR(extack, attr);
                        break;
+               }
        }
 
        return err;
@@ -5413,9 +5443,6 @@ struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx,
                return expr;
 
        err = -EOPNOTSUPP;
-       if (!(expr->ops->type->flags & NFT_EXPR_STATEFUL))
-               goto err_set_elem_expr;
-
        if (expr->ops->type->flags & NFT_EXPR_GC) {
                if (set->flags & NFT_SET_TIMEOUT)
                        goto err_set_elem_expr;
@@ -6125,8 +6152,10 @@ static int nf_tables_newsetelem(struct sk_buff *skb,
 
        nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
                err = nft_add_set_elem(&ctx, set, attr, info->nlh->nlmsg_flags);
-               if (err < 0)
+               if (err < 0) {
+                       NL_SET_BAD_ATTR(extack, attr);
                        return err;
+               }
        }
 
        if (nft_net->validate_state == NFT_VALIDATE_DO)
@@ -6396,8 +6425,10 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
 
        nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
                err = nft_del_setelem(&ctx, set, attr);
-               if (err < 0)
+               if (err < 0) {
+                       NL_SET_BAD_ATTR(extack, attr);
                        break;
+               }
        }
        return err;
 }
@@ -7291,13 +7322,25 @@ static void nft_unregister_flowtable_hook(struct net *net,
                                    FLOW_BLOCK_UNBIND);
 }
 
-static void nft_unregister_flowtable_net_hooks(struct net *net,
-                                              struct list_head *hook_list)
+static void __nft_unregister_flowtable_net_hooks(struct net *net,
+                                                struct list_head *hook_list,
+                                                bool release_netdev)
 {
-       struct nft_hook *hook;
+       struct nft_hook *hook, *next;
 
-       list_for_each_entry(hook, hook_list, list)
+       list_for_each_entry_safe(hook, next, hook_list, list) {
                nf_unregister_net_hook(net, &hook->ops);
+               if (release_netdev) {
+                       list_del(&hook->list);
+                       kfree_rcu(hook);
+               }
+       }
+}
+
+static void nft_unregister_flowtable_net_hooks(struct net *net,
+                                              struct list_head *hook_list)
+{
+       __nft_unregister_flowtable_net_hooks(net, hook_list, false);
 }
 
 static int nft_register_flowtable_net_hooks(struct net *net,
@@ -9739,9 +9782,10 @@ static void __nft_release_hook(struct net *net, struct nft_table *table)
        struct nft_chain *chain;
 
        list_for_each_entry(chain, &table->chains, list)
-               nf_tables_unregister_hook(net, table, chain);
+               __nf_tables_unregister_hook(net, table, chain, true);
        list_for_each_entry(flowtable, &table->flowtables, list)
-               nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list);
+               __nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list,
+                                                    true);
 }
 
 static void __nft_release_hooks(struct net *net)
@@ -9880,7 +9924,11 @@ static int __net_init nf_tables_init_net(struct net *net)
 
 static void __net_exit nf_tables_pre_exit_net(struct net *net)
 {
+       struct nftables_pernet *nft_net = nft_pernet(net);
+
+       mutex_lock(&nft_net->commit_mutex);
        __nft_release_hooks(net);
+       mutex_unlock(&nft_net->commit_mutex);
 }
 
 static void __net_exit nf_tables_exit_net(struct net *net)
index ad3bbe3..2f7c477 100644 (file)
@@ -45,7 +45,6 @@ MODULE_DESCRIPTION("Netfilter messages via netlink socket");
 static unsigned int nfnetlink_pernet_id __read_mostly;
 
 struct nfnl_net {
-       unsigned int ctnetlink_listeners;
        struct sock *nfnl;
 };
 
@@ -673,18 +672,8 @@ static int nfnetlink_bind(struct net *net, int group)
 
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
        if (type == NFNL_SUBSYS_CTNETLINK) {
-               struct nfnl_net *nfnlnet = nfnl_pernet(net);
-
                nfnl_lock(NFNL_SUBSYS_CTNETLINK);
-
-               if (WARN_ON_ONCE(nfnlnet->ctnetlink_listeners == UINT_MAX)) {
-                       nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
-                       return -EOVERFLOW;
-               }
-
-               nfnlnet->ctnetlink_listeners++;
-               if (nfnlnet->ctnetlink_listeners == 1)
-                       WRITE_ONCE(net->ct.ctnetlink_has_listener, true);
+               WRITE_ONCE(net->ct.ctnetlink_has_listener, true);
                nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
        }
 #endif
@@ -694,15 +683,12 @@ static int nfnetlink_bind(struct net *net, int group)
 static void nfnetlink_unbind(struct net *net, int group)
 {
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
-       int type = nfnl_group2type[group];
-
-       if (type == NFNL_SUBSYS_CTNETLINK) {
-               struct nfnl_net *nfnlnet = nfnl_pernet(net);
+       if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
+               return;
 
+       if (nfnl_group2type[group] == NFNL_SUBSYS_CTNETLINK) {
                nfnl_lock(NFNL_SUBSYS_CTNETLINK);
-               WARN_ON_ONCE(nfnlnet->ctnetlink_listeners == 0);
-               nfnlnet->ctnetlink_listeners--;
-               if (nfnlnet->ctnetlink_listeners == 0)
+               if (!nfnetlink_has_listeners(net, group))
                        WRITE_ONCE(net->ct.ctnetlink_has_listener, false);
                nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
        }
index f069c24..af15102 100644 (file)
@@ -35,12 +35,13 @@ static unsigned int nfct_timeout_id __read_mostly;
 
 struct ctnl_timeout {
        struct list_head        head;
+       struct list_head        free_head;
        struct rcu_head         rcu_head;
        refcount_t              refcnt;
        char                    name[CTNL_TIMEOUT_NAME_MAX];
-       struct nf_ct_timeout    timeout;
 
-       struct list_head        free_head;
+       /* must be at the end */
+       struct nf_ct_timeout    timeout;
 };
 
 struct nfct_timeout_pernet {
index a16cf47..a25c88b 100644 (file)
@@ -232,19 +232,21 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
        switch (nft_pf(pkt)) {
        case NFPROTO_IPV4:
                fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
-               fl.u.ip4.saddr = ct->tuplehash[dir].tuple.dst.u3.ip;
+               fl.u.ip4.saddr = ct->tuplehash[!dir].tuple.src.u3.ip;
                fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
                fl.u.ip4.flowi4_iif = this_dst->dev->ifindex;
                fl.u.ip4.flowi4_tos = RT_TOS(ip_hdr(pkt->skb)->tos);
                fl.u.ip4.flowi4_mark = pkt->skb->mark;
+               fl.u.ip4.flowi4_flags = FLOWI_FLAG_ANYSRC;
                break;
        case NFPROTO_IPV6:
                fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
-               fl.u.ip6.saddr = ct->tuplehash[dir].tuple.dst.u3.in6;
+               fl.u.ip6.saddr = ct->tuplehash[!dir].tuple.src.u3.in6;
                fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
                fl.u.ip6.flowi6_iif = this_dst->dev->ifindex;
                fl.u.ip6.flowlabel = ip6_flowinfo(ipv6_hdr(pkt->skb));
                fl.u.ip6.flowi6_mark = pkt->skb->mark;
+               fl.u.ip6.flowi6_flags = FLOWI_FLAG_ANYSRC;
                break;
        }
 
index 04ea8b9..981addb 100644 (file)
@@ -213,6 +213,8 @@ static int nft_limit_pkts_clone(struct nft_expr *dst, const struct nft_expr *src
        struct nft_limit_priv_pkts *priv_dst = nft_expr_priv(dst);
        struct nft_limit_priv_pkts *priv_src = nft_expr_priv(src);
 
+       priv_dst->cost = priv_src->cost;
+
        return nft_limit_clone(&priv_dst->limit, &priv_src->limit);
 }
 
index 6ff3e10..eb2c095 100644 (file)
@@ -975,7 +975,7 @@ static void nfc_release(struct device *d)
                        kfree(se);
        }
 
-       ida_simple_remove(&nfc_index_ida, dev->idx);
+       ida_free(&nfc_index_ida, dev->idx);
 
        kfree(dev);
 }
@@ -1066,7 +1066,7 @@ struct nfc_dev *nfc_allocate_device(const struct nfc_ops *ops,
        if (!dev)
                return NULL;
 
-       rc = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
+       rc = ida_alloc(&nfc_index_ida, GFP_KERNEL);
        if (rc < 0)
                goto err_free_dev;
        dev->idx = rc;
index 677f9cf..ca6e92a 100644 (file)
@@ -1935,8 +1935,10 @@ static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
        /* Move network header to the right position for VLAN tagged packets */
        if (likely(skb->dev->type == ARPHRD_ETHER) &&
            eth_type_vlan(skb->protocol) &&
-           __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
-               skb_set_network_header(skb, depth);
+           __vlan_get_protocol(skb, skb->protocol, &depth) != 0) {
+               if (pskb_may_pull(skb, depth))
+                       skb_set_network_header(skb, depth);
+       }
 
        skb_probe_transport_header(skb);
 }
index 8af9d6e..e013253 100644 (file)
@@ -548,7 +548,7 @@ tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
                break;
 #endif
        default:
-               return -1;
+               return false;
        }
 
        if (ip6h->hop_limit <= 1)
index a201bf2..433bb5a 100644 (file)
@@ -2161,6 +2161,7 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
 
 not_found:
        ini->smcr_version &= ~SMC_V2;
+       ini->smcrv2.ib_dev_v2 = NULL;
        ini->check_smcrv2 = false;
 }
 
index 5c731f2..53f63bf 100644 (file)
@@ -82,7 +82,7 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
                /* abnormal termination */
                if (!rc)
                        smc_wr_tx_put_slot(link,
-                                          (struct smc_wr_tx_pend_priv *)pend);
+                                          (struct smc_wr_tx_pend_priv *)(*pend));
                rc = -EPIPE;
        }
        return rc;
index 281ddb8..190a4de 100644 (file)
@@ -1121,6 +1121,7 @@ static bool
 rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 {
+       struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct xdr_stream *xdr = &rep->rr_stream;
        __be32 *p;
 
@@ -1144,6 +1145,10 @@ rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
        if (*p != cpu_to_be32(RPC_CALL))
                return false;
 
+       /* No bc service. */
+       if (xprt->bc_serv == NULL)
+               return false;
+
        /* Now that we are sure this is a backchannel call,
         * advance to the RPC header.
         */
index 6d39ca0..932c87b 100644 (file)
@@ -259,9 +259,8 @@ static int tipc_enable_bearer(struct net *net, const char *name,
        u32 i;
 
        if (!bearer_name_validate(name, &b_names)) {
-               errstr = "illegal name";
                NL_SET_ERR_MSG(extack, "Illegal name");
-               goto rejected;
+               return res;
        }
 
        if (prio > TIPC_MAX_LINK_PRI && prio != TIPC_MEDIA_LINK_PRI) {
index d4935b3..555ab35 100644 (file)
@@ -273,6 +273,7 @@ static int xfrm4_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
  */
 static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
 {
+       bool small_ipv6 = (skb->protocol == htons(ETH_P_IPV6)) && (skb->len <= IPV6_MIN_MTU);
        struct dst_entry *dst = skb_dst(skb);
        struct iphdr *top_iph;
        int flags;
@@ -303,7 +304,7 @@ static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
        if (flags & XFRM_STATE_NOECN)
                IP_ECN_clear(top_iph);
 
-       top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
+       top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) || small_ipv6 ?
                0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
 
        top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));
index 3514c21..ece44b7 100644 (file)
@@ -16,6 +16,10 @@ pound := \#
 dot-target = $(dir $@).$(notdir $@)
 
 ###
+# Name of target with a '.tmp_' as filename prefix. foo/bar.o => foo/.tmp_bar.o
+tmp-target = $(dir $@).tmp_$(notdir $@)
+
+###
 # The temporary file to save gcc -MMD generated dependencies must not
 # contain a comma
 depfile = $(subst $(comma),_,$(dot-target).d)
@@ -138,9 +142,11 @@ check-FORCE = $(if $(filter FORCE, $^),,$(warning FORCE prerequisite is missing)
 if-changed-cond = $(newer-prereqs)$(cmd-check)$(check-FORCE)
 
 # Execute command if command has changed or prerequisite(s) are updated.
-if_changed = $(if $(if-changed-cond),                                        \
+if_changed = $(if $(if-changed-cond),$(cmd_and_savecmd),@:)
+
+cmd_and_savecmd =                                                            \
        $(cmd);                                                              \
-       printf '%s\n' 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd, @:)
+       printf '%s\n' 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd
 
 # Execute the command and also postprocess generated .d dependencies file.
 if_changed_dep = $(if $(if-changed-cond),$(cmd_and_fixdep),@:)
index 0640050..1f01ac6 100644 (file)
@@ -88,10 +88,6 @@ endif
 targets-for-modules := $(foreach x, o mod $(if $(CONFIG_TRIM_UNUSED_KSYMS), usyms), \
                                $(patsubst %.o, %.$x, $(filter %.o, $(obj-m))))
 
-ifneq ($(CONFIG_LTO_CLANG)$(CONFIG_X86_KERNEL_IBT),)
-targets-for-modules += $(patsubst %.o, %.prelink.o, $(filter %.o, $(obj-m)))
-endif
-
 ifdef need-modorder
 targets-for-modules += $(obj)/modules.order
 endif
@@ -152,8 +148,18 @@ $(obj)/%.ll: $(src)/%.c FORCE
 # The C file is compiled and updated dependency information is generated.
 # (See cmd_cc_o_c + relevant part of rule_cc_o_c)
 
+is-single-obj-m = $(and $(part-of-module),$(filter $@, $(obj-m)),y)
+
+# When a module consists of a single object, there is no reason to keep LLVM IR.
+# Make $(LD) convert LLVM IR to ELF here.
+ifdef CONFIG_LTO_CLANG
+cmd_ld_single_m = $(if $(is-single-obj-m), ; $(LD) $(ld_flags) -r -o $(tmp-target) $@; mv $(tmp-target) $@)
+endif
+
 quiet_cmd_cc_o_c = CC $(quiet_modtag)  $@
-      cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $< $(cmd_objtool)
+      cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $< \
+               $(cmd_ld_single_m) \
+               $(cmd_objtool)
 
 ifdef CONFIG_MODVERSIONS
 # When module versioning is enabled the following steps are executed:
@@ -204,54 +210,25 @@ cmd_record_mcount = $(if $(findstring $(strip $(CC_FLAGS_FTRACE)),$(_c_flags)),
        $(sub_cmd_record_mcount))
 endif # CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT
 
-ifdef CONFIG_OBJTOOL
-
-objtool := $(objtree)/tools/objtool/objtool
-
-objtool_args =                                                         \
-       $(if $(CONFIG_HAVE_JUMP_LABEL_HACK), --hacks=jump_label)        \
-       $(if $(CONFIG_HAVE_NOINSTR_HACK), --hacks=noinstr)              \
-       $(if $(CONFIG_X86_KERNEL_IBT), --ibt)                           \
-       $(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount)             \
-       $(if $(CONFIG_UNWINDER_ORC), --orc)                             \
-       $(if $(CONFIG_RETPOLINE), --retpoline)                          \
-       $(if $(CONFIG_SLS), --sls)                                      \
-       $(if $(CONFIG_STACK_VALIDATION), --stackval)                    \
-       $(if $(CONFIG_HAVE_STATIC_CALL_INLINE), --static-call)          \
-       --uaccess                                                       \
-       $(if $(linked-object), --link)                                  \
-       $(if $(part-of-module), --module)                               \
-       $(if $(CONFIG_GCOV_KERNEL), --no-unreachable)
-
-cmd_objtool = $(if $(objtool-enabled), ; $(objtool) $(objtool_args) $@)
-cmd_gen_objtooldep = $(if $(objtool-enabled), { echo ; echo '$@: $$(wildcard $(objtool))' ; } >> $(dot-target).cmd)
-
-endif # CONFIG_OBJTOOL
-
-ifneq ($(CONFIG_LTO_CLANG)$(CONFIG_X86_KERNEL_IBT),)
-
-# Skip objtool for LLVM bitcode
-$(obj)/%.o: objtool-enabled :=
-
-else
-
 # 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
 # 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file
 # 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file
 
-$(obj)/%.o: objtool-enabled = $(if $(filter-out y%, \
-       $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n),y)
+is-standard-object = $(if $(filter-out y%, $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n),y)
 
-endif
+$(obj)/%.o: objtool-enabled = $(if $(is-standard-object),$(if $(delay-objtool),$(is-single-obj-m),y))
 
 ifdef CONFIG_TRIM_UNUSED_KSYMS
 cmd_gen_ksymdeps = \
        $(CONFIG_SHELL) $(srctree)/scripts/gen_ksymdeps.sh $@ >> $(dot-target).cmd
 endif
 
+cmd_check_local_export = $(srctree)/scripts/check-local-export $@
+
 define rule_cc_o_c
        $(call cmd_and_fixdep,cc_o_c)
        $(call cmd,gen_ksymdeps)
+       $(call cmd,check_local_export)
        $(call cmd,checksrc)
        $(call cmd,checkdoc)
        $(call cmd,gen_objtooldep)
@@ -262,6 +239,7 @@ endef
 define rule_as_o_S
        $(call cmd_and_fixdep,as_o_S)
        $(call cmd,gen_ksymdeps)
+       $(call cmd,check_local_export)
        $(call cmd,gen_objtooldep)
        $(call cmd,gen_symversions_S)
 endef
@@ -271,27 +249,10 @@ $(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE
        $(call if_changed_rule,cc_o_c)
        $(call cmd,force_checksrc)
 
-ifneq ($(CONFIG_LTO_CLANG)$(CONFIG_X86_KERNEL_IBT),)
-# Module .o files may contain LLVM bitcode, compile them into native code
-# before ELF processing
-quiet_cmd_cc_prelink_modules = LD [M]  $@
-      cmd_cc_prelink_modules =                                         \
-       $(LD) $(ld_flags) -r -o $@                                      \
-               --whole-archive $(filter-out FORCE,$^)                  \
-               $(cmd_objtool)
-
-# objtool was skipped for LLVM bitcode, run it now that we have compiled
-# modules into native code
-$(obj)/%.prelink.o: objtool-enabled = y
-$(obj)/%.prelink.o: part-of-module := y
-$(obj)/%.prelink.o: linked-object := y
-
-$(obj)/%.prelink.o: $(obj)/%.o FORCE
-       $(call if_changed,cc_prelink_modules)
-endif
-
-cmd_mod = echo $(addprefix $(obj)/, $(call real-search, $*.o, .o, -objs -y -m)) | \
-       $(AWK) -v RS='( |\n)' '!x[$$0]++' > $@
+# To make this rule robust against the "Argument list too long" error,
+# add the $(obj)/ prefix via a shell command.
+cmd_mod = echo $(call real-search, $*.o, .o, -objs -y -m) | \
+       $(AWK) -v RS='( |\n)' '!x[$$0]++ { print("$(obj)/"$$0) }' > $@
 
 $(obj)/%.mod: FORCE
        $(call if_changed,mod)
@@ -299,7 +260,7 @@ $(obj)/%.mod: FORCE
 # List module undefined symbols
 cmd_undefined_syms = $(NM) $< | sed -n 's/^  *U //p' > $@
 
-$(obj)/%.usyms: $(obj)/%$(mod-prelink-ext).o FORCE
+$(obj)/%.usyms: $(obj)/%.o FORCE
        $(call if_changed,undefined_syms)
 
 quiet_cmd_cc_lst_c = MKLST   $@
@@ -392,9 +353,14 @@ $(subdir-modorder): $(obj)/%/modules.order: $(obj)/% ;
 #
 # Rule to compile a set of .o files into one .a file (without symbol table)
 #
+# To make this rule robust against the "Argument list too long" error,
+# strip the $(obj)/ prefix and restore it via a shell command.
 
 quiet_cmd_ar_builtin = AR      $@
-      cmd_ar_builtin = rm -f $@; $(AR) cDPrST $@ $(real-prereqs)
+      cmd_ar_builtin = rm -f $@; \
+               echo $(patsubst $(obj)/%,%,$(real-prereqs)) | \
+               sed -E 's:([^ ]+):$(obj)/\1:g' | \
+               xargs $(AR) cDPrST $@
 
 $(obj)/built-in.a: $(real-obj-y) FORCE
        $(call if_changed,ar_builtin)
@@ -421,18 +387,18 @@ $(obj)/modules.order: $(obj-m) FORCE
 $(obj)/lib.a: $(lib-y) FORCE
        $(call if_changed,ar)
 
-ifneq ($(CONFIG_LTO_CLANG)$(CONFIG_X86_KERNEL_IBT),)
-quiet_cmd_link_multi-m = AR [M]  $@
-cmd_link_multi-m =                                             \
-       rm -f $@;                                               \
-       $(AR) cDPrsT $@ @$(patsubst %.o,%.mod,$@)
-else
-quiet_cmd_link_multi-m = LD [M]  $@
-      cmd_link_multi-m = $(LD) $(ld_flags) -r -o $@ @$(patsubst %.o,%.mod,$@)
-endif
+quiet_cmd_ld_multi_m = LD [M]  $@
+      cmd_ld_multi_m = $(LD) $(ld_flags) -r -o $@ @$(patsubst %.o,%.mod,$@) $(cmd_objtool)
+
+define rule_ld_multi_m
+       $(call cmd_and_savecmd,ld_multi_m)
+       $(call cmd,gen_objtooldep)
+endef
 
+$(multi-obj-m): objtool-enabled := $(delay-objtool)
+$(multi-obj-m): part-of-module := y
 $(multi-obj-m): %.o: %.mod FORCE
-       $(call if_changed,link_multi-m)
+       $(call if_changed_rule,ld_multi_m)
 $(call multi_depend, $(multi-obj-m), .o, -objs -y -m)
 
 targets := $(filter-out $(PHONY), $(targets))
index 0453a19..d142577 100644 (file)
@@ -225,12 +225,31 @@ dtc_cpp_flags  = -Wp,-MMD,$(depfile).pre.tmp -nostdinc                    \
                 $(addprefix -I,$(DTC_INCLUDE))                          \
                 -undef -D__DTS__
 
-ifneq ($(CONFIG_LTO_CLANG)$(CONFIG_X86_KERNEL_IBT),)
-# With CONFIG_LTO_CLANG, .o files in modules might be LLVM bitcode, so we
-# need to run LTO to compile them into native code (.lto.o) before further
-# processing.
-mod-prelink-ext := .prelink
-endif
+ifdef CONFIG_OBJTOOL
+
+objtool := $(objtree)/tools/objtool/objtool
+
+objtool_args =                                                         \
+       $(if $(CONFIG_HAVE_JUMP_LABEL_HACK), --hacks=jump_label)        \
+       $(if $(CONFIG_HAVE_NOINSTR_HACK), --hacks=noinstr)              \
+       $(if $(CONFIG_X86_KERNEL_IBT), --ibt)                           \
+       $(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount)             \
+       $(if $(CONFIG_UNWINDER_ORC), --orc)                             \
+       $(if $(CONFIG_RETPOLINE), --retpoline)                          \
+       $(if $(CONFIG_SLS), --sls)                                      \
+       $(if $(CONFIG_STACK_VALIDATION), --stackval)                    \
+       $(if $(CONFIG_HAVE_STATIC_CALL_INLINE), --static-call)          \
+       $(if $(CONFIG_HAVE_UACCESS_VALIDATION), --uaccess)              \
+       $(if $(delay-objtool), --link)                                  \
+       $(if $(part-of-module), --module)                               \
+       $(if $(CONFIG_GCOV_KERNEL), --no-unreachable)
+
+delay-objtool := $(or $(CONFIG_LTO_CLANG),$(CONFIG_X86_KERNEL_IBT))
+
+cmd_objtool = $(if $(objtool-enabled), ; $(objtool) $(objtool_args) $@)
+cmd_gen_objtooldep = $(if $(objtool-enabled), { echo ; echo '$@: $$(wildcard $(objtool))' ; } >> $(dot-target).cmd)
+
+endif # CONFIG_OBJTOOL
 
 # Useful for describing the dependency of composite objects
 # Usage:
index 7f39599..35100e9 100644 (file)
@@ -9,7 +9,7 @@ __modfinal:
 include include/config/auto.conf
 include $(srctree)/scripts/Kbuild.include
 
-# for c_flags and mod-prelink-ext
+# for c_flags
 include $(srctree)/scripts/Makefile.lib
 
 # find all modules listed in modules.order
@@ -54,9 +54,8 @@ if_changed_except = $(if $(call newer_prereqs_except,$(2))$(cmd-check),      \
        $(cmd);                                                              \
        printf '%s\n' 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd, @:)
 
-
 # Re-generate module BTFs if either module's .ko or vmlinux changed
-$(modules): %.ko: %$(mod-prelink-ext).o %.mod.o scripts/module.lds $(if $(KBUILD_BUILTIN),vmlinux) FORCE
+$(modules): %.ko: %.o %.mod.o scripts/module.lds $(if $(KBUILD_BUILTIN),vmlinux) FORCE
        +$(call if_changed_except,ld_ko_o,vmlinux)
 ifdef CONFIG_DEBUG_INFO_BTF_MODULES
        +$(if $(newer-prereqs),$(call cmd,btf_ko))
index 48585c4..9116064 100644 (file)
@@ -41,9 +41,6 @@ __modpost:
 include include/config/auto.conf
 include $(srctree)/scripts/Kbuild.include
 
-# for mod-prelink-ext
-include $(srctree)/scripts/Makefile.lib
-
 MODPOST = scripts/mod/modpost                                                          \
        $(if $(CONFIG_MODVERSIONS),-m)                                                  \
        $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a)                                        \
@@ -87,8 +84,7 @@ obj := $(KBUILD_EXTMOD)
 src := $(obj)
 
 # Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
-include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
-             $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
+include $(or $(wildcard $(src)/Kbuild), $(src)/Makefile)
 
 # modpost option for external modules
 MODPOST += -e
@@ -118,8 +114,6 @@ $(input-symdump):
        @echo >&2 '         Modules may not have dependencies or modversions.'
        @echo >&2 '         You may get many unresolved symbol warnings.'
 
-modules := $(sort $(shell cat $(MODORDER)))
-
 # KBUILD_MODPOST_WARN can be set to avoid error out in case of undefined symbols
 ifneq ($(KBUILD_MODPOST_WARN)$(filter-out $(existing-input-symdump), $(input-symdump)),)
 MODPOST += -w
@@ -128,9 +122,9 @@ endif
 # Read out modules.order to pass in modpost.
 # Otherwise, allmodconfig would fail with "Argument list too long".
 quiet_cmd_modpost = MODPOST $@
-      cmd_modpost = sed 's/\.ko$$/$(mod-prelink-ext)\.o/' $< | $(MODPOST) -T -
+      cmd_modpost = sed 's/ko$$/o/' $< | $(MODPOST) -T -
 
-$(output-symdump): $(MODORDER) $(input-symdump) $(modules:.ko=$(mod-prelink-ext).o) FORCE
+$(output-symdump): $(MODORDER) $(input-symdump) FORCE
        $(call if_changed,modpost)
 
 targets += $(output-symdump)
diff --git a/scripts/Makefile.vmlinux_o b/scripts/Makefile.vmlinux_o
new file mode 100644 (file)
index 0000000..3c97a15
--- /dev/null
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+PHONY := __default
+__default: vmlinux.o
+
+include include/config/auto.conf
+include $(srctree)/scripts/Kbuild.include
+
+# for objtool
+include $(srctree)/scripts/Makefile.lib
+
+# Generate a linker script to ensure correct ordering of initcalls for Clang LTO
+# ---------------------------------------------------------------------------
+
+quiet_cmd_gen_initcalls_lds = GEN     $@
+      cmd_gen_initcalls_lds = \
+       $(PYTHON3) $(srctree)/scripts/jobserver-exec \
+       $(PERL) $(real-prereqs) > $@
+
+.tmp_initcalls.lds: $(srctree)/scripts/generate_initcall_order.pl \
+               $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS) FORCE
+       $(call if_changed,gen_initcalls_lds)
+
+targets := .tmp_initcalls.lds
+
+ifdef CONFIG_LTO_CLANG
+initcalls-lds := .tmp_initcalls.lds
+endif
+
+# objtool for vmlinux.o
+# ---------------------------------------------------------------------------
+#
+# For LTO and IBT, objtool doesn't run on individual translation units.
+# Run everything on vmlinux instead.
+
+objtool-enabled := $(or $(delay-objtool),$(CONFIG_NOINSTR_VALIDATION))
+
+# Reuse objtool_args defined in scripts/Makefile.lib if LTO or IBT is enabled.
+#
+# Add some more flags as needed.
+# --no-unreachable and --link might be added twice, but it is fine.
+#
+# Expand objtool_args to a simple variable to avoid circular reference.
+
+objtool_args := \
+       $(if $(delay-objtool),$(objtool_args)) \
+       $(if $(CONFIG_NOINSTR_VALIDATION), --noinstr) \
+       $(if $(CONFIG_GCOV_KERNEL), --no-unreachable) \
+       --link
+
+# Link of vmlinux.o used for section mismatch analysis
+# ---------------------------------------------------------------------------
+
+quiet_cmd_ld_vmlinux.o = LD      $@
+      cmd_ld_vmlinux.o = \
+       $(LD) ${KBUILD_LDFLAGS} -r -o $@ \
+       $(addprefix -T , $(initcalls-lds)) \
+       --whole-archive $(KBUILD_VMLINUX_OBJS) --no-whole-archive \
+       --start-group $(KBUILD_VMLINUX_LIBS) --end-group \
+       $(cmd_objtool)
+
+define rule_ld_vmlinux.o
+       $(call cmd_and_savecmd,ld_vmlinux.o)
+       $(call cmd,gen_objtooldep)
+endef
+
+vmlinux.o: $(initcalls-lds) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS) FORCE
+       $(call if_changed_rule,ld_vmlinux.o)
+
+targets += vmlinux.o
+
+# Add FORCE to the prerequisites of a target to force it to be always rebuilt.
+# ---------------------------------------------------------------------------
+
+PHONY += FORCE
+FORCE:
+
+# Read all saved command lines and dependencies for the $(targets) we
+# may be building above, using $(if_changed{,_dep}). As an
+# optimization, we don't need to read them if the target does not
+# exist, we will rebuild anyway in that case.
+
+existing-targets := $(wildcard $(sort $(targets)))
+
+-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
+
+.PHONY: $(PHONY)
diff --git a/scripts/check-local-export b/scripts/check-local-export
new file mode 100755 (executable)
index 0000000..da745e2
--- /dev/null
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2022 Masahiro Yamada <masahiroy@kernel.org>
+#
+# Exit with error if a local exported symbol is found.
+# EXPORT_SYMBOL should be used for global symbols.
+
+set -e
+
+declare -A symbol_types
+declare -a export_symbols
+
+exit_code=0
+
+while read value type name
+do
+       # Skip the line if the number of fields is less than 3.
+       #
+       # case 1)
+       #   For undefined symbols, the first field (value) is empty.
+       #   The output looks like this:
+       #     "                 U _printk"
+       #   There is no need to record undefined symbols.
+       #
+       # case 2)
+       #   For Clang LTO, llvm-nm outputs a line with type 't' but empty name:
+       #     "---------------- t"
+       if [[ -z ${name} ]]; then
+               continue
+       fi
+
+       # save (name, type) in the associative array
+       symbol_types[${name}]=${type}
+
+       # append the exported symbol to the array
+       if [[ ${name} == __ksymtab_* ]]; then
+               export_symbols+=(${name#__ksymtab_})
+       fi
+
+       # If there is no symbol in the object, ${NM} (both GNU nm and llvm-nm)
+       # shows 'no symbols' diagnostic (but exits with 0). It is harmless and
+       # hidden by '2>/dev/null'. However, it suppresses real error messages
+       # as well. Add a hand-crafted error message here.
+       #
+       # Use --quiet instead of 2>/dev/null when we upgrade the minimum version
+       # of binutils to 2.37, llvm to 13.0.0.
+       #
+       # Then, the following line will be really simple:
+       #   done < <(${NM} --quiet ${1})
+done < <(${NM} ${1} 2>/dev/null || { echo "${0}: ${NM} failed" >&2; false; } )
+
+# Catch error in the process substitution
+wait $!
+
+for name in "${export_symbols[@]}"
+do
+       # nm(1) says "If lowercase, the symbol is usually local"
+       if [[ ${symbol_types[$name]} =~ [a-z] ]]; then
+               echo "$@: error: local symbol '${name}' was exported" >&2
+               exit_code=1
+       fi
+done
+
+exit ${exit_code}
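
check-local-export reads the ${NM} output once, remembers each symbol's type letter, collects the names behind the __ksymtab_* entries, and fails if any of them resolves to a lowercase (i.e. local) type. A rough standalone sketch of the same idea in Python, purely for illustration (the object path and the bare `nm` invocation are assumptions, not part of the tree):

    import subprocess, sys

    def check_local_export(obj, nm="nm"):
        # Flag EXPORT_SYMBOL entries whose underlying symbol is local (lowercase nm type).
        out = subprocess.run([nm, obj], capture_output=True, text=True, check=True).stdout
        symbol_types = {}
        exported = []
        for line in out.splitlines():
            fields = line.split()
            if len(fields) < 3:                 # undefined symbols / LTO placeholder lines
                continue
            _value, sym_type, name = fields[:3]
            symbol_types[name] = sym_type
            if name.startswith("__ksymtab_"):
                exported.append(name[len("__ksymtab_"):])
        rc = 0
        for name in exported:
            if symbol_types.get(name, "").islower():
                print(f"{obj}: error: local symbol '{name}' was exported", file=sys.stderr)
                rc = 1
        return rc

    if __name__ == "__main__":
        sys.exit(check_local_export(sys.argv[1]))
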
index 1389db7..0ffd553 100755 (executable)
@@ -981,11 +981,11 @@ __END__
 
 =head1 NAME
 
-abi_book.pl - parse the Linux ABI files and produce a ReST book.
+get_abi.pl - parse the Linux ABI files and produce a ReST book.
 
 =head1 SYNOPSIS
 
-B<abi_book.pl> [--debug <level>] [--enable-lineno] [--man] [--help]
+B<get_abi.pl> [--debug <level>] [--enable-lineno] [--man] [--help]
               [--(no-)rst-source] [--dir=<dir>] [--show-hints]
               [--search-string <regex>]
               <COMMAND> [<ARGUMENT>]
index e6906f7..f18e6df 100644 (file)
@@ -70,7 +70,7 @@ static unsigned char best_table_len[256];
 
 static void usage(void)
 {
-       fprintf(stderr, "Usage: kallsyms [--all-symbols] "
+       fprintf(stderr, "Usage: kallsyms [--all-symbols] [--absolute-percpu] "
                        "[--base-relative] < in.map > out.S\n");
        exit(1);
 }
index 7b371bd..3ba8b1a 100644 (file)
@@ -52,8 +52,8 @@ static const char nconf_global_help[] =
 "\n"
 "Menu navigation keys\n"
 "----------------------------------------------------------------------\n"
-"Linewise up                 <Up>\n"
-"Linewise down               <Down>\n"
+"Linewise up                 <Up>    <k>\n"
+"Linewise down               <Down>  <j>\n"
 "Pagewise up                 <Page Up>\n"
 "Pagewise down               <Page Down>\n"
 "First entry                 <Home>\n"
@@ -1105,9 +1105,11 @@ static void conf(struct menu *menu)
                                break;
                        switch (res) {
                        case KEY_DOWN:
+                       case 'j':
                                menu_driver(curses_menu, REQ_DOWN_ITEM);
                                break;
                        case KEY_UP:
+                       case 'k':
                                menu_driver(curses_menu, REQ_UP_ITEM);
                                break;
                        case KEY_NPAGE:
@@ -1287,9 +1289,11 @@ static void conf_choice(struct menu *menu)
                                break;
                        switch (res) {
                        case KEY_DOWN:
+                       case 'j':
                                menu_driver(curses_menu, REQ_DOWN_ITEM);
                                break;
                        case KEY_UP:
+                       case 'k':
                                menu_driver(curses_menu, REQ_UP_ITEM);
                                break;
                        case KEY_NPAGE:
index a7f6196..eecc186 100755 (executable)
@@ -45,115 +45,6 @@ info()
        printf "  %-7s %s\n" "${1}" "${2}"
 }
 
-# Generate a linker script to ensure correct ordering of initcalls.
-gen_initcalls()
-{
-       info GEN .tmp_initcalls.lds
-
-       ${PYTHON3} ${srctree}/scripts/jobserver-exec            \
-       ${PERL} ${srctree}/scripts/generate_initcall_order.pl   \
-               ${KBUILD_VMLINUX_OBJS} ${KBUILD_VMLINUX_LIBS}   \
-               > .tmp_initcalls.lds
-}
-
-# Link of vmlinux.o used for section mismatch analysis
-# ${1} output file
-modpost_link()
-{
-       local objects
-       local lds=""
-
-       objects="--whole-archive                                \
-               ${KBUILD_VMLINUX_OBJS}                          \
-               --no-whole-archive                              \
-               --start-group                                   \
-               ${KBUILD_VMLINUX_LIBS}                          \
-               --end-group"
-
-       if is_enabled CONFIG_LTO_CLANG; then
-               gen_initcalls
-               lds="-T .tmp_initcalls.lds"
-
-               # This might take a while, so indicate that we're doing
-               # an LTO link
-               info LTO ${1}
-       else
-               info LD ${1}
-       fi
-
-       ${LD} ${KBUILD_LDFLAGS} -r -o ${1} ${lds} ${objects}
-}
-
-objtool_link()
-{
-       local objtoolcmd;
-       local objtoolopt;
-
-       if ! is_enabled CONFIG_OBJTOOL; then
-               return;
-       fi
-
-       if is_enabled CONFIG_LTO_CLANG || is_enabled CONFIG_X86_KERNEL_IBT; then
-
-               # For LTO and IBT, objtool doesn't run on individual
-               # translation units.  Run everything on vmlinux instead.
-
-               if is_enabled CONFIG_HAVE_JUMP_LABEL_HACK; then
-                       objtoolopt="${objtoolopt} --hacks=jump_label"
-               fi
-
-               if is_enabled CONFIG_HAVE_NOINSTR_HACK; then
-                       objtoolopt="${objtoolopt} --hacks=noinstr"
-               fi
-
-               if is_enabled CONFIG_X86_KERNEL_IBT; then
-                       objtoolopt="${objtoolopt} --ibt"
-               fi
-
-               if is_enabled CONFIG_FTRACE_MCOUNT_USE_OBJTOOL; then
-                       objtoolopt="${objtoolopt} --mcount"
-               fi
-
-               if is_enabled CONFIG_UNWINDER_ORC; then
-                       objtoolopt="${objtoolopt} --orc"
-               fi
-
-               if is_enabled CONFIG_RETPOLINE; then
-                       objtoolopt="${objtoolopt} --retpoline"
-               fi
-
-               if is_enabled CONFIG_SLS; then
-                       objtoolopt="${objtoolopt} --sls"
-               fi
-
-               if is_enabled CONFIG_STACK_VALIDATION; then
-                       objtoolopt="${objtoolopt} --stackval"
-               fi
-
-               if is_enabled CONFIG_HAVE_STATIC_CALL_INLINE; then
-                       objtoolopt="${objtoolopt} --static-call"
-               fi
-
-               objtoolopt="${objtoolopt} --uaccess"
-       fi
-
-       if is_enabled CONFIG_NOINSTR_VALIDATION; then
-               objtoolopt="${objtoolopt} --noinstr"
-       fi
-
-       if [ -n "${objtoolopt}" ]; then
-
-               if is_enabled CONFIG_GCOV_KERNEL; then
-                       objtoolopt="${objtoolopt} --no-unreachable"
-               fi
-
-               objtoolopt="${objtoolopt} --link"
-
-               info OBJTOOL ${1}
-               tools/objtool/objtool ${objtoolopt} ${1}
-       fi
-}
-
 # Link of vmlinux
 # ${1} - output file
 # ${2}, ${3}, ... - optional extra .o files
@@ -303,14 +194,9 @@ sorttable()
 cleanup()
 {
        rm -f .btf.*
-       rm -f .tmp_System.map
-       rm -f .tmp_initcalls.lds
-       rm -f .tmp_vmlinux*
        rm -f System.map
        rm -f vmlinux
        rm -f vmlinux.map
-       rm -f vmlinux.o
-       rm -f .vmlinux.d
        rm -f .vmlinux.objs
        rm -f .vmlinux.export.c
 }
@@ -341,12 +227,18 @@ fi;
 ${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init need-builtin=1
 
 #link vmlinux.o
-modpost_link vmlinux.o
-objtool_link vmlinux.o
+${MAKE} -f "${srctree}/scripts/Makefile.vmlinux_o"
 
-# Generate the list of objects in vmlinux
+# Generate the list of in-tree objects in vmlinux
+#
+# This is used to retrieve symbol versions generated by genksyms.
 for f in ${KBUILD_VMLINUX_OBJS} ${KBUILD_VMLINUX_LIBS}; do
        case ${f} in
+       *libgcc.a)
+               # Some architectures do '$(CC) --print-libgcc-file-name' to
+               # borrow libgcc.a from the toolchain.
+               # There is no EXPORT_SYMBOL in external objects. Ignore this.
+               ;;
        *.a)
                ${AR} t ${f} ;;
        *)
index 5258247..cbd6b0f 100644 (file)
@@ -734,8 +734,6 @@ static int do_vio_entry(const char *filename, void *symval,
        return 1;
 }
 
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-
 static void do_input(char *alias,
                     kernel_ulong_t *arr, unsigned int min, unsigned int max)
 {
@@ -1391,6 +1389,15 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias)
        return 1;
 }
 
+/* Looks like: mhi_ep:S */
+static int do_mhi_ep_entry(const char *filename, void *symval, char *alias)
+{
+       DEF_FIELD_ADDR(symval, mhi_device_id, chan);
+       sprintf(alias, MHI_EP_DEVICE_MODALIAS_FMT, *chan);
+
+       return 1;
+}
+
 /* Looks like: ishtp:{guid} */
 static int do_ishtp_entry(const char *filename, void *symval, char *alias)
 {
@@ -1519,6 +1526,7 @@ static const struct devtable devtable[] = {
        {"tee", SIZE_tee_client_device_id, do_tee_entry},
        {"wmi", SIZE_wmi_device_id, do_wmi_entry},
        {"mhi", SIZE_mhi_device_id, do_mhi_entry},
+       {"mhi_ep", SIZE_mhi_device_id, do_mhi_ep_entry},
        {"auxiliary", SIZE_auxiliary_device_id, do_auxiliary_entry},
        {"ssam", SIZE_ssam_device_id, do_ssam_entry},
        {"dfl", SIZE_dfl_device_id, do_dfl_entry},
index 42e949c..29d5a84 100644 (file)
@@ -13,6 +13,7 @@
 
 #define _GNU_SOURCE
 #include <elf.h>
+#include <fnmatch.h>
 #include <stdio.h>
 #include <ctype.h>
 #include <string.h>
@@ -172,11 +173,11 @@ static struct module *find_module(const char *modname)
        return NULL;
 }
 
-static struct module *new_module(const char *modname)
+static struct module *new_module(const char *name, size_t namelen)
 {
        struct module *mod;
 
-       mod = NOFAIL(malloc(sizeof(*mod) + strlen(modname) + 1));
+       mod = NOFAIL(malloc(sizeof(*mod) + namelen + 1));
        memset(mod, 0, sizeof(*mod));
 
        INIT_LIST_HEAD(&mod->exported_symbols);
@@ -184,8 +185,9 @@ static struct module *new_module(const char *modname)
        INIT_LIST_HEAD(&mod->missing_namespaces);
        INIT_LIST_HEAD(&mod->imported_namespaces);
 
-       strcpy(mod->name, modname);
-       mod->is_vmlinux = (strcmp(modname, "vmlinux") == 0);
+       memcpy(mod->name, name, namelen);
+       mod->name[namelen] = '\0';
+       mod->is_vmlinux = (strcmp(mod->name, "vmlinux") == 0);
 
        /*
         * Set mod->is_gpl_compatible to true by default. If MODULE_LICENSE()
@@ -212,7 +214,6 @@ struct symbol {
        unsigned int crc;
        bool crc_valid;
        bool weak;
-       bool is_static;         /* true if symbol is not global */
        bool is_gpl_only;       /* exported by EXPORT_SYMBOL_GPL */
        char name[];
 };
@@ -242,7 +243,7 @@ static struct symbol *alloc_symbol(const char *name)
 
        memset(s, 0, sizeof(*s));
        strcpy(s->name, name);
-       s->is_static = true;
+
        return s;
 }
 
@@ -710,29 +711,6 @@ static char *get_modinfo(struct elf_info *info, const char *tag)
        return get_next_modinfo(info, tag, NULL);
 }
 
-/**
- * Test if string s ends in string sub
- * return 0 if match
- **/
-static int strrcmp(const char *s, const char *sub)
-{
-       int slen, sublen;
-
-       if (!s || !sub)
-               return 1;
-
-       slen = strlen(s);
-       sublen = strlen(sub);
-
-       if ((slen == 0) || (sublen == 0))
-               return 1;
-
-       if (sublen > slen)
-               return 1;
-
-       return memcmp(s + slen - sublen, sub, sublen);
-}
-
 static const char *sym_name(struct elf_info *elf, Elf_Sym *sym)
 {
        if (sym)
@@ -741,48 +719,22 @@ static const char *sym_name(struct elf_info *elf, Elf_Sym *sym)
                return "(unknown)";
 }
 
-/* The pattern is an array of simple patterns.
- * "foo" will match an exact string equal to "foo"
- * "*foo" will match a string that ends with "foo"
- * "foo*" will match a string that begins with "foo"
- * "*foo*" will match a string that contains "foo"
+/*
+ * Check whether the 'string' argument matches one of the 'patterns',
+ * an array of shell wildcard patterns (glob).
+ *
+ * Return true if there is a match.
  */
-static int match(const char *sym, const char * const pat[])
+static bool match(const char *string, const char *const patterns[])
 {
-       const char *p;
-       while (*pat) {
-               const char *endp;
-
-               p = *pat++;
-               endp = p + strlen(p) - 1;
+       const char *pattern;
 
-               /* "*foo*" */
-               if (*p == '*' && *endp == '*') {
-                       char *bare = NOFAIL(strndup(p + 1, strlen(p) - 2));
-                       char *here = strstr(sym, bare);
-
-                       free(bare);
-                       if (here != NULL)
-                               return 1;
-               }
-               /* "*foo" */
-               else if (*p == '*') {
-                       if (strrcmp(sym, p + 1) == 0)
-                               return 1;
-               }
-               /* "foo*" */
-               else if (*endp == '*') {
-                       if (strncmp(sym, p, strlen(p) - 1) == 0)
-                               return 1;
-               }
-               /* no wildcards */
-               else {
-                       if (strcmp(p, sym) == 0)
-                               return 1;
-               }
+       while ((pattern = *patterns++)) {
+               if (!fnmatch(pattern, string, 0))
+                       return true;
        }
-       /* no match */
-       return 0;
+
+       return false;
 }
 
 /* sections that we do not want to do full section mismatch check on */
@@ -1049,8 +1001,6 @@ static const struct sectioncheck *section_mismatch(
                const char *fromsec, const char *tosec)
 {
        int i;
-       int elems = sizeof(sectioncheck) / sizeof(struct sectioncheck);
-       const struct sectioncheck *check = &sectioncheck[0];
 
        /*
         * The target section could be the SHT_NUL section when we're
@@ -1061,14 +1011,15 @@ static const struct sectioncheck *section_mismatch(
        if (*tosec == '\0')
                return NULL;
 
-       for (i = 0; i < elems; i++) {
+       for (i = 0; i < ARRAY_SIZE(sectioncheck); i++) {
+               const struct sectioncheck *check = &sectioncheck[i];
+
                if (match(fromsec, check->fromsec)) {
                        if (check->bad_tosec[0] && match(tosec, check->bad_tosec))
                                return check;
                        if (check->good_tosec[0] && !match(tosec, check->good_tosec))
                                return check;
                }
-               check++;
        }
        return NULL;
 }
@@ -1180,7 +1131,8 @@ static int secref_whitelist(const struct sectioncheck *mismatch,
 
 static inline int is_arm_mapping_symbol(const char *str)
 {
-       return str[0] == '$' && strchr("axtd", str[1])
+       return str[0] == '$' &&
+              (str[1] == 'a' || str[1] == 'd' || str[1] == 't' || str[1] == 'x')
               && (str[2] == '\0' || str[2] == '.');
 }
 
@@ -1270,13 +1222,9 @@ static Elf_Sym *find_elf_symbol2(struct elf_info *elf, Elf_Addr addr,
                        continue;
                if (!is_valid_name(elf, sym))
                        continue;
-               if (sym->st_value <= addr) {
-                       if ((addr - sym->st_value) < distance) {
-                               distance = addr - sym->st_value;
-                               near = sym;
-                       } else if ((addr - sym->st_value) == distance) {
-                               near = sym;
-                       }
+               if (sym->st_value <= addr && addr - sym->st_value <= distance) {
+                       distance = addr - sym->st_value;
+                       near = sym;
                }
        }
        return near;
@@ -1883,8 +1831,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
  * to find all references to a section that reference a section that will
  * be discarded and warns about it.
  **/
-static void check_sec_ref(struct module *mod, const char *modname,
-                         struct elf_info *elf)
+static void check_sec_ref(const char *modname, struct elf_info *elf)
 {
        int i;
        Elf_Shdr *sechdrs = elf->sechdrs;
@@ -1906,12 +1853,8 @@ static char *remove_dot(char *s)
 
        if (n && s[n]) {
                size_t m = strspn(s + n + 1, "0123456789");
-               if (m && (s[n + m] == '.' || s[n + m] == 0))
+               if (m && (s[n + m + 1] == '.' || s[n + m + 1] == 0))
                        s[n] = 0;
-
-               /* strip trailing .prelink */
-               if (strends(s, ".prelink"))
-                       s[strlen(s) - 8] = '\0';
        }
        return s;
 }
@@ -2027,19 +1970,14 @@ static void read_symbols(const char *modname)
        if (!parse_elf(&info, modname))
                return;
 
-       {
-               char *tmp;
-
-               /* strip trailing .o */
-               tmp = NOFAIL(strdup(modname));
-               tmp[strlen(tmp) - 2] = '\0';
-               /* strip trailing .prelink */
-               if (strends(tmp, ".prelink"))
-                       tmp[strlen(tmp) - 8] = '\0';
-               mod = new_module(tmp);
-               free(tmp);
+       if (!strends(modname, ".o")) {
+               error("%s: filename must be suffixed with .o\n", modname);
+               return;
        }
 
+       /* strip trailing .o */
+       mod = new_module(modname, strlen(modname) - strlen(".o"));
+
        if (!mod->is_vmlinux) {
                license = get_modinfo(&info, "license");
                if (!license)
@@ -2076,21 +2014,7 @@ static void read_symbols(const char *modname)
                                             sym_get_data(&info, sym));
        }
 
-       // check for static EXPORT_SYMBOL_* functions && global vars
-       for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
-               unsigned char bind = ELF_ST_BIND(sym->st_info);
-
-               if (bind == STB_GLOBAL || bind == STB_WEAK) {
-                       struct symbol *s =
-                               find_symbol(remove_dot(info.strtab +
-                                                      sym->st_name));
-
-                       if (s)
-                               s->is_static = false;
-               }
-       }
-
-       check_sec_ref(mod, modname, &info);
+       check_sec_ref(modname, &info);
 
        if (!mod->is_vmlinux) {
                version = get_modinfo(&info, "version");
@@ -2515,11 +2439,10 @@ static void read_dump(const char *fname)
 
                mod = find_module(modname);
                if (!mod) {
-                       mod = new_module(modname);
+                       mod = new_module(modname, strlen(modname));
                        mod->from_dump = true;
                }
                s = sym_add_exported(symname, mod, gpl_only);
-               s->is_static = false;
                sym_set_crc(s, crc);
                sym_update_namespace(symname, namespace);
        }
@@ -2584,7 +2507,6 @@ int main(int argc, char **argv)
        char *missing_namespace_deps = NULL;
        char *dump_write = NULL, *files_source = NULL;
        int opt;
-       int n;
        LIST_HEAD(dump_lists);
        struct dump_list *dl, *dl2;
 
@@ -2660,15 +2582,6 @@ int main(int argc, char **argv)
        if (sec_mismatch_count && !sec_mismatch_warn_only)
                error("Section mismatches detected.\n"
                      "Set CONFIG_SECTION_MISMATCH_WARN_ONLY=y to allow them.\n");
-       for (n = 0; n < SYMBOL_HASH_SIZE; n++) {
-               struct symbol *s;
-
-               for (s = symbolhash[n]; s; s = s->next) {
-                       if (s->is_static)
-                               error("\"%s\" [%s] is a static EXPORT_SYMBOL\n",
-                                     s->name, s->module->name);
-               }
-       }
 
        if (nr_unresolved > MAX_UNRESOLVED_REPORTS)
                warn("suppressed %u unresolved symbol warnings because there were too many)\n",
index d9daeff..044bdfb 100644 (file)
@@ -97,6 +97,9 @@ static inline void __endian(const void *src, void *dest, unsigned int size)
 #endif
 
 #define NOFAIL(ptr)   do_nofail((ptr), #ptr)
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
 void *do_nofail(void *ptr, const char *expr);
 
 struct buffer {
index 72b0b63..0685bc3 100755 (executable)
 # $ ./scripts/objdiff diff COMMIT_A COMMIT_B
 # $
 
-# And to clean up (everything is in .tmp_objdiff/*)
+# And to clean up (everything is in .objdiff/*)
 # $ ./scripts/objdiff clean all
 #
-# Note: 'make mrproper' will also remove .tmp_objdiff
+# Note: 'make mrproper' will also remove .objdiff
 
 SRCTREE=$(cd $(git rev-parse --show-toplevel 2>/dev/null); pwd)
 
@@ -32,7 +32,7 @@ if [ -z "$SRCTREE" ]; then
        exit 1
 fi
 
-TMPD=$SRCTREE/.tmp_objdiff
+TMPD=$SRCTREE/.objdiff
 
 usage() {
        echo >&2 "Usage: $0 <command> <args>"
index d00504c..fba40e9 100644 (file)
 #define EM_RISCV       243
 #endif
 
+#ifndef EM_LOONGARCH
+#define EM_LOONGARCH   258
+#endif
+
 static uint32_t (*r)(const uint32_t *);
 static uint16_t (*r2)(const uint16_t *);
 static uint64_t (*r8)(const uint64_t *);
@@ -313,6 +317,7 @@ static int do_file(char const *const fname, void *addr)
        case EM_ARCOMPACT:
        case EM_ARCV2:
        case EM_ARM:
+       case EM_LOONGARCH:
        case EM_MICROBLAZE:
        case EM_MIPS:
        case EM_XTENSA:
index cb76324..9f6d1a7 100644 (file)
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 # run check on a text and a binary file
-for FILE in Makefile Documentation/logo.gif; do
+for FILE in Makefile Documentation/images/logo.gif; do
        python3 scripts/spdxcheck.py $FILE
        python3 scripts/spdxcheck.py - < $FILE
 done
index f3be8ed..18cb9f5 100755 (executable)
@@ -6,6 +6,7 @@ from argparse import ArgumentParser
 from ply import lex, yacc
 import locale
 import traceback
+import fnmatch
 import sys
 import git
 import re
@@ -28,6 +29,21 @@ class SPDXdata(object):
         self.licenses = [ ]
         self.exceptions = { }
 
+class dirinfo(object):
+    def __init__(self):
+        self.missing = 0
+        self.total = 0
+        self.files = []
+
+    def update(self, fname, basedir, miss):
+        self.total += 1
+        self.missing += miss
+        if miss:
+            fname = './' + fname
+            bdir = os.path.dirname(fname)
+            if bdir == basedir.rstrip('/'):
+                self.files.append(fname)
+
 # Read the spdx data from the LICENSES directory
 def read_spdxdata(repo):
 
@@ -91,11 +107,25 @@ class id_parser(object):
         self.parser = yacc.yacc(module = self, write_tables = False, debug = False)
         self.lines_checked = 0
         self.checked = 0
+        self.excluded = 0
         self.spdx_valid = 0
         self.spdx_errors = 0
+        self.spdx_dirs = {}
+        self.dirdepth = -1
+        self.basedir = '.'
         self.curline = 0
         self.deepest = 0
 
+    def set_dirinfo(self, basedir, dirdepth):
+        if dirdepth >= 0:
+            self.basedir = basedir
+            bdir = basedir.lstrip('./').rstrip('/')
+            if bdir != '':
+                parts = bdir.split('/')
+            else:
+                parts = []
+            self.dirdepth = dirdepth + len(parts)
+
     # Validate License and Exception IDs
     def validate(self, tok):
         id = tok.value.upper()
@@ -167,6 +197,7 @@ class id_parser(object):
     def parse_lines(self, fd, maxlines, fname):
         self.checked += 1
         self.curline = 0
+        fail = 1
         try:
             for line in fd:
                 line = line.decode(locale.getpreferredencoding(False), errors='ignore')
@@ -192,6 +223,7 @@ class id_parser(object):
                 # Should we check for more SPDX ids in the same file and
                 # complain if there are any?
                 #
+                fail = 0
                 break
 
         except ParserException as pe:
@@ -203,28 +235,102 @@ class id_parser(object):
                 sys.stdout.write('%s: %d:0 %s\n' %(fname, self.curline, pe.txt))
             self.spdx_errors += 1
 
-def scan_git_tree(tree):
+        if fname == '-':
+            return
+
+        base = os.path.dirname(fname)
+        if self.dirdepth > 0:
+            parts = base.split('/')
+            i = 0
+            base = '.'
+            while i < self.dirdepth and i < len(parts) and len(parts[i]):
+                base += '/' + parts[i]
+                i += 1
+        elif self.dirdepth == 0:
+            base = self.basedir
+        else:
+            base = './' + base.rstrip('/')
+        base += '/'
+
+        di = self.spdx_dirs.get(base, dirinfo())
+        di.update(fname, base, fail)
+        self.spdx_dirs[base] = di
+
+class pattern(object):
+    def __init__(self, line):
+        self.pattern = line
+        self.match = self.match_file
+        if line == '.*':
+            self.match = self.match_dot
+        elif line.endswith('/'):
+            self.pattern = line[:-1]
+            self.match = self.match_dir
+        elif line.startswith('/'):
+            self.pattern = line[1:]
+            self.match = self.match_fn
+
+    def match_dot(self, fpath):
+        return os.path.basename(fpath).startswith('.')
+
+    def match_file(self, fpath):
+        return os.path.basename(fpath) == self.pattern
+
+    def match_fn(self, fpath):
+        return fnmatch.fnmatchcase(fpath, self.pattern)
+
+    def match_dir(self, fpath):
+        if self.match_fn(os.path.dirname(fpath)):
+            return True
+        return fpath.startswith(self.pattern)
+
+def exclude_file(fpath):
+    for rule in exclude_rules:
+        if rule.match(fpath):
+            return True
+    return False
+
+def scan_git_tree(tree, basedir, dirdepth):
+    parser.set_dirinfo(basedir, dirdepth)
     for el in tree.traverse():
-        # Exclude stuff which would make pointless noise
-        # FIXME: Put this somewhere more sensible
-        if el.path.startswith("LICENSES"):
-            continue
-        if el.path.find("license-rules.rst") >= 0:
-            continue
         if not os.path.isfile(el.path):
             continue
+        if exclude_file(el.path):
+            parser.excluded += 1
+            continue
         with open(el.path, 'rb') as fd:
             parser.parse_lines(fd, args.maxlines, el.path)
 
-def scan_git_subtree(tree, path):
+def scan_git_subtree(tree, path, dirdepth):
     for p in path.strip('/').split('/'):
         tree = tree[p]
-    scan_git_tree(tree)
+    scan_git_tree(tree, path.strip('/'), dirdepth)
+
+def read_exclude_file(fname):
+    rules = []
+    if not fname:
+        return rules
+    with open(fname) as fd:
+        for line in fd:
+            line = line.strip()
+            if line.startswith('#'):
+                continue
+            if not len(line):
+                continue
+            rules.append(pattern(line))
+    return rules
 
 if __name__ == '__main__':
 
     ap = ArgumentParser(description='SPDX expression checker')
     ap.add_argument('path', nargs='*', help='Check path or file. If not given full git tree scan. For stdin use "-"')
+    ap.add_argument('-d', '--dirs', action='store_true',
+                    help='Show [sub]directory statistics.')
+    ap.add_argument('-D', '--depth', type=int, default=-1,
+                    help='Directory depth for -d statistics. Default: unlimited')
+    ap.add_argument('-e', '--exclude',
+                    help='File containing file patterns to exclude. Default: scripts/spdxexclude')
+    ap.add_argument('-f', '--files', action='store_true',
+                    help='Show files without SPDX.')
     ap.add_argument('-m', '--maxlines', type=int, default=15,
                     help='Maximum number of lines to scan in a file. Default 15')
     ap.add_argument('-v', '--verbose', action='store_true', help='Verbose statistics output')
@@ -259,6 +365,15 @@ if __name__ == '__main__':
         sys.exit(1)
 
     try:
+        fname = args.exclude
+        if not fname:
+            fname = os.path.join(os.path.dirname(__file__), 'spdxexclude')
+        exclude_rules = read_exclude_file(fname)
+    except Exception as ex:
+        sys.stderr.write('FAIL: Reading exclude file %s: %s\n' %(fname, ex))
+        sys.exit(1)
+
+    try:
         if len(args.path) and args.path[0] == '-':
             stdin = os.fdopen(sys.stdin.fileno(), 'rb')
             parser.parse_lines(stdin, args.maxlines, '-')
@@ -268,13 +383,21 @@ if __name__ == '__main__':
                     if os.path.isfile(p):
                         parser.parse_lines(open(p, 'rb'), args.maxlines, p)
                     elif os.path.isdir(p):
-                        scan_git_subtree(repo.head.reference.commit.tree, p)
+                        scan_git_subtree(repo.head.reference.commit.tree, p,
+                                         args.depth)
                     else:
                         sys.stderr.write('path %s does not exist\n' %p)
                         sys.exit(1)
             else:
                 # Full git tree scan
-                scan_git_tree(repo.head.commit.tree)
+                scan_git_tree(repo.head.commit.tree, '.', args.depth)
+
+            ndirs = len(parser.spdx_dirs)
+            dirsok = 0
+            if ndirs:
+                for di in parser.spdx_dirs.values():
+                    if not di.missing:
+                        dirsok += 1
 
             if args.verbose:
                 sys.stderr.write('\n')
@@ -283,10 +406,38 @@ if __name__ == '__main__':
                 sys.stderr.write('License IDs        %12d\n' %len(spdx.licenses))
                 sys.stderr.write('Exception IDs      %12d\n' %len(spdx.exceptions))
                 sys.stderr.write('\n')
+                sys.stderr.write('Files excluded:    %12d\n' %parser.excluded)
                 sys.stderr.write('Files checked:     %12d\n' %parser.checked)
                 sys.stderr.write('Lines checked:     %12d\n' %parser.lines_checked)
-                sys.stderr.write('Files with SPDX:   %12d\n' %parser.spdx_valid)
+                if parser.checked:
+                    pc = int(100 * parser.spdx_valid / parser.checked)
+                    sys.stderr.write('Files with SPDX:   %12d %3d%%\n' %(parser.spdx_valid, pc))
                 sys.stderr.write('Files with errors: %12d\n' %parser.spdx_errors)
+                if ndirs:
+                    sys.stderr.write('\n')
+                    sys.stderr.write('Directories accounted: %8d\n' %ndirs)
+                    pc = int(100 * dirsok / ndirs)
+                    sys.stderr.write('Directories complete:  %8d %3d%%\n' %(dirsok, pc))
+
+            if ndirs and ndirs != dirsok and args.dirs:
+                if args.verbose:
+                    sys.stderr.write('\n')
+                sys.stderr.write('Incomplete directories: SPDX in Files\n')
+                for f in sorted(parser.spdx_dirs.keys()):
+                    di = parser.spdx_dirs[f]
+                    if di.missing:
+                        valid = di.total - di.missing
+                        pc = int(100 * valid / di.total)
+                        sys.stderr.write('    %-80s: %5d of %5d  %3d%%\n' %(f, valid, di.total, pc))
+
+            if ndirs and ndirs != dirsok and args.files:
+                if args.verbose or args.dirs:
+                    sys.stderr.write('\n')
+                sys.stderr.write('Files without SPDX:\n')
+                for f in sorted(parser.spdx_dirs.keys()):
+                    di = parser.spdx_dirs[f]
+                    for f in sorted(di.files):
+                        sys.stderr.write('    %s\n' %f)
 
             sys.exit(0)
 
diff --git a/scripts/spdxexclude b/scripts/spdxexclude
new file mode 100644 (file)
index 0000000..81bdb13
--- /dev/null
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Patterns for excluding files and directories
+
+# Ignore the license directory and the licensing documentation which would
+# create lots of noise for no value
+LICENSES/
+license-rules.rst
+
+# Ignore config files and snippets. The majority is generated
+# by the Kconfig tools
+kernel/configs/
+arch/*/configs/
+
+# Other files without copyrightable content
+/CREDITS
+/MAINTAINERS
+/README
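
Note on the exclude file format: one glob-style pattern per line, '#' comments, and a leading '/' anchoring a pattern to the repository root. The checker itself compiles these lines into Python regex rules via pattern() as added above; the C sketch below only illustrates the matching idea under assumed fnmatch-style semantics, and the pattern list in main() is an adapted subset of the file above, not the checker's exact behaviour.

    #include <fnmatch.h>
    #include <stdio.h>

    /* Illustrative only: test one path against a few exclude patterns. */
    static int is_excluded(const char *path, const char **patterns, int n)
    {
            for (int i = 0; i < n; i++) {
                    const char *pat = patterns[i];

                    if (pat[0] == '/') {            /* anchored to the tree root */
                            if (fnmatch(pat + 1, path, FNM_PATHNAME) == 0)
                                    return 1;
                            continue;
                    }
                    if (fnmatch(pat, path, 0) == 0) /* match anywhere in the tree */
                            return 1;
            }
            return 0;
    }

    int main(void)
    {
            const char *pats[] = { "LICENSES/*", "arch/*/configs/*", "/README" };

            printf("%d\n", is_excluded("arch/arm/configs/defconfig", pats, 3)); /* 1 */
            printf("%d\n", is_excluded("drivers/mmc/core/block.c", pats, 3));   /* 0 */
            return 0;
    }
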
index 776849a..4bd327d 100644 (file)
@@ -10,4 +10,4 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
                                  -e s/s390x/s390/ \
                                  -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
                                  -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
-                                 -e s/riscv.*/riscv/)
+                                 -e s/riscv.*/riscv/ -e s/loongarch.*/loongarch/)
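
The SUBARCH sed script normalizes `uname -m` output to kernel arch directory names; the new entry folds loongarch64-style strings into `loongarch`. A small userspace C sketch of the same normalization (the table is a partial, illustrative subset of the sed rules, not a complete mapping):

    #include <stdio.h>
    #include <string.h>
    #include <sys/utsname.h>

    /* Illustrative subset of the SUBARCH normalization done in the Makefile. */
    static const char *normalize_arch(const char *machine)
    {
            static const struct { const char *prefix; const char *arch; } map[] = {
                    { "x86_64",    "x86"       },
                    { "aarch64",   "arm64"     },
                    { "riscv",     "riscv"     },
                    { "loongarch", "loongarch" },
            };

            for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
                    if (!strncmp(machine, map[i].prefix, strlen(map[i].prefix)))
                            return map[i].arch;

            return machine;     /* fall through unchanged, as the sed script does */
    }

    int main(void)
    {
            struct utsname un;

            if (uname(&un) == 0)
                    printf("%s -> %s\n", un.machine, normalize_arch(un.machine));
            return 0;
    }
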
index 16d475b..01fab3d 100755 (executable)
@@ -95,10 +95,13 @@ all_sources()
 
 all_compiled_sources()
 {
-       realpath -es $([ -z "$KBUILD_ABS_SRCTREE" ] && echo --relative-to=.) \
-               include/generated/autoconf.h $(find $ignore -name "*.cmd" -exec \
-               grep -Poh '(?(?=^source_.* \K).*|(?=^  \K\S).*(?= \\))' {} \+ |
-               awk '!a[$0]++') | sort -u
+       {
+               echo include/generated/autoconf.h
+               find $ignore -name "*.cmd" -exec \
+                       grep -Poh '(?(?=^source_.* \K).*|(?=^  \K\S).*(?= \\))' {} \+ |
+               awk '!a[$0]++'
+       } | xargs realpath -es $([ -z "$KBUILD_ABS_SRCTREE" ] && echo --relative-to=.) |
+       sort -u
 }
 
 all_target_sources()
index 9e61014..4b58526 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/ctype.h>
 #include <linux/audit.h>
 #include <linux/magic.h>
+#include <linux/mount.h>
 #include <linux/fs_context.h>
 #include "smack.h"
 
index 58274b4..e55c042 100644 (file)
 #include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
+#include <linux/soc/pxa/cpu.h>
 
 #include <sound/pxa2xx-lib.h>
 
-#include <mach/irqs.h>
-#include <mach/regs-ac97.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
+
+#include "pxa2xx-ac97-regs.h"
 
 static DEFINE_MUTEX(car_mutex);
 static DECLARE_WAIT_QUEUE_HEAD(gsr_wq);
@@ -30,6 +31,7 @@ static volatile long gsr_bits;
 static struct clk *ac97_clk;
 static struct clk *ac97conf_clk;
 static int reset_gpio;
+static void __iomem *ac97_reg_base;
 
 extern void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio);
 
@@ -46,7 +48,7 @@ extern void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio);
 int pxa2xx_ac97_read(int slot, unsigned short reg)
 {
        int val = -ENODEV;
-       volatile u32 *reg_addr;
+       u32 __iomem *reg_addr;
 
        if (slot > 0)
                return -ENODEV;
@@ -55,31 +57,33 @@ int pxa2xx_ac97_read(int slot, unsigned short reg)
 
        /* set up primary or secondary codec space */
        if (cpu_is_pxa25x() && reg == AC97_GPIO_STATUS)
-               reg_addr = slot ? &SMC_REG_BASE : &PMC_REG_BASE;
+               reg_addr = ac97_reg_base +
+                          (slot ? SMC_REG_BASE : PMC_REG_BASE);
        else
-               reg_addr = slot ? &SAC_REG_BASE : &PAC_REG_BASE;
+               reg_addr = ac97_reg_base +
+                          (slot ? SAC_REG_BASE : PAC_REG_BASE);
        reg_addr += (reg >> 1);
 
        /* start read access across the ac97 link */
-       GSR = GSR_CDONE | GSR_SDONE;
+       writel(GSR_CDONE | GSR_SDONE, ac97_reg_base + GSR);
        gsr_bits = 0;
-       val = (*reg_addr & 0xffff);
+       val = (readl(reg_addr) & 0xffff);
        if (reg == AC97_GPIO_STATUS)
                goto out;
-       if (wait_event_timeout(gsr_wq, (GSR | gsr_bits) & GSR_SDONE, 1) <= 0 &&
-           !((GSR | gsr_bits) & GSR_SDONE)) {
+       if (wait_event_timeout(gsr_wq, (readl(ac97_reg_base + GSR) | gsr_bits) & GSR_SDONE, 1) <= 0 &&
+           !((readl(ac97_reg_base + GSR) | gsr_bits) & GSR_SDONE)) {
                printk(KERN_ERR "%s: read error (ac97_reg=%d GSR=%#lx)\n",
-                               __func__, reg, GSR | gsr_bits);
+                               __func__, reg, readl(ac97_reg_base + GSR) | gsr_bits);
                val = -ETIMEDOUT;
                goto out;
        }
 
        /* valid data now */
-       GSR = GSR_CDONE | GSR_SDONE;
+       writel(GSR_CDONE | GSR_SDONE, ac97_reg_base + GSR);
        gsr_bits = 0;
-       val = (*reg_addr & 0xffff);
+       val = (readl(reg_addr) & 0xffff);
        /* but we've just started another cycle... */
-       wait_event_timeout(gsr_wq, (GSR | gsr_bits) & GSR_SDONE, 1);
+       wait_event_timeout(gsr_wq, (readl(ac97_reg_base + GSR) | gsr_bits) & GSR_SDONE, 1);
 
 out:   mutex_unlock(&car_mutex);
        return val;
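
This conversion drops the old PXA `__REG()` absolute-address accessors in favour of an ioremapped base plus readl()/writel(), which is what lets the register header (renamed further below) carry plain offsets. A condensed sketch of the resulting access pattern; ac97_reg_base and the GSR/codec offsets are from this driver, the helper name is made up for illustration:

    /* Sketch only: offset-based MMIO access replacing the __REG() macros.
     * ac97_reg_base is set from devm_platform_ioremap_resource() in probe. */
    #include <linux/io.h>

    static void __iomem *ac97_reg_base;

    static u32 ac97_codec_read(unsigned int codec_base, unsigned short reg)
    {
            u32 __iomem *reg_addr = ac97_reg_base + codec_base;

            reg_addr += reg >> 1;                               /* one 32-bit slot per 16-bit register */
            writel(GSR_CDONE | GSR_SDONE, ac97_reg_base + GSR); /* clear stale status */
            return readl(reg_addr) & 0xffff;                    /* low 16 bits carry the data */
    }
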
@@ -88,25 +92,27 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_read);
 
 int pxa2xx_ac97_write(int slot, unsigned short reg, unsigned short val)
 {
-       volatile u32 *reg_addr;
+       u32 __iomem *reg_addr;
        int ret = 0;
 
        mutex_lock(&car_mutex);
 
        /* set up primary or secondary codec space */
        if (cpu_is_pxa25x() && reg == AC97_GPIO_STATUS)
-               reg_addr = slot ? &SMC_REG_BASE : &PMC_REG_BASE;
+               reg_addr = ac97_reg_base +
+                          (slot ? SMC_REG_BASE : PMC_REG_BASE);
        else
-               reg_addr = slot ? &SAC_REG_BASE : &PAC_REG_BASE;
+               reg_addr = ac97_reg_base +
+                          (slot ? SAC_REG_BASE : PAC_REG_BASE);
        reg_addr += (reg >> 1);
 
-       GSR = GSR_CDONE | GSR_SDONE;
+       writel(GSR_CDONE | GSR_SDONE, ac97_reg_base + GSR);
        gsr_bits = 0;
-       *reg_addr = val;
-       if (wait_event_timeout(gsr_wq, (GSR | gsr_bits) & GSR_CDONE, 1) <= 0 &&
-           !((GSR | gsr_bits) & GSR_CDONE)) {
+       writel(val, reg_addr);
+       if (wait_event_timeout(gsr_wq, (readl(ac97_reg_base + GSR) | gsr_bits) & GSR_CDONE, 1) <= 0 &&
+           !((readl(ac97_reg_base + GSR) | gsr_bits) & GSR_CDONE)) {
                printk(KERN_ERR "%s: write error (ac97_reg=%d GSR=%#lx)\n",
-                               __func__, reg, GSR | gsr_bits);
+                               __func__, reg, readl(ac97_reg_base + GSR) | gsr_bits);
                ret = -EIO;
        }
 
@@ -120,17 +126,17 @@ static inline void pxa_ac97_warm_pxa25x(void)
 {
        gsr_bits = 0;
 
-       GCR |= GCR_WARM_RST;
+       writel(readl(ac97_reg_base + GCR) | (GCR_WARM_RST), ac97_reg_base + GCR);
 }
 
 static inline void pxa_ac97_cold_pxa25x(void)
 {
-       GCR &=  GCR_COLD_RST;  /* clear everything but nCRST */
-       GCR &= ~GCR_COLD_RST;  /* then assert nCRST */
+       writel(readl(ac97_reg_base + GCR) & ( GCR_COLD_RST), ac97_reg_base + GCR);  /* clear everything but nCRST */
+       writel(readl(ac97_reg_base + GCR) & (~GCR_COLD_RST), ac97_reg_base + GCR);  /* then assert nCRST */
 
        gsr_bits = 0;
 
-       GCR = GCR_COLD_RST;
+       writel(GCR_COLD_RST, ac97_reg_base + GCR);
 }
 #endif
 
@@ -142,15 +148,15 @@ static inline void pxa_ac97_warm_pxa27x(void)
        /* warm reset broken on Bulverde, so manually keep AC97 reset high */
        pxa27x_configure_ac97reset(reset_gpio, true);
        udelay(10);
-       GCR |= GCR_WARM_RST;
+       writel(readl(ac97_reg_base + GCR) | (GCR_WARM_RST), ac97_reg_base + GCR);
        pxa27x_configure_ac97reset(reset_gpio, false);
        udelay(500);
 }
 
 static inline void pxa_ac97_cold_pxa27x(void)
 {
-       GCR &=  GCR_COLD_RST;  /* clear everything but nCRST */
-       GCR &= ~GCR_COLD_RST;  /* then assert nCRST */
+       writel(readl(ac97_reg_base + GCR) & ( GCR_COLD_RST), ac97_reg_base + GCR);  /* clear everything but nCRST */
+       writel(readl(ac97_reg_base + GCR) & (~GCR_COLD_RST), ac97_reg_base + GCR);  /* then assert nCRST */
 
        gsr_bits = 0;
 
@@ -158,7 +164,7 @@ static inline void pxa_ac97_cold_pxa27x(void)
        clk_prepare_enable(ac97conf_clk);
        udelay(5);
        clk_disable_unprepare(ac97conf_clk);
-       GCR = GCR_COLD_RST | GCR_WARM_RST;
+       writel(GCR_COLD_RST | GCR_WARM_RST, ac97_reg_base + GCR);
 }
 #endif
 
@@ -168,26 +174,26 @@ static inline void pxa_ac97_warm_pxa3xx(void)
        gsr_bits = 0;
 
        /* Can't use interrupts */
-       GCR |= GCR_WARM_RST;
+       writel(readl(ac97_reg_base + GCR) | (GCR_WARM_RST), ac97_reg_base + GCR);
 }
 
 static inline void pxa_ac97_cold_pxa3xx(void)
 {
        /* Hold CLKBPB for 100us */
-       GCR = 0;
-       GCR = GCR_CLKBPB;
+       writel(0, ac97_reg_base + GCR);
+       writel(GCR_CLKBPB, ac97_reg_base + GCR);
        udelay(100);
-       GCR = 0;
+       writel(0, ac97_reg_base + GCR);
 
-       GCR &=  GCR_COLD_RST;  /* clear everything but nCRST */
-       GCR &= ~GCR_COLD_RST;  /* then assert nCRST */
+       writel(readl(ac97_reg_base + GCR) & ( GCR_COLD_RST), ac97_reg_base + GCR);  /* clear everything but nCRST */
+       writel(readl(ac97_reg_base + GCR) & (~GCR_COLD_RST), ac97_reg_base + GCR);  /* then assert nCRST */
 
        gsr_bits = 0;
 
        /* Can't use interrupts on PXA3xx */
-       GCR &= ~(GCR_PRIRDY_IEN|GCR_SECRDY_IEN);
+       writel(readl(ac97_reg_base + GCR) & (~(GCR_PRIRDY_IEN|GCR_SECRDY_IEN)), ac97_reg_base + GCR);
 
-       GCR = GCR_WARM_RST | GCR_COLD_RST;
+       writel(GCR_WARM_RST | GCR_COLD_RST, ac97_reg_base + GCR);
 }
 #endif
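
Because GCR is now just an offset rather than an lvalue macro, every former `GCR |= x` / `GCR &= ~x` becomes an explicit read-modify-write. A tiny hedged helper of the kind one could factor out; it is not part of the patch, shown only to make the repeated writel(readl(...)) idiom explicit:

    /* Sketch: read-modify-write on GCR, equivalent to the old "GCR |= bits". */
    static inline void gcr_set_bits(u32 bits)
    {
            writel(readl(ac97_reg_base + GCR) | bits, ac97_reg_base + GCR);
    }

    static inline void gcr_clear_bits(u32 bits)
    {
            writel(readl(ac97_reg_base + GCR) & ~bits, ac97_reg_base + GCR);
    }
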
 
@@ -213,10 +219,10 @@ bool pxa2xx_ac97_try_warm_reset(void)
 #endif
                snd_BUG();
 
-       while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
+       while (!((readl(ac97_reg_base + GSR) | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
                mdelay(1);
 
-       gsr = GSR | gsr_bits;
+       gsr = readl(ac97_reg_base + GSR) | gsr_bits;
        if (!(gsr & (GSR_PCR | GSR_SCR))) {
                printk(KERN_INFO "%s: warm reset timeout (GSR=%#lx)\n",
                                 __func__, gsr);
@@ -250,10 +256,10 @@ bool pxa2xx_ac97_try_cold_reset(void)
 #endif
                snd_BUG();
 
-       while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
+       while (!((readl(ac97_reg_base + GSR) | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
                mdelay(1);
 
-       gsr = GSR | gsr_bits;
+       gsr = readl(ac97_reg_base + GSR) | gsr_bits;
        if (!(gsr & (GSR_PCR | GSR_SCR))) {
                printk(KERN_INFO "%s: cold reset timeout (GSR=%#lx)\n",
                                 __func__, gsr);
@@ -268,8 +274,10 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_try_cold_reset);
 
 void pxa2xx_ac97_finish_reset(void)
 {
-       GCR &= ~(GCR_PRIRDY_IEN|GCR_SECRDY_IEN);
-       GCR |= GCR_SDONE_IE|GCR_CDONE_IE;
+       u32 gcr = readl(ac97_reg_base + GCR);
+       gcr &= ~(GCR_PRIRDY_IEN|GCR_SECRDY_IEN);
+       gcr |= GCR_SDONE_IE|GCR_CDONE_IE;
+       writel(gcr, ac97_reg_base + GCR);
 }
 EXPORT_SYMBOL_GPL(pxa2xx_ac97_finish_reset);
 
@@ -277,9 +285,9 @@ static irqreturn_t pxa2xx_ac97_irq(int irq, void *dev_id)
 {
        long status;
 
-       status = GSR;
+       status = readl(ac97_reg_base + GSR);
        if (status) {
-               GSR = status;
+               writel(status, ac97_reg_base + GSR);
                gsr_bits |= status;
                wake_up(&gsr_wq);
 
@@ -287,9 +295,9 @@ static irqreturn_t pxa2xx_ac97_irq(int irq, void *dev_id)
                   since they tend to spuriously trigger when MMC is used
                   (hardware bug? go figure)... */
                if (cpu_is_pxa27x()) {
-                       MISR = MISR_EOC;
-                       PISR = PISR_EOC;
-                       MCSR = MCSR_EOC;
+                       writel(MISR_EOC, ac97_reg_base + MISR);
+                       writel(PISR_EOC, ac97_reg_base + PISR);
+                       writel(MCSR_EOC, ac97_reg_base + MCSR);
                }
 
                return IRQ_HANDLED;
@@ -301,7 +309,7 @@ static irqreturn_t pxa2xx_ac97_irq(int irq, void *dev_id)
 #ifdef CONFIG_PM
 int pxa2xx_ac97_hw_suspend(void)
 {
-       GCR |= GCR_ACLINK_OFF;
+       writel(readl(ac97_reg_base + GCR) | (GCR_ACLINK_OFF), ac97_reg_base + GCR);
        clk_disable_unprepare(ac97_clk);
        return 0;
 }
@@ -318,8 +326,15 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_resume);
 int pxa2xx_ac97_hw_probe(struct platform_device *dev)
 {
        int ret;
+       int irq;
        pxa2xx_audio_ops_t *pdata = dev->dev.platform_data;
 
+       ac97_reg_base = devm_platform_ioremap_resource(dev, 0);
+       if (IS_ERR(ac97_reg_base)) {
+               dev_err(&dev->dev, "Missing MMIO resource\n");
+               return PTR_ERR(ac97_reg_base);
+       }
+
        if (pdata) {
                switch (pdata->reset_gpio) {
                case 95:
@@ -386,14 +401,18 @@ int pxa2xx_ac97_hw_probe(struct platform_device *dev)
        if (ret)
                goto err_clk2;
 
-       ret = request_irq(IRQ_AC97, pxa2xx_ac97_irq, 0, "AC97", NULL);
+       irq = platform_get_irq(dev, 0);
+       if (!irq)
+               goto err_irq;
+
+       ret = request_irq(irq, pxa2xx_ac97_irq, 0, "AC97", NULL);
        if (ret < 0)
                goto err_irq;
 
        return 0;
 
 err_irq:
-       GCR |= GCR_ACLINK_OFF;
+       writel(readl(ac97_reg_base + GCR) | (GCR_ACLINK_OFF), ac97_reg_base + GCR);
 err_clk2:
        clk_put(ac97_clk);
        ac97_clk = NULL;
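
The probe path now discovers its resources from the platform device instead of hard-coding IRQ_AC97: devm_platform_ioremap_resource() for the MMIO window and platform_get_irq() for the interrupt. platform_get_irq() reports failure with a negative errno; the hunk above tests for zero, while the more common idiom checks for a negative value, as in this minimal sketch (error handling simplified to direct returns, whereas the merged code jumps to its existing error labels):

    ac97_reg_base = devm_platform_ioremap_resource(dev, 0);
    if (IS_ERR(ac97_reg_base))
            return PTR_ERR(ac97_reg_base);

    irq = platform_get_irq(dev, 0);
    if (irq < 0)
            return irq;             /* negative errno on failure */

    ret = request_irq(irq, pxa2xx_ac97_irq, 0, "AC97", NULL);
    if (ret < 0)
            return ret;
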
@@ -411,8 +430,8 @@ void pxa2xx_ac97_hw_remove(struct platform_device *dev)
 {
        if (cpu_is_pxa27x())
                gpio_free(reset_gpio);
-       GCR |= GCR_ACLINK_OFF;
-       free_irq(IRQ_AC97, NULL);
+       writel(readl(ac97_reg_base + GCR) | (GCR_ACLINK_OFF), ac97_reg_base + GCR);
+       free_irq(platform_get_irq(dev, 0), NULL);
        if (ac97conf_clk) {
                clk_put(ac97conf_clk);
                ac97conf_clk = NULL;
@@ -423,6 +442,24 @@ void pxa2xx_ac97_hw_remove(struct platform_device *dev)
 }
 EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_remove);
 
+u32 pxa2xx_ac97_read_modr(void)
+{
+       if (!ac97_reg_base)
+               return 0;
+
+       return readl(ac97_reg_base + MODR);
+}
+EXPORT_SYMBOL_GPL(pxa2xx_ac97_read_modr);
+
+u32 pxa2xx_ac97_read_misr(void)
+{
+       if (!ac97_reg_base)
+               return 0;
+
+       return readl(ac97_reg_base + MISR);
+}
+EXPORT_SYMBOL_GPL(pxa2xx_ac97_read_misr);
+
 MODULE_AUTHOR("Nicolas Pitre");
 MODULE_DESCRIPTION("Intel/Marvell PXA sound library");
 MODULE_LICENSE("GPL");
similarity index 71%
rename from arch/arm/mach-pxa/include/mach/regs-ac97.h
rename to sound/arm/pxa2xx-ac97-regs.h
index 1db96fd..ae638a1 100644 (file)
@@ -2,25 +2,23 @@
 #ifndef __ASM_ARCH_REGS_AC97_H
 #define __ASM_ARCH_REGS_AC97_H
 
-#include <mach/hardware.h>
-
 /*
  * AC97 Controller registers
  */
 
-#define POCR           __REG(0x40500000)  /* PCM Out Control Register */
+#define POCR           (0x0000)        /* PCM Out Control Register */
 #define POCR_FEIE      (1 << 3)        /* FIFO Error Interrupt Enable */
 #define POCR_FSRIE     (1 << 1)        /* FIFO Service Request Interrupt Enable */
 
-#define PICR           __REG(0x40500004)  /* PCM In Control Register */
+#define PICR           (0x0004)        /* PCM In Control Register */
 #define PICR_FEIE      (1 << 3)        /* FIFO Error Interrupt Enable */
 #define PICR_FSRIE     (1 << 1)        /* FIFO Service Request Interrupt Enable */
 
-#define MCCR           __REG(0x40500008)  /* Mic In Control Register */
+#define MCCR           (0x0008)        /* Mic In Control Register */
 #define MCCR_FEIE      (1 << 3)        /* FIFO Error Interrupt Enable */
 #define MCCR_FSRIE     (1 << 1)        /* FIFO Service Request Interrupt Enable */
 
-#define GCR            __REG(0x4050000C)  /* Global Control Register */
+#define GCR            (0x000C)         /* Global Control Register */
 #ifdef CONFIG_PXA3xx
 #define GCR_CLKBPB     (1 << 31)       /* Internal clock enable */
 #endif
 #define GCR_COLD_RST   (1 << 1)        /* AC'97 Cold Reset (0 = active) */
 #define GCR_GIE                (1 << 0)        /* Codec GPI Interrupt Enable */
 
-#define POSR           __REG(0x40500010)  /* PCM Out Status Register */
+#define POSR           (0x0010)        /* PCM Out Status Register */
 #define POSR_FIFOE     (1 << 4)        /* FIFO error */
 #define POSR_FSR       (1 << 2)        /* FIFO Service Request */
 
-#define PISR           __REG(0x40500014)  /* PCM In Status Register */
+#define PISR           (0x0014)        /* PCM In Status Register */
 #define PISR_FIFOE     (1 << 4)        /* FIFO error */
 #define PISR_EOC       (1 << 3)        /* DMA End-of-Chain (exclusive clear) */
 #define PISR_FSR       (1 << 2)        /* FIFO Service Request */
 
-#define MCSR           __REG(0x40500018)  /* Mic In Status Register */
+#define MCSR           (0x0018)        /* Mic In Status Register */
 #define MCSR_FIFOE     (1 << 4)        /* FIFO error */
 #define MCSR_EOC       (1 << 3)        /* DMA End-of-Chain (exclusive clear) */
 #define MCSR_FSR       (1 << 2)        /* FIFO Service Request */
 
-#define GSR            __REG(0x4050001C)  /* Global Status Register */
+#define GSR            (0x001C)        /* Global Status Register */
 #define GSR_CDONE      (1 << 19)       /* Command Done */
 #define GSR_SDONE      (1 << 18)       /* Status Done */
 #define GSR_RDCS       (1 << 15)       /* Read Completion Status */
 #define GSR_MIINT      (1 << 1)        /* Modem In Interrupt */
 #define GSR_GSCI       (1 << 0)        /* Codec GPI Status Change Interrupt */
 
-#define CAR            __REG(0x40500020)  /* CODEC Access Register */
+#define CAR            (0x0020)        /* CODEC Access Register */
 #define CAR_CAIP       (1 << 0)        /* Codec Access In Progress */
 
-#define PCDR           __REG(0x40500040)  /* PCM FIFO Data Register */
-#define MCDR           __REG(0x40500060)  /* Mic-in FIFO Data Register */
+#define PCDR           (0x0040)        /* PCM FIFO Data Register */
+#define MCDR           (0x0060)        /* Mic-in FIFO Data Register */
 
-#define MOCR           __REG(0x40500100)  /* Modem Out Control Register */
+#define MOCR           (0x0100)        /* Modem Out Control Register */
 #define MOCR_FEIE      (1 << 3)        /* FIFO Error */
 #define MOCR_FSRIE     (1 << 1)        /* FIFO Service Request Interrupt Enable */
 
-#define MICR           __REG(0x40500108)  /* Modem In Control Register */
+#define MICR           (0x0108)        /* Modem In Control Register */
 #define MICR_FEIE      (1 << 3)        /* FIFO Error */
 #define MICR_FSRIE     (1 << 1)        /* FIFO Service Request Interrupt Enable */
 
-#define MOSR           __REG(0x40500110)  /* Modem Out Status Register */
+#define MOSR           (0x0110)        /* Modem Out Status Register */
 #define MOSR_FIFOE     (1 << 4)        /* FIFO error */
 #define MOSR_FSR       (1 << 2)        /* FIFO Service Request */
 
-#define MISR           __REG(0x40500118)  /* Modem In Status Register */
+#define MISR           (0x0118)        /* Modem In Status Register */
 #define MISR_FIFOE     (1 << 4)        /* FIFO error */
 #define MISR_EOC       (1 << 3)        /* DMA End-of-Chain (exclusive clear) */
 #define MISR_FSR       (1 << 2)        /* FIFO Service Request */
 
-#define MODR           __REG(0x40500140)  /* Modem FIFO Data Register */
+#define MODR           (0x0140)        /* Modem FIFO Data Register */
 
-#define PAC_REG_BASE   __REG(0x40500200)  /* Primary Audio Codec */
-#define SAC_REG_BASE   __REG(0x40500300)  /* Secondary Audio Codec */
-#define PMC_REG_BASE   __REG(0x40500400)  /* Primary Modem Codec */
-#define SMC_REG_BASE   __REG(0x40500500)  /* Secondary Modem Codec */
+#define PAC_REG_BASE   (0x0200)        /* Primary Audio Codec */
+#define SAC_REG_BASE   (0x0300)        /* Secondary Audio Codec */
+#define PMC_REG_BASE   (0x0400)        /* Primary Modem Codec */
+#define SMC_REG_BASE   (0x0500)        /* Secondary Modem Codec */
 
 #endif /* __ASM_ARCH_REGS_AC97_H */
index c17a19f..c162086 100644 (file)
@@ -21,8 +21,7 @@
 #include <sound/pxa2xx-lib.h>
 #include <sound/dmaengine_pcm.h>
 
-#include <mach/regs-ac97.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 
 static void pxa2xx_ac97_legacy_reset(struct snd_ac97 *ac97)
 {
index 350d704..2762f03 100644 (file)
@@ -9,9 +9,7 @@ ifneq ($(CONFIG_SND_PROC_FS),)
 snd-y += info.o
 snd-$(CONFIG_SND_OSSEMUL) += info_oss.o
 endif
-ifneq ($(CONFIG_M68K),y)
 snd-$(CONFIG_ISA_DMA_API) += isadma.o
-endif
 snd-$(CONFIG_SND_OSSEMUL) += sound_oss.o
 snd-$(CONFIG_SND_VMASTER) += vmaster.o
 snd-$(CONFIG_SND_JACK)   += ctljack.o jack.o
index 570b88e..6ffa48d 100644 (file)
@@ -22,7 +22,7 @@ config SND_SB16_DSP
 menuconfig SND_ISA
        bool "ISA sound devices"
        depends on ISA || COMPILE_TEST
-       depends on ISA_DMA_API && !M68K
+       depends on ISA_DMA_API
        default y
        help
          Support for sound devices connected via the ISA bus.
index 80e4955..f3ad454 100644 (file)
@@ -1981,6 +1981,7 @@ enum {
        ALC1220_FIXUP_CLEVO_PB51ED_PINS,
        ALC887_FIXUP_ASUS_AUDIO,
        ALC887_FIXUP_ASUS_HMIC,
+       ALCS1200A_FIXUP_MIC_VREF,
 };
 
 static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2526,6 +2527,14 @@ static const struct hda_fixup alc882_fixups[] = {
                .chained = true,
                .chain_id = ALC887_FIXUP_ASUS_AUDIO,
        },
+       [ALCS1200A_FIXUP_MIC_VREF] = {
+               .type = HDA_FIXUP_PINCTLS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x18, PIN_VREF50 }, /* rear mic */
+                       { 0x19, PIN_VREF50 }, /* front mic */
+                       {}
+               }
+       },
 };
 
 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2563,6 +2572,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
        SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
        SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
+       SND_PCI_QUIRK(0x1043, 0x8797, "ASUS TUF B550M-PLUS", ALCS1200A_FIXUP_MIC_VREF),
        SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
        SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
        SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
@@ -8905,6 +8915,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0a62, "Dell Precision 5560", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
index 773a136..a05304f 100644 (file)
@@ -449,8 +449,6 @@ static void vt1708_set_pinconfig_connect(struct hda_codec *codec, hda_nid_t nid)
                def_conf = def_conf & (~(AC_JACK_PORT_BOTH << 30));
                snd_hda_codec_set_pincfg(codec, nid, def_conf);
        }
-
-       return;
 }
 
 static int vt1708_jack_detect_get(struct snd_kcontrol *kcontrol,
index 340e39d..c893963 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/module.h>
 
 #include "amd.h"
 #include "../mach-config.h"
index 7998fdd..bba73c4 100644 (file)
@@ -60,6 +60,9 @@ static void da7219_aad_btn_det_work(struct work_struct *work)
        bool micbias_up = false;
        int retries = 0;
 
+       /* Disable ground switch */
+       snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
+
        /* Drive headphones/lineout */
        snd_soc_component_update_bits(component, DA7219_HP_L_CTRL,
                            DA7219_HP_L_AMP_OE_MASK,
@@ -153,6 +156,9 @@ static void da7219_aad_hptest_work(struct work_struct *work)
                tonegen_freq_hptest = cpu_to_le16(DA7219_AAD_HPTEST_RAMP_FREQ_INT_OSC);
        }
 
+       /* Disable ground switch */
+       snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
+
        /* Ensure gain ramping at fastest rate */
        gain_ramp_ctrl = snd_soc_component_read(component, DA7219_GAIN_RAMP_CTRL);
        snd_soc_component_write(component, DA7219_GAIN_RAMP_CTRL, DA7219_GAIN_RAMP_RATE_X8);
@@ -428,6 +434,10 @@ static irqreturn_t da7219_aad_irq_thread(int irq, void *data)
                        mask |= DA7219_AAD_REPORT_ALL_MASK;
                        da7219_aad->jack_inserted = false;
 
+                       /* Cancel any pending work */
+                       cancel_work_sync(&da7219_aad->btn_det_work);
+                       cancel_work_sync(&da7219_aad->hptest_work);
+
                        /* Un-drive headphones/lineout */
                        snd_soc_component_update_bits(component, DA7219_HP_R_CTRL,
                                            DA7219_HP_R_AMP_OE_MASK, 0);
@@ -444,9 +454,8 @@ static irqreturn_t da7219_aad_irq_thread(int irq, void *data)
                        snd_soc_dapm_disable_pin(dapm, "Mic Bias");
                        snd_soc_dapm_sync(dapm);
 
-                       /* Cancel any pending work */
-                       cancel_work_sync(&da7219_aad->btn_det_work);
-                       cancel_work_sync(&da7219_aad->hptest_work);
+                       /* Enable ground switch */
+                       snd_soc_component_update_bits(component, 0xFB, 0x01, 0x01);
                }
        }
 
@@ -899,6 +908,9 @@ int da7219_aad_init(struct snd_soc_component *component)
        snd_soc_component_update_bits(component, DA7219_ACCDET_CONFIG_1,
                            DA7219_BUTTON_CONFIG_MASK, 0);
 
+       /* Enable ground switch */
+       snd_soc_component_update_bits(component, 0xFB, 0x01, 0x01);
+
        INIT_WORK(&da7219_aad->btn_det_work, da7219_aad_btn_det_work);
        INIT_WORK(&da7219_aad->hptest_work, da7219_aad_hptest_work);
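
Two ordering points in the da7219 AAD change: pending button-detect/HP-test work is now cancelled before the outputs are un-driven, and a ground switch (register 0xFB, bit 0) is opened while the detection work runs and closed again once the jack is removed and at init. A heavily condensed sketch of the removal path, with the flow assumed from the hunks above rather than the full driver logic:

    /* Sketch of the jack-removal path after this change: */
    cancel_work_sync(&da7219_aad->btn_det_work);    /* nothing can race below */
    cancel_work_sync(&da7219_aad->hptest_work);

    snd_soc_component_update_bits(component, DA7219_HP_R_CTRL,
                                  DA7219_HP_R_AMP_OE_MASK, 0);  /* un-drive outputs */
    snd_soc_component_update_bits(component, 0xFB, 0x01, 0x01); /* ground switch on */
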
 
index 12da2be..69c80d8 100644 (file)
@@ -2094,12 +2094,14 @@ EXPORT_SYMBOL_GPL(rt5640_sel_asrc_clk_src);
 void rt5640_enable_micbias1_for_ovcd(struct snd_soc_component *component)
 {
        struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
+       struct rt5640_priv *rt5640 = snd_soc_component_get_drvdata(component);
 
        snd_soc_dapm_mutex_lock(dapm);
        snd_soc_dapm_force_enable_pin_unlocked(dapm, "LDO2");
        snd_soc_dapm_force_enable_pin_unlocked(dapm, "MICBIAS1");
        /* OVCD is unreliable when used with RCCLK as sysclk-source */
-       snd_soc_dapm_force_enable_pin_unlocked(dapm, "Platform Clock");
+       if (rt5640->use_platform_clock)
+               snd_soc_dapm_force_enable_pin_unlocked(dapm, "Platform Clock");
        snd_soc_dapm_sync_unlocked(dapm);
        snd_soc_dapm_mutex_unlock(dapm);
 }
@@ -2108,9 +2110,11 @@ EXPORT_SYMBOL_GPL(rt5640_enable_micbias1_for_ovcd);
 void rt5640_disable_micbias1_for_ovcd(struct snd_soc_component *component)
 {
        struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
+       struct rt5640_priv *rt5640 = snd_soc_component_get_drvdata(component);
 
        snd_soc_dapm_mutex_lock(dapm);
-       snd_soc_dapm_disable_pin_unlocked(dapm, "Platform Clock");
+       if (rt5640->use_platform_clock)
+               snd_soc_dapm_disable_pin_unlocked(dapm, "Platform Clock");
        snd_soc_dapm_disable_pin_unlocked(dapm, "MICBIAS1");
        snd_soc_dapm_disable_pin_unlocked(dapm, "LDO2");
        snd_soc_dapm_sync_unlocked(dapm);
@@ -2535,6 +2539,9 @@ static void rt5640_enable_jack_detect(struct snd_soc_component *component,
                rt5640->jd_gpio_irq_requested = true;
        }
 
+       if (jack_data && jack_data->use_platform_clock)
+               rt5640->use_platform_clock = jack_data->use_platform_clock;
+
        ret = request_irq(rt5640->irq, rt5640_irq,
                          IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                          "rt5640", rt5640);
index 9e49b9a..505c935 100644 (file)
@@ -2155,11 +2155,13 @@ struct rt5640_priv {
        bool jd_inverted;
        unsigned int ovcd_th;
        unsigned int ovcd_sf;
+       bool use_platform_clock;
 };
 
 struct rt5640_set_jack_data {
        int codec_irq_override;
        struct gpio_desc *jd_gpio;
+       bool use_platform_clock;
 };
 
 int rt5640_dmic_enable(struct snd_soc_component *component,
index e4965ef..1c8f5ca 100644 (file)
@@ -80,8 +80,8 @@
 #define FSL_SAI_xCR3(tx, ofs)  (tx ? FSL_SAI_TCR3(ofs) : FSL_SAI_RCR3(ofs))
 #define FSL_SAI_xCR4(tx, ofs)  (tx ? FSL_SAI_TCR4(ofs) : FSL_SAI_RCR4(ofs))
 #define FSL_SAI_xCR5(tx, ofs)  (tx ? FSL_SAI_TCR5(ofs) : FSL_SAI_RCR5(ofs))
-#define FSL_SAI_xDR(tx, ofs)   (tx ? FSL_SAI_TDR(ofs) : FSL_SAI_RDR(ofs))
-#define FSL_SAI_xFR(tx, ofs)   (tx ? FSL_SAI_TFR(ofs) : FSL_SAI_RFR(ofs))
+#define FSL_SAI_xDR0(tx)       (tx ? FSL_SAI_TDR0 : FSL_SAI_RDR0)
+#define FSL_SAI_xFR0(tx)       (tx ? FSL_SAI_TFR0 : FSL_SAI_RFR0)
 #define FSL_SAI_xMR(tx)                (tx ? FSL_SAI_TMR : FSL_SAI_RMR)
 
 /* SAI Transmit/Receive Control Register */
index 80cb016..87f9c18 100644 (file)
@@ -326,7 +326,8 @@ static int avs_register_i2s_board(struct avs_dev *adev, struct snd_soc_acpi_mach
        num_ssps = adev->hw_cfg.i2s_caps.ctrl_count;
        if (fls(mach->mach_params.i2s_link_mask) > num_ssps) {
                dev_err(adev->dev, "Platform supports %d SSPs but board %s requires SSP%ld\n",
-                       num_ssps, mach->drv_name, __fls(mach->mach_params.i2s_link_mask));
+                       num_ssps, mach->drv_name,
+                       (unsigned long)__fls(mach->mach_params.i2s_link_mask));
                return -ENODEV;
        }
 
index 7b948a2..ed9fa17 100644 (file)
@@ -1191,12 +1191,14 @@ static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
 {
        struct snd_soc_card *card = runtime->card;
        struct byt_rt5640_private *priv = snd_soc_card_get_drvdata(card);
+       struct rt5640_set_jack_data *jack_data = &priv->jack_data;
        struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component;
        const struct snd_soc_dapm_route *custom_map = NULL;
        int num_routes = 0;
        int ret;
 
        card->dapm.idle_bias_off = true;
+       jack_data->use_platform_clock = true;
 
        /* Start with RC clk for jack-detect (we disable MCLK below) */
        if (byt_rt5640_quirk & BYT_RT5640_MCLK_EN)
index e6e52c7..c138516 100644 (file)
@@ -453,7 +453,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
                .drv_name = "adl_mx98360a_nau8825",
                .machine_quirk = snd_soc_acpi_codec_list,
                .quirk_data = &adl_max98360a_amp,
-               .sof_tplg_filename = "sof-adl-mx98360a-nau8825.tplg",
+               .sof_tplg_filename = "sof-adl-max98360a-nau8825.tplg",
        },
        {
                .id = "RTL5682",
index 8ee2dea..4489d2c 100644 (file)
@@ -21,8 +21,7 @@
 #include <sound/soc.h>
 
 #include <asm/mach-types.h>
-#include <mach/corgi.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 
 #include "../codecs/wm8731.h"
 #include "pxa2xx-i2s.h"
@@ -41,6 +40,9 @@
 static int corgi_jack_func;
 static int corgi_spk_func;
 
+static struct gpio_desc *gpiod_mute_l, *gpiod_mute_r,
+                       *gpiod_apm_on, *gpiod_mic_bias;
+
 static void corgi_ext_control(struct snd_soc_dapm_context *dapm)
 {
        snd_soc_dapm_mutex_lock(dapm);
@@ -49,8 +51,8 @@ static void corgi_ext_control(struct snd_soc_dapm_context *dapm)
        switch (corgi_jack_func) {
        case CORGI_HP:
                /* set = unmute headphone */
-               gpio_set_value(CORGI_GPIO_MUTE_L, 1);
-               gpio_set_value(CORGI_GPIO_MUTE_R, 1);
+               gpiod_set_value(gpiod_mute_l, 1);
+               gpiod_set_value(gpiod_mute_r, 1);
                snd_soc_dapm_disable_pin_unlocked(dapm, "Mic Jack");
                snd_soc_dapm_disable_pin_unlocked(dapm, "Line Jack");
                snd_soc_dapm_enable_pin_unlocked(dapm, "Headphone Jack");
@@ -58,24 +60,24 @@ static void corgi_ext_control(struct snd_soc_dapm_context *dapm)
                break;
        case CORGI_MIC:
                /* reset = mute headphone */
-               gpio_set_value(CORGI_GPIO_MUTE_L, 0);
-               gpio_set_value(CORGI_GPIO_MUTE_R, 0);
+               gpiod_set_value(gpiod_mute_l, 0);
+               gpiod_set_value(gpiod_mute_r, 0);
                snd_soc_dapm_enable_pin_unlocked(dapm, "Mic Jack");
                snd_soc_dapm_disable_pin_unlocked(dapm, "Line Jack");
                snd_soc_dapm_disable_pin_unlocked(dapm, "Headphone Jack");
                snd_soc_dapm_disable_pin_unlocked(dapm, "Headset Jack");
                break;
        case CORGI_LINE:
-               gpio_set_value(CORGI_GPIO_MUTE_L, 0);
-               gpio_set_value(CORGI_GPIO_MUTE_R, 0);
+               gpiod_set_value(gpiod_mute_l, 0);
+               gpiod_set_value(gpiod_mute_r, 0);
                snd_soc_dapm_disable_pin_unlocked(dapm, "Mic Jack");
                snd_soc_dapm_enable_pin_unlocked(dapm, "Line Jack");
                snd_soc_dapm_disable_pin_unlocked(dapm, "Headphone Jack");
                snd_soc_dapm_disable_pin_unlocked(dapm, "Headset Jack");
                break;
        case CORGI_HEADSET:
-               gpio_set_value(CORGI_GPIO_MUTE_L, 0);
-               gpio_set_value(CORGI_GPIO_MUTE_R, 1);
+               gpiod_set_value(gpiod_mute_l, 0);
+               gpiod_set_value(gpiod_mute_r, 1);
                snd_soc_dapm_enable_pin_unlocked(dapm, "Mic Jack");
                snd_soc_dapm_disable_pin_unlocked(dapm, "Line Jack");
                snd_soc_dapm_disable_pin_unlocked(dapm, "Headphone Jack");
@@ -108,8 +110,8 @@ static int corgi_startup(struct snd_pcm_substream *substream)
 static void corgi_shutdown(struct snd_pcm_substream *substream)
 {
        /* set = unmute headphone */
-       gpio_set_value(CORGI_GPIO_MUTE_L, 1);
-       gpio_set_value(CORGI_GPIO_MUTE_R, 1);
+       gpiod_set_value(gpiod_mute_l, 1);
+       gpiod_set_value(gpiod_mute_r, 1);
 }
 
 static int corgi_hw_params(struct snd_pcm_substream *substream,
@@ -199,14 +201,14 @@ static int corgi_set_spk(struct snd_kcontrol *kcontrol,
 static int corgi_amp_event(struct snd_soc_dapm_widget *w,
        struct snd_kcontrol *k, int event)
 {
-       gpio_set_value(CORGI_GPIO_APM_ON, SND_SOC_DAPM_EVENT_ON(event));
+       gpiod_set_value(gpiod_apm_on, SND_SOC_DAPM_EVENT_ON(event));
        return 0;
 }
 
 static int corgi_mic_event(struct snd_soc_dapm_widget *w,
        struct snd_kcontrol *k, int event)
 {
-       gpio_set_value(CORGI_GPIO_MIC_BIAS, SND_SOC_DAPM_EVENT_ON(event));
+       gpiod_set_value(gpiod_mic_bias, SND_SOC_DAPM_EVENT_ON(event));
        return 0;
 }
 
@@ -293,6 +295,19 @@ static int corgi_probe(struct platform_device *pdev)
 
        card->dev = &pdev->dev;
 
+       gpiod_mute_l = devm_gpiod_get(&pdev->dev, "mute-l", GPIOD_OUT_HIGH);
+       if (IS_ERR(gpiod_mute_l))
+               return PTR_ERR(gpiod_mute_l);
+       gpiod_mute_r = devm_gpiod_get(&pdev->dev, "mute-r", GPIOD_OUT_HIGH);
+       if (IS_ERR(gpiod_mute_r))
+               return PTR_ERR(gpiod_mute_r);
+       gpiod_apm_on = devm_gpiod_get(&pdev->dev, "apm-on", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiod_apm_on))
+               return PTR_ERR(gpiod_apm_on);
+       gpiod_mic_bias = devm_gpiod_get(&pdev->dev, "mic-bias", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiod_mic_bias))
+               return PTR_ERR(gpiod_mic_bias);
+
        ret = devm_snd_soc_register_card(&pdev->dev, card);
        if (ret)
                dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
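
corgi (like the eseries, hx4700 and magician boards below) switches from fixed GPIO numbers out of <mach/*.h> to GPIO descriptors looked up by con_id on the platform device, with the initial level carried in the GPIOD_OUT_* flag and the matching gpiod lookup table expected from the board code, outside this hunk. A condensed sketch of the pattern; the con_id is the one used in this probe, the wrapper function is illustrative:

    #include <linux/gpio/consumer.h>

    static struct gpio_desc *gpiod_mute_l;

    static int corgi_request_gpios(struct device *dev)
    {
            /* request the line driven high at init (headphone unmuted) */
            gpiod_mute_l = devm_gpiod_get(dev, "mute-l", GPIOD_OUT_HIGH);
            if (IS_ERR(gpiod_mute_l))
                    return PTR_ERR(gpiod_mute_l);

            /* values are logical; active-low handling lives in the lookup table */
            gpiod_set_value(gpiod_mute_l, 1);
            return 0;
    }
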
index eafa148..4e0e9b7 100644 (file)
@@ -7,17 +7,19 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
 
-#include <mach/audio.h>
-#include <mach/eseries-gpio.h>
+#include <linux/platform_data/asoc-pxa.h>
 
 #include <asm/mach-types.h>
 
+static struct gpio_desc *gpiod_output_amp, *gpiod_input_amp;
+static struct gpio_desc *gpiod_audio_power;
+
 #define E740_AUDIO_OUT 1
 #define E740_AUDIO_IN  2
 
@@ -25,9 +27,9 @@ static int e740_audio_power;
 
 static void e740_sync_audio_power(int status)
 {
-       gpio_set_value(GPIO_E740_WM9705_nAVDD2, !status);
-       gpio_set_value(GPIO_E740_AMP_ON, (status & E740_AUDIO_OUT) ? 1 : 0);
-       gpio_set_value(GPIO_E740_MIC_ON, (status & E740_AUDIO_IN) ? 1 : 0);
+       gpiod_set_value(gpiod_audio_power, !status);
+       gpiod_set_value(gpiod_output_amp, (status & E740_AUDIO_OUT) ? 1 : 0);
+       gpiod_set_value(gpiod_input_amp, (status & E740_AUDIO_IN) ? 1 : 0);
 }
 
 static int e740_mic_amp_event(struct snd_soc_dapm_widget *w,
@@ -116,36 +118,35 @@ static struct snd_soc_card e740 = {
        .fully_routed = true,
 };
 
-static struct gpio e740_audio_gpios[] = {
-       { GPIO_E740_MIC_ON, GPIOF_OUT_INIT_LOW, "Mic amp" },
-       { GPIO_E740_AMP_ON, GPIOF_OUT_INIT_LOW, "Output amp" },
-       { GPIO_E740_WM9705_nAVDD2, GPIOF_OUT_INIT_HIGH, "Audio power" },
-};
-
 static int e740_probe(struct platform_device *pdev)
 {
        struct snd_soc_card *card = &e740;
        int ret;
 
-       ret = gpio_request_array(e740_audio_gpios,
-                                ARRAY_SIZE(e740_audio_gpios));
+       gpiod_input_amp  = devm_gpiod_get(&pdev->dev, "Mic amp", GPIOD_OUT_LOW);
+       ret = PTR_ERR_OR_ZERO(gpiod_input_amp);
+       if (ret)
+               return ret;
+       gpiod_output_amp  = devm_gpiod_get(&pdev->dev, "Output amp", GPIOD_OUT_LOW);
+       ret = PTR_ERR_OR_ZERO(gpiod_output_amp);
+       if (ret)
+               return ret;
+       gpiod_audio_power = devm_gpiod_get(&pdev->dev, "Audio power", GPIOD_OUT_HIGH);
+       ret = PTR_ERR_OR_ZERO(gpiod_audio_power);
        if (ret)
                return ret;
 
        card->dev = &pdev->dev;
 
        ret = devm_snd_soc_register_card(&pdev->dev, card);
-       if (ret) {
+       if (ret)
                dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
                        ret);
-               gpio_free_array(e740_audio_gpios, ARRAY_SIZE(e740_audio_gpios));
-       }
        return ret;
 }
 
 static int e740_remove(struct platform_device *pdev)
 {
-       gpio_free_array(e740_audio_gpios, ARRAY_SIZE(e740_audio_gpios));
        return 0;
 }
 
index d75510d..7a1e0d8 100644 (file)
@@ -7,24 +7,25 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
 
-#include <mach/audio.h>
-#include <mach/eseries-gpio.h>
+#include <linux/platform_data/asoc-pxa.h>
 
 #include <asm/mach-types.h>
 
+static struct gpio_desc *gpiod_spk_amp, *gpiod_hp_amp;
+
 static int e750_spk_amp_event(struct snd_soc_dapm_widget *w,
                                struct snd_kcontrol *kcontrol, int event)
 {
        if (event & SND_SOC_DAPM_PRE_PMU)
-               gpio_set_value(GPIO_E750_SPK_AMP_OFF, 0);
+               gpiod_set_value(gpiod_spk_amp, 1);
        else if (event & SND_SOC_DAPM_POST_PMD)
-               gpio_set_value(GPIO_E750_SPK_AMP_OFF, 1);
+               gpiod_set_value(gpiod_spk_amp, 0);
 
        return 0;
 }
@@ -33,9 +34,9 @@ static int e750_hp_amp_event(struct snd_soc_dapm_widget *w,
                                struct snd_kcontrol *kcontrol, int event)
 {
        if (event & SND_SOC_DAPM_PRE_PMU)
-               gpio_set_value(GPIO_E750_HP_AMP_OFF, 0);
+               gpiod_set_value(gpiod_hp_amp, 1);
        else if (event & SND_SOC_DAPM_POST_PMD)
-               gpio_set_value(GPIO_E750_HP_AMP_OFF, 1);
+               gpiod_set_value(gpiod_hp_amp, 0);
 
        return 0;
 }
@@ -100,35 +101,31 @@ static struct snd_soc_card e750 = {
        .fully_routed = true,
 };
 
-static struct gpio e750_audio_gpios[] = {
-       { GPIO_E750_HP_AMP_OFF, GPIOF_OUT_INIT_HIGH, "Headphone amp" },
-       { GPIO_E750_SPK_AMP_OFF, GPIOF_OUT_INIT_HIGH, "Speaker amp" },
-};
-
 static int e750_probe(struct platform_device *pdev)
 {
        struct snd_soc_card *card = &e750;
        int ret;
 
-       ret = gpio_request_array(e750_audio_gpios,
-                                ARRAY_SIZE(e750_audio_gpios));
+       gpiod_hp_amp  = devm_gpiod_get(&pdev->dev, "Headphone amp", GPIOD_OUT_LOW);
+       ret = PTR_ERR_OR_ZERO(gpiod_hp_amp);
+       if (ret)
+               return ret;
+       gpiod_spk_amp  = devm_gpiod_get(&pdev->dev, "Speaker amp", GPIOD_OUT_LOW);
+       ret = PTR_ERR_OR_ZERO(gpiod_spk_amp);
        if (ret)
                return ret;
 
        card->dev = &pdev->dev;
 
        ret = devm_snd_soc_register_card(&pdev->dev, card);
-       if (ret) {
+       if (ret)
                dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
                        ret);
-               gpio_free_array(e750_audio_gpios, ARRAY_SIZE(e750_audio_gpios));
-       }
        return ret;
 }
 
 static int e750_remove(struct platform_device *pdev)
 {
-       gpio_free_array(e750_audio_gpios, ARRAY_SIZE(e750_audio_gpios));
        return 0;
 }
 
index 56d543d..a39c494 100644 (file)
@@ -7,23 +7,24 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
 
 #include <asm/mach-types.h>
-#include <mach/audio.h>
-#include <mach/eseries-gpio.h>
+#include <linux/platform_data/asoc-pxa.h>
+
+static struct gpio_desc *gpiod_spk_amp, *gpiod_hp_amp;
 
 static int e800_spk_amp_event(struct snd_soc_dapm_widget *w,
                                struct snd_kcontrol *kcontrol, int event)
 {
        if (event & SND_SOC_DAPM_PRE_PMU)
-               gpio_set_value(GPIO_E800_SPK_AMP_ON, 1);
+               gpiod_set_value(gpiod_spk_amp, 1);
        else if (event & SND_SOC_DAPM_POST_PMD)
-               gpio_set_value(GPIO_E800_SPK_AMP_ON, 0);
+               gpiod_set_value(gpiod_spk_amp, 0);
 
        return 0;
 }
@@ -32,9 +33,9 @@ static int e800_hp_amp_event(struct snd_soc_dapm_widget *w,
                                struct snd_kcontrol *kcontrol, int event)
 {
        if (event & SND_SOC_DAPM_PRE_PMU)
-               gpio_set_value(GPIO_E800_HP_AMP_OFF, 0);
+               gpiod_set_value(gpiod_hp_amp, 1);
        else if (event & SND_SOC_DAPM_POST_PMD)
-               gpio_set_value(GPIO_E800_HP_AMP_OFF, 1);
+               gpiod_set_value(gpiod_hp_amp, 0);
 
        return 0;
 }
@@ -100,35 +101,31 @@ static struct snd_soc_card e800 = {
        .num_dapm_routes = ARRAY_SIZE(audio_map),
 };
 
-static struct gpio e800_audio_gpios[] = {
-       { GPIO_E800_SPK_AMP_ON, GPIOF_OUT_INIT_HIGH, "Headphone amp" },
-       { GPIO_E800_HP_AMP_OFF, GPIOF_OUT_INIT_HIGH, "Speaker amp" },
-};
-
 static int e800_probe(struct platform_device *pdev)
 {
        struct snd_soc_card *card = &e800;
        int ret;
 
-       ret = gpio_request_array(e800_audio_gpios,
-                                ARRAY_SIZE(e800_audio_gpios));
+       gpiod_hp_amp  = devm_gpiod_get(&pdev->dev, "Headphone amp", GPIOD_OUT_LOW);
+       ret = PTR_ERR_OR_ZERO(gpiod_hp_amp);
+       if (ret)
+               return ret;
+       gpiod_spk_amp  = devm_gpiod_get(&pdev->dev, "Speaker amp", GPIOD_OUT_LOW);
+       ret = PTR_ERR_OR_ZERO(gpiod_spk_amp);
        if (ret)
                return ret;
 
        card->dev = &pdev->dev;
 
        ret = devm_snd_soc_register_card(&pdev->dev, card);
-       if (ret) {
+       if (ret)
                dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
                        ret);
-               gpio_free_array(e800_audio_gpios, ARRAY_SIZE(e800_audio_gpios));
-       }
        return ret;
 }
 
 static int e800_remove(struct platform_device *pdev)
 {
-       gpio_free_array(e800_audio_gpios, ARRAY_SIZE(e800_audio_gpios));
        return 0;
 }
 
index 9076ea7..b59ec22 100644 (file)
@@ -23,7 +23,7 @@
 #include <sound/soc.h>
 
 #include <asm/mach-types.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 
 SND_SOC_DAILINK_DEFS(ac97,
        DAILINK_COMP_ARRAY(COMP_CPU("pxa2xx-ac97")),
index 9a81615..a323ddb 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 
 #include <sound/core.h>
 #include <sound/jack.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
 
-#include <mach/hx4700.h>
 #include <asm/mach-types.h>
 #include "pxa2xx-i2s.h"
 
+static struct gpio_desc *gpiod_hp_driver, *gpiod_spk_sd;
 static struct snd_soc_jack hs_jack;
 
 /* Headphones jack detection DAPM pin */
@@ -29,20 +29,18 @@ static struct snd_soc_jack_pin hs_jack_pin[] = {
        {
                .pin    = "Headphone Jack",
                .mask   = SND_JACK_HEADPHONE,
+               .invert = 1,
        },
        {
                .pin    = "Speaker",
                /* disable speaker when hp jack is inserted */
                .mask   = SND_JACK_HEADPHONE,
-               .invert = 1,
        },
 };
 
 /* Headphones jack detection GPIO */
 static struct snd_soc_jack_gpio hs_jack_gpio = {
-       .gpio           = GPIO75_HX4700_EARPHONE_nDET,
-       .invert         = true,
-       .name           = "hp-gpio",
+       .name           = "earphone-det",
        .report         = SND_JACK_HEADPHONE,
        .debounce_time  = 200,
 };
@@ -81,14 +79,14 @@ static const struct snd_soc_ops hx4700_ops = {
 static int hx4700_spk_power(struct snd_soc_dapm_widget *w,
                            struct snd_kcontrol *k, int event)
 {
-       gpio_set_value(GPIO107_HX4700_SPK_nSD, !!SND_SOC_DAPM_EVENT_ON(event));
+       gpiod_set_value(gpiod_spk_sd, !SND_SOC_DAPM_EVENT_ON(event));
        return 0;
 }
 
 static int hx4700_hp_power(struct snd_soc_dapm_widget *w,
                           struct snd_kcontrol *k, int event)
 {
-       gpio_set_value(GPIO92_HX4700_HP_DRIVER, !!SND_SOC_DAPM_EVENT_ON(event));
+       gpiod_set_value(gpiod_hp_driver, !!SND_SOC_DAPM_EVENT_ON(event));
        return 0;
 }
 
@@ -162,11 +160,6 @@ static struct snd_soc_card snd_soc_card_hx4700 = {
        .fully_routed           = true,
 };
 
-static struct gpio hx4700_audio_gpios[] = {
-       { GPIO107_HX4700_SPK_nSD, GPIOF_OUT_INIT_HIGH, "SPK_POWER" },
-       { GPIO92_HX4700_HP_DRIVER, GPIOF_OUT_INIT_LOW, "EP_POWER" },
-};
-
 static int hx4700_audio_probe(struct platform_device *pdev)
 {
        int ret;
@@ -174,26 +167,26 @@ static int hx4700_audio_probe(struct platform_device *pdev)
        if (!machine_is_h4700())
                return -ENODEV;
 
-       ret = gpio_request_array(hx4700_audio_gpios,
-                               ARRAY_SIZE(hx4700_audio_gpios));
+       gpiod_hp_driver = devm_gpiod_get(&pdev->dev, "hp-driver", GPIOD_ASIS);
+       ret = PTR_ERR_OR_ZERO(gpiod_hp_driver);
+       if (ret)
+               return ret;
+       gpiod_spk_sd = devm_gpiod_get(&pdev->dev, "spk-sd", GPIOD_ASIS);
+       ret = PTR_ERR_OR_ZERO(gpiod_spk_sd);
        if (ret)
                return ret;
 
+       hs_jack_gpio.gpiod_dev = &pdev->dev;
        snd_soc_card_hx4700.dev = &pdev->dev;
        ret = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_hx4700);
-       if (ret)
-               gpio_free_array(hx4700_audio_gpios,
-                               ARRAY_SIZE(hx4700_audio_gpios));
 
        return ret;
 }
 
 static int hx4700_audio_remove(struct platform_device *pdev)
 {
-       gpio_set_value(GPIO92_HX4700_HP_DRIVER, 0);
-       gpio_set_value(GPIO107_HX4700_SPK_nSD, 0);
-
-       gpio_free_array(hx4700_audio_gpios, ARRAY_SIZE(hx4700_audio_gpios));
+       gpiod_set_value(gpiod_hp_driver, 0);
+       gpiod_set_value(gpiod_spk_sd, 0);
        return 0;
 }
 
index a5f326c..9433cc9 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/uda1380.h>
 
-#include <mach/magician.h>
 #include <asm/mach-types.h>
 #include "../codecs/uda1380.h"
 #include "pxa2xx-i2s.h"
@@ -36,6 +34,9 @@ static int magician_hp_switch;
 static int magician_spk_switch = 1;
 static int magician_in_sel = MAGICIAN_MIC;
 
+static struct gpio_desc *gpiod_spk_power, *gpiod_ep_power, *gpiod_mic_power;
+static struct gpio_desc *gpiod_in_sel0, *gpiod_in_sel1;
+
 static void magician_ext_control(struct snd_soc_dapm_context *dapm)
 {
 
@@ -215,10 +216,10 @@ static int magician_set_input(struct snd_kcontrol *kcontrol,
 
        switch (magician_in_sel) {
        case MAGICIAN_MIC:
-               gpio_set_value(EGPIO_MAGICIAN_IN_SEL1, 1);
+               gpiod_set_value(gpiod_in_sel1, 1);
                break;
        case MAGICIAN_MIC_EXT:
-               gpio_set_value(EGPIO_MAGICIAN_IN_SEL1, 0);
+               gpiod_set_value(gpiod_in_sel1, 0);
        }
 
        return 1;
@@ -227,21 +228,21 @@ static int magician_set_input(struct snd_kcontrol *kcontrol,
 static int magician_spk_power(struct snd_soc_dapm_widget *w,
                                struct snd_kcontrol *k, int event)
 {
-       gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, SND_SOC_DAPM_EVENT_ON(event));
+       gpiod_set_value(gpiod_spk_power, SND_SOC_DAPM_EVENT_ON(event));
        return 0;
 }
 
 static int magician_hp_power(struct snd_soc_dapm_widget *w,
                                struct snd_kcontrol *k, int event)
 {
-       gpio_set_value(EGPIO_MAGICIAN_EP_POWER, SND_SOC_DAPM_EVENT_ON(event));
+       gpiod_set_value(gpiod_ep_power, SND_SOC_DAPM_EVENT_ON(event));
        return 0;
 }
 
 static int magician_mic_bias(struct snd_soc_dapm_widget *w,
                                struct snd_kcontrol *k, int event)
 {
-       gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, SND_SOC_DAPM_EVENT_ON(event));
+       gpiod_set_value(gpiod_mic_power, SND_SOC_DAPM_EVENT_ON(event));
        return 0;
 }
 
@@ -328,106 +329,38 @@ static struct snd_soc_card snd_soc_card_magician = {
        .fully_routed = true,
 };
 
-static struct platform_device *magician_snd_device;
-
-/*
- * FIXME: move into magician board file once merged into the pxa tree
- */
-static struct uda1380_platform_data uda1380_info = {
-       .gpio_power = EGPIO_MAGICIAN_CODEC_POWER,
-       .gpio_reset = EGPIO_MAGICIAN_CODEC_RESET,
-       .dac_clk    = UDA1380_DAC_CLK_WSPLL,
-};
-
-static struct i2c_board_info i2c_board_info[] = {
-       {
-               I2C_BOARD_INFO("uda1380", 0x18),
-               .platform_data = &uda1380_info,
-       },
-};
-
-static int __init magician_init(void)
-{
-       int ret;
-       struct i2c_adapter *adapter;
-       struct i2c_client *client;
-
-       if (!machine_is_magician())
-               return -ENODEV;
-
-       adapter = i2c_get_adapter(0);
-       if (!adapter)
-               return -ENODEV;
-       client = i2c_new_client_device(adapter, i2c_board_info);
-       i2c_put_adapter(adapter);
-       if (IS_ERR(client))
-               return PTR_ERR(client);
-
-       ret = gpio_request(EGPIO_MAGICIAN_SPK_POWER, "SPK_POWER");
-       if (ret)
-               goto err_request_spk;
-       ret = gpio_request(EGPIO_MAGICIAN_EP_POWER, "EP_POWER");
-       if (ret)
-               goto err_request_ep;
-       ret = gpio_request(EGPIO_MAGICIAN_MIC_POWER, "MIC_POWER");
-       if (ret)
-               goto err_request_mic;
-       ret = gpio_request(EGPIO_MAGICIAN_IN_SEL0, "IN_SEL0");
-       if (ret)
-               goto err_request_in_sel0;
-       ret = gpio_request(EGPIO_MAGICIAN_IN_SEL1, "IN_SEL1");
-       if (ret)
-               goto err_request_in_sel1;
-
-       gpio_set_value(EGPIO_MAGICIAN_IN_SEL0, 0);
-
-       magician_snd_device = platform_device_alloc("soc-audio", -1);
-       if (!magician_snd_device) {
-               ret = -ENOMEM;
-               goto err_pdev;
-       }
-
-       platform_set_drvdata(magician_snd_device, &snd_soc_card_magician);
-       ret = platform_device_add(magician_snd_device);
-       if (ret) {
-               platform_device_put(magician_snd_device);
-               goto err_pdev;
-       }
-
-       return 0;
-
-err_pdev:
-       gpio_free(EGPIO_MAGICIAN_IN_SEL1);
-err_request_in_sel1:
-       gpio_free(EGPIO_MAGICIAN_IN_SEL0);
-err_request_in_sel0:
-       gpio_free(EGPIO_MAGICIAN_MIC_POWER);
-err_request_mic:
-       gpio_free(EGPIO_MAGICIAN_EP_POWER);
-err_request_ep:
-       gpio_free(EGPIO_MAGICIAN_SPK_POWER);
-err_request_spk:
-       return ret;
-}
-
-static void __exit magician_exit(void)
+static int magician_audio_probe(struct platform_device *pdev)
 {
-       platform_device_unregister(magician_snd_device);
-
-       gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, 0);
-       gpio_set_value(EGPIO_MAGICIAN_EP_POWER, 0);
-       gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, 0);
-
-       gpio_free(EGPIO_MAGICIAN_IN_SEL1);
-       gpio_free(EGPIO_MAGICIAN_IN_SEL0);
-       gpio_free(EGPIO_MAGICIAN_MIC_POWER);
-       gpio_free(EGPIO_MAGICIAN_EP_POWER);
-       gpio_free(EGPIO_MAGICIAN_SPK_POWER);
+       struct device *dev = &pdev->dev;
+
+       gpiod_spk_power = devm_gpiod_get(dev, "SPK_POWER", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiod_spk_power))
+               return PTR_ERR(gpiod_spk_power);
+       gpiod_ep_power = devm_gpiod_get(dev, "EP_POWER", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiod_ep_power))
+               return PTR_ERR(gpiod_ep_power);
+       gpiod_mic_power = devm_gpiod_get(dev, "MIC_POWER", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiod_mic_power))
+               return PTR_ERR(gpiod_mic_power);
+       gpiod_in_sel0 = devm_gpiod_get(dev, "IN_SEL0", GPIOD_OUT_HIGH);
+       if (IS_ERR(gpiod_in_sel0))
+               return PTR_ERR(gpiod_in_sel0);
+       gpiod_in_sel1 = devm_gpiod_get(dev, "IN_SEL1", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiod_in_sel1))
+               return PTR_ERR(gpiod_in_sel1);
+
+       snd_soc_card_magician.dev = &pdev->dev;
+       return devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_magician);
 }
 
-module_init(magician_init);
-module_exit(magician_exit);
+static struct platform_driver magician_audio_driver = {
+       .driver.name = "magician-audio",
+       .driver.pm = &snd_soc_pm_ops,
+       .probe = magician_audio_probe,
+};
+module_platform_driver(magician_audio_driver);
 
 MODULE_AUTHOR("Philipp Zabel");
 MODULE_DESCRIPTION("ALSA SoC Magician");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:magician-audio");
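
magician moves from a board-file style module_init() that instantiated its own I2C codec and "soc-audio" device to a regular platform driver: GPIOs become devm descriptors and the card is registered from probe. For the new driver to bind, the board code has to register a matching "magician-audio" device and the gpiod lookups; that side is not part of this hunk, so the sketch below is purely illustrative and its chip label and line numbers are placeholders:

    #include <linux/gpio/machine.h>
    #include <linux/platform_device.h>

    static struct gpiod_lookup_table magician_audio_gpiod_table = {
            .dev_id = "magician-audio",
            .table = {
                    GPIO_LOOKUP("egpio-0", 2, "SPK_POWER", GPIO_ACTIVE_HIGH),
                    GPIO_LOOKUP("egpio-0", 3, "EP_POWER",  GPIO_ACTIVE_HIGH),
                    { },
            },
    };

    static void magician_register_audio(void)
    {
            gpiod_add_lookup_table(&magician_audio_gpiod_table);
            platform_device_register_simple("magician-audio", -1, NULL, 0);
    }
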
index 763db7b..0fa3763 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/platform_device.h>
 
 #include <asm/mach-types.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
index 65257f7..a2321c0 100644 (file)
@@ -20,7 +20,7 @@
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 #include <linux/platform_data/asoc-palm27x.h>
 
 static struct snd_soc_jack hs_jack;
index 323ba3e..5fdaa47 100644 (file)
@@ -21,8 +21,8 @@
 
 #include <asm/mach-types.h>
 #include <asm/hardware/locomo.h>
-#include <mach/poodle.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
+#include <linux/platform_data/asoc-poodle.h>
 
 #include "../codecs/wm8731.h"
 #include "pxa2xx-i2s.h"
 static int poodle_jack_func;
 static int poodle_spk_func;
 
+static struct poodle_audio_platform_data *poodle_pdata;
+
 static void poodle_ext_control(struct snd_soc_dapm_context *dapm)
 {
        /* set up jack connection */
        if (poodle_jack_func == POODLE_HP) {
                /* set = unmute headphone */
-               locomo_gpio_write(&poodle_locomo_device.dev,
-                       POODLE_LOCOMO_GPIO_MUTE_L, 1);
-               locomo_gpio_write(&poodle_locomo_device.dev,
-                       POODLE_LOCOMO_GPIO_MUTE_R, 1);
+               locomo_gpio_write(poodle_pdata->locomo_dev,
+                       poodle_pdata->gpio_mute_l, 1);
+               locomo_gpio_write(poodle_pdata->locomo_dev,
+                       poodle_pdata->gpio_mute_r, 1);
                snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
        } else {
-               locomo_gpio_write(&poodle_locomo_device.dev,
-                       POODLE_LOCOMO_GPIO_MUTE_L, 0);
-               locomo_gpio_write(&poodle_locomo_device.dev,
-                       POODLE_LOCOMO_GPIO_MUTE_R, 0);
+               locomo_gpio_write(poodle_pdata->locomo_dev,
+                       poodle_pdata->gpio_mute_l, 0);
+               locomo_gpio_write(poodle_pdata->locomo_dev,
+                       poodle_pdata->gpio_mute_r, 0);
                snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
        }
 
@@ -80,10 +82,10 @@ static int poodle_startup(struct snd_pcm_substream *substream)
 static void poodle_shutdown(struct snd_pcm_substream *substream)
 {
        /* set = unmute headphone */
-       locomo_gpio_write(&poodle_locomo_device.dev,
-               POODLE_LOCOMO_GPIO_MUTE_L, 1);
-       locomo_gpio_write(&poodle_locomo_device.dev,
-               POODLE_LOCOMO_GPIO_MUTE_R, 1);
+       locomo_gpio_write(poodle_pdata->locomo_dev,
+               poodle_pdata->gpio_mute_l, 1);
+       locomo_gpio_write(poodle_pdata->locomo_dev,
+               poodle_pdata->gpio_mute_r, 1);
 }
 
 static int poodle_hw_params(struct snd_pcm_substream *substream,
@@ -174,11 +176,11 @@ static int poodle_amp_event(struct snd_soc_dapm_widget *w,
        struct snd_kcontrol *k, int event)
 {
        if (SND_SOC_DAPM_EVENT_ON(event))
-               locomo_gpio_write(&poodle_locomo_device.dev,
-                       POODLE_LOCOMO_GPIO_AMP_ON, 0);
+               locomo_gpio_write(poodle_pdata->locomo_dev,
+                       poodle_pdata->gpio_amp_on, 0);
        else
-               locomo_gpio_write(&poodle_locomo_device.dev,
-                       POODLE_LOCOMO_GPIO_AMP_ON, 1);
+               locomo_gpio_write(poodle_pdata->locomo_dev,
+                       poodle_pdata->gpio_amp_on, 1);
 
        return 0;
 }
@@ -254,13 +256,14 @@ static int poodle_probe(struct platform_device *pdev)
        struct snd_soc_card *card = &poodle;
        int ret;
 
-       locomo_gpio_set_dir(&poodle_locomo_device.dev,
-               POODLE_LOCOMO_GPIO_AMP_ON, 0);
+       poodle_pdata = pdev->dev.platform_data;
+       locomo_gpio_set_dir(poodle_pdata->locomo_dev,
+               poodle_pdata->gpio_amp_on, 0);
        /* should we mute HP at startup - burning power ?*/
-       locomo_gpio_set_dir(&poodle_locomo_device.dev,
-               POODLE_LOCOMO_GPIO_MUTE_L, 0);
-       locomo_gpio_set_dir(&poodle_locomo_device.dev,
-               POODLE_LOCOMO_GPIO_MUTE_R, 0);
+       locomo_gpio_set_dir(poodle_pdata->locomo_dev,
+               poodle_pdata->gpio_mute_l, 0);
+       locomo_gpio_set_dir(poodle_pdata->locomo_dev,
+               poodle_pdata->gpio_mute_r, 0);
 
        card->dev = &pdev->dev;
 
index 58f8541..809ea34 100644 (file)
 #include <sound/pxa2xx-lib.h>
 #include <sound/dmaengine_pcm.h>
 
-#include <mach/hardware.h>
-#include <mach/regs-ac97.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
+
+#define PCDR   0x0040  /* PCM FIFO Data Register */
+#define MODR   0x0140  /* Modem FIFO Data Register */
+#define MCDR   0x0060  /* Mic-in FIFO Data Register */
 
 static void pxa2xx_ac97_warm_reset(struct ac97_controller *adrv)
 {
@@ -59,35 +61,30 @@ static struct ac97_controller_ops pxa2xx_ac97_ops = {
 };
 
 static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
-       .addr           = __PREG(PCDR),
        .addr_width     = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .chan_name      = "pcm_pcm_stereo_in",
        .maxburst       = 32,
 };
 
 static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
-       .addr           = __PREG(PCDR),
        .addr_width     = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .chan_name      = "pcm_pcm_stereo_out",
        .maxburst       = 32,
 };
 
 static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_aux_mono_out = {
-       .addr           = __PREG(MODR),
        .addr_width     = DMA_SLAVE_BUSWIDTH_2_BYTES,
        .chan_name      = "pcm_aux_mono_out",
        .maxburst       = 16,
 };
 
 static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_aux_mono_in = {
-       .addr           = __PREG(MODR),
        .addr_width     = DMA_SLAVE_BUSWIDTH_2_BYTES,
        .chan_name      = "pcm_aux_mono_in",
        .maxburst       = 16,
 };
 
 static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_mic_mono_in = {
-       .addr           = __PREG(MCDR),
        .addr_width     = DMA_SLAVE_BUSWIDTH_2_BYTES,
        .chan_name      = "pcm_aux_mic_mono",
        .maxburst       = 16,
@@ -226,6 +223,7 @@ static int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
        int ret;
        struct ac97_controller *ctrl;
        pxa2xx_audio_ops_t *pdata = pdev->dev.platform_data;
+       struct resource *regs;
        void **codecs_pdata;
 
        if (pdev->id != -1) {
@@ -233,6 +231,16 @@ static int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
                return -ENXIO;
        }
 
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!regs)
+               return -ENXIO;
+
+       pxa2xx_ac97_pcm_stereo_in.addr = regs->start + PCDR;
+       pxa2xx_ac97_pcm_stereo_out.addr = regs->start + PCDR;
+       pxa2xx_ac97_pcm_aux_mono_out.addr = regs->start + MODR;
+       pxa2xx_ac97_pcm_aux_mono_in.addr = regs->start + MODR;
+       pxa2xx_ac97_pcm_mic_mono_in.addr = regs->start + MCDR;
+
        ret = pxa2xx_ac97_hw_probe(pdev);
        if (ret) {
                dev_err(&pdev->dev, "PXA2xx AC97 hw probe error (%d)\n", ret);
index 5bfc1a9..746e6ec 100644 (file)
 #include <sound/pxa2xx-lib.h>
 #include <sound/dmaengine_pcm.h>
 
-#include <mach/hardware.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 
 #include "pxa2xx-i2s.h"
 
 /*
  * I2S Controller Register and Bit Definitions
  */
-#define SACR0          __REG(0x40400000)  /* Global Control Register */
-#define SACR1          __REG(0x40400004)  /* Serial Audio I 2 S/MSB-Justified Control Register */
-#define SASR0          __REG(0x4040000C)  /* Serial Audio I 2 S/MSB-Justified Interface and FIFO Status Register */
-#define SAIMR          __REG(0x40400014)  /* Serial Audio Interrupt Mask Register */
-#define SAICR          __REG(0x40400018)  /* Serial Audio Interrupt Clear Register */
-#define SADIV          __REG(0x40400060)  /* Audio Clock Divider Register. */
-#define SADR           __REG(0x40400080)  /* Serial Audio Data Register (TX and RX FIFO access Register). */
+#define SACR0          (0x0000)        /* Global Control Register */
+#define SACR1          (0x0004)        /* Serial Audio I 2 S/MSB-Justified Control Register */
+#define SASR0          (0x000C)        /* Serial Audio I 2 S/MSB-Justified Interface and FIFO Status Register */
+#define SAIMR          (0x0014)        /* Serial Audio Interrupt Mask Register */
+#define SAICR          (0x0018)        /* Serial Audio Interrupt Clear Register */
+#define SADIV          (0x0060)        /* Audio Clock Divider Register. */
+#define SADR           (0x0080)        /* Serial Audio Data Register (TX and RX FIFO access Register). */
 
 #define SACR0_RFTH(x)  ((x) << 12)     /* Rx FIFO Interrupt or DMA Trigger Threshold */
 #define SACR0_TFTH(x)  ((x) << 8)      /* Tx FIFO Interrupt or DMA Trigger Threshold */
@@ -77,16 +76,15 @@ struct pxa_i2s_port {
 static struct pxa_i2s_port pxa_i2s;
 static struct clk *clk_i2s;
 static int clk_ena = 0;
+static void __iomem *i2s_reg_base;
 
 static struct snd_dmaengine_dai_dma_data pxa2xx_i2s_pcm_stereo_out = {
-       .addr           = __PREG(SADR),
        .addr_width     = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .chan_name      = "tx",
        .maxburst       = 32,
 };
 
 static struct snd_dmaengine_dai_dma_data pxa2xx_i2s_pcm_stereo_in = {
-       .addr           = __PREG(SADR),
        .addr_width     = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .chan_name      = "rx",
        .maxburst       = 32,
@@ -102,7 +100,7 @@ static int pxa2xx_i2s_startup(struct snd_pcm_substream *substream,
                return PTR_ERR(clk_i2s);
 
        if (!snd_soc_dai_active(cpu_dai))
-               SACR0 = 0;
+               writel(0, i2s_reg_base + SACR0);
 
        return 0;
 }
@@ -114,7 +112,7 @@ static int pxa_i2s_wait(void)
 
        /* flush the Rx FIFO */
        for (i = 0; i < 16; i++)
-               SADR;
+               readl(i2s_reg_base + SADR);
        return 0;
 }
 
@@ -174,39 +172,39 @@ static int pxa2xx_i2s_hw_params(struct snd_pcm_substream *substream,
 
        /* is port used by another stream */
-       if (!(SACR0 & SACR0_ENB)) {
+       if (!(readl(i2s_reg_base + SACR0) & SACR0_ENB)) {
-               SACR0 = 0;
+               writel(0, i2s_reg_base + SACR0);
                if (pxa_i2s.master)
-                       SACR0 |= SACR0_BCKD;
+                       writel(readl(i2s_reg_base + SACR0) | (SACR0_BCKD), i2s_reg_base + SACR0);
 
-               SACR0 |= SACR0_RFTH(14) | SACR0_TFTH(1);
-               SACR1 |= pxa_i2s.fmt;
+               writel(readl(i2s_reg_base + SACR0) | (SACR0_RFTH(14) | SACR0_TFTH(1)), i2s_reg_base + SACR0);
+               writel(readl(i2s_reg_base + SACR1) | (pxa_i2s.fmt), i2s_reg_base + SACR1);
        }
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               SAIMR |= SAIMR_TFS;
+               writel(readl(i2s_reg_base + SAIMR) | (SAIMR_TFS), i2s_reg_base + SAIMR);
        else
-               SAIMR |= SAIMR_RFS;
+               writel(readl(i2s_reg_base + SAIMR) | (SAIMR_RFS), i2s_reg_base + SAIMR);
 
        switch (params_rate(params)) {
        case 8000:
-               SADIV = 0x48;
+               writel(0x48, i2s_reg_base + SADIV);
                break;
        case 11025:
-               SADIV = 0x34;
+               writel(0x34, i2s_reg_base + SADIV);
                break;
        case 16000:
-               SADIV = 0x24;
+               writel(0x24, i2s_reg_base + SADIV);
                break;
        case 22050:
-               SADIV = 0x1a;
+               writel(0x1a, i2s_reg_base + SADIV);
                break;
        case 44100:
-               SADIV = 0xd;
+               writel(0xd, i2s_reg_base + SADIV);
                break;
        case 48000:
-               SADIV = 0xc;
+               writel(0xc, i2s_reg_base + SADIV);
                break;
        case 96000: /* not in manual and possibly slightly inaccurate */
-               SADIV = 0x6;
+               writel(0x6, i2s_reg_base + SADIV);
                break;
        }
 
@@ -221,10 +219,10 @@ static int pxa2xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-                       SACR1 &= ~SACR1_DRPL;
+                       writel(readl(i2s_reg_base + SACR1) & (~SACR1_DRPL), i2s_reg_base + SACR1);
                else
-                       SACR1 &= ~SACR1_DREC;
-               SACR0 |= SACR0_ENB;
+                       writel(readl(i2s_reg_base + SACR1) & (~SACR1_DREC), i2s_reg_base + SACR1);
+               writel(readl(i2s_reg_base + SACR0) | (SACR0_ENB), i2s_reg_base + SACR0);
                break;
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
@@ -243,15 +241,15 @@ static void pxa2xx_i2s_shutdown(struct snd_pcm_substream *substream,
                                struct snd_soc_dai *dai)
 {
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               SACR1 |= SACR1_DRPL;
-               SAIMR &= ~SAIMR_TFS;
+               writel(readl(i2s_reg_base + SACR1) | (SACR1_DRPL), i2s_reg_base + SACR1);
+               writel(readl(i2s_reg_base + SAIMR) & (~SAIMR_TFS), i2s_reg_base + SAIMR);
        } else {
-               SACR1 |= SACR1_DREC;
-               SAIMR &= ~SAIMR_RFS;
+               writel(readl(i2s_reg_base + SACR1) | (SACR1_DREC), i2s_reg_base + SACR1);
+               writel(readl(i2s_reg_base + SAIMR) & (~SAIMR_RFS), i2s_reg_base + SAIMR);
        }
 
-       if ((SACR1 & (SACR1_DREC | SACR1_DRPL)) == (SACR1_DREC | SACR1_DRPL)) {
-               SACR0 &= ~SACR0_ENB;
+       if ((readl(i2s_reg_base + SACR1) & (SACR1_DREC | SACR1_DRPL)) == (SACR1_DREC | SACR1_DRPL)) {
+               writel(readl(i2s_reg_base + SACR0) & (~SACR0_ENB), i2s_reg_base + SACR0);
                pxa_i2s_wait();
                if (clk_ena) {
                        clk_disable_unprepare(clk_i2s);
@@ -264,13 +262,13 @@ static void pxa2xx_i2s_shutdown(struct snd_pcm_substream *substream,
 static int pxa2xx_soc_pcm_suspend(struct snd_soc_component *component)
 {
        /* store registers */
-       pxa_i2s.sacr0 = SACR0;
-       pxa_i2s.sacr1 = SACR1;
-       pxa_i2s.saimr = SAIMR;
-       pxa_i2s.sadiv = SADIV;
+       pxa_i2s.sacr0 = readl(i2s_reg_base + SACR0);
+       pxa_i2s.sacr1 = readl(i2s_reg_base + SACR1);
+       pxa_i2s.saimr = readl(i2s_reg_base + SAIMR);
+       pxa_i2s.sadiv = readl(i2s_reg_base + SADIV);
 
        /* deactivate link */
-       SACR0 &= ~SACR0_ENB;
+       writel(readl(i2s_reg_base + SACR0) & (~SACR0_ENB), i2s_reg_base + SACR0);
        pxa_i2s_wait();
        return 0;
 }
@@ -279,12 +277,12 @@ static int pxa2xx_soc_pcm_resume(struct snd_soc_component *component)
 {
        pxa_i2s_wait();
 
-       SACR0 = pxa_i2s.sacr0 & ~SACR0_ENB;
-       SACR1 = pxa_i2s.sacr1;
-       SAIMR = pxa_i2s.saimr;
-       SADIV = pxa_i2s.sadiv;
+       writel(pxa_i2s.sacr0 & ~SACR0_ENB, i2s_reg_base + SACR0);
+       writel(pxa_i2s.sacr1, i2s_reg_base + SACR1);
+       writel(pxa_i2s.saimr, i2s_reg_base + SAIMR);
+       writel(pxa_i2s.sadiv, i2s_reg_base + SADIV);
 
-       SACR0 = pxa_i2s.sacr0;
+       writel(pxa_i2s.sacr0, i2s_reg_base + SACR0);
 
        return 0;
 }
@@ -306,12 +304,12 @@ static int pxa2xx_i2s_probe(struct snd_soc_dai *dai)
         * the SACR0[RST] bit must also be set and cleared to reset all
         * I2S controller registers.
         */
-       SACR0 = SACR0_RST;
-       SACR0 = 0;
+       writel(SACR0_RST, i2s_reg_base + SACR0);
+       writel(0, i2s_reg_base + SACR0);
        /* Make sure RPL and REC are disabled */
-       SACR1 = SACR1_DRPL | SACR1_DREC;
+       writel(SACR1_DRPL | SACR1_DREC, i2s_reg_base + SACR1);
        /* Along with FIFO servicing */
-       SAIMR &= ~(SAIMR_RFS | SAIMR_TFS);
+       writel(readl(i2s_reg_base + SAIMR) & (~(SAIMR_RFS | SAIMR_TFS)), i2s_reg_base + SAIMR);
 
        snd_soc_dai_init_dma_data(dai, &pxa2xx_i2s_pcm_stereo_out,
                &pxa2xx_i2s_pcm_stereo_in);
@@ -371,6 +369,22 @@ static const struct snd_soc_component_driver pxa_i2s_component = {
 
 static int pxa2xx_i2s_drv_probe(struct platform_device *pdev)
 {
+       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+       if (!res) {
+               dev_err(&pdev->dev, "missing MMIO resource\n");
+               return -ENXIO;
+       }
+
+       i2s_reg_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(i2s_reg_base)) {
+               dev_err(&pdev->dev, "ioremap failed\n");
+               return PTR_ERR(i2s_reg_base);
+       }
+
+       pxa2xx_i2s_pcm_stereo_out.addr = res->start + SADR;
+       pxa2xx_i2s_pcm_stereo_in.addr = res->start + SADR;
+
        return devm_snd_soc_register_component(&pdev->dev, &pxa_i2s_component,
                                               &pxa_i2s_dai, 1);
 }
index 7c1384a..44303b6 100644 (file)
 #include <linux/timer.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
 
 #include <asm/mach-types.h>
-#include <mach/spitz.h>
 #include "../codecs/wm8750.h"
 #include "pxa2xx-i2s.h"
 
@@ -37,7 +36,7 @@
 
 static int spitz_jack_func;
 static int spitz_spk_func;
-static int spitz_mic_gpio;
+static struct gpio_desc *gpiod_mic, *gpiod_mute_l, *gpiod_mute_r;
 
 static void spitz_ext_control(struct snd_soc_dapm_context *dapm)
 {
@@ -56,8 +55,8 @@ static void spitz_ext_control(struct snd_soc_dapm_context *dapm)
                snd_soc_dapm_disable_pin_unlocked(dapm, "Mic Jack");
                snd_soc_dapm_disable_pin_unlocked(dapm, "Line Jack");
                snd_soc_dapm_enable_pin_unlocked(dapm, "Headphone Jack");
-               gpio_set_value(SPITZ_GPIO_MUTE_L, 1);
-               gpio_set_value(SPITZ_GPIO_MUTE_R, 1);
+               gpiod_set_value(gpiod_mute_l, 1);
+               gpiod_set_value(gpiod_mute_r, 1);
                break;
        case SPITZ_MIC:
                /* enable mic jack and bias, mute hp */
@@ -65,8 +64,8 @@ static void spitz_ext_control(struct snd_soc_dapm_context *dapm)
                snd_soc_dapm_disable_pin_unlocked(dapm, "Headset Jack");
                snd_soc_dapm_disable_pin_unlocked(dapm, "Line Jack");
                snd_soc_dapm_enable_pin_unlocked(dapm, "Mic Jack");
-               gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
-               gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
+               gpiod_set_value(gpiod_mute_l, 0);
+               gpiod_set_value(gpiod_mute_r, 0);
                break;
        case SPITZ_LINE:
                /* enable line jack, disable mic bias and mute hp */
@@ -74,8 +73,8 @@ static void spitz_ext_control(struct snd_soc_dapm_context *dapm)
                snd_soc_dapm_disable_pin_unlocked(dapm, "Headset Jack");
                snd_soc_dapm_disable_pin_unlocked(dapm, "Mic Jack");
                snd_soc_dapm_enable_pin_unlocked(dapm, "Line Jack");
-               gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
-               gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
+               gpiod_set_value(gpiod_mute_l, 0);
+               gpiod_set_value(gpiod_mute_r, 0);
                break;
        case SPITZ_HEADSET:
                /* enable and unmute headset jack enable mic bias, mute L hp */
@@ -83,8 +82,8 @@ static void spitz_ext_control(struct snd_soc_dapm_context *dapm)
                snd_soc_dapm_enable_pin_unlocked(dapm, "Mic Jack");
                snd_soc_dapm_disable_pin_unlocked(dapm, "Line Jack");
                snd_soc_dapm_enable_pin_unlocked(dapm, "Headset Jack");
-               gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
-               gpio_set_value(SPITZ_GPIO_MUTE_R, 1);
+               gpiod_set_value(gpiod_mute_l, 0);
+               gpiod_set_value(gpiod_mute_r, 1);
                break;
        case SPITZ_HP_OFF:
 
@@ -93,8 +92,8 @@ static void spitz_ext_control(struct snd_soc_dapm_context *dapm)
                snd_soc_dapm_disable_pin_unlocked(dapm, "Headset Jack");
                snd_soc_dapm_disable_pin_unlocked(dapm, "Mic Jack");
                snd_soc_dapm_disable_pin_unlocked(dapm, "Line Jack");
-               gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
-               gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
+               gpiod_set_value(gpiod_mute_l, 0);
+               gpiod_set_value(gpiod_mute_r, 0);
                break;
        }
 
@@ -199,7 +198,7 @@ static int spitz_set_spk(struct snd_kcontrol *kcontrol,
 static int spitz_mic_bias(struct snd_soc_dapm_widget *w,
        struct snd_kcontrol *k, int event)
 {
-       gpio_set_value_cansleep(spitz_mic_gpio, SND_SOC_DAPM_EVENT_ON(event));
+       gpiod_set_value_cansleep(gpiod_mic, SND_SOC_DAPM_EVENT_ON(event));
        return 0;
 }
 
@@ -287,39 +286,28 @@ static int spitz_probe(struct platform_device *pdev)
        struct snd_soc_card *card = &snd_soc_spitz;
        int ret;
 
-       if (machine_is_akita())
-               spitz_mic_gpio = AKITA_GPIO_MIC_BIAS;
-       else
-               spitz_mic_gpio = SPITZ_GPIO_MIC_BIAS;
-
-       ret = gpio_request(spitz_mic_gpio, "MIC GPIO");
-       if (ret)
-               goto err1;
-
-       ret = gpio_direction_output(spitz_mic_gpio, 0);
-       if (ret)
-               goto err2;
+       gpiod_mic = devm_gpiod_get(&pdev->dev, "mic", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiod_mic))
+               return PTR_ERR(gpiod_mic);
+       gpiod_mute_l = devm_gpiod_get(&pdev->dev, "mute-l", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiod_mute_l))
+               return PTR_ERR(gpiod_mute_l);
+       gpiod_mute_r = devm_gpiod_get(&pdev->dev, "mute-r", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiod_mute_r))
+               return PTR_ERR(gpiod_mute_r);
 
        card->dev = &pdev->dev;
 
        ret = devm_snd_soc_register_card(&pdev->dev, card);
-       if (ret) {
+       if (ret)
                dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
                        ret);
-               goto err2;
-       }
-
-       return 0;
 
-err2:
-       gpio_free(spitz_mic_gpio);
-err1:
        return ret;
 }
 
 static int spitz_remove(struct platform_device *pdev)
 {
-       gpio_free(spitz_mic_gpio);
        return 0;
 }
 
index 3b40b5f..30f83ca 100644 (file)
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
 
 #include <asm/mach-types.h>
-#include <mach/tosa.h>
-#include <mach/audio.h>
+#include <linux/platform_data/asoc-pxa.h>
 
 #define TOSA_HP        0
 #define TOSA_MIC_INT   1
@@ -33,6 +32,7 @@
 #define TOSA_SPK_ON    0
 #define TOSA_SPK_OFF   1
 
+static struct gpio_desc *tosa_mute;
 static int tosa_jack_func;
 static int tosa_spk_func;
 
@@ -128,7 +128,7 @@ static int tosa_set_spk(struct snd_kcontrol *kcontrol,
 static int tosa_hp_event(struct snd_soc_dapm_widget *w,
        struct snd_kcontrol *k, int event)
 {
-       gpio_set_value(TOSA_GPIO_L_MUTE, SND_SOC_DAPM_EVENT_ON(event) ? 1 : 0);
+       gpiod_set_value(tosa_mute, SND_SOC_DAPM_EVENT_ON(event) ? 1 : 0);
        return 0;
 }
 
@@ -222,10 +222,11 @@ static int tosa_probe(struct platform_device *pdev)
        struct snd_soc_card *card = &tosa;
        int ret;
 
-       ret = gpio_request_one(TOSA_GPIO_L_MUTE, GPIOF_OUT_INIT_LOW,
-                              "Headphone Jack");
-       if (ret)
-               return ret;
+       tosa_mute = devm_gpiod_get(&pdev->dev, NULL, GPIOD_OUT_LOW);
+       if (IS_ERR(tosa_mute))
+               return dev_err_probe(&pdev->dev, PTR_ERR(tosa_mute),
+                                    "failed to get L_MUTE GPIO\n");
+       gpiod_set_consumer_name(tosa_mute, "Headphone Jack");
 
        card->dev = &pdev->dev;
 
@@ -233,24 +234,16 @@ static int tosa_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
                        ret);
-               gpio_free(TOSA_GPIO_L_MUTE);
        }
        return ret;
 }
 
-static int tosa_remove(struct platform_device *pdev)
-{
-       gpio_free(TOSA_GPIO_L_MUTE);
-       return 0;
-}
-
 static struct platform_driver tosa_driver = {
        .driver         = {
                .name   = "tosa-audio",
                .pm     = &snd_soc_pm_ops,
        },
        .probe          = tosa_probe,
-       .remove         = tosa_remove,
 };
 
 module_platform_driver(tosa_driver);
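
dev_err_probe(), used in tosa_probe() above, returns the error it is given and logs it, staying quiet when the error is -EPROBE_DEFER. Roughly equivalent open-coded logic, for illustration only:

/* Rough open-coded equivalent of the dev_err_probe() call above;
 * dev_err_probe() additionally records the deferral reason so it shows up
 * in the devices_deferred debugfs file.
 */
tosa_mute = devm_gpiod_get(&pdev->dev, NULL, GPIOD_OUT_LOW);
if (IS_ERR(tosa_mute)) {
	int err = PTR_ERR(tosa_mute);

	if (err != -EPROBE_DEFER)
		dev_err(&pdev->dev, "failed to get L_MUTE GPIO: %d\n", err);
	return err;
}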
index f4a7cfe..020dcce 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/timer.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -21,9 +21,7 @@
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
-#include <mach/hardware.h>
-#include <mach/audio.h>
-#include <mach/z2.h>
+#include <linux/platform_data/asoc-pxa.h>
 
 #include "../codecs/wm8750.h"
 #include "pxa2xx-i2s.h"
@@ -90,7 +88,6 @@ static struct snd_soc_jack_pin hs_jack_pins[] = {
 /* Headset jack detection gpios */
 static struct snd_soc_jack_gpio hs_jack_gpios[] = {
        {
-               .gpio           = GPIO37_ZIPITZ2_HEADSET_DETECT,
                .name           = "hsdet-gpio",
                .report         = SND_JACK_HEADSET,
                .debounce_time  = 200,
@@ -197,6 +194,7 @@ static int __init z2_init(void)
        if (!z2_snd_device)
                return -ENOMEM;
 
+       hs_jack_gpios[0].gpiod_dev = &z2_snd_device->dev;
        platform_set_drvdata(z2_snd_device, &snd_soc_z2);
        ret = platform_device_add(z2_snd_device);
 
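With the hardcoded .gpio field gone, snd_soc_jack_add_gpios() resolves the headset-detect line through the .gpiod_dev and "hsdet-gpio" name set above, so the board code is expected to provide a matching gpiod lookup entry. A sketch, with a placeholder chip label and offset:

/* Sketch of the lookup entry the jack code resolves via .gpiod_dev/.name;
 * chip label and offset are placeholders, not taken from this patch.
 */
static struct gpiod_lookup_table z2_hsdet_gpio_table = {
	.dev_id = "soc-audio",	/* z2_snd_device: platform_device_alloc("soc-audio", -1) */
	.table = {
		GPIO_LOOKUP("gpio-pxa", 37, "hsdet-gpio", GPIO_ACTIVE_HIGH),
		{ }
	},
};
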
index 6f43db3..a827cc3 100644 (file)
@@ -2128,8 +2128,6 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
                        else
                                ret = soc_pcm_trigger(be_substream,
                                                      SNDRV_PCM_TRIGGER_START);
-
-                       ret = soc_pcm_trigger(be_substream, cmd);
                        if (ret) {
                                be->dpcm[stream].be_start--;
                                goto next;
index 3c435d3..33db334 100644 (file)
@@ -573,10 +573,14 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip,
        }
 
        /* FIXME - TEAC devices require the immediate interface setup */
-       if (rate != prev_rate && USB_ID_VENDOR(chip->usb_id) == 0x0644) {
-               usb_set_interface(chip->dev, fmt->iface, fmt->altsetting);
-               if (chip->quirk_flags & QUIRK_FLAG_IFACE_DELAY)
-                       msleep(50);
+       if (USB_ID_VENDOR(chip->usb_id) == 0x0644) {
+               bool cur_base_48k = (rate % 48000 == 0);
+               bool prev_base_48k = (prev_rate % 48000 == 0);
+               if (cur_base_48k != prev_base_48k) {
+                       usb_set_interface(chip->dev, fmt->iface, fmt->altsetting);
+                       if (chip->quirk_flags & QUIRK_FLAG_IFACE_DELAY)
+                               msleep(50);
+               }
        }
 
 validation:
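
The reworked TEAC quirk above only re-runs the interface setup when the sample rate crosses between the 44.1 kHz and 48 kHz base-rate families, instead of on every rate change. The check can be read as a standalone predicate (helper name is illustrative only):

/* Illustrative restatement of the check above: two rates are in the same
 * family when both are multiples of 48000 (48 kHz family) or neither is
 * (44.1 kHz family); only a family change triggers usb_set_interface().
 */
static bool teac_same_rate_family(unsigned int rate, unsigned int prev_rate)
{
	return (rate % 48000 == 0) == (prev_rate % 48000 == 0);
}

/* teac_same_rate_family(44100, 88200) -> true  (no re-setup)
 * teac_same_rate_family(44100, 48000) -> false (re-setup + optional delay)
 */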
index fdbdfb7..6a4af72 100644 (file)
@@ -552,10 +552,10 @@ int line6_init_pcm(struct usb_line6 *line6,
 
        line6pcm->max_packet_size_in =
                usb_maxpacket(line6->usbdev,
-                       usb_rcvisocpipe(line6->usbdev, ep_read), 0);
+                       usb_rcvisocpipe(line6->usbdev, ep_read));
        line6pcm->max_packet_size_out =
                usb_maxpacket(line6->usbdev,
-                       usb_sndisocpipe(line6->usbdev, ep_write), 1);
+                       usb_sndisocpipe(line6->usbdev, ep_write));
        if (!line6pcm->max_packet_size_in || !line6pcm->max_packet_size_out) {
                dev_err(line6pcm->line6->ifcdev,
                        "cannot get proper max packet size\n");
index 7c6ca2b..bbff092 100644 (file)
@@ -1145,6 +1145,9 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
 
 static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
 {
+       struct usbmidi_out_port *port = substream->runtime->private_data;
+
+       cancel_work_sync(&port->ep->work);
        return substream_open(substream, 0, 0);
 }
 
@@ -1286,7 +1289,7 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi *umidi,
                pipe = usb_rcvintpipe(umidi->dev, ep_info->in_ep);
        else
                pipe = usb_rcvbulkpipe(umidi->dev, ep_info->in_ep);
-       length = usb_maxpacket(umidi->dev, pipe, 0);
+       length = usb_maxpacket(umidi->dev, pipe);
        for (i = 0; i < INPUT_URBS; ++i) {
                buffer = usb_alloc_coherent(umidi->dev, length, GFP_KERNEL,
                                            &ep->urbs[i]->transfer_dma);
@@ -1375,7 +1378,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi,
                pipe = usb_sndbulkpipe(umidi->dev, ep_info->out_ep);
        switch (umidi->usb_id) {
        default:
-               ep->max_transfer = usb_maxpacket(umidi->dev, pipe, 1);
+               ep->max_transfer = usb_maxpacket(umidi->dev, pipe);
                break;
                /*
                 * Various chips declare a packet size larger than 4 bytes, but
index 7ef7a8a..3c79567 100644 (file)
@@ -439,6 +439,31 @@ static const struct usbmix_name_map msi_mpg_x570s_carbon_max_wifi_alc4080_map[]
        {}
 };
 
+/* Gigabyte B450/550 Mobo */
+static const struct usbmix_name_map gigabyte_b450_map[] = {
+       { 24, NULL },                   /* OT, IEC958?, disabled */
+       { 21, "Speaker" },              /* OT */
+       { 29, "Speaker Playback" },     /* FU */
+       { 22, "Headphone" },            /* OT */
+       { 30, "Headphone Playback" },   /* FU */
+       { 11, "Line" },                 /* IT */
+       { 27, "Line Capture" },         /* FU */
+       { 12, "Mic" },                  /* IT */
+       { 28, "Mic Capture" },          /* FU */
+       { 9, "Front Mic" },             /* IT */
+       { 25, "Front Mic Capture" },    /* FU */
+       {}
+};
+
+static const struct usbmix_connector_map gigabyte_b450_connector_map[] = {
+       { 13, 21 },     /* Speaker */
+       { 14, 22 },     /* Headphone */
+       { 19, 11 },     /* Line */
+       { 20, 12 },     /* Mic */
+       { 17, 9 },      /* Front Mic */
+       {}
+};
+
 /*
  * Control map entries
  */
@@ -581,6 +606,11 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .map = trx40_mobo_map,
                .connector_map = trx40_mobo_connector_map,
        },
+       {       /* Gigabyte B450/550 Mobo */
+               .id = USB_ID(0x0414, 0xa00d),
+               .map = gigabyte_b450_map,
+               .connector_map = gigabyte_b450_connector_map,
+       },
        {       /* ASUS ROG Zenith II */
                .id = USB_ID(0x0b05, 0x1916),
                .map = asus_rog_map,
index 9d0e447..a4d32e8 100644 (file)
@@ -51,7 +51,7 @@ static int init_pipe_urbs(struct usb_stream_kernel *sk,
 {
        int u, p;
        int maxpacket = use_packsize ?
-               use_packsize : usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+               use_packsize : usb_maxpacket(dev, pipe);
        int transfer_length = maxpacket * sk->n_o_ps;
 
        for (u = 0; u < USB_STREAM_NURBS;
@@ -171,7 +171,7 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
        out_pipe = usb_sndisocpipe(dev, out_endpoint);
 
        max_packsize = use_packsize ?
-               use_packsize : usb_maxpacket(dev, in_pipe, 0);
+               use_packsize : usb_maxpacket(dev, in_pipe);
 
        /*
                t_period = period_frames / sample_rate
@@ -187,7 +187,7 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
        read_size += packets * USB_STREAM_URBDEPTH *
                (max_packsize + sizeof(struct usb_stream_packet));
 
-       max_packsize = usb_maxpacket(dev, out_pipe, 1);
+       max_packsize = usb_maxpacket(dev, out_pipe);
        write_size = max_packsize * packets * USB_STREAM_URBDEPTH;
 
        if (read_size >= 256*PAGE_SIZE || write_size >= 256*PAGE_SIZE) {
index cfc1ea5..9cd5e3a 100644 (file)
@@ -421,7 +421,7 @@ static int usx2y_urbs_allocate(struct snd_usx2y_substream *subs)
 
        pipe = is_playback ? usb_sndisocpipe(dev, subs->endpoint) :
                        usb_rcvisocpipe(dev, subs->endpoint);
-       subs->maxpacksize = usb_maxpacket(dev, pipe, is_playback);
+       subs->maxpacksize = usb_maxpacket(dev, pipe);
        if (!subs->maxpacksize)
                return -EINVAL;
 
index db83522..240349b 100644 (file)
@@ -321,7 +321,7 @@ static int usx2y_usbpcm_urbs_allocate(struct snd_usx2y_substream *subs)
 
        pipe = is_playback ? usb_sndisocpipe(dev, subs->endpoint) :
                        usb_rcvisocpipe(dev, subs->endpoint);
-       subs->maxpacksize = usb_maxpacket(dev, pipe, is_playback);
+       subs->maxpacksize = usb_maxpacket(dev, pipe);
        if (!subs->maxpacksize)
                return -EINVAL;
 
index 11e8673..e83e6e4 100644 (file)
@@ -207,6 +207,8 @@ static void print_delayacct(struct taskstats *t)
               "THRASHING%12s%15s%15s\n"
               "      %15llu%15llu%15llums\n"
               "COMPACT  %12s%15s%15s\n"
+              "      %15llu%15llu%15llums\n"
+              "WPCOPY   %12s%15s%15s\n"
               "      %15llu%15llu%15llums\n",
               "count", "real total", "virtual total",
               "delay total", "delay average",
@@ -234,7 +236,11 @@ static void print_delayacct(struct taskstats *t)
               "count", "delay total", "delay average",
               (unsigned long long)t->compact_count,
               (unsigned long long)t->compact_delay_total,
-              average_ms(t->compact_delay_total, t->compact_count));
+              average_ms(t->compact_delay_total, t->compact_count),
+              "count", "delay total", "delay average",
+              (unsigned long long)t->wpcopy_count,
+              (unsigned long long)t->wpcopy_delay_total,
+              average_ms(t->wpcopy_delay_total, t->wpcopy_count));
 }
 
 static void task_context_switch_counts(struct taskstats *t)
index a2b233f..6c12295 100644 (file)
@@ -149,6 +149,7 @@ void print_usage(void)
                "  -r         Listen for rising edges\n"
                "  -f         Listen for falling edges\n"
                "  -w         Report the wall-clock time for events\n"
+               "  -t         Report the hardware timestamp for events\n"
                "  -b <n>     Debounce the line with period n microseconds\n"
                " [-c <n>]    Do <n> loops (optional, infinite loop if not stated)\n"
                "  -?         This helptext\n"
@@ -174,7 +175,7 @@ int main(int argc, char **argv)
 
        memset(&config, 0, sizeof(config));
        config.flags = GPIO_V2_LINE_FLAG_INPUT;
-       while ((c = getopt(argc, argv, "c:n:o:b:dsrfw?")) != -1) {
+       while ((c = getopt(argc, argv, "c:n:o:b:dsrfwt?")) != -1) {
                switch (c) {
                case 'c':
                        loops = strtoul(optarg, NULL, 10);
@@ -208,6 +209,9 @@ int main(int argc, char **argv)
                case 'w':
                        config.flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
                        break;
+               case 't':
+                       config.flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
+                       break;
                case '?':
                        print_usage();
                        return -1;
index ea97804..afdf93b 100644 (file)
@@ -16,11 +16,11 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
                 const unsigned long *bitmap2, int bits);
 int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
                 const unsigned long *bitmap2, unsigned int bits);
-int __bitmap_equal(const unsigned long *bitmap1,
-                  const unsigned long *bitmap2, unsigned int bits);
+bool __bitmap_equal(const unsigned long *bitmap1,
+                   const unsigned long *bitmap2, unsigned int bits);
 void bitmap_clear(unsigned long *map, unsigned int start, int len);
-int __bitmap_intersects(const unsigned long *bitmap1,
-                       const unsigned long *bitmap2, unsigned int bits);
+bool __bitmap_intersects(const unsigned long *bitmap1,
+                        const unsigned long *bitmap2, unsigned int bits);
 
 #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
 #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
@@ -162,8 +162,8 @@ static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
 #define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
 #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
 
-static inline int bitmap_equal(const unsigned long *src1,
-                       const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_equal(const unsigned long *src1,
+                               const unsigned long *src2, unsigned int nbits)
 {
        if (small_const_nbits(nbits))
                return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@@ -173,8 +173,9 @@ static inline int bitmap_equal(const unsigned long *src1,
        return __bitmap_equal(src1, src2, nbits);
 }
 
-static inline int bitmap_intersects(const unsigned long *src1,
-                       const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_intersects(const unsigned long *src1,
+                                    const unsigned long *src2,
+                                    unsigned int nbits)
 {
        if (small_const_nbits(nbits))
                return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
index ac19095..0197042 100644 (file)
 #define F_GETSIG       11      /* for sockets. */
 #endif
 
-#ifndef CONFIG_64BIT
 #ifndef F_GETLK64
 #define F_GETLK64      12      /*  using 'struct flock64' */
 #define F_SETLK64      13
 #define F_SETLKW64     14
 #endif
-#endif
 
 #ifndef F_SETOWN_EX
 #define F_SETOWN_EX    15
@@ -187,25 +185,19 @@ struct f_owner_ex {
 
 #define F_LINUX_SPECIFIC_BASE  1024
 
-#ifndef HAVE_ARCH_STRUCT_FLOCK
-#ifndef __ARCH_FLOCK_PAD
-#define __ARCH_FLOCK_PAD
-#endif
-
 struct flock {
        short   l_type;
        short   l_whence;
        __kernel_off_t  l_start;
        __kernel_off_t  l_len;
        __kernel_pid_t  l_pid;
-       __ARCH_FLOCK_PAD
-};
+#ifdef __ARCH_FLOCK_EXTRA_SYSID
+       __ARCH_FLOCK_EXTRA_SYSID
 #endif
-
-#ifndef HAVE_ARCH_STRUCT_FLOCK64
-#ifndef __ARCH_FLOCK64_PAD
-#define __ARCH_FLOCK64_PAD
+#ifdef __ARCH_FLOCK_PAD
+       __ARCH_FLOCK_PAD
 #endif
+};
 
 struct flock64 {
        short  l_type;
@@ -213,8 +205,9 @@ struct flock64 {
        __kernel_loff_t l_start;
        __kernel_loff_t l_len;
        __kernel_pid_t  l_pid;
+#ifdef __ARCH_FLOCK64_PAD
        __ARCH_FLOCK64_PAD
-};
 #endif
+};
 
 #endif /* _ASM_GENERIC_FCNTL_H */
index 1c48b0a..45fa180 100644 (file)
@@ -383,7 +383,7 @@ __SYSCALL(__NR_syslog, sys_syslog)
 
 /* kernel/ptrace.c */
 #define __NR_ptrace 117
-__SYSCALL(__NR_ptrace, sys_ptrace)
+__SC_COMP(__NR_ptrace, sys_ptrace, compat_sys_ptrace)
 
 /* kernel/sched/core.c */
 #define __NR_sched_setparam 118
@@ -779,7 +779,7 @@ __SYSCALL(__NR_rseq, sys_rseq)
 #define __NR_kexec_file_load 294
 __SYSCALL(__NR_kexec_file_load,     sys_kexec_file_load)
 /* 295 through 402 are unassigned to sync up with generic numbers, don't use */
-#if __BITS_PER_LONG == 32
+#if defined(__SYSCALL_COMPAT) || __BITS_PER_LONG == 32
 #define __NR_clock_gettime64 403
 __SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
 #define __NR_clock_settime64 404
index edba4d9..da52065 100644 (file)
@@ -17,6 +17,8 @@
 #include "../../../arch/riscv/include/uapi/asm/bitsperlong.h"
 #elif defined(__alpha__)
 #include "../../../arch/alpha/include/uapi/asm/bitsperlong.h"
+#elif defined(__loongarch__)
+#include "../../../arch/loongarch/include/uapi/asm/bitsperlong.h"
 #else
 #include <asm-generic/bitsperlong.h>
 #endif
index db466ef..354f8cd 100644 (file)
@@ -72,31 +72,31 @@ int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
        return result != 0;
 }
 
-int __bitmap_equal(const unsigned long *bitmap1,
-               const unsigned long *bitmap2, unsigned int bits)
+bool __bitmap_equal(const unsigned long *bitmap1,
+                   const unsigned long *bitmap2, unsigned int bits)
 {
        unsigned int k, lim = bits/BITS_PER_LONG;
        for (k = 0; k < lim; ++k)
                if (bitmap1[k] != bitmap2[k])
-                       return 0;
+                       return false;
 
        if (bits % BITS_PER_LONG)
                if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
-                       return 0;
+                       return false;
 
-       return 1;
+       return true;
 }
 
-int __bitmap_intersects(const unsigned long *bitmap1,
-                       const unsigned long *bitmap2, unsigned int bits)
+bool __bitmap_intersects(const unsigned long *bitmap1,
+                        const unsigned long *bitmap2, unsigned int bits)
 {
        unsigned int k, lim = bits/BITS_PER_LONG;
        for (k = 0; k < lim; ++k)
                if (bitmap1[k] & bitmap2[k])
-                       return 1;
+                       return true;
 
        if (bits % BITS_PER_LONG)
                if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
-                       return 1;
-       return 0;
+                       return true;
+       return false;
 }
index 190b2f6..864bb9d 100644 (file)
@@ -185,7 +185,9 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
                "do_group_exit",
                "stop_this_cpu",
                "__invalid_creds",
-               "cpu_startup_entry",
+               "cpu_startup_entry",
+               "__ubsan_handle_builtin_unreachable",
+               "ex_handler_msr_mce",
        };
 
        if (!func)
index b4e9ef7..cf8ad50 100644 (file)
@@ -33,7 +33,7 @@ OPTIONS
         - a raw PMU event in the form of rN where N is a hexadecimal value
           that represents the raw register encoding with the layout of the
           event control registers as described by entries in
-          /sys/bus/event_sources/devices/cpu/format/*.
+          /sys/bus/event_source/devices/cpu/format/*.
 
         - a symbolic or raw PMU event followed by an optional colon
          and a list of event modifiers, e.g., cpu-cycles:p.  See the
index 8d1cde0..d8a33f4 100644 (file)
@@ -39,7 +39,7 @@ report::
        - a raw PMU event in the form of rN where N is a hexadecimal value
          that represents the raw register encoding with the layout of the
          event control registers as described by entries in
-         /sys/bus/event_sources/devices/cpu/format/*.
+         /sys/bus/event_source/devices/cpu/format/*.
 
         - a symbolic or raw PMU event followed by an optional colon
          and a list of event modifiers, e.g., cpu-cycles:p.  See the
index cac3dfb..c1fdba2 100644 (file)
@@ -41,7 +41,7 @@ Default is to monitor all CPUS.
        (use 'perf list' to list all events) or a raw PMU event in the form
        of rN where N is a hexadecimal value that represents the raw register
        encoding with the layout of the event control registers as described
-       by entries in /sys/bus/event_sources/devices/cpu/format/*.
+       by entries in /sys/bus/event_source/devices/cpu/format/*.
 
 -E <entries>::
 --entries=<entries>::
index be41721..df817d1 100644 (file)
@@ -5,9 +5,9 @@
 #define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }
 
 static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
-       E("spe-load",   "arm_spe_0/ts_enable=1,load_filter=1,store_filter=0,min_latency=%u/",   "arm_spe_0"),
-       E("spe-store",  "arm_spe_0/ts_enable=1,load_filter=0,store_filter=1/",                  "arm_spe_0"),
-       E("spe-ldst",   "arm_spe_0/ts_enable=1,load_filter=1,store_filter=1,min_latency=%u/",   "arm_spe_0"),
+       E("spe-load",   "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=1,store_filter=0,min_latency=%u/",       "arm_spe_0"),
+       E("spe-store",  "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=0,store_filter=1/",                      "arm_spe_0"),
+       E("spe-ldst",   "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=1,store_filter=1,min_latency=%u/",       "arm_spe_0"),
 };
 
 static char mem_ev_name[100];
index 8830618..3501399 100644 (file)
@@ -5,6 +5,7 @@
 #include "util/env.h"
 #include "util/pmu.h"
 #include "linux/string.h"
+#include "evsel.h"
 
 void arch_evsel__set_sample_weight(struct evsel *evsel)
 {
@@ -32,7 +33,7 @@ void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr)
 }
 
 /* Check whether the evsel's PMU supports the perf metrics */
-static bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
+bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
 {
        const char *pmu_name = evsel->pmu_name ? evsel->pmu_name : "cpu";
 
@@ -57,6 +58,6 @@ bool arch_evsel__must_be_in_group(const struct evsel *evsel)
                return false;
 
        return evsel->name &&
-               (!strcasecmp(evsel->name, "slots") ||
+               (strcasestr(evsel->name, "slots") ||
                 strcasestr(evsel->name, "topdown"));
 }
diff --git a/tools/perf/arch/x86/util/evsel.h b/tools/perf/arch/x86/util/evsel.h
new file mode 100644 (file)
index 0000000..19ad169
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _EVSEL_H
+#define _EVSEL_H 1
+
+bool evsel__sys_has_perf_metrics(const struct evsel *evsel);
+
+#endif
index f4d5422..f81a7cf 100644 (file)
@@ -4,6 +4,7 @@
 #include "util/pmu.h"
 #include "util/topdown.h"
 #include "topdown.h"
+#include "evsel.h"
 
 /* Check whether there is a PMU which supports the perf metrics. */
 bool topdown_sys_has_perf_metrics(void)
@@ -55,33 +56,19 @@ void arch_topdown_group_warn(void)
 
 #define TOPDOWN_SLOTS          0x0400
 
-static bool is_topdown_slots_event(struct evsel *counter)
-{
-       if (!counter->pmu_name)
-               return false;
-
-       if (strcmp(counter->pmu_name, "cpu"))
-               return false;
-
-       if (counter->core.attr.config == TOPDOWN_SLOTS)
-               return true;
-
-       return false;
-}
-
 /*
  * Check whether a topdown group supports sample-read.
  *
- * Only Topdown metic supports sample-read. The slots
+ * Only Topdown metric supports sample-read. The slots
  * event must be the leader of the topdown group.
  */
 
 bool arch_topdown_sample_read(struct evsel *leader)
 {
-       if (!pmu_have_event("cpu", "slots"))
+       if (!evsel__sys_has_perf_metrics(leader))
                return false;
 
-       if (is_topdown_slots_event(leader))
+       if (leader->core.attr.config == TOPDOWN_SLOTS)
                return true;
 
        return false;
index 80b525c..4898ee5 100644 (file)
@@ -928,8 +928,8 @@ percent_rmt_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
        double per_left;
        double per_right;
 
-       per_left  = PERCENT(left, lcl_hitm);
-       per_right = PERCENT(right, lcl_hitm);
+       per_left  = PERCENT(left, rmt_hitm);
+       per_right = PERCENT(right, rmt_hitm);
 
        return per_left - per_right;
 }
index b1200b7..23a33ac 100644 (file)
@@ -1083,7 +1083,7 @@ out_delete:
 static int __cmd_record(int argc, const char **argv)
 {
        const char *record_args[] = {
-               "record", "-R", "-m", "1024", "-c", "1", "--synth", "no",
+               "record", "-R", "-m", "1024", "-c", "1", "--synth", "task",
        };
        unsigned int rec_argc, i, j, ret;
        const char **rec_argv;
index 783de7f..9bd20a5 100644 (file)
@@ -3,84 +3,84 @@
                "Unit": "CPU-M-CF",
                "EventCode": "0",
                "EventName": "CPU_CYCLES",
-               "BriefDescription": "CPU Cycles",
-               "PublicDescription": "Cycle Count"
+               "BriefDescription": "Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "1",
                "EventName": "INSTRUCTIONS",
-               "BriefDescription": "Instructions",
-               "PublicDescription": "Instruction Count"
+               "BriefDescription": "Instruction Count",
+               "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "2",
                "EventName": "L1I_DIR_WRITES",
-               "BriefDescription": "L1I Directory Writes",
-               "PublicDescription": "Level-1 I-Cache Directory Write Count"
+               "BriefDescription": "Level-1 I-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "3",
                "EventName": "L1I_PENALTY_CYCLES",
-               "BriefDescription": "L1I Penalty Cycles",
-               "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+               "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "4",
                "EventName": "L1D_DIR_WRITES",
-               "BriefDescription": "L1D Directory Writes",
-               "PublicDescription": "Level-1 D-Cache Directory Write Count"
+               "BriefDescription": "Level-1 D-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "5",
                "EventName": "L1D_PENALTY_CYCLES",
-               "BriefDescription": "L1D Penalty Cycles",
-               "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+               "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "32",
                "EventName": "PROBLEM_STATE_CPU_CYCLES",
-               "BriefDescription": "Problem-State CPU Cycles",
-               "PublicDescription": "Problem-State Cycle Count"
+               "BriefDescription": "Problem-State Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "33",
                "EventName": "PROBLEM_STATE_INSTRUCTIONS",
-               "BriefDescription": "Problem-State Instructions",
-               "PublicDescription": "Problem-State Instruction Count"
+               "BriefDescription": "Problem-State Instruction Count",
+               "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "34",
                "EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
-               "BriefDescription": "Problem-State L1I Directory Writes",
-               "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+               "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "35",
                "EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
-               "BriefDescription": "Problem-State L1I Penalty Cycles",
-               "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+               "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "36",
                "EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
-               "BriefDescription": "Problem-State L1D Directory Writes",
-               "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+               "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "37",
                "EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
-               "BriefDescription": "Problem-State L1D Penalty Cycles",
-               "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
+               "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
        }
 ]
index 3f28007..a8d391d 100644 (file)
                "Unit": "CPU-M-CF",
                "EventCode": "64",
                "EventName": "PRNG_FUNCTIONS",
-               "BriefDescription": "PRNG Functions",
-               "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+               "BriefDescription": "PRNG Function Count",
+               "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "65",
                "EventName": "PRNG_CYCLES",
-               "BriefDescription": "PRNG Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+               "BriefDescription": "PRNG Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "66",
                "EventName": "PRNG_BLOCKED_FUNCTIONS",
-               "BriefDescription": "PRNG Blocked Functions",
-               "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "PRNG Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "67",
                "EventName": "PRNG_BLOCKED_CYCLES",
-               "BriefDescription": "PRNG Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "PRNG Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "68",
                "EventName": "SHA_FUNCTIONS",
-               "BriefDescription": "SHA Functions",
-               "PublicDescription": "Total number of SHA functions issued by the CPU"
+               "BriefDescription": "SHA Function Count",
+               "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "69",
                "EventName": "SHA_CYCLES",
-               "BriefDescription": "SHA Cycles",
-               "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+               "BriefDescription": "SHA Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "70",
                "EventName": "SHA_BLOCKED_FUNCTIONS",
-               "BriefDescription": "SHA Blocked Functions",
-               "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "SHA Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "71",
                "EventName": "SHA_BLOCKED_CYCLES",
-               "BriefDescription": "SHA Bloced Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "SHA Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "72",
                "EventName": "DEA_FUNCTIONS",
-               "BriefDescription": "DEA Functions",
-               "PublicDescription": "Total number of the DEA functions issued by the CPU"
+               "BriefDescription": "DEA Function Count",
+               "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "73",
                "EventName": "DEA_CYCLES",
-               "BriefDescription": "DEA Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+               "BriefDescription": "DEA Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "74",
                "EventName": "DEA_BLOCKED_FUNCTIONS",
-               "BriefDescription": "DEA Blocked Functions",
-               "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "DEA Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "75",
                "EventName": "DEA_BLOCKED_CYCLES",
-               "BriefDescription": "DEA Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "DEA Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "76",
                "EventName": "AES_FUNCTIONS",
-               "BriefDescription": "AES Functions",
-               "PublicDescription": "Total number of AES functions issued by the CPU"
+               "BriefDescription": "AES Function Count",
+               "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "77",
                "EventName": "AES_CYCLES",
-               "BriefDescription": "AES Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+               "BriefDescription": "AES Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "78",
                "EventName": "AES_BLOCKED_FUNCTIONS",
-               "BriefDescription": "AES Blocked Functions",
-               "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "AES Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "79",
                "EventName": "AES_BLOCKED_CYCLES",
-               "BriefDescription": "AES Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "AES Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        }
 ]
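
The crypto counters above are plain counting events, so they can also be read directly through the perf_event_open() interface. The sketch below counts SHA_FUNCTIONS (EventCode 68) for the current task; it assumes the s390 CPU-Measurement Counter Facility is exposed as the "cpum_cf" PMU and that the raw config value is simply the event code listed above — both are illustrative assumptions, not something these JSON files spell out.

/*
 * Minimal sketch (not kernel tooling): read SHA_FUNCTIONS (EventCode 68)
 * for the current task via perf_event_open().  Assumes the counter facility
 * is the "cpum_cf" PMU and that attr.config equals the event code; both are
 * assumptions for illustration only.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        unsigned long long count;
        unsigned int type;
        FILE *f;
        int fd;

        /* PMU type id, read from the usual event_source sysfs layout. */
        f = fopen("/sys/bus/event_source/devices/cpum_cf/type", "r");
        if (!f || fscanf(f, "%u", &type) != 1) {
                perror("cpum_cf type");
                return 1;
        }
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;
        attr.config = 68;               /* SHA_FUNCTIONS, per the table above */
        attr.disabled = 1;

        fd = perf_event_open(&attr, 0, -1, -1, 0);      /* current task, any CPU */
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... run a SHA workload here ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("SHA_FUNCTIONS: %llu\n", count);
        close(fd);
        return 0;
}

In practice the perf tool would request the same counter by name once these JSON descriptions are built into it, which is the usual and more convenient path.
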
index 86bd8ba..bf6a981 100644 (file)
                "EventCode": "128",
                "EventName": "L1I_L2_SOURCED_WRITES",
                "BriefDescription": "L1I L2 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 (L1.5) cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from the Level-2 (L1.5) cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "129",
                "EventName": "L1D_L2_SOURCED_WRITES",
                "BriefDescription": "L1D L2 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from the Level-2 (L1.5) cache"
+               "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was sourced from the Level-2 (L1.5) cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "130",
                "EventName": "L1I_L3_LOCAL_WRITES",
                "BriefDescription": "L1I L3 Local Writes",
-               "PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from the Level-3 cache that is on the same book as the Instruction cache (Local L2 cache)"
+               "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the installed cache line was sourced from the Level-3 cache that is on the same book as the Instruction cache (Local L2 cache)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "131",
                "EventName": "L1D_L3_LOCAL_WRITES",
                "BriefDescription": "L1D L3 Local Writes",
-               "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installtion cache line was source from the Level-3 cache that is on the same book as the Data cache (Local L2 cache)"
+               "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was sourced from the Level-3 cache that is on the same book as the Data cache (Local L2 cache)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "132",
                "EventName": "L1I_L3_REMOTE_WRITES",
                "BriefDescription": "L1I L3 Remote Writes",
-               "PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Instruction cache (Remote L2 cache)"
+               "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Instruction cache (Remote L2 cache)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "133",
                "EventName": "L1D_L3_REMOTE_WRITES",
                "BriefDescription": "L1D L3 Remote Writes",
-               "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Data cache (Remote L2 cache)"
+               "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Data cache (Remote L2 cache)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "134",
                "EventName": "L1D_LMEM_SOURCED_WRITES",
                "BriefDescription": "L1D Local Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
+               "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "135",
                "EventName": "L1I_LMEM_SOURCED_WRITES",
                "BriefDescription": "L1I Local Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the s ame book as the Instruction cache (Local Memory)"
+               "PublicDescription": "A directory write to the Level-1 Instruction Cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "136",
                "EventName": "L1D_RO_EXCL_WRITES",
                "BriefDescription": "L1D Read-only Exclusive Writes",
-               "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+               "PublicDescription": "A directory write to the Level-1 Data Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "137",
                "EventName": "L1I_CACHELINE_INVALIDATES",
                "BriefDescription": "L1I Cacheline Invalidates",
-               "PublicDescription": "A cache line in the Level-1 I-Cache has been invalidated by a store on the same CPU as the Level-1 I-Cache"
+               "PublicDescription": "A cache line in the Level-1 Instruction Cache has been invalidated by a store on the same CPU as the Level-1 Instruction Cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "138",
                "EventName": "ITLB1_WRITES",
                "BriefDescription": "ITLB1 Writes",
-               "PublicDescription": "A translation entry has been written into the Level-1 Instruction Translation Lookaside Buffer"
+               "PublicDescription": "A translation entry has been written into the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "139",
                "EventName": "DTLB1_WRITES",
                "BriefDescription": "DTLB1 Writes",
-               "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+               "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "140",
                "EventName": "TLB2_PTE_WRITES",
                "BriefDescription": "TLB2 PTE Writes",
-               "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+               "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "141",
                "EventName": "TLB2_CRSTE_WRITES",
                "BriefDescription": "TLB2 CRSTE Writes",
-               "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
+               "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "142",
                "EventName": "TLB2_CRSTE_HPAGE_WRITES",
                "BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
-               "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
+               "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "145",
                "EventName": "ITLB1_MISSES",
                "BriefDescription": "ITLB1 Misses",
-               "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress"
+               "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "146",
                "EventName": "DTLB1_MISSES",
                "BriefDescription": "DTLB1 Misses",
-               "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle an DTLB1 miss is in progress"
+               "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle a DTLB1 miss is in progress."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "147",
                "EventName": "L2C_STORES_SENT",
                "BriefDescription": "L2C Stores Sent",
-               "PublicDescription": "Incremented by one for every store sent to Level-2 (L1.5) cache"
+               "PublicDescription": "Incremented by one for every store sent to Level-2 (L1.5) cache."
        }
 ]
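
As a rough illustration of how these counters combine: DTLB1_MISSES increments once per cycle while a Level-1 data TLB miss is outstanding, and DTLB1_WRITES counts translation entries written, so, assuming roughly one TLB1 write per resolved miss, DTLB1_MISSES / DTLB1_WRITES approximates the average cycles spent per DTLB1 miss. With purely illustrative numbers, 400000 miss cycles over 10000 writes would suggest about 40 cycles per miss; the same style of ratio works for the ITLB1 pair.
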
index 783de7f..9bd20a5 100644 (file)
@@ -3,84 +3,84 @@
                "Unit": "CPU-M-CF",
                "EventCode": "0",
                "EventName": "CPU_CYCLES",
-               "BriefDescription": "CPU Cycles",
-               "PublicDescription": "Cycle Count"
+               "BriefDescription": "Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "1",
                "EventName": "INSTRUCTIONS",
-               "BriefDescription": "Instructions",
-               "PublicDescription": "Instruction Count"
+               "BriefDescription": "Instruction Count",
+               "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "2",
                "EventName": "L1I_DIR_WRITES",
-               "BriefDescription": "L1I Directory Writes",
-               "PublicDescription": "Level-1 I-Cache Directory Write Count"
+               "BriefDescription": "Level-1 I-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "3",
                "EventName": "L1I_PENALTY_CYCLES",
-               "BriefDescription": "L1I Penalty Cycles",
-               "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+               "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "4",
                "EventName": "L1D_DIR_WRITES",
-               "BriefDescription": "L1D Directory Writes",
-               "PublicDescription": "Level-1 D-Cache Directory Write Count"
+               "BriefDescription": "Level-1 D-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "5",
                "EventName": "L1D_PENALTY_CYCLES",
-               "BriefDescription": "L1D Penalty Cycles",
-               "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+               "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "32",
                "EventName": "PROBLEM_STATE_CPU_CYCLES",
-               "BriefDescription": "Problem-State CPU Cycles",
-               "PublicDescription": "Problem-State Cycle Count"
+               "BriefDescription": "Problem-State Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "33",
                "EventName": "PROBLEM_STATE_INSTRUCTIONS",
-               "BriefDescription": "Problem-State Instructions",
-               "PublicDescription": "Problem-State Instruction Count"
+               "BriefDescription": "Problem-State Instruction Count",
+               "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "34",
                "EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
-               "BriefDescription": "Problem-State L1I Directory Writes",
-               "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+               "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "35",
                "EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
-               "BriefDescription": "Problem-State L1I Penalty Cycles",
-               "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+               "BriefDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "36",
                "EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
-               "BriefDescription": "Problem-State L1D Directory Writes",
-               "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+               "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "37",
                "EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
-               "BriefDescription": "Problem-State L1D Penalty Cycles",
-               "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
+               "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
        }
 ]
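
The basic counters above lend themselves to simple ratios: CPU_CYCLES / INSTRUCTIONS gives cycles per instruction (excluding wait-state cycles, as the descriptions note), and PROBLEM_STATE_CPU_CYCLES / CPU_CYCLES gives the share of non-wait cycles spent in the problem state. For example, with illustrative readings of 2,000,000 cycles and 800,000 instructions, the CPI would be 2.5.
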
index 3f28007..a8d391d 100644 (file)
                "Unit": "CPU-M-CF",
                "EventCode": "64",
                "EventName": "PRNG_FUNCTIONS",
-               "BriefDescription": "PRNG Functions",
-               "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+               "BriefDescription": "PRNG Function Count",
+               "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "65",
                "EventName": "PRNG_CYCLES",
-               "BriefDescription": "PRNG Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+               "BriefDescription": "PRNG Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom-number-generation functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "66",
                "EventName": "PRNG_BLOCKED_FUNCTIONS",
-               "BriefDescription": "PRNG Blocked Functions",
-               "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "PRNG Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "67",
                "EventName": "PRNG_BLOCKED_CYCLES",
-               "BriefDescription": "PRNG Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "PRNG Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "68",
                "EventName": "SHA_FUNCTIONS",
-               "BriefDescription": "SHA Functions",
-               "PublicDescription": "Total number of SHA functions issued by the CPU"
+               "BriefDescription": "SHA Function Count",
+               "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "69",
                "EventName": "SHA_CYCLES",
-               "BriefDescription": "SHA Cycles",
-               "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+               "BriefDescription": "SHA Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "70",
                "EventName": "SHA_BLOCKED_FUNCTIONS",
-               "BriefDescription": "SHA Blocked Functions",
-               "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "SHA Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "71",
                "EventName": "SHA_BLOCKED_CYCLES",
-               "BriefDescription": "SHA Bloced Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "SHA Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "72",
                "EventName": "DEA_FUNCTIONS",
-               "BriefDescription": "DEA Functions",
-               "PublicDescription": "Total number of the DEA functions issued by the CPU"
+               "BriefDescription": "DEA Function Count",
+               "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "73",
                "EventName": "DEA_CYCLES",
-               "BriefDescription": "DEA Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+               "BriefDescription": "DEA Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "74",
                "EventName": "DEA_BLOCKED_FUNCTIONS",
-               "BriefDescription": "DEA Blocked Functions",
-               "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "DEA Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "75",
                "EventName": "DEA_BLOCKED_CYCLES",
-               "BriefDescription": "DEA Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "DEA Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "76",
                "EventName": "AES_FUNCTIONS",
-               "BriefDescription": "AES Functions",
-               "PublicDescription": "Total number of AES functions issued by the CPU"
+               "BriefDescription": "AES Function Count",
+               "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "77",
                "EventName": "AES_CYCLES",
-               "BriefDescription": "AES Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+               "BriefDescription": "AES Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "78",
                "EventName": "AES_BLOCKED_FUNCTIONS",
-               "BriefDescription": "AES Blocked Functions",
-               "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "AES Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "79",
                "EventName": "AES_BLOCKED_CYCLES",
-               "BriefDescription": "AES Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "AES Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        }
 ]
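
One way to read the blocked crypto counters is as a contention indicator: assuming blocked and busy cycles are disjoint, AES_BLOCKED_CYCLES / (AES_CYCLES + AES_BLOCKED_CYCLES) estimates the fraction of this CPU's DEA/AES coprocessor demand that stalled behind work issued by other CPUs, and the corresponding DEA, SHA and PRNG pairs can be combined the same way.
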
index 1a5e4f8..99c1b93 100644 (file)
@@ -11,7 +11,7 @@
                "EventCode": "129",
                "EventName": "DTLB1_WRITES",
                "BriefDescription": "DTLB1 Writes",
-               "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+               "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
        },
        {
                "Unit": "CPU-M-CF",
@@ -25,7 +25,7 @@
                "EventCode": "131",
                "EventName": "DTLB1_HPAGE_WRITES",
                "BriefDescription": "DTLB1 One-Megabyte Page Writes",
-               "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
+               "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "133",
                "EventName": "L1D_L2D_SOURCED_WRITES",
                "BriefDescription": "L1D L2D Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "134",
                "EventName": "ITLB1_WRITES",
                "BriefDescription": "ITLB1 Writes",
-               "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
+               "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "135",
                "EventName": "ITLB1_MISSES",
                "BriefDescription": "ITLB1 Misses",
-               "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress"
+               "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "136",
                "EventName": "L1I_L2I_SOURCED_WRITES",
                "BriefDescription": "L1I L2I Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "137",
                "EventName": "TLB2_PTE_WRITES",
                "BriefDescription": "TLB2 PTE Writes",
-               "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+               "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "138",
                "EventName": "TLB2_CRSTE_HPAGE_WRITES",
                "BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
-               "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays for a one-megabyte large page translation"
+               "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays for a one-megabyte large page translation."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "139",
                "EventName": "TLB2_CRSTE_WRITES",
                "BriefDescription": "TLB2 CRSTE Writes",
-               "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays"
+               "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "140",
                "EventName": "TX_C_TEND",
                "BriefDescription": "Completed TEND instructions in constrained TX mode",
-               "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+               "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "141",
                "EventName": "TX_NC_TEND",
                "BriefDescription": "Completed TEND instructions in non-constrained TX mode",
-               "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
+               "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "144",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "145",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "146",
                "EventName": "L1D_ONNODE_L4_SOURCED_WRITES",
                "BriefDescription": "L1D On-Node L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "147",
                "EventName": "L1D_ONNODE_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D On-Node L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "148",
                "EventName": "L1D_ONNODE_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Node L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "149",
                "EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1D On-Drawer L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "150",
                "EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D On-Drawer L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "151",
                "EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Drawer L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "152",
                "EventName": "L1D_OFFDRAWER_SCOL_L4_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer Same-Column L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "153",
                "EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "154",
                "EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "155",
                "EventName": "L1D_OFFDRAWER_FCOL_L4_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "156",
                "EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "157",
                "EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "158",
                "EventName": "L1D_ONNODE_MEM_SOURCED_WRITES",
                "BriefDescription": "L1D On-Node Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Node memory"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Node memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "159",
                "EventName": "L1D_ONDRAWER_MEM_SOURCED_WRITES",
                "BriefDescription": "L1D On-Drawer Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "160",
                "EventName": "L1D_OFFDRAWER_MEM_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "161",
                "EventName": "L1D_ONCHIP_MEM_SOURCED_WRITES",
                "BriefDescription": "L1D On-Chip Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "162",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "163",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "164",
                "EventName": "L1I_ONNODE_L4_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "165",
                "EventName": "L1I_ONNODE_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I On-Node L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "166",
                "EventName": "L1I_ONNODE_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Node L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "167",
                "EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1I On-Drawer L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "168",
                "EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I On-Drawer L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "169",
                "EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Drawer L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "170",
                "EventName": "L1I_OFFDRAWER_SCOL_L4_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer Same-Column L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "171",
                "EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "172",
                "EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "173",
                "EventName": "L1I_OFFDRAWER_FCOL_L4_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer Far-Column L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "174",
                "EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "175",
                "EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "176",
                "EventName": "L1I_ONNODE_MEM_SOURCED_WRITES",
                "BriefDescription": "L1I On-Node Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Node memory"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Node memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "177",
                "EventName": "L1I_ONDRAWER_MEM_SOURCED_WRITES",
                "BriefDescription": "L1I On-Drawer Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "178",
                "EventName": "L1I_OFFDRAWER_MEM_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "179",
                "EventName": "L1I_ONCHIP_MEM_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "218",
                "EventName": "TX_NC_TABORT",
                "BriefDescription": "Aborted transactions in non-constrained TX mode",
-               "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
+               "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "219",
                "EventName": "TX_C_TABORT_NO_SPECIAL",
                "BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
-               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "220",
                "EventName": "TX_C_TABORT_SPECIAL",
                "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
-               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
        },
        {
                "Unit": "CPU-M-CF",
index fc762e9..1023d47 100644 (file)
@@ -3,56 +3,56 @@
                "Unit": "CPU-M-CF",
                "EventCode": "0",
                "EventName": "CPU_CYCLES",
-               "BriefDescription": "CPU Cycles",
-               "PublicDescription": "Cycle Count"
+               "BriefDescription": "Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "1",
                "EventName": "INSTRUCTIONS",
-               "BriefDescription": "Instructions",
-               "PublicDescription": "Instruction Count"
+               "BriefDescription": "Instruction Count",
+               "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "2",
                "EventName": "L1I_DIR_WRITES",
-               "BriefDescription": "L1I Directory Writes",
-               "PublicDescription": "Level-1 I-Cache Directory Write Count"
+               "BriefDescription": "Level-1 I-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "3",
                "EventName": "L1I_PENALTY_CYCLES",
-               "BriefDescription": "L1I Penalty Cycles",
-               "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+               "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "4",
                "EventName": "L1D_DIR_WRITES",
-               "BriefDescription": "L1D Directory Writes",
-               "PublicDescription": "Level-1 D-Cache Directory Write Count"
+               "BriefDescription": "Level-1 D-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "5",
                "EventName": "L1D_PENALTY_CYCLES",
-               "BriefDescription": "L1D Penalty Cycles",
-               "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+               "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "32",
                "EventName": "PROBLEM_STATE_CPU_CYCLES",
-               "BriefDescription": "Problem-State CPU Cycles",
-               "PublicDescription": "Problem-State Cycle Count"
+               "BriefDescription": "Problem-State Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "33",
                "EventName": "PROBLEM_STATE_INSTRUCTIONS",
-               "BriefDescription": "Problem-State Instructions",
-               "PublicDescription": "Problem-State Instruction Count"
+               "BriefDescription": "Problem-State Instruction Count",
+               "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
        }
 ]
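
Each hunk above edits the same flat pmu-events record layout: "Unit", "EventCode", "EventName", "BriefDescription" and "PublicDescription". A minimal sketch, assuming a local perf source tree and an illustrative file path (an assumption, not something named in this hunk), of how one of these counter files can be loaded and listed:

    import json
    import sys

    # Illustrative path (an assumption for this sketch); point it at any
    # s390 pmu-events JSON file from the perf source tree.
    path = sys.argv[1] if len(sys.argv) > 1 else "tools/perf/pmu-events/arch/s390/cf_z15/basic.json"

    with open(path) as f:
        events = json.load(f)

    # Each entry is a flat object; print the counter number, name and short description.
    for ev in events:
        print(f'{ev["EventCode"]:>4}  {ev["EventName"]:<32}  {ev["BriefDescription"]}')
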
index 3f28007..a8d391d 100644 (file)
                "Unit": "CPU-M-CF",
                "EventCode": "64",
                "EventName": "PRNG_FUNCTIONS",
-               "BriefDescription": "PRNG Functions",
-               "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+               "BriefDescription": "PRNG Function Count",
+               "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "65",
                "EventName": "PRNG_CYCLES",
-               "BriefDescription": "PRNG Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+               "BriefDescription": "PRNG Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom-number-generation functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "66",
                "EventName": "PRNG_BLOCKED_FUNCTIONS",
-               "BriefDescription": "PRNG Blocked Functions",
-               "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "PRNG Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "67",
                "EventName": "PRNG_BLOCKED_CYCLES",
-               "BriefDescription": "PRNG Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "PRNG Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "68",
                "EventName": "SHA_FUNCTIONS",
-               "BriefDescription": "SHA Functions",
-               "PublicDescription": "Total number of SHA functions issued by the CPU"
+               "BriefDescription": "SHA Function Count",
+               "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "69",
                "EventName": "SHA_CYCLES",
-               "BriefDescription": "SHA Cycles",
-               "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+               "BriefDescription": "SHA Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "70",
                "EventName": "SHA_BLOCKED_FUNCTIONS",
-               "BriefDescription": "SHA Blocked Functions",
-               "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "SHA Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "71",
                "EventName": "SHA_BLOCKED_CYCLES",
-               "BriefDescription": "SHA Bloced Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "SHA Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "72",
                "EventName": "DEA_FUNCTIONS",
-               "BriefDescription": "DEA Functions",
-               "PublicDescription": "Total number of the DEA functions issued by the CPU"
+               "BriefDescription": "DEA Function Count",
+               "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "73",
                "EventName": "DEA_CYCLES",
-               "BriefDescription": "DEA Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+               "BriefDescription": "DEA Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "74",
                "EventName": "DEA_BLOCKED_FUNCTIONS",
-               "BriefDescription": "DEA Blocked Functions",
-               "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "DEA Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "75",
                "EventName": "DEA_BLOCKED_CYCLES",
-               "BriefDescription": "DEA Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "DEA Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "76",
                "EventName": "AES_FUNCTIONS",
-               "BriefDescription": "AES Functions",
-               "PublicDescription": "Total number of AES functions issued by the CPU"
+               "BriefDescription": "AES Function Count",
+               "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "77",
                "EventName": "AES_CYCLES",
-               "BriefDescription": "AES Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+               "BriefDescription": "AES Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "78",
                "EventName": "AES_BLOCKED_FUNCTIONS",
-               "BriefDescription": "AES Blocked Functions",
-               "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "AES Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "79",
                "EventName": "AES_BLOCKED_CYCLES",
-               "BriefDescription": "AES Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "AES Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        }
 ]
index 4942b20..ad40cc4 100644 (file)
                "EventCode": "128",
                "EventName": "L1D_RO_EXCL_WRITES",
                "BriefDescription": "L1D Read-only Exclusive Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+               "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "129",
                "EventName": "DTLB2_WRITES",
                "BriefDescription": "DTLB2 Writes",
-               "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache"
+               "PublicDescription": "A translation has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache. This is a replacement for what was provided for the DTLB on prior machines."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "130",
                "EventName": "DTLB2_MISSES",
                "BriefDescription": "DTLB2 Misses",
-               "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle"
+               "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle. This is a replacement for what was provided for the DTLB on prior machines."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "131",
                "EventName": "DTLB2_HPAGE_WRITES",
                "BriefDescription": "DTLB2 One-Megabyte Page Writes",
-               "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done"
+               "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "132",
                "EventName": "DTLB2_GPAGE_WRITES",
                "BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
-               "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB"
+               "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "133",
                "EventName": "L1D_L2D_SOURCED_WRITES",
                "BriefDescription": "L1D L2D Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "134",
                "EventName": "ITLB2_WRITES",
                "BriefDescription": "ITLB2 Writes",
-               "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache"
+               "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache. This is a replacement for what was provided for the ITLB on prior machines."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "135",
                "EventName": "ITLB2_MISSES",
                "BriefDescription": "ITLB2 Misses",
-               "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle"
+               "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle. This is a replacement for what was provided for the ITLB on prior machines."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "136",
                "EventName": "L1I_L2I_SOURCED_WRITES",
                "BriefDescription": "L1I L2I Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "137",
                "EventName": "TLB2_PTE_WRITES",
                "BriefDescription": "TLB2 PTE Writes",
-               "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB"
+               "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "138",
                "EventName": "TLB2_CRSTE_WRITES",
                "BriefDescription": "TLB2 CRSTE Writes",
-               "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB"
+               "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "139",
                "EventName": "TLB2_ENGINES_BUSY",
                "BriefDescription": "TLB2 Engines Busy",
-               "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle"
+               "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "140",
                "EventName": "TX_C_TEND",
                "BriefDescription": "Completed TEND instructions in constrained TX mode",
-               "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+               "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "141",
                "EventName": "TX_NC_TEND",
                "BriefDescription": "Completed TEND instructions in non-constrained TX mode",
-               "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
+               "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "143",
                "EventName": "L1C_TLB2_MISSES",
                "BriefDescription": "L1C TLB2 Misses",
-               "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress"
+               "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "144",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "145",
                "EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1D On-Chip Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "146",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "147",
                "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Cluster L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache withountervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "148",
                "EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1D On-Cluster Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "149",
                "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "150",
                "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Cluster L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "151",
                "EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Cluster Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "152",
                "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "153",
                "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "154",
                "EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "155",
                "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "156",
                "EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1D On-Drawer L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "157",
                "EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "158",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes read-only",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "162",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "163",
                "EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "164",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "165",
                "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Cluster L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "166",
                "EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1I On-Cluster Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "167",
                "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "168",
                "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Cluster L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "169",
                "EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Cluster Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "170",
                "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "171",
                "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "172",
                "EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "173",
                "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "174",
                "EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1I On-Drawer L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "175",
                "EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "224",
                "EventName": "BCD_DFP_EXECUTION_SLOTS",
                "BriefDescription": "BCD DFP Execution Slots",
-               "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT"
+               "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "225",
                "EventName": "VX_BCD_EXECUTION_SLOTS",
                "BriefDescription": "VX BCD Execution Slots",
-               "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG"
+               "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMP, VMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOP, VCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVD, VCVDG."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "226",
                "EventName": "DECIMAL_INSTRUCTIONS",
                "BriefDescription": "Decimal Instructions",
-               "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP"
+               "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "232",
                "EventName": "LAST_HOST_TRANSLATIONS",
                "BriefDescription": "Last host translation done",
-               "PublicDescription": "Last Host Translation done"
+               "PublicDescription": "Last Host Translation done."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "243",
                "EventName": "TX_NC_TABORT",
                "BriefDescription": "Aborted transactions in non-constrained TX mode",
-               "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
+               "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "244",
                "EventName": "TX_C_TABORT_NO_SPECIAL",
                "BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
-               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "245",
                "EventName": "TX_C_TABORT_SPECIAL",
                "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
-               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
        },
        {
                "Unit": "CPU-M-CF",
index fc762e9..1023d47 100644 (file)
@@ -3,56 +3,56 @@
                "Unit": "CPU-M-CF",
                "EventCode": "0",
                "EventName": "CPU_CYCLES",
-               "BriefDescription": "CPU Cycles",
-               "PublicDescription": "Cycle Count"
+               "BriefDescription": "Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "1",
                "EventName": "INSTRUCTIONS",
-               "BriefDescription": "Instructions",
-               "PublicDescription": "Instruction Count"
+               "BriefDescription": "Instruction Count",
+               "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "2",
                "EventName": "L1I_DIR_WRITES",
-               "BriefDescription": "L1I Directory Writes",
-               "PublicDescription": "Level-1 I-Cache Directory Write Count"
+               "BriefDescription": "Level-1 I-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "3",
                "EventName": "L1I_PENALTY_CYCLES",
-               "BriefDescription": "L1I Penalty Cycles",
-               "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+               "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "4",
                "EventName": "L1D_DIR_WRITES",
-               "BriefDescription": "L1D Directory Writes",
-               "PublicDescription": "Level-1 D-Cache Directory Write Count"
+               "BriefDescription": "Level-1 D-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "5",
                "EventName": "L1D_PENALTY_CYCLES",
-               "BriefDescription": "L1D Penalty Cycles",
-               "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+               "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "32",
                "EventName": "PROBLEM_STATE_CPU_CYCLES",
-               "BriefDescription": "Problem-State CPU Cycles",
-               "PublicDescription": "Problem-State Cycle Count"
+               "BriefDescription": "Problem-State Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "33",
                "EventName": "PROBLEM_STATE_INSTRUCTIONS",
-               "BriefDescription": "Problem-State Instructions",
-               "PublicDescription": "Problem-State Instruction Count"
+               "BriefDescription": "Problem-State Instruction Count",
+               "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
        }
 ]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json
deleted file mode 100644 (file)
index 3f28007..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-[
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "64",
-               "EventName": "PRNG_FUNCTIONS",
-               "BriefDescription": "PRNG Functions",
-               "PublicDescription": "Total number of the PRNG functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "65",
-               "EventName": "PRNG_CYCLES",
-               "BriefDescription": "PRNG Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "66",
-               "EventName": "PRNG_BLOCKED_FUNCTIONS",
-               "BriefDescription": "PRNG Blocked Functions",
-               "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "67",
-               "EventName": "PRNG_BLOCKED_CYCLES",
-               "BriefDescription": "PRNG Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "68",
-               "EventName": "SHA_FUNCTIONS",
-               "BriefDescription": "SHA Functions",
-               "PublicDescription": "Total number of SHA functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "69",
-               "EventName": "SHA_CYCLES",
-               "BriefDescription": "SHA Cycles",
-               "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "70",
-               "EventName": "SHA_BLOCKED_FUNCTIONS",
-               "BriefDescription": "SHA Blocked Functions",
-               "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "71",
-               "EventName": "SHA_BLOCKED_CYCLES",
-               "BriefDescription": "SHA Bloced Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "72",
-               "EventName": "DEA_FUNCTIONS",
-               "BriefDescription": "DEA Functions",
-               "PublicDescription": "Total number of the DEA functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "73",
-               "EventName": "DEA_CYCLES",
-               "BriefDescription": "DEA Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "74",
-               "EventName": "DEA_BLOCKED_FUNCTIONS",
-               "BriefDescription": "DEA Blocked Functions",
-               "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "75",
-               "EventName": "DEA_BLOCKED_CYCLES",
-               "BriefDescription": "DEA Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "76",
-               "EventName": "AES_FUNCTIONS",
-               "BriefDescription": "AES Functions",
-               "PublicDescription": "Total number of AES functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "77",
-               "EventName": "AES_CYCLES",
-               "BriefDescription": "AES Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "78",
-               "EventName": "AES_BLOCKED_FUNCTIONS",
-               "BriefDescription": "AES Blocked Functions",
-               "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "79",
-               "EventName": "AES_BLOCKED_CYCLES",
-               "BriefDescription": "AES Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
-       }
-]
index ad79189..8b4380b 100644 (file)
@@ -1,6 +1,118 @@
 [
        {
                "Unit": "CPU-M-CF",
+               "EventCode": "64",
+               "EventName": "PRNG_FUNCTIONS",
+               "BriefDescription": "PRNG Function Count",
+               "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "65",
+               "EventName": "PRNG_CYCLES",
+               "BriefDescription": "PRNG Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom-number-generation functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "66",
+               "EventName": "PRNG_BLOCKED_FUNCTIONS",
+               "BriefDescription": "PRNG Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "67",
+               "EventName": "PRNG_BLOCKED_CYCLES",
+               "BriefDescription": "PRNG Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "68",
+               "EventName": "SHA_FUNCTIONS",
+               "BriefDescription": "SHA Function Count",
+               "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "69",
+               "EventName": "SHA_CYCLES",
+               "BriefDescription": "SHA Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "70",
+               "EventName": "SHA_BLOCKED_FUNCTIONS",
+               "BriefDescription": "SHA Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "71",
+               "EventName": "SHA_BLOCKED_CYCLES",
+               "BriefDescription": "SHA Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "72",
+               "EventName": "DEA_FUNCTIONS",
+               "BriefDescription": "DEA Function Count",
+               "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "73",
+               "EventName": "DEA_CYCLES",
+               "BriefDescription": "DEA Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "74",
+               "EventName": "DEA_BLOCKED_FUNCTIONS",
+               "BriefDescription": "DEA Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "75",
+               "EventName": "DEA_BLOCKED_CYCLES",
+               "BriefDescription": "DEA Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "76",
+               "EventName": "AES_FUNCTIONS",
+               "BriefDescription": "AES Function Count",
+               "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "77",
+               "EventName": "AES_CYCLES",
+               "BriefDescription": "AES Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "78",
+               "EventName": "AES_BLOCKED_FUNCTIONS",
+               "BriefDescription": "AES Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "79",
+               "EventName": "AES_BLOCKED_CYCLES",
+               "BriefDescription": "AES Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
                "EventCode": "80",
                "EventName": "ECC_FUNCTION_COUNT",
                "BriefDescription": "ECC Function Count",
index 8ac61f8..9c691c3 100644 (file)
                "EventCode": "128",
                "EventName": "L1D_RO_EXCL_WRITES",
                "BriefDescription": "L1D Read-only Exclusive Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+               "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "129",
                "EventName": "DTLB2_WRITES",
                "BriefDescription": "DTLB2 Writes",
-               "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache"
+               "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache. This is a replacement for what was provided for the DTLB on prior machines."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "130",
                "EventName": "DTLB2_MISSES",
                "BriefDescription": "DTLB2 Misses",
-               "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle"
+               "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle. This is a replacement for what was provided for the DTLB on prior machines."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "131",
                "EventName": "DTLB2_HPAGE_WRITES",
                "BriefDescription": "DTLB2 One-Megabyte Page Writes",
-               "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page"
+               "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "132",
                "EventName": "DTLB2_GPAGE_WRITES",
                "BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
-               "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB"
+               "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "133",
                "EventName": "L1D_L2D_SOURCED_WRITES",
                "BriefDescription": "L1D L2D Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "134",
                "EventName": "ITLB2_WRITES",
                "BriefDescription": "ITLB2 Writes",
-               "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache"
+               "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache. This is a replacement for what was provided for the ITLB on prior machines."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "135",
                "EventName": "ITLB2_MISSES",
                "BriefDescription": "ITLB2 Misses",
-               "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle"
+               "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle. This is a replacement for what was provided for the ITLB on prior machines."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "136",
                "EventName": "L1I_L2I_SOURCED_WRITES",
                "BriefDescription": "L1I L2I Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "137",
                "EventName": "TLB2_PTE_WRITES",
                "BriefDescription": "TLB2 PTE Writes",
-               "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB"
+               "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "138",
                "EventName": "TLB2_CRSTE_WRITES",
                "BriefDescription": "TLB2 CRSTE Writes",
-               "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB"
+               "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "139",
                "EventName": "TLB2_ENGINES_BUSY",
                "BriefDescription": "TLB2 Engines Busy",
-               "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle"
+               "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "140",
                "EventName": "TX_C_TEND",
                "BriefDescription": "Completed TEND instructions in constrained TX mode",
-               "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+               "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "141",
                "EventName": "TX_NC_TEND",
                "BriefDescription": "Completed TEND instructions in non-constrained TX mode",
-               "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
+               "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "143",
                "EventName": "L1C_TLB2_MISSES",
                "BriefDescription": "L1C TLB2 Misses",
-               "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress"
+               "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "144",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "145",
                "EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1D On-Chip Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "146",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "147",
                "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Cluster L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache withountervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "148",
                "EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1D On-Cluster Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "149",
                "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "150",
                "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Cluster L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "151",
                "EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Cluster Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "152",
                "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "153",
                "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "154",
                "EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "155",
                "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "156",
                "EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1D On-Drawer L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "157",
                "EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Drawer L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "158",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes read-only",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "162",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "163",
                "EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "164",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "165",
                "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Cluster L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "166",
                "EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1I On-Cluster Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "167",
                "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "168",
                "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Cluster L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "169",
                "EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Cluster Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "170",
                "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "171",
                "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "172",
                "EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "173",
                "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "174",
                "EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1I On-Drawer L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "175",
                "EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Drawer L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "224",
                "EventName": "BCD_DFP_EXECUTION_SLOTS",
                "BriefDescription": "BCD DFP Execution Slots",
-               "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT"
+               "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "225",
                "EventName": "VX_BCD_EXECUTION_SLOTS",
                "BriefDescription": "VX BCD Execution Slots",
-               "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG"
+               "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMP, VMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOP, VCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVD, VCVDG."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "226",
                "EventName": "DECIMAL_INSTRUCTIONS",
                "BriefDescription": "Decimal Instructions",
-               "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP"
+               "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "232",
                "EventName": "LAST_HOST_TRANSLATIONS",
                "BriefDescription": "Last host translation done",
-               "PublicDescription": "Last Host Translation done"
+               "PublicDescription": "Last Host Translation done."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "243",
                "EventName": "TX_NC_TABORT",
                "BriefDescription": "Aborted transactions in non-constrained TX mode",
-               "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
+               "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "244",
                "EventName": "TX_C_TABORT_NO_SPECIAL",
                "BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
-               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "245",
                "EventName": "TX_C_TABORT_SPECIAL",
                "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
-               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
        },
        {
                "Unit": "CPU-M-CF",
                "Unit": "CPU-M-CF",
                "EventCode": "264",
                "EventName": "DFLT_CC",
-               "BriefDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed",
+               "BriefDescription": "DEFLATE CONVERSION CALL Count",
                "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed"
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "265",
                "EventName": "DFLT_CCFINISH",
-               "BriefDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2",
-               "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2"
+               "BriefDescription": "Completed DEFLATE CONVERSION CALL Count",
+               "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2."
        },
        {
                "Unit": "CPU-M-CF",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/basic.json b/tools/perf/pmu-events/arch/s390/cf_z16/basic.json
new file mode 100644 (file)
index 0000000..1023d47
--- /dev/null
@@ -0,0 +1,58 @@
+[
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "0",
+               "EventName": "CPU_CYCLES",
+               "BriefDescription": "Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "1",
+               "EventName": "INSTRUCTIONS",
+               "BriefDescription": "Instruction Count",
+               "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "2",
+               "EventName": "L1I_DIR_WRITES",
+               "BriefDescription": "Level-1 I-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "3",
+               "EventName": "L1I_PENALTY_CYCLES",
+               "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "4",
+               "EventName": "L1D_DIR_WRITES",
+               "BriefDescription": "Level-1 D-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "5",
+               "EventName": "L1D_PENALTY_CYCLES",
+               "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "32",
+               "EventName": "PROBLEM_STATE_CPU_CYCLES",
+               "BriefDescription": "Problem-State Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "33",
+               "EventName": "PROBLEM_STATE_INSTRUCTIONS",
+               "BriefDescription": "Problem-State Instruction Count",
+               "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
+       }
+]
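
The new cf_z16 counter files all follow the same pmu-events JSON schema (Unit, EventCode, EventName, BriefDescription, PublicDescription). As a minimal sketch of how such a file can be inspected outside of perf -- not part of this patch, and the path below is only illustrative for a checked-out kernel tree -- the entries can be loaded with any JSON parser:

import json

def load_events(path):
    """Return the list of counter definitions from a pmu-events JSON file."""
    with open(path) as f:
        return json.load(f)

if __name__ == "__main__":
    # Illustrative path; adjust to the local kernel checkout.
    events = load_events("tools/perf/pmu-events/arch/s390/cf_z16/basic.json")
    for ev in events:
        # Each entry carries the fields perf uses when listing events.
        print(f'{ev["EventCode"]:>3}  {ev["EventName"]:<28}  {ev["BriefDescription"]}')

perf itself turns these files into C tables at build time via the jevents tooling, so a script like this is only a quick way to eyeball the new event set.
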
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/crypto6.json b/tools/perf/pmu-events/arch/s390/cf_z16/crypto6.json
new file mode 100644 (file)
index 0000000..8b4380b
--- /dev/null
@@ -0,0 +1,142 @@
+[
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "64",
+               "EventName": "PRNG_FUNCTIONS",
+               "BriefDescription": "PRNG Function Count",
+               "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "65",
+               "EventName": "PRNG_CYCLES",
+               "BriefDescription": "PRNG Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom-number-generation functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "66",
+               "EventName": "PRNG_BLOCKED_FUNCTIONS",
+               "BriefDescription": "PRNG Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "67",
+               "EventName": "PRNG_BLOCKED_CYCLES",
+               "BriefDescription": "PRNG Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "68",
+               "EventName": "SHA_FUNCTIONS",
+               "BriefDescription": "SHA Function Count",
+               "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "69",
+               "EventName": "SHA_CYCLES",
+               "BriefDescription": "SHA Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "70",
+               "EventName": "SHA_BLOCKED_FUNCTIONS",
+               "BriefDescription": "SHA Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "71",
+               "EventName": "SHA_BLOCKED_CYCLES",
+               "BriefDescription": "SHA Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "72",
+               "EventName": "DEA_FUNCTIONS",
+               "BriefDescription": "DEA Function Count",
+               "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "73",
+               "EventName": "DEA_CYCLES",
+               "BriefDescription": "DEA Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "74",
+               "EventName": "DEA_BLOCKED_FUNCTIONS",
+               "BriefDescription": "DEA Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "75",
+               "EventName": "DEA_BLOCKED_CYCLES",
+               "BriefDescription": "DEA Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "76",
+               "EventName": "AES_FUNCTIONS",
+               "BriefDescription": "AES Function Count",
+               "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "77",
+               "EventName": "AES_CYCLES",
+               "BriefDescription": "AES Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "78",
+               "EventName": "AES_BLOCKED_FUNCTIONS",
+               "BriefDescription": "AES Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "79",
+               "EventName": "AES_BLOCKED_CYCLES",
+               "BriefDescription": "AES Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "80",
+               "EventName": "ECC_FUNCTION_COUNT",
+               "BriefDescription": "ECC Function Count",
+               "PublicDescription": "This counter counts the total number of the elliptic-curve cryptography (ECC) functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "81",
+               "EventName": "ECC_CYCLES_COUNT",
+               "BriefDescription": "ECC Cycles Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the ECC coprocessor is busy performing the elliptic-curve cryptography (ECC) functions issued by the CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "82",
+               "EventName": "ECC_BLOCKED_FUNCTION_COUNT",
+               "BriefDescription": "ECC Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the elliptic-curve cryptography (ECC) functions that are issued by the CPU and are blocked because the ECC coprocessor is busy performing a function issued by another CPU."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "83",
+               "EventName": "ECC_BLOCKED_CYCLES_COUNT",
+               "BriefDescription": "ECC Blocked Cycles Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the elliptic-curve cryptography (ECC) functions issued by the CPU because the ECC coprocessor is busy performing a function issued by another CPU."
+       }
+]
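
The blocked-function and blocked-cycle counters in crypto6.json pair naturally with their unblocked counterparts, so a simple derived ratio shows how often the coprocessor was contended. A minimal sketch, assuming the raw counter values have already been collected (for example with perf stat) and are passed in as plain integers; the helper name and sample numbers are illustrative only:

def blocked_cycle_ratio(cycles, blocked_cycles):
    """Fraction of coprocessor-related cycles spent blocked behind another CPU."""
    total = cycles + blocked_cycles
    return blocked_cycles / total if total else 0.0

if __name__ == "__main__":
    # Illustrative values standing in for AES_CYCLES and AES_BLOCKED_CYCLES.
    aes_cycles = 1_200_000
    aes_blocked_cycles = 300_000
    print(f"AES blocked-cycle ratio: {blocked_cycle_ratio(aes_cycles, aes_blocked_cycles):.2%}")
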
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/extended.json b/tools/perf/pmu-events/arch/s390/cf_z16/extended.json
new file mode 100644 (file)
index 0000000..c306190
--- /dev/null
@@ -0,0 +1,492 @@
+[
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "128",
+               "EventName": "L1D_RO_EXCL_WRITES",
+               "BriefDescription": "L1D Read-only Exclusive Writes",
+               "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "129",
+               "EventName": "DTLB2_WRITES",
+               "BriefDescription": "DTLB2 Writes",
+               "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the Level-1 Data cache. This is a replacement for what was provided for the DTLB on z13 and prior machines."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "130",
+               "EventName": "DTLB2_MISSES",
+               "BriefDescription": "DTLB2 Misses",
+               "PublicDescription": "A TLB2 miss is in progress for a request made by the Level-1 Data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle. This is a replacement for what was provided for the DTLB on z13 and prior machines."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "131",
+               "EventName": "CRSTE_1MB_WRITES",
+               "BriefDescription": "One Megabyte CRSTE writes",
+               "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "132",
+               "EventName": "DTLB2_GPAGE_WRITES",
+               "BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
+               "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "134",
+               "EventName": "ITLB2_WRITES",
+               "BriefDescription": "ITLB2 Writes",
+               "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache. This is a replacement for what was provided for the ITLB on z13 and prior machines."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "135",
+               "EventName": "ITLB2_MISSES",
+               "BriefDescription": "ITLB2 Misses",
+               "PublicDescription": "A TLB2 miss is in progress for a request made by the Level-1 Instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle. This is a replacement for what was provided for the ITLB on z13 and prior machines."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "137",
+               "EventName": "TLB2_PTE_WRITES",
+               "BriefDescription": "TLB2 Page Table Entry Writes",
+               "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "138",
+               "EventName": "TLB2_CRSTE_WRITES",
+               "BriefDescription": "TLB2 Combined Region and Segment Entry Writes",
+               "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "139",
+               "EventName": "TLB2_ENGINES_BUSY",
+               "BriefDescription": "TLB2 Engines Busy",
+               "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "140",
+               "EventName": "TX_C_TEND",
+               "BriefDescription": "Completed TEND instructions in constrained TX mode",
+               "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "141",
+               "EventName": "TX_NC_TEND",
+               "BriefDescription": "Completed TEND instructions in non-constrained TX mode",
+               "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "143",
+               "EventName": "L1C_TLB2_MISSES",
+               "BriefDescription": "L1C TLB2 Misses",
+               "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "145",
+               "EventName": "DCW_REQ",
+               "BriefDescription": "Directory Write Level 1 Data Cache from Cache",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor's Level-2 cache."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "146",
+               "EventName": "DCW_REQ_IV",
+               "BriefDescription": "Directory Write Level 1 Data Cache from Cache with Intervention",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor's Level-2 cache with intervention."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "147",
+               "EventName": "DCW_REQ_CHIP_HIT",
+               "BriefDescription": "Directory Write Level 1 Data Cache from Cache with Chip HP Hit",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor's Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "148",
+               "EventName": "DCW_REQ_DRAWER_HIT",
+               "BriefDescription": "Directory Write Level 1 Data Cache from Cache with Drawer HP Hit",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor's Level-2 cache after using drawer level horizontal persistence, Drawer-HP hit."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "149",
+               "EventName": "DCW_ON_CHIP",
+               "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "150",
+               "EventName": "DCW_ON_CHIP_IV",
+               "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache with Intervention",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache with intervention."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "151",
+               "EventName": "DCW_ON_CHIP_CHIP_HIT",
+               "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache with Chip HP Hit",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "152",
+               "EventName": "DCW_ON_CHIP_DRAWER_HIT",
+               "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache with Drawer HP Hit",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "153",
+               "EventName": "DCW_ON_MODULE",
+               "BriefDescription": "Directory Write Level 1 Data Cache from On-Module Cache",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Module Level-2 cache."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "154",
+               "EventName": "DCW_ON_DRAWER",
+               "BriefDescription": "Directory Write Level 1 Data Cache from On-Drawer Cache",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "155",
+               "EventName": "DCW_OFF_DRAWER",
+               "BriefDescription": "Directory Write Level 1 Data Cache from Off-Drawer Cache",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "156",
+               "EventName": "DCW_ON_CHIP_MEMORY",
+               "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Memory",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "157",
+               "EventName": "DCW_ON_MODULE_MEMORY",
+               "BriefDescription": "Directory Write Level 1 Data Cache from On-Module Memory",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Module memory."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "158",
+               "EventName": "DCW_ON_DRAWER_MEMORY",
+               "BriefDescription": "Directory Write Level 1 Data Cache from On-Drawer Memory",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "159",
+               "EventName": "DCW_OFF_DRAWER_MEMORY",
+               "BriefDescription": "Directory Write Level 1 Data Cache from Off-Drawer Memory",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "160",
+               "EventName": "IDCW_ON_MODULE_IV",
+               "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Cache with Intervention",
+               "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache with intervention."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "161",
+               "EventName": "IDCW_ON_MODULE_CHIP_HIT",
+               "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Cache with Chip Hit",
+               "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache using chip horizontal persistence, Chip-HP hit."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "162",
+               "EventName": "IDCW_ON_MODULE_DRAWER_HIT",
+               "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Cache with Drawer Hit",
+               "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "163",
+               "EventName": "IDCW_ON_DRAWER_IV",
+               "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer Cache with Intervention",
+               "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache with intervention."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "164",
+               "EventName": "IDCW_ON_DRAWER_CHIP_HIT",
+               "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer Cache with Chip Hit",
+               "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "165",
+               "EventName": "IDCW_ON_DRAWER_DRAWER_HIT",
+               "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer Cache with Drawer Hit",
+               "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "166",
+               "EventName": "IDCW_OFF_DRAWER_IV",
+               "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer Cache with Intervention",
+               "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache with intervention."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "167",
+               "EventName": "IDCW_OFF_DRAWER_CHIP_HIT",
+               "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer Cache with Chip Hit",
+               "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "168",
+               "EventName": "IDCW_OFF_DRAWER_DRAWER_HIT",
+               "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer Cache with Drawer Hit",
+               "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "169",
+               "EventName": "ICW_REQ",
+               "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestor’s Level-2 cache."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "170",
+               "EventName": "ICW_REQ_IV",
+               "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Intervention",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestor’s Level-2 cache with intervention."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "171",
+               "EventName": "ICW_REQ_CHIP_HIT",
+               "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Chip HP Hit",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestor’s Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "172",
+               "EventName": "ICW_REQ_DRAWER_HIT",
+               "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Drawer HP Hit",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestor’s Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "173",
+               "EventName": "ICW_ON_CHIP",
+               "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-2 cache."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "174",
+               "EventName": "ICW_ON_CHIP_IV",
+               "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache with Intervention",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-2 cache with intervention."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "175",
+               "EventName": "ICW_ON_CHIP_CHIP_HIT",
+               "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache with Chip HP Hit",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "176",
+               "EventName": "ICW_ON_CHIP_DRAWER_HIT",
+               "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache with Drawer HP Hit",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "177",
+               "EventName": "ICW_ON_MODULE",
+               "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Module Cache",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "178",
+               "EventName": "ICW_ON_DRAWER",
+               "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Drawer Cache",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "179",
+               "EventName": "ICW_OFF_DRAWER",
+               "BriefDescription": "Directory Write Level 1 Instruction Cache from Off-Drawer Cache",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "180",
+               "EventName": "ICW_ON_CHIP_MEMORY",
+               "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Memory",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "181",
+               "EventName": "ICW_ON_MODULE_MEMORY",
+               "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Module Memory",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Module memory."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "182",
+               "EventName": "ICW_ON_DRAWER_MEMORY",
+               "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Drawer Memory",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "183",
+               "EventName": "ICW_OFF_DRAWER_MEMORY",
+               "BriefDescription": "Directory Write Level 1 Instruction Cache from Off-Drawer Memory",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "224",
+               "EventName": "BCD_DFP_EXECUTION_SLOTS",
+               "BriefDescription": "Binary Coded Decimal to Decimal Floating Point conversions",
+               "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "225",
+               "EventName": "VX_BCD_EXECUTION_SLOTS",
+               "BriefDescription": "Count finished vector arithmetic Binary Coded Decimal instructions",
+               "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMP, VMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOP, VCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVD, VCVDG."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "226",
+               "EventName": "DECIMAL_INSTRUCTIONS",
+               "BriefDescription": "Decimal instruction dispatched",
+               "PublicDescription": "Decimal instruction dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "232",
+               "EventName": "LAST_HOST_TRANSLATIONS",
+               "BriefDescription": "Last host translation done",
+               "PublicDescription": "Last Host Translation done"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "244",
+               "EventName": "TX_NC_TABORT",
+               "BriefDescription": "Aborted transactions in unconstrained TX mode",
+               "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "245",
+               "EventName": "TX_C_TABORT_NO_SPECIAL",
+               "BriefDescription": "Aborted transactions in constrained TX mode",
+               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "246",
+               "EventName": "TX_C_TABORT_SPECIAL",
+               "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
+               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "248",
+               "EventName": "DFLT_ACCESS",
+               "BriefDescription": "Cycles CPU spent obtaining access to Deflate unit",
+               "PublicDescription": "Cycles CPU spent obtaining access to Deflate unit"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "253",
+               "EventName": "DFLT_CYCLES",
+               "BriefDescription": "Cycles CPU is using Deflate unit",
+               "PublicDescription": "Cycles CPU is using Deflate unit"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "256",
+               "EventName": "SORTL",
+               "BriefDescription": "Count SORTL instructions",
+               "PublicDescription": "Increments by one for every SORT LISTS instruction executed."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "265",
+               "EventName": "DFLT_CC",
+               "BriefDescription": "Increments DEFLATE CONVERSION CALL",
+               "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "266",
+               "EventName": "DFLT_CCFINISH",
+               "BriefDescription": "Increments completed DEFLATE CONVERSION CALL",
+               "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "267",
+               "EventName": "NNPA_INVOCATIONS",
+               "BriefDescription": "NNPA Total invocations",
+               "PublicDescription": "Increments by one for every Neural Network Processing Assist instruction executed."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "268",
+               "EventName": "NNPA_COMPLETIONS",
+               "BriefDescription": "NNPA Total completions",
+               "PublicDescription": "Increments by one for every Neural Network Processing Assist instruction executed that ended in Condition Codes 0, 1 or 2."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "269",
+               "EventName": "NNPA_WAIT_LOCK",
+               "BriefDescription": "Cycles spent obtaining NNPA lock",
+               "PublicDescription": "Cycles CPU spent obtaining access to IBM Z Integrated Accelerator for AI."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "270",
+               "EventName": "NNPA_HOLD_LOCK",
+               "BriefDescription": "Cycles spent holding NNPA lock",
+               "PublicDescription": "Cycles CPU is using IBM Z Integrated Accelerator for AI."
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "448",
+               "EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE",
+               "BriefDescription": "Cycle count with one thread active",
+               "PublicDescription": "Cycle count with one thread active"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "449",
+               "EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
+               "BriefDescription": "Cycle count with two threads active",
+               "PublicDescription": "Cycle count with two threads active"
+       }
+]
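Each entry above follows the common perf pmu-events JSON layout: "Unit" names the PMU (the s390 CPU-Measurement counter facility), "EventCode" is the raw counter number, "EventName" is the alias perf exposes, and the two description fields are the help text surfaced by perf list. As a minimal sketch (the path is only an example and assumes the file lands under tools/perf/pmu-events/arch/s390/cf_z16/), the array can be loaded and summarised with nothing more than the standard json module:

import json

# Illustrative path only; any of the event files added above has the same shape.
path = "tools/perf/pmu-events/arch/s390/cf_z16/extended.json"

with open(path) as f:
    events = json.load(f)  # the file is a flat JSON array of event objects

for ev in events:
    # Every entry carries Unit, EventCode, EventName and the two description fields.
    print(f'{ev["EventCode"]:>4}  {ev["EventName"]:<32}  {ev["BriefDescription"]}')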
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
new file mode 100644 (file)
index 0000000..1a0034f
--- /dev/null
@@ -0,0 +1,7 @@
+[
+  {
+    "BriefDescription": "Transaction count",
+    "MetricName": "transaction",
+    "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+  }
+]
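The metric above is a plain sum of the five transactional-execution counters named in its MetricExpr: every transaction either ends (TEND) or aborts (TABORT). A small Python sketch of the same arithmetic, with made-up counter values used purely for illustration:

# Sketch only: evaluate the "transaction" metric from raw counter values.
# The numbers are placeholders, not measurements.
counts = {
    "TX_C_TEND": 1200,             # transactions ended in constrained mode
    "TX_NC_TEND": 3400,            # transactions ended in non-constrained mode
    "TX_NC_TABORT": 150,           # aborts in non-constrained mode
    "TX_C_TABORT_SPECIAL": 12,     # constrained aborts completed via special logic
    "TX_C_TABORT_NO_SPECIAL": 30,  # constrained aborts without special logic
}

transaction = sum(counts[name] for name in (
    "TX_C_TEND", "TX_NC_TEND", "TX_NC_TABORT",
    "TX_C_TABORT_SPECIAL", "TX_C_TABORT_NO_SPECIAL"))
print("transaction =", transaction)  # 4792 with the placeholder values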
index 783de7f..9bd20a5 100644 (file)
@@ -3,84 +3,84 @@
                "Unit": "CPU-M-CF",
                "EventCode": "0",
                "EventName": "CPU_CYCLES",
-               "BriefDescription": "CPU Cycles",
-               "PublicDescription": "Cycle Count"
+               "BriefDescription": "Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "1",
                "EventName": "INSTRUCTIONS",
-               "BriefDescription": "Instructions",
-               "PublicDescription": "Instruction Count"
+               "BriefDescription": "Instruction Count",
+               "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "2",
                "EventName": "L1I_DIR_WRITES",
-               "BriefDescription": "L1I Directory Writes",
-               "PublicDescription": "Level-1 I-Cache Directory Write Count"
+               "BriefDescription": "Level-1 I-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "3",
                "EventName": "L1I_PENALTY_CYCLES",
-               "BriefDescription": "L1I Penalty Cycles",
-               "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+               "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "4",
                "EventName": "L1D_DIR_WRITES",
-               "BriefDescription": "L1D Directory Writes",
-               "PublicDescription": "Level-1 D-Cache Directory Write Count"
+               "BriefDescription": "Level-1 D-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "5",
                "EventName": "L1D_PENALTY_CYCLES",
-               "BriefDescription": "L1D Penalty Cycles",
-               "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+               "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "32",
                "EventName": "PROBLEM_STATE_CPU_CYCLES",
-               "BriefDescription": "Problem-State CPU Cycles",
-               "PublicDescription": "Problem-State Cycle Count"
+               "BriefDescription": "Problem-State Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "33",
                "EventName": "PROBLEM_STATE_INSTRUCTIONS",
-               "BriefDescription": "Problem-State Instructions",
-               "PublicDescription": "Problem-State Instruction Count"
+               "BriefDescription": "Problem-State Instruction Count",
+               "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "34",
                "EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
-               "BriefDescription": "Problem-State L1I Directory Writes",
-               "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+               "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "35",
                "EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
-               "BriefDescription": "Problem-State L1I Penalty Cycles",
-               "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+               "BriefDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "36",
                "EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
-               "BriefDescription": "Problem-State L1D Directory Writes",
-               "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+               "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "37",
                "EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
-               "BriefDescription": "Problem-State L1D Penalty Cycles",
-               "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
+               "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
        }
 ]
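The basic counters above are usually consumed as ratios rather than raw values; cycles per instruction and the problem-state share of cycles are two common examples. A sketch with placeholder numbers (not measurements):

# Two ratios commonly derived from the basic counter set.
cpu_cycles = 10_000_000            # CPU_CYCLES
instructions = 4_000_000           # INSTRUCTIONS
problem_state_cycles = 7_500_000   # PROBLEM_STATE_CPU_CYCLES

cpi = cpu_cycles / instructions                      # cycles per instruction
problem_share = problem_state_cycles / cpu_cycles    # fraction of cycles in problem state
print(f"CPI = {cpi:.2f}, problem-state share = {problem_share:.0%}")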
index 3f28007..a8d391d 100644 (file)
                "Unit": "CPU-M-CF",
                "EventCode": "64",
                "EventName": "PRNG_FUNCTIONS",
-               "BriefDescription": "PRNG Functions",
-               "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+               "BriefDescription": "PRNG Function Count",
+               "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "65",
                "EventName": "PRNG_CYCLES",
-               "BriefDescription": "PRNG Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+               "BriefDescription": "PRNG Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom-number-generation functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "66",
                "EventName": "PRNG_BLOCKED_FUNCTIONS",
-               "BriefDescription": "PRNG Blocked Functions",
-               "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "PRNG Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "67",
                "EventName": "PRNG_BLOCKED_CYCLES",
-               "BriefDescription": "PRNG Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "PRNG Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "68",
                "EventName": "SHA_FUNCTIONS",
-               "BriefDescription": "SHA Functions",
-               "PublicDescription": "Total number of SHA functions issued by the CPU"
+               "BriefDescription": "SHA Function Count",
+               "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "69",
                "EventName": "SHA_CYCLES",
-               "BriefDescription": "SHA Cycles",
-               "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+               "BriefDescription": "SHA Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "70",
                "EventName": "SHA_BLOCKED_FUNCTIONS",
-               "BriefDescription": "SHA Blocked Functions",
-               "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "SHA Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "71",
                "EventName": "SHA_BLOCKED_CYCLES",
-               "BriefDescription": "SHA Bloced Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "SHA Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "72",
                "EventName": "DEA_FUNCTIONS",
-               "BriefDescription": "DEA Functions",
-               "PublicDescription": "Total number of the DEA functions issued by the CPU"
+               "BriefDescription": "DEA Function Count",
+               "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "73",
                "EventName": "DEA_CYCLES",
-               "BriefDescription": "DEA Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+               "BriefDescription": "DEA Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "74",
                "EventName": "DEA_BLOCKED_FUNCTIONS",
-               "BriefDescription": "DEA Blocked Functions",
-               "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "DEA Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "75",
                "EventName": "DEA_BLOCKED_CYCLES",
-               "BriefDescription": "DEA Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "DEA Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "76",
                "EventName": "AES_FUNCTIONS",
-               "BriefDescription": "AES Functions",
-               "PublicDescription": "Total number of AES functions issued by the CPU"
+               "BriefDescription": "AES Function Count",
+               "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "77",
                "EventName": "AES_CYCLES",
-               "BriefDescription": "AES Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+               "BriefDescription": "AES Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "78",
                "EventName": "AES_BLOCKED_FUNCTIONS",
-               "BriefDescription": "AES Blocked Functions",
-               "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "AES Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "79",
                "EventName": "AES_BLOCKED_CYCLES",
-               "BriefDescription": "AES Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "AES Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        }
 ]
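The paired function/blocked-function and cycle/blocked-cycle counters above make it easy to gauge contention for the shared coprocessors. A sketch using the AES counters, with placeholder values only:

# Rough contention gauge for the shared DEA/AES coprocessor.
aes_functions = 50_000       # AES_FUNCTIONS
aes_blocked = 2_000          # AES_BLOCKED_FUNCTIONS
aes_cycles = 900_000         # AES_CYCLES
aes_blocked_cycles = 45_000  # AES_BLOCKED_CYCLES

blocked_ratio = aes_blocked / aes_functions       # share of requests that had to wait
wait_overhead = aes_blocked_cycles / aes_cycles   # wait cycles relative to busy cycles
print(f"blocked requests: {blocked_ratio:.1%}, wait/busy cycles: {wait_overhead:.1%}")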
index 86b29fd..6ebbdba 100644 (file)
@@ -4,14 +4,14 @@
                "EventCode": "128",
                "EventName": "L1D_L2_SOURCED_WRITES",
                "BriefDescription": "L1D L2 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from the Level-2 cache"
+               "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from the Level-2 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "129",
                "EventName": "L1I_L2_SOURCED_WRITES",
                "BriefDescription": "L1I L2 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from the Level-2 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "133",
                "EventName": "L2C_STORES_SENT",
                "BriefDescription": "L2C Stores Sent",
-               "PublicDescription": "Incremented by one for every store sent to Level-2 cache"
+               "PublicDescription": "Incremented by one for every store sent to Level-2 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "134",
                "EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Book L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache"
+               "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an Off Book Level-3 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "135",
                "EventName": "L1D_ONBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1D On-Book L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an On Book Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "136",
                "EventName": "L1I_ONBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1I On-Book L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an On Book Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "137",
                "EventName": "L1D_RO_EXCL_WRITES",
                "BriefDescription": "L1D Read-only Exclusive Writes",
-               "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+               "PublicDescription": "A directory write to the Level-1 Data Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "138",
                "EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Book L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "139",
                "EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Book L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "140",
                "EventName": "DTLB1_HPAGE_WRITES",
                "BriefDescription": "DTLB1 One-Megabyte Page Writes",
-               "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
+               "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "141",
                "EventName": "L1D_LMEM_SOURCED_WRITES",
                "BriefDescription": "L1D Local Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 D-Cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
+               "PublicDescription": "A directory write to the Level-1 Data Cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "142",
                "EventName": "L1I_LMEM_SOURCED_WRITES",
                "BriefDescription": "L1I Local Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)"
+               "PublicDescription": "A directory write to the Level-1 Instruction Cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "143",
                "EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Book L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an Off Book Level-3 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "144",
                "EventName": "DTLB1_WRITES",
                "BriefDescription": "DTLB1 Writes",
-               "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+               "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "145",
                "EventName": "ITLB1_WRITES",
                "BriefDescription": "ITLB1 Writes",
-               "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
+               "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "146",
                "EventName": "TLB2_PTE_WRITES",
                "BriefDescription": "TLB2 PTE Writes",
-               "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+               "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "147",
                "EventName": "TLB2_CRSTE_HPAGE_WRITES",
                "BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
-               "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
+               "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "148",
                "EventName": "TLB2_CRSTE_WRITES",
                "BriefDescription": "TLB2 CRSTE Writes",
-               "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
+               "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "150",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache"
+               "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an On Chip Level-3 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "152",
                "EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache"
+               "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "153",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an On Chip Level-3 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "155",
                "EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache."
        }
 ]
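The sourced-writes counters above break down L1 data-cache directory writes by where the returned line came from, so their relative shares give a rough picture of where misses are being satisfied. A sketch with placeholder values, using the event names defined above:

# Placeholder counter values; real numbers would come from perf stat.
l1d_sources = {
    "L1D_L2_SOURCED_WRITES": 600_000,
    "L1D_ONCHIP_L3_SOURCED_WRITES": 250_000,
    "L1D_OFFCHIP_L3_SOURCED_WRITES": 80_000,
    "L1D_OFFBOOK_L3_SOURCED_WRITES": 20_000,
    "L1D_ONBOOK_L4_SOURCED_WRITES": 30_000,
    "L1D_OFFBOOK_L4_SOURCED_WRITES": 10_000,
    "L1D_LMEM_SOURCED_WRITES": 10_000,
}

total = sum(l1d_sources.values())
for name, count in sorted(l1d_sources.items(), key=lambda kv: -kv[1]):
    print(f"{name:<34} {count / total:6.1%}")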
index 783de7f..9bd20a5 100644 (file)
@@ -3,84 +3,84 @@
                "Unit": "CPU-M-CF",
                "EventCode": "0",
                "EventName": "CPU_CYCLES",
-               "BriefDescription": "CPU Cycles",
-               "PublicDescription": "Cycle Count"
+               "BriefDescription": "Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "1",
                "EventName": "INSTRUCTIONS",
-               "BriefDescription": "Instructions",
-               "PublicDescription": "Instruction Count"
+               "BriefDescription": "Instruction Count",
+               "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "2",
                "EventName": "L1I_DIR_WRITES",
-               "BriefDescription": "L1I Directory Writes",
-               "PublicDescription": "Level-1 I-Cache Directory Write Count"
+               "BriefDescription": "Level-1 I-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "3",
                "EventName": "L1I_PENALTY_CYCLES",
-               "BriefDescription": "L1I Penalty Cycles",
-               "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+               "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "4",
                "EventName": "L1D_DIR_WRITES",
-               "BriefDescription": "L1D Directory Writes",
-               "PublicDescription": "Level-1 D-Cache Directory Write Count"
+               "BriefDescription": "Level-1 D-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "5",
                "EventName": "L1D_PENALTY_CYCLES",
-               "BriefDescription": "L1D Penalty Cycles",
-               "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+               "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "32",
                "EventName": "PROBLEM_STATE_CPU_CYCLES",
-               "BriefDescription": "Problem-State CPU Cycles",
-               "PublicDescription": "Problem-State Cycle Count"
+               "BriefDescription": "Problem-State Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "33",
                "EventName": "PROBLEM_STATE_INSTRUCTIONS",
-               "BriefDescription": "Problem-State Instructions",
-               "PublicDescription": "Problem-State Instruction Count"
+               "BriefDescription": "Problem-State Instruction Count",
+               "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "34",
                "EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
-               "BriefDescription": "Problem-State L1I Directory Writes",
-               "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+               "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "35",
                "EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
-               "BriefDescription": "Problem-State L1I Penalty Cycles",
-               "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+               "BriefDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "36",
                "EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
-               "BriefDescription": "Problem-State L1D Directory Writes",
-               "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+               "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+               "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "37",
                "EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
-               "BriefDescription": "Problem-State L1D Penalty Cycles",
-               "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
+               "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+               "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
        }
 ]
index 3f28007..a8d391d 100644 (file)
                "Unit": "CPU-M-CF",
                "EventCode": "64",
                "EventName": "PRNG_FUNCTIONS",
-               "BriefDescription": "PRNG Functions",
-               "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+               "BriefDescription": "PRNG Function Count",
+               "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "65",
                "EventName": "PRNG_CYCLES",
-               "BriefDescription": "PRNG Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+               "BriefDescription": "PRNG Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom-number-generation functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "66",
                "EventName": "PRNG_BLOCKED_FUNCTIONS",
-               "BriefDescription": "PRNG Blocked Functions",
-               "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "PRNG Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "67",
                "EventName": "PRNG_BLOCKED_CYCLES",
-               "BriefDescription": "PRNG Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "PRNG Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "68",
                "EventName": "SHA_FUNCTIONS",
-               "BriefDescription": "SHA Functions",
-               "PublicDescription": "Total number of SHA functions issued by the CPU"
+               "BriefDescription": "SHA Function Count",
+               "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "69",
                "EventName": "SHA_CYCLES",
-               "BriefDescription": "SHA Cycles",
-               "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+               "BriefDescription": "SHA Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "70",
                "EventName": "SHA_BLOCKED_FUNCTIONS",
-               "BriefDescription": "SHA Blocked Functions",
-               "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "SHA Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "71",
                "EventName": "SHA_BLOCKED_CYCLES",
-               "BriefDescription": "SHA Bloced Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "SHA Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "72",
                "EventName": "DEA_FUNCTIONS",
-               "BriefDescription": "DEA Functions",
-               "PublicDescription": "Total number of the DEA functions issued by the CPU"
+               "BriefDescription": "DEA Function Count",
+               "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "73",
                "EventName": "DEA_CYCLES",
-               "BriefDescription": "DEA Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+               "BriefDescription": "DEA Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "74",
                "EventName": "DEA_BLOCKED_FUNCTIONS",
-               "BriefDescription": "DEA Blocked Functions",
-               "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "DEA Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "75",
                "EventName": "DEA_BLOCKED_CYCLES",
-               "BriefDescription": "DEA Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "DEA Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "76",
                "EventName": "AES_FUNCTIONS",
-               "BriefDescription": "AES Functions",
-               "PublicDescription": "Total number of AES functions issued by the CPU"
+               "BriefDescription": "AES Function Count",
+               "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "77",
                "EventName": "AES_CYCLES",
-               "BriefDescription": "AES Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+               "BriefDescription": "AES Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "78",
                "EventName": "AES_BLOCKED_FUNCTIONS",
-               "BriefDescription": "AES Blocked Functions",
-               "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "AES Blocked Function Count",
+               "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "79",
                "EventName": "AES_BLOCKED_CYCLES",
-               "BriefDescription": "AES Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+               "BriefDescription": "AES Blocked Cycle Count",
+               "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
        }
 ]
index f40cbed..9e76558 100644 (file)
                "EventCode": "130",
                "EventName": "L1D_L2I_SOURCED_WRITES",
                "BriefDescription": "L1D L2I Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "131",
                "EventName": "L1I_L2I_SOURCED_WRITES",
                "BriefDescription": "L1I L2I Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "132",
                "EventName": "L1D_L2D_SOURCED_WRITES",
                "BriefDescription": "L1D L2D Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "133",
                "EventName": "DTLB1_WRITES",
                "BriefDescription": "DTLB1 Writes",
-               "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+               "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "135",
                "EventName": "L1D_LMEM_SOURCED_WRITES",
                "BriefDescription": "L1D Local Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
+               "PublicDescription": "A directory write to the Level-1 Data cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "137",
                "EventName": "L1I_LMEM_SOURCED_WRITES",
                "BriefDescription": "L1I Local Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "138",
                "EventName": "L1D_RO_EXCL_WRITES",
                "BriefDescription": "L1D Read-only Exclusive Writes",
-               "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+               "PublicDescription": "A directory write to the Level-1 Data Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "139",
                "EventName": "DTLB1_HPAGE_WRITES",
                "BriefDescription": "DTLB1 One-Megabyte Page Writes",
-               "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
+               "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "140",
                "EventName": "ITLB1_WRITES",
                "BriefDescription": "ITLB1 Writes",
-               "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
+               "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "141",
                "EventName": "TLB2_PTE_WRITES",
                "BriefDescription": "TLB2 PTE Writes",
-               "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+               "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "142",
                "EventName": "TLB2_CRSTE_HPAGE_WRITES",
                "BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
-               "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
+               "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "143",
                "EventName": "TLB2_CRSTE_WRITES",
                "BriefDescription": "TLB2 CRSTE Writes",
-               "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
+               "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "144",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "145",
                "EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "146",
                "EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Book L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "147",
                "EventName": "L1D_ONBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1D On-Book L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Book Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "148",
                "EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1D Off-Book L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "149",
                "EventName": "TX_NC_TEND",
                "BriefDescription": "Completed TEND instructions in non-constrained TX mode",
-               "PublicDescription": "A TEND instruction has completed in a nonconstrained transactional-execution mode"
+               "PublicDescription": "A TEND instruction has completed in a nonconstrained transactional-execution mode."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "150",
                "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from a On Chip Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from a On Chip Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "151",
                "EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D Off-Chip L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "152",
                "EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1D Off-Book L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "153",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "154",
                "EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "155",
                "EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Book L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "156",
                "EventName": "L1I_ONBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1I On-Book L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Book Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "157",
                "EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES",
                "BriefDescription": "L1I Off-Book L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "158",
                "EventName": "TX_C_TEND",
                "BriefDescription": "Completed TEND instructions in constrained TX mode",
-               "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+               "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "159",
                "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "160",
                "EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I Off-Chip L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "161",
                "EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES_IV",
                "BriefDescription": "L1I Off-Book L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention"
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "177",
                "EventName": "TX_NC_TABORT",
                "BriefDescription": "Aborted transactions in non-constrained TX mode",
-               "PublicDescription": "A transaction abort has occurred in a nonconstrained transactional-execution mode"
+               "PublicDescription": "A transaction abort has occurred in a nonconstrained transactional-execution mode."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "178",
                "EventName": "TX_C_TABORT_NO_SPECIAL",
                "BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
-               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
        },
        {
                "Unit": "CPU-M-CF",
                "EventCode": "179",
                "EventName": "TX_C_TABORT_SPECIAL",
                "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
-               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
        }
 ]
index 61641a3..a918e1a 100644 (file)
@@ -5,3 +5,4 @@ Family-model,Version,Filename,EventType
 ^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core
 ^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
 ^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_z15,core
+^IBM.393[12].*3\.7.[[:xdigit:]]+$,3,cf_z16,core
index 6b24958..f8bdf78 100644 (file)
@@ -37,7 +37,7 @@
     {
         "BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
         "MetricExpr": "TOPDOWN.SLOTS / ( TOPDOWN.SLOTS / 2 ) if #SMT_on else 1",
-        "MetricGroup": "SMT",
+        "MetricGroup": "SMT;TmaL1",
         "MetricName": "Slots_Utilization",
         "Unit": "cpu_core"
     },
         "Unit": "cpu_core"
     },
     {
-        "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width)",
-        "MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) ) / ( 2 * CPU_CLK_UNHALTED.DISTRIBUTED )",
+        "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+        "MetricExpr": "( FP_ARITH_DISPATCHED.PORT_0 + FP_ARITH_DISPATCHED.PORT_1 + FP_ARITH_DISPATCHED.PORT_5 ) / ( 2 * CPU_CLK_UNHALTED.DISTRIBUTED )",
         "MetricGroup": "Cor;Flops;HPC",
         "MetricName": "FP_Arith_Utilization",
-        "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting.",
+        "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common).",
         "Unit": "cpu_core"
     },
     {
-        "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+        "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
         "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
         "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
         "MetricName": "ILP",
         "Unit": "cpu_core"
     },
     {
-        "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
-        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
-        "MetricGroup": "Bad;BadSpec;BrMispredicts",
-        "MetricName": "IpMispredict",
-        "Unit": "cpu_core"
-    },
-    {
         "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
         "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
         "MetricGroup": "SMT",
         "Unit": "cpu_core"
     },
     {
+        "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / cpu_core@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+        "MetricGroup": "Prefetches",
+        "MetricName": "IpSWPF",
+        "Unit": "cpu_core"
+    },
+    {
         "BriefDescription": "Total number of retired Instructions, Sample with: INST_RETIRED.PREC_DIST",
         "MetricExpr": "INST_RETIRED.ANY",
         "MetricGroup": "Summary;TmaL1",
         "Unit": "cpu_core"
     },
     {
+        "BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
+        "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+        "MetricGroup": "Pipeline;Ret",
+        "MetricName": "Strings_Cycles",
+        "Unit": "cpu_core"
+    },
+    {
+        "BriefDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / cpu_core@ASSISTS.ANY\\,umask\\=0x1B@",
+        "MetricGroup": "Pipeline;Ret;Retire",
+        "MetricName": "IpAssist",
+        "Unit": "cpu_core"
+    },
+    {
+        "BriefDescription": "",
+        "MetricExpr": "UOPS_EXECUTED.THREAD / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+        "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+        "MetricName": "Execute",
+        "Unit": "cpu_core"
+    },
+    {
         "BriefDescription": "Average number of Uops issued by front-end when it issued something",
         "MetricExpr": "UOPS_ISSUED.ANY / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@",
         "MetricGroup": "Fed;FetchBW",
         "Unit": "cpu_core"
     },
     {
-        "BriefDescription": "Number of Instructions per non-speculative DSB miss",
+        "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+        "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+        "MetricGroup": "DSBmiss",
+        "MetricName": "DSB_Switch_Cost",
+        "Unit": "cpu_core"
+    },
+    {
+        "BriefDescription": "Number of Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
         "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
         "MetricGroup": "DSBmiss;Fed",
         "MetricName": "IpDSB_Miss_Ret",
         "Unit": "cpu_core"
     },
     {
+        "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+        "MetricGroup": "Bad;BadSpec;BrMispredicts",
+        "MetricName": "IpMispredict",
+        "Unit": "cpu_core"
+    },
+    {
         "BriefDescription": "Fraction of branches that are non-taken conditionals",
         "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
         "MetricGroup": "Bad;Branches;CodeGen;PGO",
         "Unit": "cpu_core"
     },
     {
-        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles)",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
+        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / MEM_LOAD_COMPLETED.L1_MISS_ANY",
         "MetricGroup": "Mem;MemoryBound;MemoryLat",
         "MetricName": "Load_Miss_Real_Latency",
-        "PublicDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles). Latency may be overestimated for multi-load instructions - e.g. repeat strings.",
         "Unit": "cpu_core"
     },
     {
         "Unit": "cpu_core"
     },
     {
-        "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
-        "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
-        "MetricGroup": "Mem;MemoryBW",
-        "MetricName": "L1D_Cache_Fill_BW",
-        "Unit": "cpu_core"
-    },
-    {
-        "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
-        "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
-        "MetricGroup": "Mem;MemoryBW",
-        "MetricName": "L2_Cache_Fill_BW",
-        "Unit": "cpu_core"
-    },
-    {
-        "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
-        "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
-        "MetricGroup": "Mem;MemoryBW",
-        "MetricName": "L3_Cache_Fill_BW",
-        "Unit": "cpu_core"
-    },
-    {
-        "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
-        "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
-        "MetricGroup": "Mem;MemoryBW;Offcore",
-        "MetricName": "L3_Cache_Access_BW",
-        "Unit": "cpu_core"
-    },
-    {
         "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
         "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
         "MetricGroup": "Mem;CacheMisses",
         "Unit": "cpu_core"
     },
     {
-        "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+        "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
         "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
         "MetricGroup": "Mem;CacheMisses;Offcore",
         "MetricName": "L2MPKI_All",
         "Unit": "cpu_core"
     },
     {
-        "BriefDescription": "L2 cache misses per kilo instruction for all demand loads  (including speculative)",
+        "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads  (including speculative)",
         "MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
         "MetricGroup": "Mem;CacheMisses",
         "MetricName": "L2MPKI_Load",
         "Unit": "cpu_core"
     },
     {
-        "BriefDescription": "Fill Buffer (FB) true hits per kilo instructions for retired demand loads",
+        "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
         "MetricExpr": "1000 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
         "MetricGroup": "Mem;CacheMisses",
         "MetricName": "FB_HPKI",
         "Unit": "cpu_core"
     },
     {
+        "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+        "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+        "MetricGroup": "Mem;MemoryBW",
+        "MetricName": "L1D_Cache_Fill_BW",
+        "Unit": "cpu_core"
+    },
+    {
+        "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+        "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+        "MetricGroup": "Mem;MemoryBW",
+        "MetricName": "L2_Cache_Fill_BW",
+        "Unit": "cpu_core"
+    },
+    {
+        "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+        "MetricGroup": "Mem;MemoryBW",
+        "MetricName": "L3_Cache_Fill_BW",
+        "Unit": "cpu_core"
+    },
+    {
+        "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+        "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+        "MetricGroup": "Mem;MemoryBW;Offcore",
+        "MetricName": "L3_Cache_Access_BW",
+        "Unit": "cpu_core"
+    },
+    {
+        "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+        "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+        "MetricGroup": "Mem;MemoryBW",
+        "MetricName": "L1D_Cache_Fill_BW_1T",
+        "Unit": "cpu_core"
+    },
+    {
+        "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+        "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+        "MetricGroup": "Mem;MemoryBW",
+        "MetricName": "L2_Cache_Fill_BW_1T",
+        "Unit": "cpu_core"
+    },
+    {
+        "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+        "MetricGroup": "Mem;MemoryBW",
+        "MetricName": "L3_Cache_Fill_BW_1T",
+        "Unit": "cpu_core"
+    },
+    {
+        "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+        "MetricExpr": "(64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time)",
+        "MetricGroup": "Mem;MemoryBW;Offcore",
+        "MetricName": "L3_Cache_Access_BW_1T",
+        "Unit": "cpu_core"
+    },
+    {
         "BriefDescription": "Average CPU Utilization",
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
         "MetricGroup": "HPC;Summary",
         "MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 ) / duration_time",
         "MetricGroup": "Cor;Flops;HPC",
         "MetricName": "GFLOPs",
+        "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine.",
         "Unit": "cpu_core"
     },
     {
     },
     {
         "BriefDescription": "Counts the total number of issue slots  that were not consumed by the backend due to backend stalls",
-        "MetricExpr": "TOPDOWN_BE_BOUND.ALL / (5 * CPU_CLK_UNHALTED.CORE)",
+        "MetricExpr": "(TOPDOWN_BE_BOUND.ALL / (5 * CPU_CLK_UNHALTED.CORE))",
         "MetricGroup": "TopdownL1",
         "MetricName": "Backend_Bound_Aux",
         "PublicDescription": "Counts the total number of issue slots  that were not consumed by the backend due to backend stalls.  Note that UOPS must be available for consumption in order for this event to count.  If a uop is not available (IQ is empty), this event will not count.  All of these subevents count backend stalls, in slots, due to a resource limitation.   These are not cycle based events and therefore can not be precisely added or subtracted from the Backend_Bound subevents which are cycle based.  These subevents are supplementary to Backend_Bound and can be used to analyze results from a resource perspective at allocation.  ",
     },
     {
         "BriefDescription": "Fraction of cycles spent in Kernel mode",
-        "MetricExpr": "CPU_CLK_UNHALTED.CORE:k / CPU_CLK_UNHALTED.CORE",
+        "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE@k / CPU_CLK_UNHALTED.CORE",
         "MetricName": "Kernel_Utilization",
         "Unit": "cpu_atom"
     },
     },
     {
         "BriefDescription": "Estimated Pause cost. In percent",
-        "MetricExpr": "100 * SERIALIZATION.NON_C01_MS_SCB / ( 5 * CPU_CLK_UNHALTED.CORE )",
+        "MetricExpr": "100 * SERIALIZATION.NON_C01_MS_SCB / (5 * CPU_CLK_UNHALTED.CORE)",
         "MetricName": "Estimated_Pause_Cost",
         "Unit": "cpu_atom"
     },
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
new file mode 100644 (file)
index 0000000..8f94978
--- /dev/null
@@ -0,0 +1,530 @@
+[
+    {
+        "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
+        "MetricExpr": "100 * (( BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) ) / TOPDOWN.SLOTS)",
+        "MetricGroup": "Ret",
+        "MetricName": "Branching_Overhead"
+    },
+    {
+        "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+        "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+        "MetricGroup": "Ret;Summary",
+        "MetricName": "IPC"
+    },
+    {
+        "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+        "MetricExpr": "1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)",
+        "MetricGroup": "Pipeline;Mem",
+        "MetricName": "CPI"
+    },
+    {
+        "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
+        "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+        "MetricGroup": "Pipeline",
+        "MetricName": "CLKS"
+    },
+    {
+        "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+        "MetricExpr": "TOPDOWN.SLOTS",
+        "MetricGroup": "TmaL1",
+        "MetricName": "SLOTS"
+    },
+    {
+        "BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
+        "MetricExpr": "TOPDOWN.SLOTS / ( TOPDOWN.SLOTS / 2 ) if #SMT_on else 1",
+        "MetricGroup": "SMT;TmaL1",
+        "MetricName": "Slots_Utilization"
+    },
+    {
+        "BriefDescription": "The ratio of Executed- by Issued-Uops",
+        "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+        "MetricGroup": "Cor;Pipeline",
+        "MetricName": "Execute_per_Issue",
+        "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+    },
+    {
+        "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+        "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.DISTRIBUTED",
+        "MetricGroup": "Ret;SMT;TmaL1",
+        "MetricName": "CoreIPC"
+    },
+    {
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricExpr": "( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + FP_ARITH_INST_RETIRED2.SCALAR_HALF ) + 2 * ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF ) + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * ( FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF + 4 * AMX_OPS_RETIRED.BF16 ) / CPU_CLK_UNHALTED.DISTRIBUTED",
+        "MetricGroup": "Ret;Flops",
+        "MetricName": "FLOPc"
+    },
+    {
+        "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+        "MetricExpr": "( FP_ARITH_DISPATCHED.PORT_0 + FP_ARITH_DISPATCHED.PORT_1 + FP_ARITH_DISPATCHED.PORT_5 ) / ( 2 * CPU_CLK_UNHALTED.DISTRIBUTED )",
+        "MetricGroup": "Cor;Flops;HPC",
+        "MetricName": "FP_Arith_Utilization",
+        "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
+    },
+    {
+        "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+        "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+        "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+        "MetricName": "ILP"
+    },
+    {
+        "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+        "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
+        "MetricGroup": "SMT",
+        "MetricName": "CORE_CLKS"
+    },
+    {
+        "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+        "MetricGroup": "InsType",
+        "MetricName": "IpLoad"
+    },
+    {
+        "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+        "MetricGroup": "InsType",
+        "MetricName": "IpStore"
+    },
+    {
+        "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+        "MetricGroup": "Branches;Fed;InsType",
+        "MetricName": "IpBranch"
+    },
+    {
+        "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+        "MetricGroup": "Branches;Fed;PGO",
+        "MetricName": "IpCall"
+    },
+    {
+        "BriefDescription": "Instruction per taken branch",
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+        "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO",
+        "MetricName": "IpTB"
+    },
+    {
+        "BriefDescription": "Branch instructions per taken branch. ",
+        "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+        "MetricGroup": "Branches;Fed;PGO",
+        "MetricName": "BpTkBranch"
+    },
+    {
+        "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + FP_ARITH_INST_RETIRED2.SCALAR_HALF ) + 2 * ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF ) + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * ( FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF + 4 * AMX_OPS_RETIRED.BF16 )",
+        "MetricGroup": "Flops;InsType",
+        "MetricName": "IpFLOP"
+    },
+    {
+        "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / ( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + FP_ARITH_INST_RETIRED2.SCALAR) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.VECTOR) )",
+        "MetricGroup": "Flops;InsType",
+        "MetricName": "IpArith",
+        "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
+    },
+    {
+        "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+        "MetricGroup": "Flops;FpScalar;InsType",
+        "MetricName": "IpArith_Scalar_SP",
+        "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+    },
+    {
+        "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+        "MetricGroup": "Flops;FpScalar;InsType",
+        "MetricName": "IpArith_Scalar_DP",
+        "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+    },
+    {
+        "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.128B_PACKED_HALF )",
+        "MetricGroup": "Flops;FpVector;InsType",
+        "MetricName": "IpArith_AVX128",
+        "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+    },
+    {
+        "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.256B_PACKED_HALF )",
+        "MetricGroup": "Flops;FpVector;InsType",
+        "MetricName": "IpArith_AVX256",
+        "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+    },
+    {
+        "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.512B_PACKED_HALF )",
+        "MetricGroup": "Flops;FpVector;InsType",
+        "MetricName": "IpArith_AVX512",
+        "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+    },
+    {
+        "BriefDescription": "Instructions per FP Arithmetic AMX operation (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / AMX_OPS_RETIRED.BF16",
+        "MetricGroup": "Flops;FpVector;InsType;Server",
+        "MetricName": "IpArith_AMX_F16",
+        "PublicDescription": "Instructions per FP Arithmetic AMX operation (lower number means higher occurrence rate). Operations factored per matrices' sizes of the AMX instructions."
+    },
+    {
+        "BriefDescription": "Instructions per Integer Arithmetic AMX operation (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / AMX_OPS_RETIRED.INT8",
+        "MetricGroup": "IntVector;InsType;Server",
+        "MetricName": "IpArith_AMX_Int8",
+        "PublicDescription": "Instructions per Integer Arithmetic AMX operation (lower number means higher occurrence rate). Operations factored per matrices' sizes of the AMX instructions."
+    },
+    {
+        "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+        "MetricGroup": "Prefetches",
+        "MetricName": "IpSWPF"
+    },
+    {
+        "BriefDescription": "Total number of retired Instructions, Sample with: INST_RETIRED.PREC_DIST",
+        "MetricExpr": "INST_RETIRED.ANY",
+        "MetricGroup": "Summary;TmaL1",
+        "MetricName": "Instructions"
+    },
+    {
+        "BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
+        "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+        "MetricGroup": "Pipeline;Ret",
+        "MetricName": "Strings_Cycles"
+    },
+    {
+        "BriefDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / cpu@ASSISTS.ANY\\,umask\\=0x1B@",
+        "MetricGroup": "Pipeline;Ret;Retire",
+        "MetricName": "IpAssist"
+    },
+    {
+        "BriefDescription": "",
+        "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+        "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+        "MetricName": "Execute"
+    },
+    {
+        "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+        "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
+        "MetricGroup": "Fed;FetchBW",
+        "MetricName": "Fetch_UpC"
+    },
+    {
+        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+        "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+        "MetricGroup": "DSB;Fed;FetchBW",
+        "MetricName": "DSB_Coverage"
+    },
+    {
+        "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+        "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+        "MetricGroup": "DSBmiss",
+        "MetricName": "DSB_Switch_Cost"
+    },
+    {
+        "BriefDescription": "Number of Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+        "MetricGroup": "DSBmiss;Fed",
+        "MetricName": "IpDSB_Miss_Ret"
+    },
+    {
+        "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+        "MetricGroup": "Bad;BadSpec;BrMispredicts",
+        "MetricName": "IpMispredict"
+    },
+    {
+        "BriefDescription": "Fraction of branches that are non-taken conditionals",
+        "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+        "MetricGroup": "Bad;Branches;CodeGen;PGO",
+        "MetricName": "Cond_NT"
+    },
+    {
+        "BriefDescription": "Fraction of branches that are taken conditionals",
+        "MetricExpr": "BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+        "MetricGroup": "Bad;Branches;CodeGen;PGO",
+        "MetricName": "Cond_TK"
+    },
+    {
+        "BriefDescription": "Fraction of branches that are CALL or RET",
+        "MetricExpr": "( BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN ) / BR_INST_RETIRED.ALL_BRANCHES",
+        "MetricGroup": "Bad;Branches",
+        "MetricName": "CallRet"
+    },
+    {
+        "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+        "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
+        "MetricGroup": "Bad;Branches",
+        "MetricName": "Jump"
+    },
+    {
+        "BriefDescription": "Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)",
+        "MetricExpr": "1 - ( (BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES) + (BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES) + (( BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN ) / BR_INST_RETIRED.ALL_BRANCHES) + ((BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES) )",
+        "MetricGroup": "Bad;Branches",
+        "MetricName": "Other_Branches"
+    },
+    {
+        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / MEM_LOAD_COMPLETED.L1_MISS_ANY",
+        "MetricGroup": "Mem;MemoryBound;MemoryLat",
+        "MetricName": "Load_Miss_Real_Latency"
+    },
+    {
+        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+        "MetricGroup": "Mem;MemoryBound;MemoryBW",
+        "MetricName": "MLP"
+    },
+    {
+        "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+        "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+        "MetricGroup": "Mem;CacheMisses",
+        "MetricName": "L1MPKI"
+    },
+    {
+        "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+        "MetricExpr": "1000 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
+        "MetricGroup": "Mem;CacheMisses",
+        "MetricName": "L1MPKI_Load"
+    },
+    {
+        "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+        "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "MetricGroup": "Mem;Backend;CacheMisses",
+        "MetricName": "L2MPKI"
+    },
+    {
+        "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+        "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+        "MetricGroup": "Mem;CacheMisses;Offcore",
+        "MetricName": "L2MPKI_All"
+    },
+    {
+        "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads  (including speculative)",
+        "MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+        "MetricGroup": "Mem;CacheMisses",
+        "MetricName": "L2MPKI_Load"
+    },
+    {
+        "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+        "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+        "MetricGroup": "Mem;CacheMisses",
+        "MetricName": "L2HPKI_All"
+    },
+    {
+        "BriefDescription": "L2 cache hits per kilo instruction for all demand loads  (including speculative)",
+        "MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+        "MetricGroup": "Mem;CacheMisses",
+        "MetricName": "L2HPKI_Load"
+    },
+    {
+        "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+        "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+        "MetricGroup": "Mem;CacheMisses",
+        "MetricName": "L3MPKI"
+    },
+    {
+        "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+        "MetricExpr": "1000 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
+        "MetricGroup": "Mem;CacheMisses",
+        "MetricName": "FB_HPKI"
+    },
+    {
+        "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+        "MetricConstraint": "NO_NMI_WATCHDOG",
+        "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING ) / ( 4 * CPU_CLK_UNHALTED.DISTRIBUTED )",
+        "MetricGroup": "Mem;MemoryTLB",
+        "MetricName": "Page_Walks_Utilization"
+    },
+    {
+        "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+        "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+        "MetricGroup": "Mem;MemoryBW",
+        "MetricName": "L1D_Cache_Fill_BW"
+    },
+    {
+        "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+        "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+        "MetricGroup": "Mem;MemoryBW",
+        "MetricName": "L2_Cache_Fill_BW"
+    },
+    {
+        "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+        "MetricGroup": "Mem;MemoryBW",
+        "MetricName": "L3_Cache_Fill_BW"
+    },
+    {
+        "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+        "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+        "MetricGroup": "Mem;MemoryBW;Offcore",
+        "MetricName": "L3_Cache_Access_BW"
+    },
+    {
+        "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
+        "MetricExpr": "1000 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
+        "MetricGroup": "L2Evicts;Mem;Server",
+        "MetricName": "L2_Evictions_Silent_PKI"
+    },
+    {
+        "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
+        "MetricExpr": "1000 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY",
+        "MetricGroup": "L2Evicts;Mem;Server",
+        "MetricName": "L2_Evictions_NonSilent_PKI"
+    },
+    {
+        "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+        "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+        "MetricGroup": "Mem;MemoryBW",
+        "MetricName": "L1D_Cache_Fill_BW_1T"
+    },
+    {
+        "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+        "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+        "MetricGroup": "Mem;MemoryBW",
+        "MetricName": "L2_Cache_Fill_BW_1T"
+    },
+    {
+        "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+        "MetricGroup": "Mem;MemoryBW",
+        "MetricName": "L3_Cache_Fill_BW_1T"
+    },
+    {
+        "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+        "MetricExpr": "(64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time)",
+        "MetricGroup": "Mem;MemoryBW;Offcore",
+        "MetricName": "L3_Cache_Access_BW_1T"
+    },
+    {
+        "BriefDescription": "Average CPU Utilization",
+        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+        "MetricGroup": "HPC;Summary",
+        "MetricName": "CPU_Utilization"
+    },
+    {
+        "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+        "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC) * msr@tsc@ / 1000000000 / duration_time",
+        "MetricGroup": "Summary;Power",
+        "MetricName": "Average_Frequency"
+    },
+    {
+        "BriefDescription": "Giga Floating Point Operations Per Second",
+        "MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + FP_ARITH_INST_RETIRED2.SCALAR_HALF ) + 2 * ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF ) + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * ( FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF + 4 * AMX_OPS_RETIRED.BF16 ) / 1000000000 ) / duration_time",
+        "MetricGroup": "Cor;Flops;HPC",
+        "MetricName": "GFLOPs",
+        "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+    },
+    {
+        "BriefDescription": "Tera Integer (matrix) Operations Per Second",
+        "MetricExpr": "( 8 * AMX_OPS_RETIRED.INT8 /  1000000000000 ) / duration_time",
+        "MetricGroup": "Cor;HPC;IntVector;Server",
+        "MetricName": "TIOPS"
+    },
+    {
+        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+        "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+        "MetricGroup": "Power",
+        "MetricName": "Turbo_Utilization"
+    },
+    {
+        "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+        "MetricExpr": "1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_DISTRIBUTED if #SMT_on else 0",
+        "MetricGroup": "SMT",
+        "MetricName": "SMT_2T_Utilization"
+    },
+    {
+        "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+        "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+        "MetricGroup": "OS",
+        "MetricName": "Kernel_Utilization"
+    },
+    {
+        "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+        "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+        "MetricGroup": "OS",
+        "MetricName": "Kernel_CPI"
+    },
+    {
+        "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+        "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+        "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+        "MetricName": "DRAM_BW_Use"
+    },
+    {
+        "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+        "MetricExpr": "1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD ) / ( uncore_cha_0@event\\=0x1@ / duration_time )",
+        "MetricGroup": "Mem;MemoryLat;SoC",
+        "MetricName": "MEM_Read_Latency"
+    },
+    {
+        "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+        "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / cha@UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD\\,thresh\\=1@",
+        "MetricGroup": "Mem;MemoryBW;SoC",
+        "MetricName": "MEM_Parallel_Reads"
+    },
+    {
+        "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
+        "MetricExpr": "( 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM ) / uncore_cha_0@event\\=0x1@ )",
+        "MetricGroup": "Mem;MemoryLat;SoC;Server",
+        "MetricName": "MEM_PMM_Read_Latency"
+    },
+    {
+        "BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
+        "MetricExpr": " 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR ) / uncore_cha_0@event\\=0x1@",
+        "MetricGroup": "Mem;MemoryLat;SoC;Server",
+        "MetricName": "MEM_DRAM_Read_Latency"
+    },
+    {
+        "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+        "MetricExpr": "( ( 64 * UNC_M_PMM_RPQ_INSERTS / 1000000000 ) / duration_time )",
+        "MetricGroup": "Mem;MemoryBW;SoC;Server",
+        "MetricName": "PMM_Read_BW"
+    },
+    {
+        "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+        "MetricExpr": "( ( 64 * UNC_M_PMM_WPQ_INSERTS / 1000000000 ) / duration_time )",
+        "MetricGroup": "Mem;MemoryBW;SoC;Server",
+        "MetricName": "PMM_Write_BW"
+    },
+    {
+        "BriefDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]",
+        "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR * 64 / 1000000000 / duration_time",
+        "MetricGroup": "IoBW;Mem;SoC;Server",
+        "MetricName": "IO_Write_BW"
+    },
+    {
+        "BriefDescription": "Socket actual clocks when any core is active on that socket",
+        "MetricExpr": "uncore_cha_0@event\\=0x1@",
+        "MetricGroup": "SoC",
+        "MetricName": "Socket_CLKS"
+    },
+    {
+        "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+        "MetricGroup": "Branches;OS",
+        "MetricName": "IpFarBranch"
+    },
+    {
+        "BriefDescription": "C1 residency percent per core",
+        "MetricExpr": "(cstate_core@c1\\-residency@ / msr@tsc@) * 100",
+        "MetricGroup": "Power",
+        "MetricName": "C1_Core_Residency"
+    },
+    {
+        "BriefDescription": "C6 residency percent per core",
+        "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+        "MetricGroup": "Power",
+        "MetricName": "C6_Core_Residency"
+    },
+    {
+        "BriefDescription": "C2 residency percent per package",
+        "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+        "MetricGroup": "Power",
+        "MetricName": "C2_Pkg_Residency"
+    },
+    {
+        "BriefDescription": "C6 residency percent per package",
+        "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+        "MetricGroup": "Power",
+        "MetricName": "C6_Pkg_Residency"
+    }
+]
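
The metrics added above become selectable by MetricName or MetricGroup once perf is rebuilt with this JSON. A minimal usage sketch, assuming perf runs on a CPU that this event file describes (the metric choices and workload name are illustrative only):

    # report one of the new metrics for a single workload
    perf stat -M IpMispredict -- ./my_workload

    # report a whole metric group system-wide for 10 seconds
    perf stat -M MemoryBW -a -- sleep 10
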
diff --git a/tools/perf/tests/shell/test_arm_spe_fork.sh b/tools/perf/tests/shell/test_arm_spe_fork.sh
new file mode 100755 (executable)
index 0000000..c920d35
--- /dev/null
@@ -0,0 +1,92 @@
+#!/bin/sh
+# Check Arm SPE doesn't hang when there are forks
+
+# SPDX-License-Identifier: GPL-2.0
+# German Gomez <german.gomez@arm.com>, 2022
+
+skip_if_no_arm_spe_event() {
+       perf list | egrep -q 'arm_spe_[0-9]+//' && return 0
+       return 2
+}
+
+skip_if_no_arm_spe_event || exit 2
+
+# skip if there's no compiler
+if ! [ -x "$(command -v cc)" ]; then
+       echo "failed: no compiler, install gcc"
+       exit 2
+fi
+
+TEST_PROGRAM_SOURCE=$(mktemp /tmp/__perf_test.program.XXXXX.c)
+TEST_PROGRAM=$(mktemp /tmp/__perf_test.program.XXXXX)
+PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+PERF_RECORD_LOG=$(mktemp /tmp/__perf_test.log.XXXXX)
+
+cleanup_files()
+{
+       echo "Cleaning up files..."
+       rm -f ${PERF_RECORD_LOG}
+       rm -f ${PERF_DATA}
+       rm -f ${TEST_PROGRAM_SOURCE}
+       rm -f ${TEST_PROGRAM}
+}
+
+trap cleanup_files exit term int
+
+# compile test program
+cat << EOF > $TEST_PROGRAM_SOURCE
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+int workload() {
+  while (1)
+    sqrt(rand());
+  return 0;
+}
+
+int main() {
+  switch (fork()) {
+    case 0:
+      return workload();
+    case -1:
+      return 1;
+    default:
+      wait(NULL);
+  }
+  return 0;
+}
+EOF
+
+echo "Compiling test program..."
+CFLAGS="-lm"
+cc $TEST_PROGRAM_SOURCE $CFLAGS -o $TEST_PROGRAM || exit 1
+
+echo "Recording workload..."
+perf record -o ${PERF_DATA} -e arm_spe/period=65536/ -vvv -- $TEST_PROGRAM > ${PERF_RECORD_LOG} 2>&1 &
+PERFPID=$!
+
+# Check if perf hangs by checking the perf-record logs.
+sleep 1
+log0=$(wc -l $PERF_RECORD_LOG)
+echo Log lines = $log0
+sleep 1
+log1=$(wc -l $PERF_RECORD_LOG)
+echo Log lines after 1 second = $log1
+
+kill $PERFPID
+wait $PERFPID
+# test program may leave an orphan process running the workload
+killall $(basename $TEST_PROGRAM)
+
+if [ "$log0" = "$log1" ];
+then
+        echo "SPE hang test: FAIL"
+        exit 1
+else
+        echo "SPE hang test: PASS"
+fi
+
+exit 0
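
The new test above is picked up by perf's shell-test harness. A sketch of how it would typically be run on an Arm SPE capable system, with a perf binary in PATH (the substring used to select the test is an assumption based on the script's description comment):

    # run through the perf test harness, verbose, selecting by description substring
    perf test -v 'SPE'

    # or run the script directly from a perf source tree
    cd tools/perf && sh tests/shell/test_arm_spe_fork.sh
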
index 3a9fd4d..97047a1 100644 (file)
@@ -196,7 +196,9 @@ struct dso {
                u32              status_seen;
                u64              file_size;
                struct list_head open_entry;
+               u64              elf_base_addr;
                u64              debug_frame_offset;
+               u64              eh_frame_hdr_addr;
                u64              eh_frame_hdr_offset;
        } data;
        /* bpf prog information */
index 41e29fc..3762269 100644 (file)
@@ -169,29 +169,63 @@ static int __dw_read_encoded_value(u8 **p, u8 *end, u64 *val,
        __v;                                                    \
        })
 
-static u64 elf_section_offset(int fd, const char *name)
+static int elf_section_address_and_offset(int fd, const char *name, u64 *address, u64 *offset)
 {
        Elf *elf;
        GElf_Ehdr ehdr;
        GElf_Shdr shdr;
-       u64 offset = 0;
+       int ret = -1;
 
        elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
        if (elf == NULL)
+               return -1;
+
+       if (gelf_getehdr(elf, &ehdr) == NULL)
+               goto out_err;
+
+       if (!elf_section_by_name(elf, &ehdr, &shdr, name, NULL))
+               goto out_err;
+
+       *address = shdr.sh_addr;
+       *offset = shdr.sh_offset;
+       ret = 0;
+out_err:
+       elf_end(elf);
+       return ret;
+}
+
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+static u64 elf_section_offset(int fd, const char *name)
+{
+       u64 address, offset;
+
+       if (elf_section_address_and_offset(fd, name, &address, &offset))
                return 0;
 
-       do {
-               if (gelf_getehdr(elf, &ehdr) == NULL)
-                       break;
+       return offset;
+}
+#endif
 
-               if (!elf_section_by_name(elf, &ehdr, &shdr, name, NULL))
-                       break;
+static u64 elf_base_address(int fd)
+{
+       Elf *elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+       GElf_Phdr phdr;
+       u64 retval = 0;
+       size_t i, phdrnum = 0;
 
-               offset = shdr.sh_offset;
-       } while (0);
+       if (elf == NULL)
+               return 0;
+       (void)elf_getphdrnum(elf, &phdrnum);
+       /* PT_LOAD segments are sorted by p_vaddr, so the first has the minimum p_vaddr. */
+       for (i = 0; i < phdrnum; i++) {
+               if (gelf_getphdr(elf, i, &phdr) && phdr.p_type == PT_LOAD) {
+                       retval = phdr.p_vaddr & -getpagesize();
+                       break;
+               }
+       }
 
        elf_end(elf);
-       return offset;
+       return retval;
 }
 
 #ifndef NO_LIBUNWIND_DEBUG_FRAME
@@ -248,8 +282,7 @@ struct eh_frame_hdr {
 } __packed;
 
 static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
-                              u64 offset, u64 *table_data, u64 *segbase,
-                              u64 *fde_count)
+                              u64 offset, u64 *table_data_offset, u64 *fde_count)
 {
        struct eh_frame_hdr hdr;
        u8 *enc = (u8 *) &hdr.enc;
@@ -265,35 +298,47 @@ static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
        dw_read_encoded_value(enc, end, hdr.eh_frame_ptr_enc);
 
        *fde_count  = dw_read_encoded_value(enc, end, hdr.fde_count_enc);
-       *segbase    = offset;
-       *table_data = (enc - (u8 *) &hdr) + offset;
+       *table_data_offset = enc - (u8 *) &hdr;
        return 0;
 }
 
-static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
+static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui,
                                     u64 *table_data, u64 *segbase,
                                     u64 *fde_count)
 {
-       int ret = -EINVAL, fd;
-       u64 offset = dso->data.eh_frame_hdr_offset;
+       struct map *map;
+       u64 base_addr = UINT64_MAX;
+       int ret, fd;
 
-       if (offset == 0) {
-               fd = dso__data_get_fd(dso, machine);
+       if (dso->data.eh_frame_hdr_offset == 0) {
+               fd = dso__data_get_fd(dso, ui->machine);
                if (fd < 0)
                        return -EINVAL;
 
                /* Check the .eh_frame section for unwinding info */
-               offset = elf_section_offset(fd, ".eh_frame_hdr");
-               dso->data.eh_frame_hdr_offset = offset;
+               ret = elf_section_address_and_offset(fd, ".eh_frame_hdr",
+                                                    &dso->data.eh_frame_hdr_addr,
+                                                    &dso->data.eh_frame_hdr_offset);
+               dso->data.elf_base_addr = elf_base_address(fd);
                dso__data_put_fd(dso);
+               if (ret || dso->data.eh_frame_hdr_offset == 0)
+                       return -EINVAL;
        }
 
-       if (offset)
-               ret = unwind_spec_ehframe(dso, machine, offset,
-                                         table_data, segbase,
-                                         fde_count);
-
-       return ret;
+       maps__for_each_entry(ui->thread->maps, map) {
+               if (map->dso == dso && map->start < base_addr)
+                       base_addr = map->start;
+       }
+       base_addr -= dso->data.elf_base_addr;
+       /* Address of .eh_frame_hdr */
+       *segbase = base_addr + dso->data.eh_frame_hdr_addr;
+       ret = unwind_spec_ehframe(dso, ui->machine, dso->data.eh_frame_hdr_offset,
+                                  table_data, fde_count);
+       if (ret)
+               return ret;
+       /* binary_search_table offset plus .eh_frame_hdr address */
+       *table_data += *segbase;
+       return 0;
 }
 
 #ifndef NO_LIBUNWIND_DEBUG_FRAME
@@ -388,14 +433,14 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
        pr_debug("unwind: find_proc_info dso %s\n", map->dso->name);
 
        /* Check the .eh_frame section for unwinding info */
-       if (!read_unwind_spec_eh_frame(map->dso, ui->machine,
+       if (!read_unwind_spec_eh_frame(map->dso, ui,
                                       &table_data, &segbase, &fde_count)) {
                memset(&di, 0, sizeof(di));
                di.format   = UNW_INFO_FORMAT_REMOTE_TABLE;
                di.start_ip = map->start;
                di.end_ip   = map->end;
-               di.u.rti.segbase    = map->start + segbase - map->pgoff;
-               di.u.rti.table_data = map->start + table_data - map->pgoff;
+               di.u.rti.segbase    = segbase;
+               di.u.rti.table_data = table_data;
                di.u.rti.table_len  = fde_count * sizeof(struct table_entry)
                                      / sizeof(unw_word_t);
                ret = dwarf_search_unwind_table(as, ip, &di, pi,
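
The segbase calculation introduced above is: the lowest map->start among the thread's maps for this DSO, minus the page-aligned p_vaddr of the first PT_LOAD segment (elf_base_address()), plus the sh_addr of .eh_frame_hdr. The ELF-side inputs can be checked with readelf; a sketch, with the binary name purely illustrative:

    # p_vaddr of the first PT_LOAD segment (the link-time base address)
    readelf -lW ./my_binary | grep -m1 LOAD

    # sh_addr and sh_offset of .eh_frame_hdr
    readelf -SW ./my_binary | grep eh_frame_hdr
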
index c25b2fd..cd1a30d 100644 (file)
@@ -23,6 +23,3 @@ TODO
 
 5. Add tests for memblock_alloc_node() to check if the correct NUMA node is set
    for the new region
-
-6. Update comments in tests/basic_api.c to match the style used in
-   tests/alloc_*.c
index fbc1ce1..a7bc180 100644 (file)
@@ -26,8 +26,8 @@ static int memblock_initialization_check(void)
 /*
  * A simple test that adds a memory block of a specified base address
  * and size to the collection of available memory regions (memblock.memory).
- * It checks if a new entry was created and if region counter and total memory
- * were correctly updated.
+ * Expect to create a new entry. The region counter and total memory get
+ * updated.
  */
 static int memblock_add_simple_check(void)
 {
@@ -53,10 +53,10 @@ static int memblock_add_simple_check(void)
 }
 
 /*
- * A simple test that adds a memory block of a specified base address, size
+ * A simple test that adds a memory block of a specified base address, size,
  * NUMA node and memory flags to the collection of available memory regions.
- * It checks if the new entry, region counter and total memory size have
- * expected values.
+ * Expect to create a new entry. The region counter and total memory get
+ * updated.
  */
 static int memblock_add_node_simple_check(void)
 {
@@ -87,9 +87,15 @@ static int memblock_add_node_simple_check(void)
 
 /*
  * A test that tries to add two memory blocks that don't overlap with one
- * another. It checks if two correctly initialized entries were added to the
- * collection of available memory regions (memblock.memory) and if this
- * change was reflected in memblock.memory's total size and region counter.
+ * another:
+ *
+ *  |        +--------+        +--------+  |
+ *  |        |   r1   |        |   r2   |  |
+ *  +--------+--------+--------+--------+--+
+ *
+ * Expect to add two correctly initialized entries to the collection of
+ * available memory regions (memblock.memory). The total size and
+ * region counter fields get updated.
  */
 static int memblock_add_disjoint_check(void)
 {
@@ -124,11 +130,21 @@ static int memblock_add_disjoint_check(void)
 }
 
 /*
- * A test that tries to add two memory blocks, where the second one overlaps
- * with the beginning of the first entry (that is r1.base < r2.base + r2.size).
- * After this, it checks if two entries are merged into one region that starts
- * at r2.base and has size of two regions minus their intersection. It also
- * verifies the reported total size of the available memory and region counter.
+ * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
+ * with the beginning of r1 (that is r1.base < r2.base + r2.size):
+ *
+ *  |    +----+----+------------+          |
+ *  |    |    |r2  |   r1       |          |
+ *  +----+----+----+------------+----------+
+ *       ^    ^
+ *       |    |
+ *       |    r1.base
+ *       |
+ *       r2.base
+ *
+ * Expect to merge the two entries into one region that starts at r2.base
+ * and has size of two regions minus their intersection. The total size of
+ * the available memory is updated, and the region counter stays the same.
  */
 static int memblock_add_overlap_top_check(void)
 {
@@ -162,12 +178,21 @@ static int memblock_add_overlap_top_check(void)
 }
 
 /*
- * A test that tries to add two memory blocks, where the second one overlaps
- * with the end of the first entry (that is r2.base < r1.base + r1.size).
- * After this, it checks if two entries are merged into one region that starts
- * at r1.base and has size of two regions minus their intersection. It verifies
- * that memblock can still see only one entry and has a correct total size of
- * the available memory.
+ * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
+ * with the end of r1 (that is r2.base < r1.base + r1.size):
+ *
+ *  |  +--+------+----------+              |
+ *  |  |  | r1   | r2       |              |
+ *  +--+--+------+----------+--------------+
+ *     ^  ^
+ *     |  |
+ *     |  r2.base
+ *     |
+ *     r1.base
+ *
+ * Expect to merge the two entries into one region that starts at r1.base
+ * and has size of two regions minus their intersection. The total size of
+ * the available memory is updated, and the region counter stays the same.
  */
 static int memblock_add_overlap_bottom_check(void)
 {
@@ -201,11 +226,19 @@ static int memblock_add_overlap_bottom_check(void)
 }
 
 /*
- * A test that tries to add two memory blocks, where the second one is
- * within the range of the first entry (that is r1.base < r2.base &&
- * r2.base + r2.size < r1.base + r1.size). It checks if two entries are merged
- * into one region that stays the same. The counter and total size of available
- * memory are expected to not be updated.
+ * A test that tries to add two memory blocks r1 and r2, where r2 is
+ * within the range of r1 (that is r1.base < r2.base &&
+ * r2.base + r2.size < r1.base + r1.size):
+ *
+ *  |   +-------+--+-----------------------+
+ *  |   |       |r2|      r1               |
+ *  +---+-------+--+-----------------------+
+ *      ^
+ *      |
+ *      r1.base
+ *
+ * Expect to merge two entries into one region that stays the same.
+ * The counter and total size of available memory are not updated.
  */
 static int memblock_add_within_check(void)
 {
@@ -236,8 +269,8 @@ static int memblock_add_within_check(void)
 }
 
 /*
- * A simple test that tries to add the same memory block twice. The counter
- * and total size of available memory are expected to not be updated.
+ * A simple test that tries to add the same memory block twice. Expect
+ * the counter and total size of available memory to not be updated.
  */
 static int memblock_add_twice_check(void)
 {
@@ -270,12 +303,12 @@ static int memblock_add_checks(void)
        return 0;
 }
 
- /*
-  * A simple test that marks a memory block of a specified base address
-  * and size as reserved and to the collection of reserved memory regions
-  * (memblock.reserved). It checks if a new entry was created and if region
-  * counter and total memory size were correctly updated.
-  */
+/*
+ * A simple test that marks a memory block of a specified base address
+ * and size as reserved and to the collection of reserved memory regions
+ * (memblock.reserved). Expect to create a new entry. The region counter
+ * and total memory size are updated.
+ */
 static int memblock_reserve_simple_check(void)
 {
        struct memblock_region *rgn;
@@ -297,10 +330,15 @@ static int memblock_reserve_simple_check(void)
 }
 
 /*
- * A test that tries to mark two memory blocks that don't overlap as reserved
- * and checks if two entries were correctly added to the collection of reserved
- * memory regions (memblock.reserved) and if this change was reflected in
- * memblock.reserved's total size and region counter.
+ * A test that tries to mark two memory blocks that don't overlap as reserved:
+ *
+ *  |        +--+      +----------------+  |
+ *  |        |r1|      |       r2       |  |
+ *  +--------+--+------+----------------+--+
+ *
+ * Expect to add two entries to the collection of reserved memory regions
+ * (memblock.reserved). The total size and region counter for
+ * memblock.reserved are updated.
  */
 static int memblock_reserve_disjoint_check(void)
 {
@@ -335,13 +373,22 @@ static int memblock_reserve_disjoint_check(void)
 }
 
 /*
- * A test that tries to mark two memory blocks as reserved, where the
- * second one overlaps with the beginning of the first (that is
- * r1.base < r2.base + r2.size).
- * It checks if two entries are merged into one region that starts at r2.base
- * and has size of two regions minus their intersection. The test also verifies
- * that memblock can still see only one entry and has a correct total size of
- * the reserved memory.
+ * A test that tries to mark two memory blocks r1 and r2 as reserved,
+ * where r2 overlaps with the beginning of r1 (that is
+ * r1.base < r2.base + r2.size):
+ *
+ *  |  +--------------+--+--------------+  |
+ *  |  |       r2     |  |     r1       |  |
+ *  +--+--------------+--+--------------+--+
+ *     ^              ^
+ *     |              |
+ *     |              r1.base
+ *     |
+ *     r2.base
+ *
+ * Expect to merge two entries into one region that starts at r2.base and
+ * has size of two regions minus their intersection. The total size of the
+ * reserved memory is updated, and the region counter is not updated.
  */
 static int memblock_reserve_overlap_top_check(void)
 {
@@ -375,13 +422,22 @@ static int memblock_reserve_overlap_top_check(void)
 }
 
 /*
- * A test that tries to mark two memory blocks as reserved, where the
- * second one overlaps with the end of the first entry (that is
- * r2.base < r1.base + r1.size).
- * It checks if two entries are merged into one region that starts at r1.base
- * and has size of two regions minus their intersection. It verifies that
- * memblock can still see only one entry and has a correct total size of the
- * reserved memory.
+ * A test that tries to mark two memory blocks r1 and r2 as reserved,
+ * where r2 overlaps with the end of r1 (that is
+ * r2.base < r1.base + r1.size):
+ *
+ *  |  +--------------+--+--------------+  |
+ *  |  |       r1     |  |     r2       |  |
+ *  +--+--------------+--+--------------+--+
+ *     ^              ^
+ *     |              |
+ *     |              r2.base
+ *     |
+ *     r1.base
+ *
+ * Expect to merge two entries into one region that starts at r1.base and
+ * has size of two regions minus their intersection. The total size of the
+ * reserved memory is updated, and the region counter is not updated.
  */
 static int memblock_reserve_overlap_bottom_check(void)
 {
@@ -415,12 +471,21 @@ static int memblock_reserve_overlap_bottom_check(void)
 }
 
 /*
- * A test that tries to mark two memory blocks as reserved, where the second
- * one is within the range of the first entry (that is
- * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)).
- * It checks if two entries are merged into one region that stays the
- * same. The counter and total size of available memory are expected to not be
- * updated.
+ * A test that tries to mark two memory blocks r1 and r2 as reserved,
+ * where r2 is within the range of r1 (that is
+ * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
+ *
+ *  | +-----+--+---------------------------|
+ *  | |     |r2|          r1               |
+ *  +-+-----+--+---------------------------+
+ *    ^     ^
+ *    |     |
+ *    |     r2.base
+ *    |
+ *    r1.base
+ *
+ * Expect to merge two entries into one region that stays the same. The
+ * counter and total size of available memory are not updated.
  */
 static int memblock_reserve_within_check(void)
 {
@@ -452,7 +517,7 @@ static int memblock_reserve_within_check(void)
 
 /*
  * A simple test that tries to reserve the same memory block twice.
- * The region counter and total size of reserved memory are expected to not
+ * Expect the region counter and total size of reserved memory to not
  * be updated.
  */
 static int memblock_reserve_twice_check(void)
@@ -485,14 +550,22 @@ static int memblock_reserve_checks(void)
        return 0;
 }
 
- /*
-  * A simple test that tries to remove the first entry of the array of
-  * available memory regions. By "removing" a region we mean overwriting it
-  * with the next region in memblock.memory. To check this is the case, the
-  * test adds two memory blocks and verifies that the value of the latter
-  * was used to erase r1 region.  It also checks if the region counter and
-  * total size were updated to expected values.
-  */
+/*
+ * A simple test that tries to remove a region r1 from the array of
+ * available memory regions. By "removing" a region we mean overwriting it
+ * with the next region r2 in memblock.memory:
+ *
+ *  |  ......          +----------------+  |
+ *  |  : r1 :          |       r2       |  |
+ *  +--+----+----------+----------------+--+
+ *                     ^
+ *                     |
+ *                     rgn.base
+ *
+ * Expect to add two memory blocks r1 and r2 and then remove r1 so that
+ * r2 is the first available region. The region counter and total size
+ * are updated.
+ */
 static int memblock_remove_simple_check(void)
 {
        struct memblock_region *rgn;
@@ -522,11 +595,22 @@ static int memblock_remove_simple_check(void)
        return 0;
 }
 
- /*
-  * A test that tries to remove a region that was not registered as available
-  * memory (i.e. has no corresponding entry in memblock.memory). It verifies
-  * that array, regions counter and total size were not modified.
-  */
+/*
+ * A test that tries to remove a region r2 that was not registered as
+ * available memory (i.e. has no corresponding entry in memblock.memory):
+ *
+ *                     +----------------+
+ *                     |       r2       |
+ *                     +----------------+
+ *  |  +----+                              |
+ *  |  | r1 |                              |
+ *  +--+----+------------------------------+
+ *     ^
+ *     |
+ *     rgn.base
+ *
+ * Expect the array, regions counter and total size to not be modified.
+ */
 static int memblock_remove_absent_check(void)
 {
        struct memblock_region *rgn;
@@ -556,11 +640,23 @@ static int memblock_remove_absent_check(void)
 }
 
 /*
- * A test that tries to remove a region which overlaps with the beginning of
- * the already existing entry r1 (that is r1.base < r2.base + r2.size). It
- * checks if only the intersection of both regions is removed from the available
- * memory pool. The test also checks if the regions counter and total size are
- * updated to expected values.
+ * A test that tries to remove a region r2 that overlaps with the
+ * beginning of the already existing entry r1
+ * (that is r1.base < r2.base + r2.size):
+ *
+ *           +-----------------+
+ *           |       r2        |
+ *           +-----------------+
+ *  |                 .........+--------+  |
+ *  |                 :     r1 |  rgn   |  |
+ *  +-----------------+--------+--------+--+
+ *                    ^        ^
+ *                    |        |
+ *                    |        rgn.base
+ *                    r1.base
+ *
+ * Expect that only the intersection of both regions is removed from the
+ * available memory pool. The regions counter and total size are updated.
  */
 static int memblock_remove_overlap_top_check(void)
 {
@@ -596,11 +692,21 @@ static int memblock_remove_overlap_top_check(void)
 }
 
 /*
- * A test that tries to remove a region which overlaps with the end of the
- * first entry (that is r2.base < r1.base + r1.size). It checks if only the
- * intersection of both regions is removed from the available memory pool.
- * The test also checks if the regions counter and total size are updated to
- * expected values.
+ * A test that tries to remove a region r2 that overlaps with the end of
+ * the already existing region r1 (that is r2.base < r1.base + r1.size):
+ *
+ *        +--------------------------------+
+ *        |               r2               |
+ *        +--------------------------------+
+ *  | +---+.....                           |
+ *  | |rgn| r1 :                           |
+ *  +-+---+----+---------------------------+
+ *    ^
+ *    |
+ *    r1.base
+ *
+ * Expect that only the intersection of both regions is removed from the
+ * available memory pool. The regions counter and total size are updated.
  */
 static int memblock_remove_overlap_bottom_check(void)
 {
@@ -633,13 +739,23 @@ static int memblock_remove_overlap_bottom_check(void)
 }
 
 /*
- * A test that tries to remove a region which is within the range of the
- * already existing entry (that is
- * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)).
- * It checks if the region is split into two - one that ends at r2.base and
- * second that starts at r2.base + size, with appropriate sizes. The test
- * also checks if the region counter and total size were updated to
- * expected values.
+ * A test that tries to remove a region r2 that is within the range of
+ * the already existing entry r1 (that is
+ * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
+ *
+ *                  +----+
+ *                  | r2 |
+ *                  +----+
+ *  | +-------------+....+---------------+ |
+ *  | |     rgn1    | r1 |     rgn2      | |
+ *  +-+-------------+----+---------------+-+
+ *    ^
+ *    |
+ *    r1.base
+ *
+ * Expect that the region is split into two - one that ends at r2.base and
+ * another that starts at r2.base + r2.size, with appropriate sizes. The
+ * region counter and total size are updated.
  */
 static int memblock_remove_within_check(void)
 {
@@ -690,12 +806,19 @@ static int memblock_remove_checks(void)
 }
 
 /*
- * A simple test that tries to free a memory block that was marked earlier
- * as reserved. By "freeing" a region we mean overwriting it with the next
- * entry in memblock.reserved. To check this is the case, the test reserves
- * two memory regions and verifies that the value of the latter was used to
- * erase r1 region.
- * The test also checks if the region counter and total size were updated.
+ * A simple test that tries to free a memory block r1 that was marked
+ * earlier as reserved. By "freeing" a region we mean overwriting it with
+ * the next entry r2 in memblock.reserved:
+ *
+ *  |              ......           +----+ |
+ *  |              : r1 :           | r2 | |
+ *  +--------------+----+-----------+----+-+
+ *                                  ^
+ *                                  |
+ *                                  rgn.base
+ *
+ * Expect to reserve two memory regions and then erase r1 region with the
+ * value of r2. The region counter and total size are updated.
  */
 static int memblock_free_simple_check(void)
 {
@@ -726,11 +849,22 @@ static int memblock_free_simple_check(void)
        return 0;
 }
 
- /*
-  * A test that tries to free a region that was not marked as reserved
-  * (i.e. has no corresponding entry in memblock.reserved). It verifies
-  * that array, regions counter and total size were not modified.
-  */
+/*
+ * A test that tries to free a region r2 that was not marked as reserved
+ * (i.e. has no corresponding entry in memblock.reserved):
+ *
+ *                     +----------------+
+ *                     |       r2       |
+ *                     +----------------+
+ *  |  +----+                              |
+ *  |  | r1 |                              |
+ *  +--+----+------------------------------+
+ *     ^
+ *     |
+ *     rgn.base
+ *
+ * The array, regions counter and total size are not modified.
+ */
 static int memblock_free_absent_check(void)
 {
        struct memblock_region *rgn;
@@ -760,11 +894,23 @@ static int memblock_free_absent_check(void)
 }
 
 /*
- * A test that tries to free a region which overlaps with the beginning of
- * the already existing entry r1 (that is r1.base < r2.base + r2.size). It
- * checks if only the intersection of both regions is freed. The test also
- * checks if the regions counter and total size are updated to expected
- * values.
+ * A test that tries to free a region r2 that overlaps with the beginning
+ * of the already existing entry r1 (that is r1.base < r2.base + r2.size):
+ *
+ *     +----+
+ *     | r2 |
+ *     +----+
+ *  |    ...+--------------+               |
+ *  |    :  |    r1        |               |
+ *  +----+--+--------------+---------------+
+ *       ^  ^
+ *       |  |
+ *       |  rgn.base
+ *       |
+ *       r1.base
+ *
+ * Expect that only the intersection of both regions is freed. The
+ * regions counter and total size are updated.
  */
 static int memblock_free_overlap_top_check(void)
 {
@@ -798,10 +944,18 @@ static int memblock_free_overlap_top_check(void)
 }
 
 /*
- * A test that tries to free a region which overlaps with the end of the
- * first entry (that is r2.base < r1.base + r1.size). It checks if only the
- * intersection of both regions is freed. The test also checks if the
- * regions counter and total size are updated to expected values.
+ * A test that tries to free a region r2 that overlaps with the end of
+ * the already existing entry r1 (that is r2.base < r1.base + r1.size):
+ *
+ *                   +----------------+
+ *                   |       r2       |
+ *                   +----------------+
+ *  |    +-----------+.....                |
+ *  |    |       r1  |    :                |
+ *  +----+-----------+----+----------------+
+ *
+ * Expect that only the intersection of both regions is freed. The
+ * regions counter and total size are updated.
  */
 static int memblock_free_overlap_bottom_check(void)
 {
@@ -835,13 +989,23 @@ static int memblock_free_overlap_bottom_check(void)
 }
 
 /*
- * A test that tries to free a region which is within the range of the
- * already existing entry (that is
- * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)).
- * It checks if the region is split into two - one that ends at r2.base and
- * second that starts at r2.base + size, with appropriate sizes. It is
- * expected that the region counter and total size fields were updated t
- * reflect that change.
+ * A test that tries to free a region r2 that is within the range of the
+ * already existing entry r1 (that is
+ * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
+ *
+ *                    +----+
+ *                    | r2 |
+ *                    +----+
+ *  |    +------------+....+---------------+
+ *  |    |    rgn1    | r1 |     rgn2      |
+ *  +----+------------+----+---------------+
+ *       ^
+ *       |
+ *       r1.base
+ *
+ * Expect that the region is split into two - one that ends at r2.base and
+ * another that starts at r2.base + r2.size, with appropriate sizes. The
+ * region counter and total size fields are updated.
  */
 static int memblock_free_within_check(void)
 {
index f64d909..fd8ddce 100644 (file)
@@ -3,6 +3,9 @@
 
 CFLAGS += $(shell pkg-config --cflags alsa)
 LDLIBS += $(shell pkg-config --libs alsa)
+ifeq ($(LDLIBS),)
+LDLIBS += -lasound
+endif
 
 TEST_GEN_PROGS := mixer-test
 
index bb50b5a..9158213 100644 (file)
@@ -6,6 +6,7 @@
  * supported and is expected to segfault.
  */
 
+#include <kselftest.h>
 #include <signal.h>
 #include <ucontext.h>
 #include <sys/prctl.h>
@@ -40,6 +41,7 @@ static bool sve_get_vls(struct tdescr *td)
        /* We need at least two VLs */
        if (nvls < 2) {
                fprintf(stderr, "Only %d VL supported\n", nvls);
+               td->result = KSFT_SKIP;
                return false;
        }
 
index 6c62bfb..0c44265 100644 (file)
@@ -39,7 +39,7 @@ struct {
        __type(value, stack_trace_t);
 } stack_amap SEC(".maps");
 
-SEC("kprobe/urandom_read")
+SEC("kprobe/urandom_read_iter")
 int oncpu(struct pt_regs *args)
 {
        __u32 max_len = sizeof(struct bpf_stack_build_id)
index bc1c407..5f362c0 100644 (file)
@@ -64,6 +64,7 @@ static int __do_binderfs_test(struct __test_metadata *_metadata)
                device_path[sizeof(P_tmpdir "/binderfs_XXXXXX/") + BINDERFS_MAX_NAME];
        static const char * const binder_features[] = {
                "oneway_spam_detection",
+               "extended_error",
        };
 
        change_mountns(_metadata);
index 40211cd..7992969 100644 (file)
@@ -4,7 +4,7 @@ CFLAGS = -Wall \
          -O2
 
 TEST_PROGS := fw_run_tests.sh
-TEST_FILES := fw_fallback.sh fw_filesystem.sh fw_lib.sh
+TEST_FILES := fw_fallback.sh fw_filesystem.sh fw_upload.sh fw_lib.sh
 TEST_GEN_FILES := fw_namespace
 
 include ../lib.mk
index bf634dd..6e40251 100644 (file)
@@ -3,3 +3,4 @@ CONFIG_FW_LOADER=y
 CONFIG_FW_LOADER_USER_HELPER=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
+CONFIG_FW_UPLOAD=y
index c2a2a10..1a99aea 100755 (executable)
@@ -11,6 +11,9 @@ TEST_REQS_FW_SET_CUSTOM_PATH="yes"
 TEST_DIR=$(dirname $0)
 source $TEST_DIR/fw_lib.sh
 
+RUN_XZ="xz -C crc32 --lzma2=dict=2MiB"
+RUN_ZSTD="zstd -q"
+
 check_mods
 check_setup
 verify_reqs
@@ -211,7 +214,7 @@ read_firmwares()
        else
                fwfile="$FW"
        fi
-       if [ "$1" = "xzonly" ]; then
+       if [ "$1" = "componly" ]; then
                fwfile="${fwfile}-orig"
        fi
        for i in $(seq 0 3); do
@@ -235,7 +238,7 @@ read_partial_firmwares()
                fwfile="${FW}"
        fi
 
-       if [ "$1" = "xzonly" ]; then
+       if [ "$1" = "componly" ]; then
                fwfile="${fwfile}-orig"
        fi
 
@@ -409,10 +412,8 @@ test_request_firmware_nowait_custom()
        config_unset_uevent
        RANDOM_FILE_PATH=$(setup_random_file)
        RANDOM_FILE="$(basename $RANDOM_FILE_PATH)"
-       if [ "$2" = "both" ]; then
-               xz -9 -C crc32 -k $RANDOM_FILE_PATH
-       elif [ "$2" = "xzonly" ]; then
-               xz -9 -C crc32 $RANDOM_FILE_PATH
+       if [ -n "$2" -a "$2" != "normal" ]; then
+               compress_"$2"_"$COMPRESS_FORMAT" $RANDOM_FILE_PATH
        fi
        config_set_name $RANDOM_FILE
        config_trigger_async
@@ -435,6 +436,32 @@ test_request_partial_firmware_into_buf()
        echo "OK"
 }
 
+do_tests ()
+{
+       mode="$1"
+       suffix="$2"
+
+       for i in $(seq 1 5); do
+               test_batched_request_firmware$suffix $i $mode
+       done
+
+       for i in $(seq 1 5); do
+               test_batched_request_firmware_into_buf$suffix $i $mode
+       done
+
+       for i in $(seq 1 5); do
+               test_batched_request_firmware_direct$suffix $i $mode
+       done
+
+       for i in $(seq 1 5); do
+               test_request_firmware_nowait_uevent$suffix $i $mode
+       done
+
+       for i in $(seq 1 5); do
+               test_request_firmware_nowait_custom$suffix $i $mode
+       done
+}
+
 # Only continue if batched request triggers are present on the
 # test-firmware driver
 test_config_present
@@ -442,25 +469,7 @@ test_config_present
 # test with the file present
 echo
 echo "Testing with the file present..."
-for i in $(seq 1 5); do
-       test_batched_request_firmware $i normal
-done
-
-for i in $(seq 1 5); do
-       test_batched_request_firmware_into_buf $i normal
-done
-
-for i in $(seq 1 5); do
-       test_batched_request_firmware_direct $i normal
-done
-
-for i in $(seq 1 5); do
-       test_request_firmware_nowait_uevent $i normal
-done
-
-for i in $(seq 1 5); do
-       test_request_firmware_nowait_custom $i normal
-done
+do_tests normal
 
 # Partial loads cannot use fallback, so do not repeat tests.
 test_request_partial_firmware_into_buf 0 10
@@ -472,25 +481,7 @@ test_request_partial_firmware_into_buf 2 10
 # a hung task, which would require a hard reset.
 echo
 echo "Testing with the file missing..."
-for i in $(seq 1 5); do
-       test_batched_request_firmware_nofile $i
-done
-
-for i in $(seq 1 5); do
-       test_batched_request_firmware_into_buf_nofile $i
-done
-
-for i in $(seq 1 5); do
-       test_batched_request_firmware_direct_nofile $i
-done
-
-for i in $(seq 1 5); do
-       test_request_firmware_nowait_uevent_nofile $i
-done
-
-for i in $(seq 1 5); do
-       test_request_firmware_nowait_custom_nofile $i
-done
+do_tests nofile _nofile
 
 # Partial loads cannot use fallback, so do not repeat tests.
 test_request_partial_firmware_into_buf_nofile 0 10
@@ -498,55 +489,58 @@ test_request_partial_firmware_into_buf_nofile 0 5
 test_request_partial_firmware_into_buf_nofile 1 6
 test_request_partial_firmware_into_buf_nofile 2 10
 
-test "$HAS_FW_LOADER_COMPRESS" != "yes" && exit 0
+test_request_firmware_compressed ()
+{
+       export COMPRESS_FORMAT="$1"
 
-# test with both files present
-xz -9 -C crc32 -k $FW
-config_set_name $NAME
-echo
-echo "Testing with both plain and xz files present..."
-for i in $(seq 1 5); do
-       test_batched_request_firmware $i both
-done
+       # test with both files present
+       compress_both_"$COMPRESS_FORMAT" $FW
+       compress_both_"$COMPRESS_FORMAT" $FW_INTO_BUF
 
-for i in $(seq 1 5); do
-       test_batched_request_firmware_into_buf $i both
-done
+       config_set_name $NAME
+       echo
+       echo "Testing with both plain and $COMPRESS_FORMAT files present..."
+       do_tests both
 
-for i in $(seq 1 5); do
-       test_batched_request_firmware_direct $i both
-done
+       # test with only compressed file present
+       mv "$FW" "${FW}-orig"
+       mv "$FW_INTO_BUF" "${FW_INTO_BUF}-orig"
 
-for i in $(seq 1 5); do
-       test_request_firmware_nowait_uevent $i both
-done
+       config_set_name $NAME
+       echo
+       echo "Testing with only $COMPRESS_FORMAT file present..."
+       do_tests componly
 
-for i in $(seq 1 5); do
-       test_request_firmware_nowait_custom $i both
-done
+       mv "${FW}-orig" "$FW"
+       mv "${FW_INTO_BUF}-orig" "$FW_INTO_BUF"
+}
 
-# test with only xz file present
-mv "$FW" "${FW}-orig"
-echo
-echo "Testing with only xz file present..."
-for i in $(seq 1 5); do
-       test_batched_request_firmware $i xzonly
-done
-
-for i in $(seq 1 5); do
-       test_batched_request_firmware_into_buf $i xzonly
-done
-
-for i in $(seq 1 5); do
-       test_batched_request_firmware_direct $i xzonly
-done
-
-for i in $(seq 1 5); do
-       test_request_firmware_nowait_uevent $i xzonly
-done
-
-for i in $(seq 1 5); do
-       test_request_firmware_nowait_custom $i xzonly
-done
+compress_both_XZ ()
+{
+       $RUN_XZ -k "$@"
+}
+
+compress_componly_XZ ()
+{
+       $RUN_XZ "$@"
+}
+
+compress_both_ZSTD ()
+{
+       $RUN_ZSTD -k "$@"
+}
+
+compress_componly_ZSTD ()
+{
+       $RUN_ZSTD --rm "$@"
+}
+
+if test "$HAS_FW_LOADER_COMPRESS_XZ" = "yes"; then
+       test_request_firmware_compressed XZ
+fi
+
+if test "$HAS_FW_LOADER_COMPRESS_ZSTD" = "yes"; then
+       test_request_firmware_compressed ZSTD
+fi
 
 exit 0
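
The refactor above makes the compression coverage format-agnostic: the same do_tests() flow now runs once for XZ and once for ZSTD, using the RUN_XZ and RUN_ZSTD commands defined at the top of the script. For reference, producing the same compressed firmware files by hand might look like this (the firmware path is illustrative, and the kernel only looks for the .xz/.zst variants when the matching CONFIG_FW_LOADER_COMPRESS_* option is enabled):

    FW=/lib/firmware/example-fw.bin         # hypothetical firmware blob
    xz -C crc32 --lzma2=dict=2MiB -k "$FW"  # creates example-fw.bin.xz, keeps the original
    zstd -q -k "$FW"                        # creates example-fw.bin.zst, keeps the original
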
index 5b8c0fe..7bffd67 100755 (executable)
@@ -62,7 +62,9 @@ check_setup()
 {
        HAS_FW_LOADER_USER_HELPER="$(kconfig_has CONFIG_FW_LOADER_USER_HELPER=y)"
        HAS_FW_LOADER_USER_HELPER_FALLBACK="$(kconfig_has CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y)"
-       HAS_FW_LOADER_COMPRESS="$(kconfig_has CONFIG_FW_LOADER_COMPRESS=y)"
+       HAS_FW_LOADER_COMPRESS_XZ="$(kconfig_has CONFIG_FW_LOADER_COMPRESS_XZ=y)"
+       HAS_FW_LOADER_COMPRESS_ZSTD="$(kconfig_has CONFIG_FW_LOADER_COMPRESS_ZSTD=y)"
+       HAS_FW_UPLOAD="$(kconfig_has CONFIG_FW_UPLOAD=y)"
        PROC_FW_IGNORE_SYSFS_FALLBACK="0"
        PROC_FW_FORCE_SYSFS_FALLBACK="0"
 
@@ -98,9 +100,14 @@ check_setup()
 
        OLD_FWPATH="$(cat /sys/module/firmware_class/parameters/path)"
 
-       if [ "$HAS_FW_LOADER_COMPRESS" = "yes" ]; then
+       if [ "$HAS_FW_LOADER_COMPRESS_XZ" = "yes" ]; then
                if ! which xz 2> /dev/null > /dev/null; then
-                       HAS_FW_LOADER_COMPRESS=""
+                       HAS_FW_LOADER_COMPRESS_XZ=""
+               fi
+       fi
+       if [ "$HAS_FW_LOADER_COMPRESS_ZSTD" = "yes" ]; then
+               if ! which zstd 2> /dev/null > /dev/null; then
+                       HAS_FW_LOADER_COMPRESS_ZSTD=""
                fi
        fi
 }
@@ -113,6 +120,12 @@ verify_reqs()
                        exit 0
                fi
        fi
+       if [ "$TEST_REQS_FW_UPLOAD" = "yes" ]; then
+               if [ ! "$HAS_FW_UPLOAD" = "yes" ]; then
+                       echo "firmware upload disabled so ignoring test"
+                       exit 0
+               fi
+       fi
 }
 
 setup_tmp_file()
index 7773770..f6d95a2 100755 (executable)
@@ -22,6 +22,10 @@ run_tests()
        proc_set_force_sysfs_fallback $1
        proc_set_ignore_sysfs_fallback $2
        $TEST_DIR/fw_fallback.sh
+
+       proc_set_force_sysfs_fallback $1
+       proc_set_ignore_sysfs_fallback $2
+       $TEST_DIR/fw_upload.sh
 }
 
 run_test_config_0001()
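
With fw_upload.sh wired into run_tests() above, the whole firmware group can be exercised through the usual kselftest entry points; a sketch, assuming a kernel built with the options from the updated config file (including CONFIG_FW_UPLOAD=y):

    # from the top of a kernel source tree
    make -C tools/testing/selftests TARGETS=firmware run_tests

    # or invoke the wrapper directly
    cd tools/testing/selftests/firmware && ./fw_run_tests.sh
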
diff --git a/tools/testing/selftests/firmware/fw_upload.sh b/tools/testing/selftests/firmware/fw_upload.sh
new file mode 100755 (executable)
index 0000000..c7a6f06
--- /dev/null
@@ -0,0 +1,214 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# This validates the user-initiated fw upload mechanism of the firmware
+# loader. It verifies that one or more firmware devices can be created
+# for a device driver. It also verifies the data transfer, the
+# cancellation support, and the error flows.
+set -e
+
+TEST_REQS_FW_UPLOAD="yes"
+TEST_DIR=$(dirname $0)
+
+progress_states="preparing transferring  programming"
+errors="hw-error
+       timeout
+       device-busy
+       invalid-file-size
+       read-write-error
+       flash-wearout"
+error_abort="user-abort"
+fwname1=fw1
+fwname2=fw2
+fwname3=fw3
+
+source $TEST_DIR/fw_lib.sh
+
+check_mods
+check_setup
+verify_reqs
+
+trap "upload_finish" EXIT
+
+upload_finish() {
+       local fwdevs="$fwname1 $fwname2 $fwname3"
+
+       for name in $fwdevs; do
+               if [ -e "$DIR/$name" ]; then
+                       echo -n "$name" > "$DIR"/upload_unregister
+               fi
+       done
+}
+
+upload_fw() {
+       local name="$1"
+       local file="$2"
+
+       echo 1 > "$DIR"/"$name"/loading
+       cat "$file" > "$DIR"/"$name"/data
+       echo 0 > "$DIR"/"$name"/loading
+}
+
+verify_fw() {
+       local name="$1"
+       local file="$2"
+
+       echo -n "$name" > "$DIR"/config_upload_name
+       if ! cmp "$file" "$DIR"/upload_read > /dev/null 2>&1; then
+               echo "$0: firmware compare for $name did not match" >&2
+               exit 1
+       fi
+
+       echo "$0: firmware upload for $name works" >&2
+       return 0
+}
+
+inject_error() {
+       local name="$1"
+       local status="$2"
+       local error="$3"
+
+       echo 1 > "$DIR"/"$name"/loading
+       echo -n "inject":"$status":"$error" > "$DIR"/"$name"/data
+       echo 0 > "$DIR"/"$name"/loading
+}
+
+await_status() {
+       local name="$1"
+       local expected="$2"
+       local status
+       local i
+
+       let i=0
+       while [ $i -lt 50 ]; do
+               status=$(cat "$DIR"/"$name"/status)
+               if [ "$status" = "$expected" ]; then
+                       return 0;
+               fi
+               sleep 1e-03
+               let i=$i+1
+       done
+
+       echo "$0: Invalid status: Expected $expected, Actual $status" >&2
+       return 1;
+}
+
+await_idle() {
+       local name="$1"
+
+       await_status "$name" "idle"
+       return $?
+}
+
+expect_error() {
+       local name="$1"
+       local expected="$2"
+       local error=$(cat "$DIR"/"$name"/error)
+
+       if [ "$error" != "$expected" ]; then
+               echo "Invalid error: Expected $expected, Actual $error" >&2
+               return 1
+       fi
+
+       return 0
+}
+
+random_firmware() {
+       local bs="$1"
+       local count="$2"
+       local file=$(mktemp -p /tmp uploadfwXXX.bin)
+
+       dd if=/dev/urandom of="$file" bs="$bs" count="$count" > /dev/null 2>&1
+       echo "$file"
+}
+
+test_upload_cancel() {
+       local name="$1"
+       local status
+
+       for status in $progress_states; do
+               inject_error $name $status $error_abort
+               if ! await_status $name $status; then
+                       exit 1
+               fi
+
+               echo 1 > "$DIR"/"$name"/cancel
+
+               if ! await_idle $name; then
+                       exit 1
+               fi
+
+               if ! expect_error $name "$status":"$error_abort"; then
+                       exit 1
+               fi
+       done
+
+       echo "$0: firmware upload cancellation works"
+       return 0
+}
+
+test_error_handling() {
+       local name=$1
+       local status
+       local error
+
+       for status in $progress_states; do
+               for error in $errors; do
+                       inject_error $name $status $error
+
+                       if ! await_idle $name; then
+                               exit 1
+                       fi
+
+                       if ! expect_error $name "$status":"$error"; then
+                               exit 1
+                       fi
+
+               done
+       done
+       echo "$0: firmware upload error handling works"
+}
+
+test_fw_too_big() {
+       local name=$1
+       local fw_too_big=`random_firmware 512 5`
+       local expected="preparing:invalid-file-size"
+
+       upload_fw $name $fw_too_big
+       rm -f $fw_too_big
+
+       if ! await_idle $name; then
+               exit 1
+       fi
+
+       if ! expect_error $name $expected; then
+               exit 1
+       fi
+
+       echo "$0: oversized firmware error handling works"
+}
+
+echo -n "$fwname1" > "$DIR"/upload_register
+echo -n "$fwname2" > "$DIR"/upload_register
+echo -n "$fwname3" > "$DIR"/upload_register
+
+test_upload_cancel $fwname1
+test_error_handling $fwname1
+test_fw_too_big $fwname1
+
+fw_file1=`random_firmware 512 4`
+fw_file2=`random_firmware 512 3`
+fw_file3=`random_firmware 512 2`
+
+upload_fw $fwname1 $fw_file1
+upload_fw $fwname2 $fw_file2
+upload_fw $fwname3 $fw_file3
+
+verify_fw ${fwname1} ${fw_file1}
+verify_fw ${fwname2} ${fw_file2}
+verify_fw ${fwname3} ${fw_file3}
+
+echo -n "$fwname1" > "$DIR"/upload_unregister
+echo -n "$fwname2" > "$DIR"/upload_unregister
+echo -n "$fwname3" > "$DIR"/upload_unregister
+
+exit 0
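
Condensed, the sysfs conversation this script drives per device looks roughly as follows; DIR is resolved by fw_lib.sh, and /sys/devices/virtual/misc/test_firmware is assumed here purely for illustration:

    DIR=/sys/devices/virtual/misc/test_firmware   # assumed test_firmware location
    echo -n fw1 > "$DIR"/upload_register          # create a firmware upload device
    echo 1 > "$DIR"/fw1/loading                   # open the transfer window
    cat image.bin > "$DIR"/fw1/data               # stream the payload
    echo 0 > "$DIR"/fw1/loading                   # start the (emulated) update
    cat "$DIR"/fw1/status "$DIR"/fw1/error        # poll progress, read back failures
    echo -n fw1 > "$DIR"/upload_unregister        # remove the device again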
index 312d237..be754f5 100644 (file)
@@ -25,6 +25,8 @@ if [ $L -ne 256 ]; then
   exit_fail
 fi
 
+cat kprobe_events >> $testlog
+
 echo 1 > events/kprobes/enable
 echo 0 > events/kprobes/enable
 echo > kprobe_events
index 46f39ee..5d52f64 100644 (file)
@@ -2,10 +2,14 @@ CONFIG_LKDTM=y
 CONFIG_DEBUG_LIST=y
 CONFIG_SLAB_FREELIST_HARDENED=y
 CONFIG_FORTIFY_SOURCE=y
+CONFIG_GCC_PLUGIN_STACKLEAK=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
+CONFIG_INIT_ON_FREE_DEFAULT_ON=y
 CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
 CONFIG_UBSAN=y
 CONFIG_UBSAN_BOUNDS=y
 CONFIG_UBSAN_TRAP=y
 CONFIG_STACKPROTECTOR_STRONG=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_SLUB_DEBUG_ON=y
index 243c781..65e53eb 100644 (file)
@@ -64,16 +64,17 @@ REFCOUNT_DEC_AND_TEST_SATURATED Saturation detected: still saturated
 REFCOUNT_SUB_AND_TEST_SATURATED Saturation detected: still saturated
 #REFCOUNT_TIMING timing only
 #ATOMIC_TIMING timing only
-USERCOPY_HEAP_SIZE_TO
-USERCOPY_HEAP_SIZE_FROM
-USERCOPY_HEAP_WHITELIST_TO
-USERCOPY_HEAP_WHITELIST_FROM
+USERCOPY_SLAB_SIZE_TO
+USERCOPY_SLAB_SIZE_FROM
+USERCOPY_SLAB_WHITELIST_TO
+USERCOPY_SLAB_WHITELIST_FROM
 USERCOPY_STACK_FRAME_TO
 USERCOPY_STACK_FRAME_FROM
 USERCOPY_STACK_BEYOND
 USERCOPY_KERNEL
 STACKLEAK_ERASING OK: the rest of the thread stack is properly erased
 CFI_FORWARD_PROTO
+CFI_BACKWARD call trace:|ok: control flow unchanged
 FORTIFIED_STRSCPY
 FORTIFIED_OBJECT
 FORTIFIED_SUBOBJECT
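
The second column added above is the dmesg pattern the LKDTM selftest greps for. Done by hand, the same check looks roughly like this, assuming debugfs is mounted at /sys/kernel/debug (the stacked runner automates the matching):

    echo CFI_BACKWARD > /sys/kernel/debug/provoke-crash/DIRECT
    dmesg | grep -E 'call trace:|ok: control flow unchanged'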
index f508657..86e621b 100755 (executable)
@@ -1,15 +1,14 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
-# This test is for the accept_unsolicited_na feature to
+# This test is for the accept_untracked_na feature to
 # enable RFC9131 behaviour. The following is the test-matrix.
 # drop   accept  fwding                   behaviour
 # ----   ------  ------  ----------------------------------------------
-#    1        X       X  Drop NA packet and don't pass up the stack
-#    0        0       X  Pass NA packet up the stack, don't update NC
-#    0        1       0  Pass NA packet up the stack, don't update NC
-#    0        1       1  Pass NA packet up the stack, and add a STALE
-#                           NC entry
+#    1        X       X  Don't update NC
+#    0        0       X  Don't update NC
+#    0        1       0  Don't update NC
+#    0        1       1  Add a STALE NC entry
 
 ret=0
 # Kselftest framework requirement - SKIP code is 4.
@@ -72,7 +71,7 @@ setup()
        set -e
 
        local drop_unsolicited_na=$1
-       local accept_unsolicited_na=$2
+       local accept_untracked_na=$2
        local forwarding=$3
 
        # Setup two namespaces and a veth tunnel across them.
@@ -93,7 +92,7 @@ setup()
        ${IP_ROUTER_EXEC} sysctl -qw \
                 ${ROUTER_CONF}.drop_unsolicited_na=${drop_unsolicited_na}
        ${IP_ROUTER_EXEC} sysctl -qw \
-                ${ROUTER_CONF}.accept_unsolicited_na=${accept_unsolicited_na}
+                ${ROUTER_CONF}.accept_untracked_na=${accept_untracked_na}
        ${IP_ROUTER_EXEC} sysctl -qw ${ROUTER_CONF}.disable_ipv6=0
        ${IP_ROUTER} addr add ${ROUTER_ADDR_WITH_MASK} dev ${ROUTER_INTF}
 
@@ -144,13 +143,13 @@ link_up() {
 
 verify_ndisc() {
        local drop_unsolicited_na=$1
-       local accept_unsolicited_na=$2
+       local accept_untracked_na=$2
        local forwarding=$3
 
        neigh_show_output=$(${IP_ROUTER} neigh show \
                 to ${HOST_ADDR} dev ${ROUTER_INTF} nud stale)
        if [ ${drop_unsolicited_na} -eq 0 ] && \
-                       [ ${accept_unsolicited_na} -eq 1 ] && \
+                       [ ${accept_untracked_na} -eq 1 ] && \
                        [ ${forwarding} -eq 1 ]; then
                # Neighbour entry expected to be present for 011 case
                [[ ${neigh_show_output} ]]
@@ -179,14 +178,14 @@ test_unsolicited_na_combination() {
        test_unsolicited_na_common $1 $2 $3
        test_msg=("test_unsolicited_na: "
                "drop_unsolicited_na=$1 "
-               "accept_unsolicited_na=$2 "
+               "accept_untracked_na=$2 "
                "forwarding=$3")
        log_test $? 0 "${test_msg[*]}"
        cleanup
 }
 
 test_unsolicited_na_combinations() {
-       # Args: drop_unsolicited_na accept_unsolicited_na forwarding
+       # Args: drop_unsolicited_na accept_untracked_na forwarding
 
        # Expect entry
        test_unsolicited_na_combination 0 1 1
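
Spelled out for the one row of the matrix that is expected to create a neighbour entry (drop=0, accept=1, forwarding=1), and assuming the router-side interface is plain eth0 rather than the veth device the script creates:

    sysctl -w net.ipv6.conf.eth0.drop_unsolicited_na=0
    sysctl -w net.ipv6.conf.eth0.accept_untracked_na=1
    sysctl -w net.ipv6.conf.eth0.forwarding=1
    # an untracked, unsolicited NA received on eth0 should now appear as a
    # STALE entry in `ip -6 neigh show dev eth0`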
index 7d15e10..edf1e6f 100644 (file)
@@ -389,6 +389,8 @@ int main(int argc, char **argv)
                error(1, errno, "ip link set mtu");
        if (system("ip addr add dev lo 172.17.0.1/24"))
                error(1, errno, "ip addr add");
+       if (system("sysctl -w net.ipv4.conf.lo.accept_local=1"))
+               error(1, errno, "sysctl lo.accept_local");
 
        run_test();
 
index 69c3ead..474bae8 100644 (file)
@@ -482,7 +482,7 @@ usage:
        }
        if (not)
                return 0;
-       if (testdevs && testdevs->next == 0 && !device)
+       if (testdevs && !testdevs->next && !device)
                device = testdevs->name;
        for (entry = testdevs; entry; entry = entry->next) {
                int     status;
index 8fcbc50..ce1b01d 100644 (file)
@@ -23,7 +23,7 @@
 
 struct kvm_vfio_group {
        struct list_head node;
-       struct vfio_group *vfio_group;
+       struct file *file;
 };
 
 struct kvm_vfio {
@@ -32,118 +32,61 @@ struct kvm_vfio {
        bool noncoherent;
 };
 
-static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
+static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm)
 {
-       struct vfio_group *vfio_group;
-       struct vfio_group *(*fn)(struct file *);
+       void (*fn)(struct file *file, struct kvm *kvm);
 
-       fn = symbol_get(vfio_group_get_external_user);
-       if (!fn)
-               return ERR_PTR(-EINVAL);
-
-       vfio_group = fn(filep);
-
-       symbol_put(vfio_group_get_external_user);
-
-       return vfio_group;
-}
-
-static bool kvm_vfio_external_group_match_file(struct vfio_group *group,
-                                              struct file *filep)
-{
-       bool ret, (*fn)(struct vfio_group *, struct file *);
-
-       fn = symbol_get(vfio_external_group_match_file);
-       if (!fn)
-               return false;
-
-       ret = fn(group, filep);
-
-       symbol_put(vfio_external_group_match_file);
-
-       return ret;
-}
-
-static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
-{
-       void (*fn)(struct vfio_group *);
-
-       fn = symbol_get(vfio_group_put_external_user);
-       if (!fn)
-               return;
-
-       fn(vfio_group);
-
-       symbol_put(vfio_group_put_external_user);
-}
-
-static void kvm_vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
-{
-       void (*fn)(struct vfio_group *, struct kvm *);
-
-       fn = symbol_get(vfio_group_set_kvm);
+       fn = symbol_get(vfio_file_set_kvm);
        if (!fn)
                return;
 
-       fn(group, kvm);
+       fn(file, kvm);
 
-       symbol_put(vfio_group_set_kvm);
+       symbol_put(vfio_file_set_kvm);
 }
 
-static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group)
+static bool kvm_vfio_file_enforced_coherent(struct file *file)
 {
-       long (*fn)(struct vfio_group *, unsigned long);
-       long ret;
+       bool (*fn)(struct file *file);
+       bool ret;
 
-       fn = symbol_get(vfio_external_check_extension);
+       fn = symbol_get(vfio_file_enforced_coherent);
        if (!fn)
                return false;
 
-       ret = fn(vfio_group, VFIO_DMA_CC_IOMMU);
+       ret = fn(file);
 
-       symbol_put(vfio_external_check_extension);
+       symbol_put(vfio_file_enforced_coherent);
 
-       return ret > 0;
+       return ret;
 }
 
-#ifdef CONFIG_SPAPR_TCE_IOMMU
-static int kvm_vfio_external_user_iommu_id(struct vfio_group *vfio_group)
+static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
 {
-       int (*fn)(struct vfio_group *);
-       int ret = -EINVAL;
+       struct iommu_group *(*fn)(struct file *file);
+       struct iommu_group *ret;
 
-       fn = symbol_get(vfio_external_user_iommu_id);
+       fn = symbol_get(vfio_file_iommu_group);
        if (!fn)
-               return ret;
+               return NULL;
 
-       ret = fn(vfio_group);
+       ret = fn(file);
 
-       symbol_put(vfio_external_user_iommu_id);
+       symbol_put(vfio_file_iommu_group);
 
        return ret;
 }
 
-static struct iommu_group *kvm_vfio_group_get_iommu_group(
-               struct vfio_group *group)
-{
-       int group_id = kvm_vfio_external_user_iommu_id(group);
-
-       if (group_id < 0)
-               return NULL;
-
-       return iommu_group_get_by_id(group_id);
-}
-
+#ifdef CONFIG_SPAPR_TCE_IOMMU
 static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
-               struct vfio_group *vfio_group)
+                                            struct kvm_vfio_group *kvg)
 {
-       struct iommu_group *grp = kvm_vfio_group_get_iommu_group(vfio_group);
+       struct iommu_group *grp = kvm_vfio_file_iommu_group(kvg->file);
 
        if (WARN_ON_ONCE(!grp))
                return;
 
        kvm_spapr_tce_release_iommu_group(kvm, grp);
-       iommu_group_put(grp);
 }
 #endif
 
@@ -163,7 +106,7 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
        mutex_lock(&kv->lock);
 
        list_for_each_entry(kvg, &kv->group_list, node) {
-               if (!kvm_vfio_group_is_coherent(kvg->vfio_group)) {
+               if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
                        noncoherent = true;
                        break;
                }
@@ -181,149 +124,162 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
        mutex_unlock(&kv->lock);
 }
 
-static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
+static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
 {
        struct kvm_vfio *kv = dev->private;
-       struct vfio_group *vfio_group;
        struct kvm_vfio_group *kvg;
-       int32_t __user *argp = (int32_t __user *)(unsigned long)arg;
-       struct fd f;
-       int32_t fd;
+       struct file *filp;
        int ret;
 
-       switch (attr) {
-       case KVM_DEV_VFIO_GROUP_ADD:
-               if (get_user(fd, argp))
-                       return -EFAULT;
-
-               f = fdget(fd);
-               if (!f.file)
-                       return -EBADF;
-
-               vfio_group = kvm_vfio_group_get_external_user(f.file);
-               fdput(f);
+       filp = fget(fd);
+       if (!filp)
+               return -EBADF;
 
-               if (IS_ERR(vfio_group))
-                       return PTR_ERR(vfio_group);
-
-               mutex_lock(&kv->lock);
+       /* Ensure the FD is a vfio group FD.*/
+       if (!kvm_vfio_file_iommu_group(filp)) {
+               ret = -EINVAL;
+               goto err_fput;
+       }
 
-               list_for_each_entry(kvg, &kv->group_list, node) {
-                       if (kvg->vfio_group == vfio_group) {
-                               mutex_unlock(&kv->lock);
-                               kvm_vfio_group_put_external_user(vfio_group);
-                               return -EEXIST;
-                       }
-               }
+       mutex_lock(&kv->lock);
 
-               kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
-               if (!kvg) {
-                       mutex_unlock(&kv->lock);
-                       kvm_vfio_group_put_external_user(vfio_group);
-                       return -ENOMEM;
+       list_for_each_entry(kvg, &kv->group_list, node) {
+               if (kvg->file == filp) {
+                       ret = -EEXIST;
+                       goto err_unlock;
                }
+       }
 
-               list_add_tail(&kvg->node, &kv->group_list);
-               kvg->vfio_group = vfio_group;
+       kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
+       if (!kvg) {
+               ret = -ENOMEM;
+               goto err_unlock;
+       }
 
-               kvm_arch_start_assignment(dev->kvm);
+       kvg->file = filp;
+       list_add_tail(&kvg->node, &kv->group_list);
 
-               mutex_unlock(&kv->lock);
+       kvm_arch_start_assignment(dev->kvm);
 
-               kvm_vfio_group_set_kvm(vfio_group, dev->kvm);
+       mutex_unlock(&kv->lock);
 
-               kvm_vfio_update_coherency(dev);
+       kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
+       kvm_vfio_update_coherency(dev);
 
-               return 0;
+       return 0;
+err_unlock:
+       mutex_unlock(&kv->lock);
+err_fput:
+       fput(filp);
+       return ret;
+}
 
-       case KVM_DEV_VFIO_GROUP_DEL:
-               if (get_user(fd, argp))
-                       return -EFAULT;
+static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
+{
+       struct kvm_vfio *kv = dev->private;
+       struct kvm_vfio_group *kvg;
+       struct fd f;
+       int ret;
 
-               f = fdget(fd);
-               if (!f.file)
-                       return -EBADF;
+       f = fdget(fd);
+       if (!f.file)
+               return -EBADF;
 
-               ret = -ENOENT;
+       ret = -ENOENT;
 
-               mutex_lock(&kv->lock);
+       mutex_lock(&kv->lock);
 
-               list_for_each_entry(kvg, &kv->group_list, node) {
-                       if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
-                                                               f.file))
-                               continue;
+       list_for_each_entry(kvg, &kv->group_list, node) {
+               if (kvg->file != f.file)
+                       continue;
 
-                       list_del(&kvg->node);
-                       kvm_arch_end_assignment(dev->kvm);
+               list_del(&kvg->node);
+               kvm_arch_end_assignment(dev->kvm);
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-                       kvm_spapr_tce_release_vfio_group(dev->kvm,
-                                                        kvg->vfio_group);
+               kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
 #endif
-                       kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
-                       kvm_vfio_group_put_external_user(kvg->vfio_group);
-                       kfree(kvg);
-                       ret = 0;
-                       break;
-               }
+               kvm_vfio_file_set_kvm(kvg->file, NULL);
+               fput(kvg->file);
+               kfree(kvg);
+               ret = 0;
+               break;
+       }
 
-               mutex_unlock(&kv->lock);
+       mutex_unlock(&kv->lock);
 
-               fdput(f);
+       fdput(f);
 
-               kvm_vfio_update_coherency(dev);
+       kvm_vfio_update_coherency(dev);
 
-               return ret;
+       return ret;
+}
 
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-       case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: {
-               struct kvm_vfio_spapr_tce param;
-               struct kvm_vfio *kv = dev->private;
-               struct vfio_group *vfio_group;
-               struct kvm_vfio_group *kvg;
-               struct fd f;
-               struct iommu_group *grp;
+static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
+                                       void __user *arg)
+{
+       struct kvm_vfio_spapr_tce param;
+       struct kvm_vfio *kv = dev->private;
+       struct kvm_vfio_group *kvg;
+       struct fd f;
+       int ret;
 
-               if (copy_from_user(&param, (void __user *)arg,
-                               sizeof(struct kvm_vfio_spapr_tce)))
-                       return -EFAULT;
+       if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce)))
+               return -EFAULT;
 
-               f = fdget(param.groupfd);
-               if (!f.file)
-                       return -EBADF;
+       f = fdget(param.groupfd);
+       if (!f.file)
+               return -EBADF;
 
-               vfio_group = kvm_vfio_group_get_external_user(f.file);
-               fdput(f);
+       ret = -ENOENT;
 
-               if (IS_ERR(vfio_group))
-                       return PTR_ERR(vfio_group);
+       mutex_lock(&kv->lock);
 
-               grp = kvm_vfio_group_get_iommu_group(vfio_group);
+       list_for_each_entry(kvg, &kv->group_list, node) {
+               struct iommu_group *grp;
+
+               if (kvg->file != f.file)
+                       continue;
+
+               grp = kvm_vfio_file_iommu_group(kvg->file);
                if (WARN_ON_ONCE(!grp)) {
-                       kvm_vfio_group_put_external_user(vfio_group);
-                       return -EIO;
+                       ret = -EIO;
+                       goto err_fdput;
                }
 
-               ret = -ENOENT;
-
-               mutex_lock(&kv->lock);
+               ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
+                                                      grp);
+               break;
+       }
 
-               list_for_each_entry(kvg, &kv->group_list, node) {
-                       if (kvg->vfio_group != vfio_group)
-                               continue;
+err_fdput:
+       mutex_unlock(&kv->lock);
+       fdput(f);
+       return ret;
+}
+#endif
 
-                       ret = kvm_spapr_tce_attach_iommu_group(dev->kvm,
-                                       param.tablefd, grp);
-                       break;
-               }
+static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
+                             void __user *arg)
+{
+       int32_t __user *argp = arg;
+       int32_t fd;
 
-               mutex_unlock(&kv->lock);
+       switch (attr) {
+       case KVM_DEV_VFIO_GROUP_ADD:
+               if (get_user(fd, argp))
+                       return -EFAULT;
+               return kvm_vfio_group_add(dev, fd);
 
-               iommu_group_put(grp);
-               kvm_vfio_group_put_external_user(vfio_group);
+       case KVM_DEV_VFIO_GROUP_DEL:
+               if (get_user(fd, argp))
+                       return -EFAULT;
+               return kvm_vfio_group_del(dev, fd);
 
-               return ret;
-       }
-#endif /* CONFIG_SPAPR_TCE_IOMMU */
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+       case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
+               return kvm_vfio_group_set_spapr_tce(dev, arg);
+#endif
        }
 
        return -ENXIO;
@@ -334,7 +290,8 @@ static int kvm_vfio_set_attr(struct kvm_device *dev,
 {
        switch (attr->group) {
        case KVM_DEV_VFIO_GROUP:
-               return kvm_vfio_set_group(dev, attr->attr, attr->addr);
+               return kvm_vfio_set_group(dev, attr->attr,
+                                         u64_to_user_ptr(attr->addr));
        }
 
        return -ENXIO;
@@ -367,10 +324,10 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
 
        list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-               kvm_spapr_tce_release_vfio_group(dev->kvm, kvg->vfio_group);
+               kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
 #endif
-               kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
-               kvm_vfio_group_put_external_user(kvg->vfio_group);
+               kvm_vfio_file_set_kvm(kvg->file, NULL);
+               fput(kvg->file);
                list_del(&kvg->node);
                kfree(kvg);
                kvm_arch_end_assignment(dev->kvm);